hexsha (string, length 40) | size (int64, 4-1.05M) | content (string, length 4-1.05M) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 1-1k) | alphanum_fraction (float64, 0.25-1)
---|---|---|---|---|---|
183caf396bc1256c82cabbb0be19086053534d64 | 5,217 | use crate::formula::*;
mod proposition;
mod test;
pub trait PropLogic<T> {
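/// Evaluates the formula under the valuation `v`, which assigns a boolean to each atom.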
fn eval(&self, v: &mut dyn FnMut(&T) -> bool) -> bool;
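/// Recursively simplifies the formula, removing `True`/`False` constants and double negations.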
fn psimplify(&self) -> Formula<T>;
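/// Performs a single simplification step on the outermost connective only.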
fn psimplify1(&self) -> Formula<T>;
fn nnf1(&self) -> Formula<T>;
fn nnf(&self) -> Formula<T>;
}
impl<T> PropLogic<T> for Formula<T> {
/// eval for propositions, from 2.2
fn eval(&self, v: &mut dyn FnMut(&T) -> bool) -> bool {
match *self.kind {
FormulaKind::False => false,
FormulaKind::True => true,
FormulaKind::Atom(ref a) => v(a),
FormulaKind::Not(ref a) => !a.eval(v),
FormulaKind::Or(ref a, ref b) => a.eval(v) || b.eval(v),
FormulaKind::And(ref a, ref b) => a.eval(v) && b.eval(v),
FormulaKind::Implies(ref a, ref b) => !a.eval(v) || b.eval(v),
FormulaKind::Iff(ref a, ref b) => a.eval(v) == b.eval(v),
FormulaKind::ForAll(..) | FormulaKind::Exists(..) => {
panic!("forall/exists not present in prop logic")
}
}
}
fn psimplify(&self) -> Formula<T> {
match *self.kind {
FormulaKind::Not(ref f) => f.psimplify().not().psimplify1(),
FormulaKind::And(ref p, ref q) => {
formula!(and {p.psimplify()} {q.psimplify()}).psimplify1()
}
FormulaKind::Or(ref p, ref q) => {
formula!(or {p.psimplify()} {q.psimplify()}).psimplify1()
}
FormulaKind::Implies(ref p, ref q) => {
formula!(implies {p.psimplify()} {q.psimplify()}).psimplify1()
}
FormulaKind::Iff(ref p, ref q) => {
formula!(iff {p.psimplify()} {q.psimplify()}).psimplify1()
}
_ => self.clone(),
}
}
fn psimplify1(&self) -> Formula<T> {
match *self.kind {
FormulaKind::False => self.clone(),
FormulaKind::True => self.clone(),
FormulaKind::Atom(_) => self.clone(),
FormulaKind::Not(ref f) => match *f.kind {
FormulaKind::False => formula!(true),
FormulaKind::True => formula!(false),
FormulaKind::Not(ref g) => g.clone(),
_ => self.clone(),
},
FormulaKind::And(ref f, ref g) => {
if f.is_false() || g.is_false() {
formula!(false)
} else if f.is_true() {
g.clone()
} else if g.is_true() {
f.clone()
} else {
self.clone()
}
}
FormulaKind::Or(ref f, ref g) => {
if f.is_true() || g.is_true() {
formula!(true)
} else if f.is_false() {
g.clone()
} else if g.is_false() {
f.clone()
} else {
self.clone()
}
}
FormulaKind::Implies(ref f, ref g) => {
if f.is_false() || g.is_true() {
formula!(true)
} else if f.is_true() {
g.clone()
} else if g.is_false() {
f.not()
} else {
self.clone()
}
}
FormulaKind::Iff(ref f, ref g) => {
if f.is_true() {
g.clone()
} else if g.is_true() {
f.clone()
} else if f.is_false() {
g.not()
} else if g.is_false() {
f.not()
} else {
self.clone()
}
}
FormulaKind::ForAll(..) | FormulaKind::Exists(..) => {
panic!("forall/exists not present in prop logic")
}
}
}
fn nnf1(&self) -> Formula<T> {
match *self.kind {
FormulaKind::And(ref f, ref g) => formula!(and {f.nnf1()} {g.nnf1()}),
FormulaKind::Or(ref f, ref g) => formula!(or {f.nnf1()} {g.nnf1()}),
FormulaKind::Implies(ref f, ref g) => formula!(or {f.not().nnf1()} {g.nnf1()}),
FormulaKind::Iff(ref f, ref g) => {
formula!(or (and {f.nnf1()} {g.nnf1()}) (and {f.not().nnf1()} {g.not().nnf1()}))
}
FormulaKind::Not(ref f) => match *f.kind {
FormulaKind::Not(ref g) => g.nnf1(),
FormulaKind::And(ref p, ref q) => formula!(or {p.not().nnf1()} {q.not().nnf1()}),
FormulaKind::Or(ref p, ref q) => formula!(and {p.not().nnf1()} {q.not().nnf1()}),
FormulaKind::Implies(ref p, ref q) => formula!(and {p.nnf1()} {q.not().nnf1()}),
FormulaKind::Iff(ref p, ref q) => formula!(or
(and {p.nnf1()} {q.not().nnf1()})
(and {p.not().nnf1()} {q.nnf1()})),
_ => self.clone(),
},
_ => self.clone(),
}
}
fn nnf(&self) -> Formula<T> {
self.psimplify().nnf1()
}
}
| 35.25 | 97 | 0.414414 |
72af7c045d3f6019628c221fa8b49de568f46fdf | 6,707 | /*
* Copyright 2019 The Starlark in Rust Authors.
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Utilities to implement `Display` (or `repr`) for Starlark values.
use std::fmt::{self, Display, Write};
const INDENT: &str = " ";
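/// Writes `v` to `f`, indenting nested lines by `indent` when the alternate (`{:#}`) flag is set.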
fn subwriter<T: Display>(indent: &'static str, f: &mut fmt::Formatter, v: &T) -> fmt::Result {
if f.alternate() {
write!(indenter::indented(f).with_str(indent), "{:#}", v)
} else {
Display::fmt(v, f)
}
}
/// Helper for display implementation of starlark container-y types.
///
/// For "normal" display, produces output like (with `prefix="prefix[", suffix="]"`):
///
/// `prefix[]`
/// `prefix[1]`
/// `prefix[1, 2]`
///
/// For "alternate" display, produces output like:
///
/// `prefix[]`
/// `prefix[ 1 ]`
/// ```ignore
/// prefix[
/// 1,
/// 2
/// ]
/// ```
///
/// This doesn't propagate the flags on the Formatter other than alternate.
// TODO(cjhopman): Starlark values don't really do anything with the rest of the flags so
// propagating them hasn't been necessary, but it would be easy enough to implement if we wanted to.
pub fn display_container<T: Display, Iter: ExactSizeIterator<Item = T>>(
f: &mut fmt::Formatter,
prefix: &str,
suffix: &str,
items: Iter,
) -> fmt::Result {
let (separator, outer, indent) = match (f.alternate(), items.len()) {
// We want to be formatted as `{prefix}item1, item2{suffix}`, like `[1, 2]` for lists.
(false, _) => (", ", "", ""),
// We want to be formatted as `{prefix}{suffix}`, like `[]` for lists.
(true, 0) => ("", "", ""),
// We want to be formatted as `{prefix} item {suffix}`, like `[ item ]` for lists
(true, 1) => ("", " ", ""),
// We want to be formatted as `{prefix}\n item1,\n item2\n{suffix}`, for lists like:
// ```
// [
// item1,
// item2
// ]
// ```
_ => (",\n", "\n", INDENT),
};
f.write_str(prefix)?;
f.write_str(outer)?;
for (i, value) in items.into_iter().enumerate() {
if i != 0 {
f.write_str(separator)?;
}
subwriter(indent, f, &value)?;
}
f.write_str(outer)?;
f.write_str(suffix)
}
/// Helper for display implementation of starlark keyed container-y types (like dict, struct).
///
/// Similar to [display_container] where each key-value pair is printed as `<key><separator><value>`
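///
/// For example, in normal display mode a dict-like value renders as `prefix[1: "a", 2: "b"]`.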
pub fn display_keyed_container<K: Display, V: Display, Iter: ExactSizeIterator<Item = (K, V)>>(
f: &mut fmt::Formatter,
prefix: &str,
suffix: &str,
separator: &str,
items: Iter,
) -> fmt::Result {
struct Wrapper<'a, K: Display, V: Display>(K, V, &'a str);
impl<'a, K: Display, V: Display> fmt::Display for Wrapper<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0, f)?;
f.write_str(self.2)?;
fmt::Display::fmt(&self.1, f)
}
}
display_container(
f,
prefix,
suffix,
items.map(|(k, v)| Wrapper(k, v, separator)),
)
}
#[cfg(test)]
mod tests {
use std::fmt;
use indexmap::{indexmap, IndexMap};
use indoc::indoc;
use super::*;
#[test]
fn test_container() {
struct Wrapped(Vec<u32>);
impl fmt::Display for Wrapped {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
display_container(f, "prefix[", "]", self.0.iter())
}
}
assert_eq!("prefix[]", format!("{:}", Wrapped(vec![])));
assert_eq!("prefix[1]", format!("{:}", Wrapped(vec![1])));
assert_eq!("prefix[1, 2]", format!("{:}", Wrapped(vec![1, 2])));
assert_eq!("prefix[1, 2, 3]", format!("{:}", Wrapped(vec![1, 2, 3])));
assert_eq!("prefix[]", format!("{:#}", Wrapped(vec![])));
assert_eq!("prefix[ 1 ]", format!("{:#}", Wrapped(vec![1])));
assert_eq!(
indoc!(
"prefix[
1,
2
]"
),
format!("{:#}", Wrapped(vec![1, 2])),
);
assert_eq!(
indoc!(
"prefix[
1,
2,
3
]"
),
format!("{:#}", Wrapped(vec![1, 2, 3])),
);
}
#[test]
fn test_keyed_container() {
struct Wrapped(IndexMap<u32, &'static str>);
impl fmt::Display for Wrapped {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
display_keyed_container(
f,
"prefix[",
"]",
": ",
// just wrap with `"` to make it clearer in the output
self.0.iter().map(|(k, v)| (k, format!("\"{}\"", v))),
)
}
}
assert_eq!("prefix[]", format!("{:}", Wrapped(indexmap! {})));
assert_eq!(
"prefix[1: \"1\"]",
format!("{:}", Wrapped(indexmap! {1 => "1"}))
);
assert_eq!(
"prefix[1: \"1\", 2: \"2\"]",
format!("{:}", Wrapped(indexmap! {1 => "1", 2 => "2"})),
);
assert_eq!(
"prefix[1: \"1\", 2: \"2\", 3: \"3\"]",
format!("{:}", Wrapped(indexmap! {1 => "1", 2 => "2", 3 => "3"})),
);
assert_eq!("prefix[]", format!("{:#}", Wrapped(indexmap! {})));
assert_eq!(
"prefix[ 1: \"1\" ]",
format!("{:#}", Wrapped(indexmap! {1 => "1"}))
);
assert_eq!(
indoc!(
"prefix[
1: \"1\",
2: \"2\"
]"
),
format!("{:#}", Wrapped(indexmap! {1 => "1", 2 => "2"})),
);
assert_eq!(
indoc!(
"prefix[
1: \"1\",
2: \"2\",
3: \"3\"
]"
),
format!("{:#}", Wrapped(indexmap! {1 => "1", 2 => "2", 3 => "3"})),
);
}
}
| 31.050926 | 100 | 0.476964 |
fc00ca3a76942e88b4a02fcd7d635117a9a88ab3 | 4,380 | extern crate mazth;
///md5mesh file format
pub mod mesh {
use super::mazth::quat::Quat;
#[derive(Debug, Clone)]
pub struct Md5Mesh {
pub _shader: String,
pub _numverts: u64,
pub _numtris: u64,
pub _numweights: u64,
pub _verts: Vec< Md5Vert >,
pub _tris: Vec< Md5Tri >,
pub _weights: Vec< Md5Weight >,
}
#[derive(Debug, Clone)]
pub struct Md5Joint {
pub _name: String,
pub _parent_index: i64,
pub _pos: [f32;3],
pub _orient: [f32;3],
pub _rot: Quat<f32>,
}
#[derive(Debug, Clone)]
pub struct Md5Vert {
pub _index: u64,
pub _tex_coords: [f32;2],
pub _weight_start: u64,
pub _weight_count: u64,
pub _normal: [f32;3],
pub _pos: [f32;3],
}
#[derive(Debug)]
#[derive(Copy)]
#[derive(Clone)]
pub struct Md5Tri {
pub _index: u64,
pub _vert_indices: [u64;3],
}
#[derive(Debug, Clone)]
pub struct Md5Weight {
pub _index: u64,
pub _joint_index: u64,
pub _weight_bias: f32,
pub _pos: [f32;3],
}
#[derive(Debug, Clone)]
pub struct Md5MeshRoot {
pub _md5ver: u64,
pub _cmdline: String,
pub _numjoints: u64,
pub _nummeshes: u64,
pub _joints: Vec< Md5Joint >,
pub _meshes: Vec< Md5Mesh >,
}
impl Md5MeshRoot {
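/// Creates an empty `Md5MeshRoot` with zeroed counts and no joints or meshes.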
pub fn init() -> Md5MeshRoot {
Md5MeshRoot {
_md5ver: 0u64,
_cmdline: String::from(""),
_numjoints: 0u64,
_nummeshes: 0u64,
_joints: vec![],
_meshes: vec![],
}
}
}
}
///md5anim file format
pub mod anim {
#[derive(Debug)]
pub struct JointHierarchy {
pub _name: String,
pub _parent: i64,
pub _flags: u64,
pub _start_index: u64,
}
#[derive(Debug)]
pub struct Bound {
pub _min: [f32;3],
pub _max: [f32;3],
}
#[derive(Debug)]
pub struct FrameJoint {
pub _index: u64,
pub _pos: [f32;3],
pub _orient: [f32;3],
}
#[derive(Debug, Default)]
pub struct Frame {
pub _index: u64,
pub _data: Vec< f32 >,
}
#[derive(Debug)]
pub struct Md5AnimRoot {
pub _md5ver: u64,
pub _cmdline: String,
pub _numframes: u64,
pub _numjoints: u64,
pub _framerate: u64,
pub _num_animated_components: u64,
pub _hierarchy: Vec< JointHierarchy >,
pub _bounds: Vec< Bound >,
pub _baseframe: Vec< FrameJoint >,
pub _frames: Vec< Frame >,
}
impl Md5AnimRoot {
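/// Creates an empty `Md5AnimRoot` with zeroed counts and no hierarchy, bounds, or frames.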
pub fn init() -> Md5AnimRoot {
Md5AnimRoot {
_md5ver: 0u64,
_cmdline: String::from(""),
_numframes: 0u64,
_numjoints: 0u64,
_framerate: 0u64,
_num_animated_components: 0u64,
_hierarchy: vec![],
_bounds: vec![],
_baseframe: vec![],
_frames: vec![],
}
}
}
}
///md5rig file format
pub mod rig {
use super::mazth::quat::Quat;
#[derive(Debug, Clone)]
pub struct RigJoint {
pub _name: String,
pub _parent: i64,
pub _pos: [f32;3],
pub _orient: Quat<f32>,
}
#[derive(Debug, Clone)]
pub struct PoseJoints {
pub _joints: Vec< RigJoint >,
// pub _bbox_lower: [f32;3], //todo
// pub _bbox_upper: [f32;3],
}
#[derive(Debug, Clone)]
pub struct PoseCollection {
pub _frames: Vec< PoseJoints >,
pub _framerate: u64,
}
}
pub mod compute {
///md5compute format
#[derive(Debug, Clone)]
pub struct VertCompute {
pub _pos: [f32;3],
pub _normal: [f32;3],
pub _tc: [f32;2],
}
#[derive(Debug, Clone)]
pub struct MeshCompute {
pub _verts: Vec< VertCompute >,
}
#[derive(Debug, Clone)]
pub struct ComputeCollection {
// pub _meshcomputes: Vec< MeshCompute >, //use batch instead
pub _bbox_lower: [f32;3],
pub _bbox_upper: [f32;3],
pub _batch_vert: Vec< f32 >,
pub _batch_normal: Vec< f32 >,
pub _batch_tc: Vec< f32 >,
}
}
| 22.8125 | 69 | 0.5121 |
9c996a93b9532ff400dc3b3a56a5b11f2a116f78 | 325 | struct X {
x: String,
}
impl Drop for X {
fn drop(&mut self) {
println!("value: {}", self.x);
}
}
fn main() {
let x = X { x: "hello".to_string() };
match x {
//~^ ERROR cannot move out of type `X`, which implements the `Drop` trait
X { x: y } => println!("contents: {}", y)
}
}
| 17.105263 | 77 | 0.489231 |
bbdbdd54f91f68d6ab6d90672aa5976cf4182ebf | 650 | // xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use foo::zed;
use bar::baz;
mod foo {
pub mod zed {
pub fn baz() { debug!("baz"); }
}
}
mod bar {
pub use foo::zed::baz;
}
pub fn main() { baz(); }
| 27.083333 | 68 | 0.675385 |
4a1a9d20f72884d136666cab8113a6cec66e511d | 1,248 | #![no_std]
#![warn(unused_imports)]
#[macro_use]
mod vga;
mod fs;
mod memory;
const ELF_IDENT: usize = 16;
#[repr(C, packed)]
struct ELFHeader {
e_ident: [u8; ELF_IDENT],
e_type: u16,
e_machine: u16,
e_version: u32,
e_entry: u32,
e_phoff: u32,
e_shoff: u32,
e_flags: u32,
e_ehsize: u16,
e_phentsize: u16,
e_phnum: u16,
e_shentsize: u16,
e_shnum: u16,
e_shstrndx: u16,
}
struct ProgramHeader {
p_type: u32,
p_flags64: u32,
p_vaddr: u32,
p_paddr: u32,
p_filesz: u32,
p_memsz: u32,
p_flags32: u32,
p_align: u32,
}
struct SectionHeader {
sh_name: u32,
sh_type: u32,
sh_flags: u32,
sh_addr: u32,
sh_offset: u32,
sh_size: u32,
sh_link: u32,
sh_info: u32,
sh_addralign: u32,
sh_entsize: u32,
}
unsafe fn jmp(addr: u32) {
asm!("jmp *($0)"
:
: "r" (addr));
}
// Executes a file starting at `addr`
pub unsafe fn exec(addr: usize) {
// View the first 100 bytes at `addr` as a byte slice (sketch; the original
// used an old `transmute(Slice {..})` idiom).
let bytes: &[u8] = core::slice::from_raw_parts(addr as *const u8, 100);
// `elf::read_header` is assumed to be provided by an `elf` module elsewhere
// in this crate; it is not defined in this file.
let header = elf::read_header(bytes);
// Bytes 1..4 of e_ident must spell "ELF".
assert!(header.e_ident[1..4] == *b"ELF");
// Read the program header and load the program into memory at
// the right address
jmp(header.e_entry);
}
| 18.086957 | 72 | 0.600962 |
dd91a483bbe551f2961c458c321ef3ad333ea66b | 2,708 | use super::{LspError, LspErrorResponse, LspNotification, LspRequest, LspResponse};
use serde_json::Value;
use std::io::{self, Write as _};
pub(super) struct LspSender<W: io::Write> {
out: io::BufWriter<W>,
}
impl<W: io::Write> LspSender<W> {
pub(crate) fn new(out: W) -> LspSender<W> {
LspSender {
out: io::BufWriter::new(out),
}
}
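/// Frames `content` with the LSP base-protocol `Content-Length` header, writes it out, and flushes the writer.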
fn do_send(&mut self, content: &[u8]) {
let content_length = content.len();
let content = String::from_utf8_lossy(content);
write!(
self.out,
"Content-Length: {}\r\n\r\n{}",
content_length, content
)
.unwrap();
self.out.flush().unwrap();
trace!(
"lsp-sender/send Content-Length: {}\r\n\r\n{}",
content_length,
if content_length < 0x1000 {
&content
} else {
"TOO_LONG"
}
);
}
pub(crate) fn send_request<P: serde::Serialize>(&mut self, id: i64, method: &str, params: P) {
let mut buf = Vec::new();
serde_json::to_writer(
&mut buf,
&LspRequest::<P> {
jsonrpc: "2.0".to_string(),
id,
method: method.to_string(),
params,
},
)
.unwrap();
self.do_send(&buf);
}
pub(crate) fn send_notification<P: serde::Serialize>(&mut self, method: &str, params: P) {
let mut buf = Vec::new();
serde_json::to_writer(
&mut buf,
&LspNotification::<P> {
jsonrpc: "2.0".to_string(),
method: method.to_string(),
params,
},
)
.unwrap();
self.do_send(&buf);
}
pub(crate) fn send_response<R: serde::Serialize>(&mut self, id: i64, result: R) {
let mut buf = Vec::new();
serde_json::to_writer(
&mut buf,
&LspResponse::<R> {
jsonrpc: "2.0".to_string(),
id,
result,
},
)
.unwrap();
self.do_send(&buf);
}
pub(crate) fn send_error_code(&mut self, id: Option<Value>, code: i64, msg: &str) {
let mut buf = Vec::new();
serde_json::to_writer(
&mut buf,
&LspErrorResponse {
jsonrpc: "2.0".to_string(),
id: id.unwrap_or(Value::Null),
error: LspError {
code,
msg: msg.to_string(),
// data: Value::Null,
},
},
)
.unwrap();
self.do_send(&buf);
}
}
| 25.790476 | 98 | 0.453102 |
720251e1dc33aa6eab9e97104a56c449fb214491 | 317 | // Align to https://github.com/nodejs/node/blob/master/lib/path.js
/// `nodejs_path::posix` provides access to POSIX specific implementations of the path methods.
pub mod posix;
/// `nodejs_path::win32` provides access to Windows-specific implementations of the path methods.
pub mod win32;
pub(crate) mod shared;
| 35.222222 | 98 | 0.766562 |
f55f47bbc59dec9b9c253aae9ea0881f44f8eafb | 662 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
let args = vec!("foobie", "asdf::asdf");
let arr: Vec<&str> = args[1].as_slice().split_str("::").collect();
assert_eq!(arr[0], "asdf");
assert_eq!(arr[0], "asdf");
}
| 36.777778 | 70 | 0.68429 |
48bfef47cd7a4aa74fc58bf5caa85c2e6947a0fa | 628 | #GLOBAL
#END
#VS
in vec3 Position0;
in vec3 Normal0;
out vec4 Position_VSPS;
out vec3 Normal_VSPS;
uniform mat4 Camera;
uniform mat4 Transform;
void main()
{
vec4 loc = ( vec4(Position0, 1) * Transform);
gl_Position = Position_VSPS = (loc * Camera);
Normal_VSPS = ( vec4(Normal0, 0) * Transform).xyz;
}
#END
#PS
in vec4 Position_VSPS;
in vec3 Normal_VSPS;
out vec4 glFragColorOut[1];
uniform vec3 LightDirection;
uniform vec4 LightColor;
uniform vec4 Diffuse;
void main()
{
vec3 normal = normalize(Normal_VSPS);
float light = dot(-LightDirection, normal);
glFragColorOut[0] = Diffuse * LightColor * light;
}
#END
| 15.7 | 51 | 0.729299 |
2661d811c43cb4cda9abc4bae6b4c11aecb3cae1 | 32,141 | // Copyright (c) 2016-2019, The Tor Project, Inc. */
// See LICENSE for licensing information */
use std::collections::hash_map;
use std::collections::HashMap;
use std::ffi::CStr;
use std::fmt;
use std::str;
use std::str::FromStr;
use std::string::String;
use external::c_tor_version_as_new_as;
use errors::ProtoverError;
use protoset::ProtoSet;
use protoset::Version;
/// The first version of Tor that included "proto" entries in its descriptors.
/// Authorities should use this to decide whether to guess proto lines.
///
/// C_RUST_COUPLED:
/// protover.h `FIRST_TOR_VERSION_TO_ADVERTISE_PROTOCOLS`
const FIRST_TOR_VERSION_TO_ADVERTISE_PROTOCOLS: &'static str = "0.2.9.3-alpha";
/// The maximum number of subprotocol version numbers we will attempt to expand
/// before concluding that someone is trying to DoS us
///
/// C_RUST_COUPLED: protover.c `MAX_PROTOCOLS_TO_EXPAND`
const MAX_PROTOCOLS_TO_EXPAND: usize = (1 << 16);
/// The maximum size an `UnknownProtocol`'s name may be.
pub(crate) const MAX_PROTOCOL_NAME_LENGTH: usize = 100;
/// Known subprotocols in Tor. Indicates which subprotocol a relay supports.
///
/// C_RUST_COUPLED: protover.h `protocol_type_t`
#[derive(Clone, Hash, Eq, PartialEq, Debug)]
pub enum Protocol {
Cons,
Desc,
DirCache,
HSDir,
HSIntro,
HSRend,
Link,
LinkAuth,
Microdesc,
Relay,
}
impl fmt::Display for Protocol {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
/// Translates a string representation of a protocol into a Proto type.
/// Error if the string is an unrecognized protocol name.
///
/// C_RUST_COUPLED: protover.c `PROTOCOL_NAMES`
impl FromStr for Protocol {
type Err = ProtoverError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"Cons" => Ok(Protocol::Cons),
"Desc" => Ok(Protocol::Desc),
"DirCache" => Ok(Protocol::DirCache),
"HSDir" => Ok(Protocol::HSDir),
"HSIntro" => Ok(Protocol::HSIntro),
"HSRend" => Ok(Protocol::HSRend),
"Link" => Ok(Protocol::Link),
"LinkAuth" => Ok(Protocol::LinkAuth),
"Microdesc" => Ok(Protocol::Microdesc),
"Relay" => Ok(Protocol::Relay),
_ => Err(ProtoverError::UnknownProtocol),
}
}
}
/// A protocol string which is not one of the `Protocols` we currently know
/// about.
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct UnknownProtocol(String);
impl fmt::Display for UnknownProtocol {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
fn is_valid_proto(s: &str) -> bool {
s.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
}
impl FromStr for UnknownProtocol {
type Err = ProtoverError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if !is_valid_proto(s) {
Err(ProtoverError::InvalidProtocol)
} else if s.len() <= MAX_PROTOCOL_NAME_LENGTH {
Ok(UnknownProtocol(s.to_string()))
} else {
Err(ProtoverError::ExceedsNameLimit)
}
}
}
impl UnknownProtocol {
/// Create an `UnknownProtocol`, ignoring whether or not it
/// exceeds MAX_PROTOCOL_NAME_LENGTH.
fn from_str_any_len(s: &str) -> Result<Self, ProtoverError> {
if !is_valid_proto(s) {
return Err(ProtoverError::InvalidProtocol);
}
Ok(UnknownProtocol(s.to_string()))
}
}
impl From<Protocol> for UnknownProtocol {
fn from(p: Protocol) -> UnknownProtocol {
UnknownProtocol(p.to_string())
}
}
#[cfg(feature = "test_linking_hack")]
fn have_linkauth_v1() -> bool {
true
}
#[cfg(not(feature = "test_linking_hack"))]
fn have_linkauth_v1() -> bool {
use external::c_tor_is_using_nss;
!c_tor_is_using_nss()
}
/// Get a CStr representation of current supported protocols, for
/// passing to C, or for converting to a `&str` for Rust.
///
/// # Returns
///
/// An `&'static CStr` whose value is the existing protocols supported by tor.
/// Returned data is in the format as follows:
///
/// "HSDir=1-1 LinkAuth=1"
///
/// # Note
///
/// Rust code can use the `&'static CStr` as a normal `&'a str` by
/// calling `protover::get_supported_protocols`.
///
// C_RUST_COUPLED: protover.c `protover_get_supported_protocols`
pub(crate) fn get_supported_protocols_cstr() -> &'static CStr {
if !have_linkauth_v1() {
cstr!(
"Cons=1-2 \
Desc=1-2 \
DirCache=1-2 \
HSDir=1-2 \
HSIntro=3-4 \
HSRend=1-2 \
Link=1-5 \
LinkAuth=3 \
Microdesc=1-2 \
Relay=1-2"
)
} else {
cstr!(
"Cons=1-2 \
Desc=1-2 \
DirCache=1-2 \
HSDir=1-2 \
HSIntro=3-4 \
HSRend=1-2 \
Link=1-5 \
LinkAuth=1,3 \
Microdesc=1-2 \
Relay=1-2"
)
}
}
/// A map of protocol names to the versions of them which are supported.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ProtoEntry(HashMap<Protocol, ProtoSet>);
impl Default for ProtoEntry {
fn default() -> ProtoEntry {
ProtoEntry(HashMap::new())
}
}
impl ProtoEntry {
/// Get an iterator over the `Protocol`s and their `ProtoSet`s in this `ProtoEntry`.
pub fn iter(&self) -> hash_map::Iter<Protocol, ProtoSet> {
self.0.iter()
}
/// Translate the supported tor versions from a string into a
/// ProtoEntry, which is useful when looking up a specific
/// subprotocol.
pub fn supported() -> Result<Self, ProtoverError> {
let supported_cstr: &'static CStr = get_supported_protocols_cstr();
let supported: &str = supported_cstr.to_str().unwrap_or("");
supported.parse()
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn get(&self, protocol: &Protocol) -> Option<&ProtoSet> {
self.0.get(protocol)
}
pub fn insert(&mut self, key: Protocol, value: ProtoSet) {
self.0.insert(key, value);
}
pub fn remove(&mut self, key: &Protocol) -> Option<ProtoSet> {
self.0.remove(key)
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl FromStr for ProtoEntry {
type Err = ProtoverError;
/// Parse a string of subprotocol types and their version numbers.
///
/// # Inputs
///
/// * A `protocol_entry` string, comprised of a keywords, an "=" sign, and
/// one or more version numbers, each separated by a space. For example,
/// `"Cons=3-4 HSDir=1"`.
///
/// # Returns
///
/// A `Result` whose `Ok` value is a `ProtoEntry`.
/// Otherwise, the `Err` value of this `Result` is a `ProtoverError`.
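///
/// # Examples
///
/// A minimal sketch (assuming this crate is used as `protover`, as in the other
/// doctests in this module):
///
/// ```
/// use protover::{Protocol, ProtoEntry};
///
/// let entry: ProtoEntry = "Cons=3-4 HSDir=1".parse().unwrap();
/// assert!(entry.get(&Protocol::HSDir).is_some());
/// ```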
fn from_str(protocol_entry: &str) -> Result<ProtoEntry, ProtoverError> {
let mut proto_entry: ProtoEntry = ProtoEntry::default();
let entries = protocol_entry.split(' ');
for entry in entries {
let mut parts = entry.splitn(2, '=');
let proto = match parts.next() {
Some(n) => n,
None => return Err(ProtoverError::Unparseable),
};
let vers = match parts.next() {
Some(n) => n,
None => return Err(ProtoverError::Unparseable),
};
let versions: ProtoSet = vers.parse()?;
let proto_name: Protocol = proto.parse()?;
proto_entry.insert(proto_name, versions);
if proto_entry.len() > MAX_PROTOCOLS_TO_EXPAND {
return Err(ProtoverError::ExceedsMax);
}
}
Ok(proto_entry)
}
}
/// Generate an implementation of `ToString` for either a `ProtoEntry` or an
/// `UnvalidatedProtoEntry`.
macro_rules! impl_to_string_for_proto_entry {
($t:ty) => {
impl ToString for $t {
fn to_string(&self) -> String {
let mut parts: Vec<String> = Vec::new();
for (protocol, versions) in self.iter() {
parts.push(format!("{}={}", protocol.to_string(), versions.to_string()));
}
parts.sort_unstable();
parts.join(" ")
}
}
};
}
impl_to_string_for_proto_entry!(ProtoEntry);
impl_to_string_for_proto_entry!(UnvalidatedProtoEntry);
/// A `ProtoEntry`, but whose `Protocols` can be any `UnknownProtocol`, not just
/// the supported ones enumerated in `Protocols`. The protocol versions are
/// validated, however.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct UnvalidatedProtoEntry(HashMap<UnknownProtocol, ProtoSet>);
impl Default for UnvalidatedProtoEntry {
fn default() -> UnvalidatedProtoEntry {
UnvalidatedProtoEntry(HashMap::new())
}
}
impl UnvalidatedProtoEntry {
/// Get an iterator over the `Protocol`s and their `ProtoSet`s in this `ProtoEntry`.
pub fn iter(&self) -> hash_map::Iter<UnknownProtocol, ProtoSet> {
self.0.iter()
}
pub fn get(&self, protocol: &UnknownProtocol) -> Option<&ProtoSet> {
self.0.get(protocol)
}
pub fn insert(&mut self, key: UnknownProtocol, value: ProtoSet) {
self.0.insert(key, value);
}
pub fn remove(&mut self, key: &UnknownProtocol) -> Option<ProtoSet> {
self.0.remove(key)
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
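/// Returns the total number of versions, summed over all protocols in this entry (not the number of protocols).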
pub fn len(&self) -> usize {
let mut total: usize = 0;
for (_, versions) in self.iter() {
total += versions.len();
}
total
}
/// Determine if we support every protocol a client supports, and if not,
/// determine which protocols we do not have support for.
///
/// # Returns
///
/// Optionally, return parameters which the client supports but which we do not.
///
/// # Examples
/// ```
/// use protover::UnvalidatedProtoEntry;
///
/// let protocols: UnvalidatedProtoEntry = "LinkAuth=1 Microdesc=1-2 Relay=2".parse().unwrap();
/// let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
/// assert_eq!(true, unsupported.is_none());
///
/// let protocols: UnvalidatedProtoEntry = "Link=1-2 Wombat=9".parse().unwrap();
/// let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
/// assert_eq!(true, unsupported.is_some());
/// assert_eq!("Wombat=9", &unsupported.unwrap().to_string());
/// ```
pub fn all_supported(&self) -> Option<UnvalidatedProtoEntry> {
let mut unsupported: UnvalidatedProtoEntry = UnvalidatedProtoEntry::default();
let supported: ProtoEntry = match ProtoEntry::supported() {
Ok(x) => x,
Err(_) => return None,
};
for (protocol, versions) in self.iter() {
let is_supported: Result<Protocol, ProtoverError> = protocol.0.parse();
let supported_protocol: Protocol;
// If the protocol wasn't even in the enum, then we definitely don't
// know about it and don't support any of its versions.
if is_supported.is_err() {
if !versions.is_empty() {
unsupported.insert(protocol.clone(), versions.clone());
}
continue;
} else {
supported_protocol = is_supported.unwrap();
}
let maybe_supported_versions: Option<&ProtoSet> = supported.get(&supported_protocol);
let supported_versions: &ProtoSet;
// If the protocol wasn't in the map, then we don't know about it
// and don't support any of its versions. Add its versions to the
// map (if it has versions).
if maybe_supported_versions.is_none() {
if !versions.is_empty() {
unsupported.insert(protocol.clone(), versions.clone());
}
continue;
} else {
supported_versions = maybe_supported_versions.unwrap();
}
let unsupported_versions = versions.and_not_in(supported_versions);
if !unsupported_versions.is_empty() {
unsupported.insert(protocol.clone(), unsupported_versions);
}
}
if unsupported.is_empty() {
return None;
}
Some(unsupported)
}
/// Determine if we have support for some protocol and version.
///
/// # Inputs
///
/// * `proto`, an `UnknownProtocol` to test support for
/// * `vers`, a `Version` which we will go on to determine whether the
/// specified protocol supports.
///
/// # Return
///
/// Returns `true` iff this `UnvalidatedProtoEntry` includes support for the
/// indicated protocol and version, and `false` otherwise.
///
/// # Examples
///
/// ```
/// # use std::str::FromStr;
/// use protover::*;
/// # use protover::errors::ProtoverError;
///
/// # fn do_test () -> Result<UnvalidatedProtoEntry, ProtoverError> {
/// let proto: UnvalidatedProtoEntry = "Link=3-4 Cons=1 Doggo=3-5".parse()?;
/// assert_eq!(true, proto.supports_protocol(&Protocol::Cons.into(), &1));
/// assert_eq!(false, proto.supports_protocol(&Protocol::Cons.into(), &5));
/// assert_eq!(true, proto.supports_protocol(&UnknownProtocol::from_str("Doggo")?, &4));
/// # Ok(proto)
/// # } fn main () { do_test(); }
/// ```
pub fn supports_protocol(&self, proto: &UnknownProtocol, vers: &Version) -> bool {
let supported_versions: &ProtoSet = match self.get(proto) {
Some(n) => n,
None => return false,
};
supported_versions.contains(&vers)
}
/// As `UnvalidatedProtoEntry::supports_protocol()`, but also returns `true`
/// if any later version of the protocol is supported.
///
/// # Examples
/// ```
/// use protover::*;
/// # use protover::errors::ProtoverError;
///
/// # fn do_test () -> Result<UnvalidatedProtoEntry, ProtoverError> {
/// let proto: UnvalidatedProtoEntry = "Link=3-4 Cons=5".parse()?;
///
/// assert_eq!(true, proto.supports_protocol_or_later(&Protocol::Cons.into(), &5));
/// assert_eq!(true, proto.supports_protocol_or_later(&Protocol::Cons.into(), &4));
/// assert_eq!(false, proto.supports_protocol_or_later(&Protocol::Cons.into(), &6));
/// # Ok(proto)
/// # } fn main () { do_test(); }
/// ```
pub fn supports_protocol_or_later(&self, proto: &UnknownProtocol, vers: &Version) -> bool {
let supported_versions: &ProtoSet = match self.get(&proto) {
Some(n) => n,
None => return false,
};
supported_versions.iter().any(|v| v.1 >= *vers)
}
/// Split a string containing (potentially) several protocols and their
/// versions into a `Vec` of tuples of string in `(protocol, versions)`
/// form.
///
/// # Inputs
///
/// A &str in the form `"Link=3-4 Cons=5"`.
///
/// # Returns
///
/// A `Result` whose `Ok` variant is a `Vec<(&str, &str)>` of `(protocol,
/// versions)`, or whose `Err` variant is a `ProtoverError`.
///
/// # Errors
///
/// This will error with a `ProtoverError::Unparseable` if any of the
/// following are true:
///
/// * If a protocol name is an empty string, e.g. `"Cons=1,3 =3-5"`.
/// * If an entry has no equals sign, e.g. `"Cons=1,3 Desc"`.
/// * If there is leading or trailing whitespace, e.g. `" Cons=1,3 Link=3"`.
/// * If there is any other extra whitespace, e.g. `"Cons=1,3 Link=3"`.
fn parse_protocol_and_version_str<'a>(
protocol_string: &'a str,
) -> Result<Vec<(&'a str, &'a str)>, ProtoverError> {
let mut protovers: Vec<(&str, &str)> = Vec::new();
for subproto in protocol_string.split(' ') {
let mut parts = subproto.splitn(2, '=');
let name = match parts.next() {
Some("") => return Err(ProtoverError::Unparseable),
Some(n) => n,
None => return Err(ProtoverError::Unparseable),
};
let vers = match parts.next() {
Some(n) => n,
None => return Err(ProtoverError::Unparseable),
};
protovers.push((name, vers));
}
Ok(protovers)
}
}
impl FromStr for UnvalidatedProtoEntry {
type Err = ProtoverError;
/// Parses a protocol list without validating the protocol names.
///
/// # Inputs
///
/// * `protocol_string`, a string comprised of keys and values, both which are
/// strings. The keys are the protocol names while values are a string
/// representation of the supported versions.
///
/// The input is _not_ expected to be a subset of the Protocol types
///
/// # Returns
///
/// A `Result` whose `Ok` value is an `UnvalidatedProtoEntry`.
///
/// The returned `Result`'s `Err` value is an `ProtoverError`.
///
/// # Errors
///
/// This function will error if:
///
/// * The protocol string does not follow the "protocol_name=version_list"
/// expected format, or
/// * If the version string is malformed. See `impl FromStr for ProtoSet`.
fn from_str(protocol_string: &str) -> Result<UnvalidatedProtoEntry, ProtoverError> {
let mut parsed: UnvalidatedProtoEntry = UnvalidatedProtoEntry::default();
let parts: Vec<(&str, &str)> =
UnvalidatedProtoEntry::parse_protocol_and_version_str(protocol_string)?;
for &(name, vers) in parts.iter() {
let versions = ProtoSet::from_str(vers)?;
let protocol = UnknownProtocol::from_str(name)?;
parsed.insert(protocol, versions);
}
Ok(parsed)
}
}
impl UnvalidatedProtoEntry {
/// Create an `UnknownProtocol`, ignoring whether or not it
/// exceeds MAX_PROTOCOL_NAME_LENGTH.
pub(crate) fn from_str_any_len(
protocol_string: &str,
) -> Result<UnvalidatedProtoEntry, ProtoverError> {
let mut parsed: UnvalidatedProtoEntry = UnvalidatedProtoEntry::default();
let parts: Vec<(&str, &str)> =
UnvalidatedProtoEntry::parse_protocol_and_version_str(protocol_string)?;
for &(name, vers) in parts.iter() {
let versions = ProtoSet::from_str(vers)?;
let protocol = UnknownProtocol::from_str_any_len(name)?;
parsed.insert(protocol, versions);
}
Ok(parsed)
}
}
/// Pretend a `ProtoEntry` is actually an `UnvalidatedProtoEntry`.
impl From<ProtoEntry> for UnvalidatedProtoEntry {
fn from(proto_entry: ProtoEntry) -> UnvalidatedProtoEntry {
let mut unvalidated: UnvalidatedProtoEntry = UnvalidatedProtoEntry::default();
for (protocol, versions) in proto_entry.iter() {
unvalidated.insert(UnknownProtocol::from(protocol.clone()), versions.clone());
}
unvalidated
}
}
/// A mapping of protocols to a count of how many times each of their `Version`s
/// were voted for or supported.
///
/// # Warning
///
/// The "protocols" are *not* guaranteed to be known/supported `Protocol`s, in
/// order to allow new subprotocols to be introduced even if Directory
/// Authorities don't yet know of them.
pub struct ProtoverVote(HashMap<UnknownProtocol, HashMap<Version, usize>>);
impl Default for ProtoverVote {
fn default() -> ProtoverVote {
ProtoverVote(HashMap::new())
}
}
impl IntoIterator for ProtoverVote {
type Item = (UnknownProtocol, HashMap<Version, usize>);
type IntoIter = hash_map::IntoIter<UnknownProtocol, HashMap<Version, usize>>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl ProtoverVote {
pub fn entry(
&mut self,
key: UnknownProtocol,
) -> hash_map::Entry<UnknownProtocol, HashMap<Version, usize>> {
self.0.entry(key)
}
/// Protocol voting implementation.
///
/// Given a slice of `UnvalidatedProtoEntry`s and a vote `threshold`, return
/// a new `UnvalidatedProtoEntry` encoding all of the protocols that are
/// listed by at least `threshold` of the inputs.
///
/// # Examples
///
/// ```
/// use protover::ProtoverVote;
/// use protover::UnvalidatedProtoEntry;
///
/// let protos: &[UnvalidatedProtoEntry] = &["Link=3-4".parse().unwrap(),
/// "Link=3".parse().unwrap()];
/// let vote = ProtoverVote::compute(protos, &2);
/// assert_eq!("Link=3", vote.to_string());
/// ```
// C_RUST_COUPLED: protover.c protover_compute_vote
pub fn compute(
proto_entries: &[UnvalidatedProtoEntry],
threshold: &usize,
) -> UnvalidatedProtoEntry {
let mut all_count: ProtoverVote = ProtoverVote::default();
let mut final_output: UnvalidatedProtoEntry = UnvalidatedProtoEntry::default();
if proto_entries.is_empty() {
return final_output;
}
// parse and collect all of the protos and their versions and collect them
for vote in proto_entries {
// C_RUST_DIFFERS: This doesn't actually differ, but this check on
// the total is here to make it match. Because the C version calls
// expand_protocol_list() which checks if there would be too many
// subprotocols *or* individual version numbers, i.e. more than
// MAX_PROTOCOLS_TO_EXPAND, and does this *per vote*, we need to
// match it's behaviour and ensure we're not allowing more than it
// would.
if vote.len() > MAX_PROTOCOLS_TO_EXPAND {
continue;
}
for (protocol, versions) in vote.iter() {
let supported_vers: &mut HashMap<Version, usize> =
all_count.entry(protocol.clone()).or_insert(HashMap::new());
for version in versions.clone().expand() {
let counter: &mut usize = supported_vers.entry(version).or_insert(0);
*counter += 1;
}
}
}
for (protocol, mut versions) in all_count {
// Go through and remove versions that are less than the threshold
versions.retain(|_, count| *count as usize >= *threshold);
if versions.len() > 0 {
let voted_versions: Vec<Version> = versions.keys().cloned().collect();
let voted_protoset: ProtoSet = ProtoSet::from(voted_versions);
final_output.insert(protocol, voted_protoset);
}
}
final_output
}
}
/// Returns a boolean indicating whether the given protocol and version is
/// supported in any of the existing Tor protocols
///
/// # Examples
/// ```
/// use protover::is_supported_here;
/// use protover::Protocol;
///
/// let is_supported = is_supported_here(&Protocol::Link, &10);
/// assert_eq!(false, is_supported);
///
/// let is_supported = is_supported_here(&Protocol::Link, &1);
/// assert_eq!(true, is_supported);
/// ```
pub fn is_supported_here(proto: &Protocol, vers: &Version) -> bool {
let currently_supported: ProtoEntry = match ProtoEntry::supported() {
Ok(result) => result,
Err(_) => return false,
};
let supported_versions = match currently_supported.get(proto) {
Some(n) => n,
None => return false,
};
supported_versions.contains(vers)
}
/// Since older versions of Tor cannot infer their own subprotocols,
/// determine which subprotocols are supported by older Tor versions.
///
/// # Inputs
///
/// * `version`, a string comprised of "[0-9a-z.-]"
///
/// # Returns
///
/// A `&'static CStr` encoding a list of protocol names and supported
/// versions. The string takes the following format:
///
/// "HSDir=1-1 LinkAuth=1"
///
/// This function returns the protocols that are supported by the version input,
/// only for tor versions older than `FIRST_TOR_VERSION_TO_ADVERTISE_PROTOCOLS`
/// (but not older than 0.2.4.19). For newer tors (or older than 0.2.4.19), it
/// returns an empty string.
///
/// # Note
///
/// This function is meant to be called for/within FFI code. If you'd
/// like to use this code in Rust, please see `compute_for_old_tor()`.
//
// C_RUST_COUPLED: src/rust/protover.c `compute_for_old_tor`
pub(crate) fn compute_for_old_tor_cstr(version: &str) -> &'static CStr {
let empty: &'static CStr = cstr!("");
if c_tor_version_as_new_as(version, FIRST_TOR_VERSION_TO_ADVERTISE_PROTOCOLS) {
return empty;
}
if c_tor_version_as_new_as(version, "0.2.9.1-alpha") {
return cstr!(
"Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 HSIntro=3 HSRend=1-2 \
Link=1-4 LinkAuth=1 Microdesc=1-2 Relay=1-2"
);
}
if c_tor_version_as_new_as(version, "0.2.7.5") {
return cstr!(
"Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 HSIntro=3 HSRend=1 \
Link=1-4 LinkAuth=1 Microdesc=1-2 Relay=1-2"
);
}
if c_tor_version_as_new_as(version, "0.2.4.19") {
return cstr!(
"Cons=1 Desc=1 DirCache=1 HSDir=1 HSIntro=3 HSRend=1 \
Link=1-4 LinkAuth=1 Microdesc=1 Relay=1-2"
);
}
empty
}
/// Since older versions of Tor cannot infer their own subprotocols,
/// determine which subprotocols are supported by older Tor versions.
///
/// # Inputs
///
/// * `version`, a string comprised of "[0-9a-z.-]"
///
/// # Returns
///
/// A `Result` whose `Ok` value is an `&'static str` encoding a list of protocol
/// names and supported versions. The string takes the following format:
///
/// "HSDir=1-1 LinkAuth=1"
///
/// This function returns the protocols that are supported by the version input,
/// only for tor versions older than `FIRST_TOR_VERSION_TO_ADVERTISE_PROTOCOLS`.
/// (but not older than 0.2.4.19). For newer tors (or older than 0.2.4.19), its
/// `Ok` `Result` contains an empty string.
///
/// Otherwise, its `Err` contains a `ProtoverError::Unparseable` if the
/// `version` string was invalid utf-8.
///
/// # Note
///
/// This function is meant to be called for/within non-FFI Rust code.
//
// C_RUST_COUPLED: src/rust/protover.c `compute_for_old_tor`
pub fn compute_for_old_tor(version: &str) -> Result<&'static str, ProtoverError> {
// .to_str() fails with a Utf8Error if it couldn't validate the
// utf-8, so convert that here into an Unparseable ProtoverError.
compute_for_old_tor_cstr(version)
.to_str()
.or(Err(ProtoverError::Unparseable))
}
#[cfg(test)]
mod test {
use std::str::FromStr;
use std::string::ToString;
use super::*;
macro_rules! parse_proto {
($e:expr) => {{
let proto: Result<UnknownProtocol, _> = $e.parse();
let proto2 = UnknownProtocol::from_str_any_len($e);
assert_eq!(proto, proto2);
proto
}};
}
#[test]
fn test_protocol_from_str() {
assert!(parse_proto!("Cons").is_ok());
assert!(parse_proto!("123").is_ok());
assert!(parse_proto!("1-2-3").is_ok());
let err = Err(ProtoverError::InvalidProtocol);
assert_eq!(err, parse_proto!("a_b_c"));
assert_eq!(err, parse_proto!("a b"));
assert_eq!(err, parse_proto!("a,"));
assert_eq!(err, parse_proto!("b."));
assert_eq!(err, parse_proto!("é"));
}
macro_rules! assert_protoentry_is_parseable {
($e:expr) => {
let protoentry: Result<ProtoEntry, ProtoverError> = $e.parse();
assert!(protoentry.is_ok(), format!("{:?}", protoentry.err()));
};
}
macro_rules! assert_protoentry_is_unparseable {
($e:expr) => {
let protoentry: Result<ProtoEntry, ProtoverError> = $e.parse();
assert!(protoentry.is_err());
};
}
#[test]
fn test_protoentry_from_str_multiple_protocols_multiple_versions() {
assert_protoentry_is_parseable!("Cons=3-4 Link=1,3-5");
}
#[test]
fn test_protoentry_from_str_empty() {
assert_protoentry_is_unparseable!("");
}
#[test]
fn test_protoentry_from_str_single_protocol_single_version() {
assert_protoentry_is_parseable!("HSDir=1");
}
#[test]
fn test_protoentry_from_str_unknown_protocol() {
assert_protoentry_is_unparseable!("Ducks=5-7,8");
}
#[test]
fn test_protoentry_from_str_allowed_number_of_versions() {
assert_protoentry_is_parseable!("Desc=1-4294967294");
}
#[test]
fn test_protoentry_from_str_too_many_versions() {
assert_protoentry_is_unparseable!("Desc=1-4294967295");
}
#[test]
fn test_protoentry_from_str_() {
assert_protoentry_is_unparseable!("");
}
#[test]
fn test_protoentry_all_supported_single_protocol_single_version() {
let protocol: UnvalidatedProtoEntry = "Cons=1".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocol.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn test_protoentry_all_supported_multiple_protocol_multiple_versions() {
let protocols: UnvalidatedProtoEntry = "Link=3-4 Desc=2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn test_protoentry_all_supported_three_values() {
let protocols: UnvalidatedProtoEntry = "LinkAuth=1 Microdesc=1-2 Relay=2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn test_protoentry_all_supported_unknown_protocol() {
let protocols: UnvalidatedProtoEntry = "Wombat=9".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_some());
assert_eq!("Wombat=9", &unsupported.unwrap().to_string());
}
#[test]
fn test_protoentry_all_supported_unsupported_high_version() {
let protocols: UnvalidatedProtoEntry = "HSDir=12-100".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_some());
assert_eq!("HSDir=12-100", &unsupported.unwrap().to_string());
}
#[test]
fn test_protoentry_all_supported_unsupported_low_version() {
let protocols: UnvalidatedProtoEntry = "HSIntro=2-3".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_some());
assert_eq!("HSIntro=2", &unsupported.unwrap().to_string());
}
#[test]
fn test_contract_protocol_list() {
let mut versions = "";
assert_eq!(
String::from(versions),
ProtoSet::from_str(&versions).unwrap().to_string()
);
versions = "1";
assert_eq!(
String::from(versions),
ProtoSet::from_str(&versions).unwrap().to_string()
);
versions = "1-2";
assert_eq!(
String::from(versions),
ProtoSet::from_str(&versions).unwrap().to_string()
);
versions = "1,3";
assert_eq!(
String::from(versions),
ProtoSet::from_str(&versions).unwrap().to_string()
);
versions = "1-4";
assert_eq!(
String::from(versions),
ProtoSet::from_str(&versions).unwrap().to_string()
);
versions = "1,3,5-7";
assert_eq!(
String::from(versions),
ProtoSet::from_str(&versions).unwrap().to_string()
);
versions = "1-3,500";
assert_eq!(
String::from(versions),
ProtoSet::from_str(&versions).unwrap().to_string()
);
}
}
| 33.066872 | 99 | 0.604835 |
cc8b2b5d8357e6ac738b884c0df17265baab2fa2 | 9,482 | use crate::comp_assets::CompAssets;
use crate::minesweeper::{IndexHelper, MineState};
use crate::visual_grid::{TileCoordinate, VisualGrid};
use bindings::windows::{
foundation::{
numerics::{Vector2, Vector3},
TimeSpan,
},
graphics::SizeInt32,
ui::{
composition::{
AnimationIterationBehavior, CompositionBatchTypes, CompositionBorderMode, Compositor,
ContainerVisual, SpriteVisual,
},
Colors,
},
};
use std::collections::VecDeque;
use std::time::Duration;
pub struct CompUI {
compositor: Compositor,
_root: SpriteVisual,
parent_size: Vector2,
game_board_margin: Vector2,
index_helper: IndexHelper,
game_board: VisualGrid,
assets: CompAssets,
mine_animation_playing: bool,
}
impl CompUI {
pub fn new(
parent_visual: &ContainerVisual,
parent_size: &Vector2,
grid_size_in_tiles: &SizeInt32,
) -> windows::Result<Self> {
let compositor = parent_visual.compositor()?;
let root = compositor.create_sprite_visual()?;
root.set_relative_size_adjustment(Vector2 { x: 1.0, y: 1.0 })?;
root.set_brush(compositor.create_color_brush_with_color(Colors::white()?)?)?;
root.set_border_mode(CompositionBorderMode::Hard)?;
parent_visual.children()?.insert_at_top(&root)?;
let tile_size = Vector2 { x: 25.0, y: 25.0 };
let game_board = VisualGrid::new(
&compositor,
grid_size_in_tiles,
&tile_size,
&Vector2 { x: 2.5, y: 2.5 },
)?;
let game_board_margin = Vector2 { x: 100.0, y: 100.0 };
let game_board_visual = game_board.root();
game_board_visual.set_relative_offset_adjustment(Vector3 {
x: 0.5,
y: 0.5,
z: 0.0,
})?;
game_board_visual.set_anchor_point(Vector2 { x: 0.5, y: 0.5 })?;
root.children()?.insert_at_top(game_board_visual)?;
let selection_visual = game_board.selection_visual();
root.children()?.insert_at_top(selection_visual)?;
let assets = CompAssets::new(&compositor, &tile_size)?;
Ok(Self {
compositor,
_root: root,
parent_size: parent_size.clone(),
game_board_margin,
index_helper: IndexHelper::new(grid_size_in_tiles.width, grid_size_in_tiles.height),
game_board,
assets,
mine_animation_playing: false,
})
}
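/// Translates a point in window coordinates into board-local coordinates (undoing the board's centering and scaling) and returns the tile under it, if any.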
pub fn hit_test(&self, point: &Vector2) -> windows::Result<Option<TileCoordinate>> {
let window_size = &self.parent_size;
let scale = self.compute_scale_factor()?;
let real_board_size = self.game_board.size()? * scale;
let real_offset = (window_size - real_board_size) / 2.0;
let point = (point - real_offset) / scale;
Ok(self.game_board.hit_test(&point))
}
pub fn resize(&mut self, new_size: &Vector2) -> windows::Result<()> {
self.parent_size = new_size.clone();
self.update_board_scale(new_size)?;
Ok(())
}
pub fn select_tile(&mut self, tile_coordinate: Option<TileCoordinate>) -> windows::Result<()> {
self.game_board.select_tile(tile_coordinate)
}
pub fn current_selected_tile(&self) -> Option<TileCoordinate> {
self.game_board.current_selected_tile()
}
pub fn update_tile_with_state(
&self,
tile_coordinate: &TileCoordinate,
mine_state: MineState,
) -> windows::Result<()> {
let visual = self
.game_board
.get_tile(tile_coordinate.x, tile_coordinate.y)
.unwrap();
visual.set_brush(self.assets.get_color_brush_from_mine_state(mine_state))?;
Ok(())
}
pub fn reset(&mut self, grid_size_in_tiles: &SizeInt32) -> windows::Result<()> {
self.game_board.reset(grid_size_in_tiles)?;
self.index_helper = IndexHelper::new(grid_size_in_tiles.width, grid_size_in_tiles.height);
for visual in self.game_board.tiles_iter() {
visual.set_brush(
self.assets
.get_color_brush_from_mine_state(MineState::Empty),
)?;
}
self.update_board_scale(&self.parent_size.clone())?;
self.mine_animation_playing = false;
Ok(())
}
pub fn update_tile_as_mine(&self, tile_coordinate: &TileCoordinate) -> windows::Result<()> {
let visual = self
.game_board
.get_tile(tile_coordinate.x, tile_coordinate.y)
.unwrap();
visual.set_brush(&self.assets.get_mine_brush())?;
Ok(())
}
pub fn update_tile_with_mine_count(
&self,
tile_coordinate: &TileCoordinate,
num_mines: i32,
) -> windows::Result<()> {
let visual = self
.game_board
.get_tile(tile_coordinate.x, tile_coordinate.y)
.unwrap();
visual.set_brush(self.assets.get_color_brush_from_mine_count(num_mines))?;
if num_mines > 0 {
let shape = self.assets.get_shape_from_mine_count(num_mines);
let shape_visual = self.compositor.create_shape_visual()?;
shape_visual.set_relative_size_adjustment(Vector2 { x: 1.0, y: 1.0 })?;
shape_visual.shapes()?.append(shape)?;
shape_visual.set_border_mode(CompositionBorderMode::Soft)?;
visual.children()?.insert_at_top(shape_visual)?;
}
Ok(())
}
pub fn play_mine_animations(
&mut self,
mut mine_indices: VecDeque<usize>,
mut mines_per_ring: VecDeque<i32>,
) -> windows::Result<()> {
// Create an animation batch so that we can know when the animations complete
let batch = self
.compositor
.create_scoped_batch(CompositionBatchTypes::Animation)?;
let animation_delay_step = Duration::from_millis(100);
let mut current_delay = Duration::from_millis(0);
let mut current_mines_count = 0;
while !mine_indices.is_empty() {
let mine_index = *mine_indices.front().unwrap();
self.play_mine_animation(mine_index, &TimeSpan::from(current_delay))?;
current_mines_count += 1;
let mines_on_current_level = *mines_per_ring.front().unwrap();
if current_mines_count == mines_on_current_level {
current_mines_count = 0;
mines_per_ring.pop_front().unwrap();
current_delay += animation_delay_step;
}
mine_indices.pop_front().unwrap();
}
// Subscribe to the completion event and complete the batch
// TODO: events
batch.end()?;
self.mine_animation_playing = true;
Ok(())
}
pub fn is_animation_playing(&self) -> bool {
self.mine_animation_playing
}
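/// Computes the uniform scale factor that fits the game board (plus its margin) inside `window_size` while preserving the board's aspect ratio.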
fn compute_scale_factor_from_size(&self, window_size: &Vector2) -> windows::Result<f32> {
let board_size = self.game_board.size()?;
let board_size = board_size + &self.game_board_margin;
let window_ratio = window_size.x / window_size.y;
let board_ratio = board_size.x / board_size.y;
let scale_factor = if window_ratio > board_ratio {
window_size.y / board_size.y
} else {
window_size.x / board_size.x
};
Ok(scale_factor)
}
fn compute_scale_factor(&self) -> windows::Result<f32> {
self.compute_scale_factor_from_size(&self.parent_size)
}
fn update_board_scale(&mut self, window_size: &Vector2) -> windows::Result<()> {
let scale_factor = self.compute_scale_factor_from_size(window_size)?;
self.game_board.root().set_scale(Vector3 {
x: scale_factor,
y: scale_factor,
z: 1.0,
})?;
Ok(())
}
fn play_mine_animation(&self, index: usize, delay: &TimeSpan) -> windows::Result<()> {
let visual = self
.game_board
.get_tile(
self.index_helper.compute_x_from_index(index),
self.index_helper.compute_y_from_index(index),
)
.unwrap();
// First, we need to promote the visual to the top.
let parent_children = visual.parent()?.children()?;
parent_children.remove(visual)?;
parent_children.insert_at_top(visual)?;
// Make sure the visual has the mine brush
visual.set_brush(&self.assets.get_mine_brush())?;
// Play the animation
let animation = self.compositor.create_vector3_key_frame_animation()?;
animation.insert_key_frame(
0.0,
Vector3 {
x: 1.0,
y: 1.0,
z: 1.0,
},
)?;
animation.insert_key_frame(
0.7,
Vector3 {
x: 2.0,
y: 2.0,
z: 1.0,
},
)?;
animation.insert_key_frame(
1.0,
Vector3 {
x: 1.0,
y: 1.0,
z: 1.0,
},
)?;
animation.set_duration(TimeSpan::from(Duration::from_millis(600)))?;
animation.set_delay_time(delay)?;
animation.set_iteration_behavior(AnimationIterationBehavior::Count)?;
animation.set_iteration_count(1)?;
visual.start_animation("Scale", animation)?;
Ok(())
}
}
| 32.696552 | 99 | 0.592702 |
90b941139a99096aaaba77eb54c874798c0b13dd | 9,074 | //! Handles the creation of a new container
use std::fs;
use std::path::{Path, PathBuf};
use std::process;
use anyhow::{bail, Result};
use clap::Clap;
use nix::sched;
use nix::unistd;
use nix::unistd::{Gid, Uid};
use crate::container::{Container, ContainerStatus};
use crate::namespaces::Namespaces;
use crate::notify_socket::NotifyListener;
use crate::process::{fork, Process};
use crate::rootfs;
use crate::rootless::{lookup_map_binaries, should_use_rootless, Rootless};
use crate::stdio::FileDescriptor;
use crate::tty;
use crate::utils;
use crate::{capabilities, command::Command};
use crate::{cgroups, rootless};
/// This is the main structure which stores various commandline options given by
/// high-level container runtime
#[derive(Clap, Debug)]
pub struct Create {
/// File to write pid of the container created
// note that in the end, container is just another process
#[clap(short, long)]
pid_file: Option<String>,
/// path to the bundle directory, containing config.json and root filesystem
#[clap(short, long, default_value = ".")]
bundle: PathBuf,
/// Unix socket (file) path , which will receive file descriptor of the writing end of the pseudoterminal
#[clap(short, long)]
console_socket: Option<PathBuf>,
/// name of the container instance to be started
pub container_id: String,
}
// One thing to note is that in the end, container is just another process in Linux
// it has specific/different control group, namespace, using which program executing in it
// can be given the impression that it is running on a complete system, but on the system on which
// it is running, it is just another process, and has attributes such as pid, file descriptors, etc.
// associated with it like any other process.
impl Create {
/// Starts a new container process
pub fn exec(
&self,
root_path: PathBuf,
systemd_cgroup: bool,
command: impl Command,
) -> Result<()> {
// create a directory for the container to store state etc.
// if already present, return error
let bundle_canonicalized = fs::canonicalize(&self.bundle)
.unwrap_or_else(|_| panic!("failed to canonicalize {:?}", &self.bundle));
let container_dir = root_path.join(&self.container_id);
log::debug!("container directory will be {:?}", container_dir);
if !container_dir.exists() {
fs::create_dir(&container_dir).unwrap();
} else {
bail!("{} already exists", self.container_id)
}
// change directory to the bundle directory, and load configuration,
// copy that to the container's directory
unistd::chdir(&self.bundle)?;
let spec = oci_spec::Spec::load("config.json")?;
fs::copy("config.json", container_dir.join("config.json"))?;
log::debug!("spec: {:?}", spec);
// convert path to absolute path, as relative path will be evaluated
// relative to where youki command is executed, and will be difficult to manipulate
let container_dir = fs::canonicalize(container_dir)?;
unistd::chdir(&*container_dir)?;
log::debug!("{:?}", &container_dir);
let container = Container::new(
&self.container_id,
ContainerStatus::Creating,
None,
bundle_canonicalized.to_str().unwrap(),
&container_dir,
)?;
container.save()?;
let mut notify_socket: NotifyListener = NotifyListener::new(&container_dir)?;
// convert path of root file system of the container to absolute path
let rootfs = fs::canonicalize(&spec.root.path)?;
// if socket file path is given in commandline options,
// get file descriptors of console socket
let csocketfd = if let Some(console_socket) = &self.console_socket {
Some(tty::setup_console_socket(&container_dir, console_socket)?)
} else {
None
};
let process = run_container(
self.pid_file.as_ref(),
&mut notify_socket,
rootfs,
spec,
csocketfd,
systemd_cgroup,
container,
command,
)?;
// run_container forks the process, so after it returns, if we are in the
// parent process we exit here, as the work of creating the container is done
if let Process::Parent(_) = process {
process::exit(0);
}
// if in the child process after fork, then just return
Ok(())
}
}
/// Fork the process and actually start the container process
fn run_container<P: AsRef<Path>>(
pid_file: Option<P>,
notify_socket: &mut NotifyListener,
rootfs: PathBuf,
spec: oci_spec::Spec,
csocketfd: Option<FileDescriptor>,
systemd_cgroup: bool,
container: Container,
command: impl Command,
) -> Result<Process> {
// disable core dump for the process, check https://man7.org/linux/man-pages/man2/prctl.2.html for more information
prctl::set_dumpable(false).unwrap();
// get Linux specific section of OCI spec,
// refer https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md for more information
let linux = spec.linux.as_ref().unwrap();
let namespaces: Namespaces = linux.namespaces.clone().into();
let rootless = if should_use_rootless() {
log::debug!("rootless container should be created");
log::warn!(
"resource constraints and multi id mapping is unimplemented for rootless containers"
);
rootless::validate(&spec)?;
let mut rootless = Rootless::from(linux);
if let Some((uid_binary, gid_binary)) = lookup_map_binaries(linux)? {
rootless.newuidmap = Some(uid_binary);
rootless.newgidmap = Some(gid_binary);
}
Some(rootless)
} else {
None
};
let cgroups_path = utils::get_cgroup_path(&linux.cgroups_path, container.id());
let cmanager = cgroups::common::create_cgroup_manager(&cgroups_path, systemd_cgroup)?;
    // first fork, which creates the process that will later create the actual container process
match fork::fork_first(pid_file, rootless, linux, &container, cmanager)? {
// In the parent process, which called run_container
Process::Parent(parent) => Ok(Process::Parent(parent)),
// in child process
Process::Child(child) => {
            // set limits and namespaces for the process
for rlimit in spec.process.rlimits.iter() {
command.set_rlimit(rlimit)?
}
command.set_id(Uid::from_raw(0), Gid::from_raw(0))?;
let without = sched::CloneFlags::CLONE_NEWUSER;
namespaces.apply_unshare(without)?;
// set up tty if specified
if let Some(csocketfd) = csocketfd {
tty::setup_console(csocketfd)?;
}
// set namespaces
namespaces.apply_setns()?;
            // fork a second time; the resulting init process will set up and run the container
match fork::fork_init(child)? {
Process::Child(_child) => unreachable!(),
// This is actually the child process after fork
Process::Init(mut init) => {
// setup args and env vars as in the spec
let spec_args: &Vec<String> = &spec.process.args.clone();
let envs: &Vec<String> = &spec.process.env.clone();
// prepare process
init_process(spec, command, rootfs, namespaces)?;
init.ready()?;
notify_socket.wait_for_container_start()?;
// actually run the command / program to be run in container
utils::do_exec(&spec_args[0], spec_args, envs)?;
// the command / program is done executing
container.update_status(ContainerStatus::Stopped)?.save()?;
Ok(Process::Init(init))
}
Process::Parent(_) => unreachable!(),
}
}
_ => unreachable!(),
}
}
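// Illustrative summary of the process tree built above (added as a sketch, not part of
// the original source); the labels are descriptive only:
//
//   caller of run_container (parent, returns and exits after setup)
//     └── fork_first child: sets rlimits, unshares namespaces, sets up the tty,
//           then enters the remaining namespaces
//             └── fork_init init process: prepares the rootfs, signals readiness,
//                   waits for the start notification, then execs the container program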
/// Set up the hostname and rootfs for the container process
fn init_process(
spec: oci_spec::Spec,
command: impl Command,
rootfs: PathBuf,
namespaces: Namespaces,
) -> Result<()> {
let proc = spec.process.clone();
command.set_hostname(&spec.hostname.as_str())?;
if spec.process.no_new_privileges {
let _ = prctl::set_no_new_privileges(true);
}
rootfs::prepare_rootfs(
&spec,
&rootfs,
namespaces
.clone_flags
.contains(sched::CloneFlags::CLONE_NEWUSER),
)?;
    // change the root of the filesystem of the process to the rootfs
command.pivot_rootfs(&rootfs)?;
command.set_id(Uid::from_raw(proc.user.uid), Gid::from_raw(proc.user.gid))?;
capabilities::reset_effective(&command)?;
if let Some(caps) = &proc.capabilities {
capabilities::drop_privileges(&caps, &command)?;
}
Ok(())
}
| 37.495868 | 119 | 0.620234 |
bbc4691d963c6405ff08bb5156fd4e092b082d97 | 19,739 | //! Implementation of `std::os` functionality for unix systems
#![allow(unused_imports)] // lots of cfg code here
#[cfg(all(test, target_env = "gnu"))]
mod tests;
use crate::os::unix::prelude::*;
use crate::error::Error as StdError;
use crate::ffi::{CStr, CString, OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::iter;
use crate::mem;
use crate::path::{self, PathBuf};
use crate::ptr;
use crate::slice;
use crate::str;
use crate::sys::cvt;
use crate::sys::fd;
use crate::sys::memchr;
use crate::sys::rwlock::{RWLockReadGuard, StaticRWLock};
use crate::sys_common::mutex::{StaticMutex, StaticMutexGuard};
use crate::vec;
use libc::{c_char, c_int, c_void};
const TMPBUF_SZ: usize = 128;
cfg_if::cfg_if! {
if #[cfg(target_os = "redox")] {
const PATH_SEPARATOR: u8 = b';';
} else {
const PATH_SEPARATOR: u8 = b':';
}
}
extern "C" {
#[cfg(not(any(target_os = "dragonfly", target_os = "vxworks")))]
#[cfg_attr(
any(
target_os = "linux",
target_os = "emscripten",
target_os = "fuchsia",
target_os = "l4re"
),
link_name = "__errno_location"
)]
#[cfg_attr(
any(
target_os = "netbsd",
target_os = "openbsd",
target_os = "android",
target_os = "redox",
target_env = "newlib"
),
link_name = "__errno"
)]
#[cfg_attr(any(target_os = "solaris", target_os = "illumos"), link_name = "___errno")]
#[cfg_attr(
any(target_os = "macos", target_os = "ios", target_os = "freebsd"),
link_name = "__error"
)]
#[cfg_attr(target_os = "haiku", link_name = "_errnop")]
fn errno_location() -> *mut c_int;
}
/// Returns the platform-specific value of errno
#[cfg(not(any(target_os = "dragonfly", target_os = "vxworks")))]
pub fn errno() -> i32 {
unsafe { (*errno_location()) as i32 }
}
/// Sets the platform-specific value of errno
#[cfg(all(not(target_os = "linux"), not(target_os = "dragonfly"), not(target_os = "vxworks")))] // needed for readdir and syscall!
#[allow(dead_code)] // but not all target cfgs actually end up using it
pub fn set_errno(e: i32) {
unsafe { *errno_location() = e as c_int }
}
#[cfg(target_os = "vxworks")]
pub fn errno() -> i32 {
unsafe { libc::errnoGet() }
}
#[cfg(target_os = "dragonfly")]
pub fn errno() -> i32 {
extern "C" {
#[thread_local]
static errno: c_int;
}
unsafe { errno as i32 }
}
#[cfg(target_os = "dragonfly")]
pub fn set_errno(e: i32) {
extern "C" {
#[thread_local]
static mut errno: c_int;
}
unsafe {
errno = e;
}
}
/// Gets a detailed string description for the given error number.
pub fn error_string(errno: i32) -> String {
extern "C" {
#[cfg_attr(any(target_os = "linux", target_env = "newlib"), link_name = "__xpg_strerror_r")]
fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: libc::size_t) -> c_int;
}
let mut buf = [0 as c_char; TMPBUF_SZ];
let p = buf.as_mut_ptr();
unsafe {
if strerror_r(errno as c_int, p, buf.len()) < 0 {
panic!("strerror_r failure");
}
let p = p as *const _;
str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned()
}
}
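// Illustrative expectation (added, not part of the original source): on typical glibc
// systems, error_string(libc::ENOENT) yields something like "No such file or directory",
// though the exact wording is locale- and platform-dependent.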
pub fn getcwd() -> io::Result<PathBuf> {
let mut buf = Vec::with_capacity(512);
loop {
unsafe {
let ptr = buf.as_mut_ptr() as *mut libc::c_char;
if !libc::getcwd(ptr, buf.capacity()).is_null() {
let len = CStr::from_ptr(buf.as_ptr() as *const libc::c_char).to_bytes().len();
buf.set_len(len);
buf.shrink_to_fit();
return Ok(PathBuf::from(OsString::from_vec(buf)));
} else {
let error = io::Error::last_os_error();
if error.raw_os_error() != Some(libc::ERANGE) {
return Err(error);
}
}
// Trigger the internal buffer resizing logic of `Vec` by requiring
// more space than the current capacity.
let cap = buf.capacity();
buf.set_len(cap);
buf.reserve(1);
}
}
}
pub fn chdir(p: &path::Path) -> io::Result<()> {
let p: &OsStr = p.as_ref();
let p = CString::new(p.as_bytes())?;
if unsafe { libc::chdir(p.as_ptr()) } != 0 {
return Err(io::Error::last_os_error());
}
Ok(())
}
pub struct SplitPaths<'a> {
iter: iter::Map<slice::Split<'a, u8, fn(&u8) -> bool>, fn(&'a [u8]) -> PathBuf>,
}
pub fn split_paths(unparsed: &OsStr) -> SplitPaths<'_> {
fn bytes_to_path(b: &[u8]) -> PathBuf {
PathBuf::from(<OsStr as OsStrExt>::from_bytes(b))
}
fn is_separator(b: &u8) -> bool {
*b == PATH_SEPARATOR
}
let unparsed = unparsed.as_bytes();
SplitPaths {
iter: unparsed
.split(is_separator as fn(&u8) -> bool)
.map(bytes_to_path as fn(&[u8]) -> PathBuf),
}
}
impl<'a> Iterator for SplitPaths<'a> {
type Item = PathBuf;
fn next(&mut self) -> Option<PathBuf> {
self.iter.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
#[derive(Debug)]
pub struct JoinPathsError;
pub fn join_paths<I, T>(paths: I) -> Result<OsString, JoinPathsError>
where
I: Iterator<Item = T>,
T: AsRef<OsStr>,
{
let mut joined = Vec::new();
for (i, path) in paths.enumerate() {
let path = path.as_ref().as_bytes();
if i > 0 {
joined.push(PATH_SEPARATOR)
}
if path.contains(&PATH_SEPARATOR) {
return Err(JoinPathsError);
}
joined.extend_from_slice(path);
}
Ok(OsStringExt::from_vec(joined))
}
impl fmt::Display for JoinPathsError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "path segment contains separator `{}`", char::from(PATH_SEPARATOR))
}
}
impl StdError for JoinPathsError {
#[allow(deprecated)]
fn description(&self) -> &str {
"failed to join paths"
}
}
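// Illustrative behaviour sketch (added here, not part of the original source): with
// PATH_SEPARATOR being b':' on non-Redox targets, split_paths(OsStr::new("/bin:/usr/bin"))
// yields PathBuf::from("/bin") and PathBuf::from("/usr/bin"), and join_paths over those
// two elements rebuilds "/bin:/usr/bin"; join_paths returns JoinPathsError as soon as any
// element itself contains the separator byte.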
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
pub fn current_exe() -> io::Result<PathBuf> {
unsafe {
let mut mib = [
libc::CTL_KERN as c_int,
libc::KERN_PROC as c_int,
libc::KERN_PROC_PATHNAME as c_int,
-1 as c_int,
];
let mut sz = 0;
cvt(libc::sysctl(
mib.as_mut_ptr(),
mib.len() as libc::c_uint,
ptr::null_mut(),
&mut sz,
ptr::null_mut(),
0,
))?;
if sz == 0 {
return Err(io::Error::last_os_error());
}
let mut v: Vec<u8> = Vec::with_capacity(sz);
cvt(libc::sysctl(
mib.as_mut_ptr(),
mib.len() as libc::c_uint,
v.as_mut_ptr() as *mut libc::c_void,
&mut sz,
ptr::null_mut(),
0,
))?;
if sz == 0 {
return Err(io::Error::last_os_error());
}
v.set_len(sz - 1); // chop off trailing NUL
Ok(PathBuf::from(OsString::from_vec(v)))
}
}
#[cfg(target_os = "netbsd")]
pub fn current_exe() -> io::Result<PathBuf> {
fn sysctl() -> io::Result<PathBuf> {
unsafe {
let mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, -1, libc::KERN_PROC_PATHNAME];
let mut path_len: usize = 0;
cvt(libc::sysctl(
mib.as_ptr(),
mib.len() as libc::c_uint,
ptr::null_mut(),
&mut path_len,
ptr::null(),
0,
))?;
if path_len <= 1 {
return Err(io::Error::new_const(
io::ErrorKind::Other,
&"KERN_PROC_PATHNAME sysctl returned zero-length string",
));
}
let mut path: Vec<u8> = Vec::with_capacity(path_len);
cvt(libc::sysctl(
mib.as_ptr(),
mib.len() as libc::c_uint,
path.as_ptr() as *mut libc::c_void,
&mut path_len,
ptr::null(),
0,
))?;
path.set_len(path_len - 1); // chop off NUL
Ok(PathBuf::from(OsString::from_vec(path)))
}
}
fn procfs() -> io::Result<PathBuf> {
let curproc_exe = path::Path::new("/proc/curproc/exe");
if curproc_exe.is_file() {
return crate::fs::read_link(curproc_exe);
}
Err(io::Error::new_const(
io::ErrorKind::Other,
&"/proc/curproc/exe doesn't point to regular file.",
))
}
sysctl().or_else(|_| procfs())
}
#[cfg(target_os = "openbsd")]
pub fn current_exe() -> io::Result<PathBuf> {
unsafe {
let mut mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, libc::getpid(), libc::KERN_PROC_ARGV];
let mib = mib.as_mut_ptr();
let mut argv_len = 0;
cvt(libc::sysctl(mib, 4, ptr::null_mut(), &mut argv_len, ptr::null_mut(), 0))?;
let mut argv = Vec::<*const libc::c_char>::with_capacity(argv_len as usize);
cvt(libc::sysctl(mib, 4, argv.as_mut_ptr() as *mut _, &mut argv_len, ptr::null_mut(), 0))?;
argv.set_len(argv_len as usize);
if argv[0].is_null() {
return Err(io::Error::new_const(io::ErrorKind::Other, &"no current exe available"));
}
let argv0 = CStr::from_ptr(argv[0]).to_bytes();
if argv0[0] == b'.' || argv0.iter().any(|b| *b == b'/') {
crate::fs::canonicalize(OsStr::from_bytes(argv0))
} else {
Ok(PathBuf::from(OsStr::from_bytes(argv0)))
}
}
}
#[cfg(any(target_os = "linux", target_os = "android", target_os = "emscripten"))]
pub fn current_exe() -> io::Result<PathBuf> {
match crate::fs::read_link("/proc/self/exe") {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => Err(io::Error::new_const(
io::ErrorKind::Other,
&"no /proc/self/exe available. Is /proc mounted?",
)),
other => other,
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub fn current_exe() -> io::Result<PathBuf> {
extern "C" {
fn _NSGetExecutablePath(buf: *mut libc::c_char, bufsize: *mut u32) -> libc::c_int;
}
unsafe {
let mut sz: u32 = 0;
_NSGetExecutablePath(ptr::null_mut(), &mut sz);
if sz == 0 {
return Err(io::Error::last_os_error());
}
let mut v: Vec<u8> = Vec::with_capacity(sz as usize);
let err = _NSGetExecutablePath(v.as_mut_ptr() as *mut i8, &mut sz);
if err != 0 {
return Err(io::Error::last_os_error());
}
v.set_len(sz as usize - 1); // chop off trailing NUL
Ok(PathBuf::from(OsString::from_vec(v)))
}
}
#[cfg(any(target_os = "solaris", target_os = "illumos"))]
pub fn current_exe() -> io::Result<PathBuf> {
extern "C" {
fn getexecname() -> *const c_char;
}
unsafe {
let path = getexecname();
if path.is_null() {
Err(io::Error::last_os_error())
} else {
let filename = CStr::from_ptr(path).to_bytes();
let path = PathBuf::from(<OsStr as OsStrExt>::from_bytes(filename));
            // Prepend the current working directory to the path if
// it doesn't contain an absolute pathname.
if filename[0] == b'/' { Ok(path) } else { getcwd().map(|cwd| cwd.join(path)) }
}
}
}
#[cfg(target_os = "haiku")]
pub fn current_exe() -> io::Result<PathBuf> {
// Use Haiku's image info functions
#[repr(C)]
struct image_info {
id: i32,
type_: i32,
sequence: i32,
init_order: i32,
init_routine: *mut libc::c_void, // function pointer
term_routine: *mut libc::c_void, // function pointer
device: libc::dev_t,
node: libc::ino_t,
name: [libc::c_char; 1024], // MAXPATHLEN
text: *mut libc::c_void,
data: *mut libc::c_void,
text_size: i32,
data_size: i32,
api_version: i32,
abi: i32,
}
unsafe {
extern "C" {
fn _get_next_image_info(
team_id: i32,
cookie: *mut i32,
info: *mut image_info,
size: i32,
) -> i32;
}
let mut info: image_info = mem::zeroed();
let mut cookie: i32 = 0;
// the executable can be found at team id 0
let result =
_get_next_image_info(0, &mut cookie, &mut info, mem::size_of::<image_info>() as i32);
if result != 0 {
use crate::io::ErrorKind;
Err(io::Error::new_const(ErrorKind::Other, &"Error getting executable path"))
} else {
let name = CStr::from_ptr(info.name.as_ptr()).to_bytes();
Ok(PathBuf::from(OsStr::from_bytes(name)))
}
}
}
#[cfg(target_os = "redox")]
pub fn current_exe() -> io::Result<PathBuf> {
crate::fs::read_to_string("sys:exe").map(PathBuf::from)
}
#[cfg(any(target_os = "fuchsia", target_os = "l4re"))]
pub fn current_exe() -> io::Result<PathBuf> {
use crate::io::ErrorKind;
Err(io::Error::new_const(ErrorKind::Unsupported, &"Not yet implemented!"))
}
#[cfg(target_os = "vxworks")]
pub fn current_exe() -> io::Result<PathBuf> {
#[cfg(test)]
use realstd::env;
#[cfg(not(test))]
use crate::env;
let exe_path = env::args().next().unwrap();
let path = path::Path::new(&exe_path);
path.canonicalize()
}
pub struct Env {
iter: vec::IntoIter<(OsString, OsString)>,
}
impl !Send for Env {}
impl !Sync for Env {}
impl Iterator for Env {
type Item = (OsString, OsString);
fn next(&mut self) -> Option<(OsString, OsString)> {
self.iter.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
#[cfg(target_os = "macos")]
pub unsafe fn environ() -> *mut *const *const c_char {
extern "C" {
fn _NSGetEnviron() -> *mut *const *const c_char;
}
_NSGetEnviron()
}
#[cfg(not(target_os = "macos"))]
pub unsafe fn environ() -> *mut *const *const c_char {
extern "C" {
static mut environ: *const *const c_char;
}
ptr::addr_of_mut!(environ)
}
static ENV_LOCK: StaticRWLock = StaticRWLock::new();
pub fn env_read_lock() -> RWLockReadGuard {
ENV_LOCK.read_with_guard()
}
/// Returns a vector of (variable, value) byte-vector pairs for all the
/// environment variables of the current process.
pub fn env() -> Env {
unsafe {
let _guard = env_read_lock();
let mut environ = *environ();
let mut result = Vec::new();
if !environ.is_null() {
while !(*environ).is_null() {
if let Some(key_value) = parse(CStr::from_ptr(*environ).to_bytes()) {
result.push(key_value);
}
environ = environ.add(1);
}
}
return Env { iter: result.into_iter() };
}
fn parse(input: &[u8]) -> Option<(OsString, OsString)> {
// Strategy (copied from glibc): Variable name and value are separated
// by an ASCII equals sign '='. Since a variable name must not be
// empty, allow variable names starting with an equals sign. Skip all
// malformed lines.
if input.is_empty() {
return None;
}
let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1);
pos.map(|p| {
(
OsStringExt::from_vec(input[..p].to_vec()),
OsStringExt::from_vec(input[p + 1..].to_vec()),
)
})
}
}
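// Illustrative examples for the nested `parse` above (added note, not from the original
// source): parse(b"HOME=/root") yields ("HOME", "/root"); parse(b"=FOO=bar") yields
// ("=FOO", "bar"), because the search for '=' starts at index 1 and a leading '=' is
// kept as part of the name; parse(b"MALFORMED") yields None.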
pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> {
// environment variables with a nul byte can't be set, so their value is
// always None as well
let k = CString::new(k.as_bytes())?;
unsafe {
let _guard = env_read_lock();
let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
let ret = if s.is_null() {
None
} else {
Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
};
Ok(ret)
}
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
let k = CString::new(k.as_bytes())?;
let v = CString::new(v.as_bytes())?;
unsafe {
let _guard = ENV_LOCK.write_with_guard();
cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
}
}
pub fn unsetenv(n: &OsStr) -> io::Result<()> {
let nbuf = CString::new(n.as_bytes())?;
unsafe {
let _guard = ENV_LOCK.write_with_guard();
cvt(libc::unsetenv(nbuf.as_ptr())).map(drop)
}
}
pub fn page_size() -> usize {
unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
}
pub fn temp_dir() -> PathBuf {
crate::env::var_os("TMPDIR").map(PathBuf::from).unwrap_or_else(|| {
if cfg!(target_os = "android") {
PathBuf::from("/data/local/tmp")
} else {
PathBuf::from("/tmp")
}
})
}
pub fn home_dir() -> Option<PathBuf> {
return crate::env::var_os("HOME").or_else(|| unsafe { fallback() }).map(PathBuf::from);
#[cfg(any(
target_os = "android",
target_os = "ios",
target_os = "emscripten",
target_os = "redox",
target_os = "vxworks"
))]
unsafe fn fallback() -> Option<OsString> {
None
}
#[cfg(not(any(
target_os = "android",
target_os = "ios",
target_os = "emscripten",
target_os = "redox",
target_os = "vxworks"
)))]
unsafe fn fallback() -> Option<OsString> {
let amt = match libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) {
n if n < 0 => 512 as usize,
n => n as usize,
};
let mut buf = Vec::with_capacity(amt);
let mut passwd: libc::passwd = mem::zeroed();
let mut result = ptr::null_mut();
match libc::getpwuid_r(
libc::getuid(),
&mut passwd,
buf.as_mut_ptr(),
buf.capacity(),
&mut result,
) {
0 if !result.is_null() => {
let ptr = passwd.pw_dir as *const _;
let bytes = CStr::from_ptr(ptr).to_bytes().to_vec();
Some(OsStringExt::from_vec(bytes))
}
_ => None,
}
}
}
pub fn exit(code: i32) -> ! {
unsafe { libc::exit(code as c_int) }
}
pub fn getpid() -> u32 {
unsafe { libc::getpid() as u32 }
}
pub fn getppid() -> u32 {
unsafe { libc::getppid() as u32 }
}
#[cfg(all(target_env = "gnu", not(target_os = "vxworks")))]
pub fn glibc_version() -> Option<(usize, usize)> {
if let Some(Ok(version_str)) = glibc_version_cstr().map(CStr::to_str) {
parse_glibc_version(version_str)
} else {
None
}
}
#[cfg(all(target_env = "gnu", not(target_os = "vxworks")))]
fn glibc_version_cstr() -> Option<&'static CStr> {
weak! {
fn gnu_get_libc_version() -> *const libc::c_char
}
if let Some(f) = gnu_get_libc_version.get() {
unsafe { Some(CStr::from_ptr(f())) }
} else {
None
}
}
// Returns Some((major, minor)) if the string is a valid "x.y" version,
// ignoring any extra dot-separated parts. Otherwise return None.
#[cfg(all(target_env = "gnu", not(target_os = "vxworks")))]
fn parse_glibc_version(version: &str) -> Option<(usize, usize)> {
let mut parsed_ints = version.split('.').map(str::parse::<usize>).fuse();
match (parsed_ints.next(), parsed_ints.next()) {
(Some(Ok(major)), Some(Ok(minor))) => Some((major, minor)),
_ => None,
}
}
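// Illustrative checks (added as an assumption, not present in the original file):
// parse_glibc_version("2.31") gives Some((2, 31)); parse_glibc_version("2.31.9000") also
// gives Some((2, 31)) because extra dot-separated parts are ignored; and
// parse_glibc_version("experimental") gives None.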
| 29.461194 | 130 | 0.544101 |
f851725058d4b689a4d9f6a037dee81889036d76 | 88,842 | //! The Rust abstract syntax tree module.
//!
//! This module contains common structures forming the language AST.
//! Two main entities in the module are [`Item`] (which represents an AST element with
//! additional metadata), and [`ItemKind`] (which represents a concrete type and contains
//! information specific to the type of the item).
//!
//! Other module items worth mentioning:
//! - [`Ty`] and [`TyKind`]: A parsed Rust type.
//! - [`Expr`] and [`ExprKind`]: A parsed Rust expression.
//! - [`Pat`] and [`PatKind`]: A parsed Rust pattern. Patterns are often dual to expressions.
//! - [`Stmt`] and [`StmtKind`]: An executable action that does not return a value.
//! - [`FnDecl`], [`FnHeader`] and [`Param`]: Metadata associated with a function declaration.
//! - [`Generics`], [`GenericParam`], [`WhereClause`]: Metadata associated with generic parameters.
//! - [`EnumDef`] and [`Variant`]: Enum declaration.
//! - [`Lit`] and [`LitKind`]: Literal expressions.
//! - [`MacroDef`], [`MacStmtStyle`], [`MacCall`], [`MacDelimiter`]: Macro definition and invocation.
//! - [`Attribute`]: Metadata associated with item.
//! - [`UnOp`], [`BinOp`], and [`BinOpKind`]: Unary and binary operators.
pub use crate::util::parser::ExprPrecedence;
pub use GenericArgs::*;
pub use UnsafeSource::*;
use crate::ptr::P;
use crate::token::{self, CommentKind, DelimToken, Token};
use crate::tokenstream::{DelimSpan, LazyTokenStream, TokenStream, TokenTree};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_macros::HashStable_Generic;
use rustc_serialize::{self, Decoder, Encoder};
use rustc_span::source_map::{respan, Spanned};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
use std::cmp::Ordering;
use std::convert::TryFrom;
use std::fmt;
#[cfg(test)]
mod tests;
/// A "Label" is an identifier of some point in sources,
/// e.g. in the following code:
///
/// ```rust
/// 'outer: loop {
/// break 'outer;
/// }
/// ```
///
/// `'outer` is a label.
#[derive(Clone, Encodable, Decodable, Copy, HashStable_Generic)]
pub struct Label {
pub ident: Ident,
}
impl fmt::Debug for Label {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "label({:?})", self.ident)
}
}
/// A "Lifetime" is an annotation of the scope in which variable
/// can be used, e.g. `'a` in `&'a i32`.
#[derive(Clone, Encodable, Decodable, Copy)]
pub struct Lifetime {
pub id: NodeId,
pub ident: Ident,
}
impl fmt::Debug for Lifetime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "lifetime({}: {})", self.id, self)
}
}
impl fmt::Display for Lifetime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.ident.name)
}
}
/// A "Path" is essentially Rust's notion of a name.
///
/// It's represented as a sequence of identifiers,
/// along with a bunch of supporting information.
///
/// E.g., `std::cmp::PartialEq`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Path {
pub span: Span,
/// The segments in the path: the things separated by `::`.
/// Global paths begin with `kw::PathRoot`.
pub segments: Vec<PathSegment>,
pub tokens: Option<LazyTokenStream>,
}
impl PartialEq<Symbol> for Path {
#[inline]
fn eq(&self, symbol: &Symbol) -> bool {
self.segments.len() == 1 && { self.segments[0].ident.name == *symbol }
}
}
impl<CTX> HashStable<CTX> for Path {
fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
self.segments.len().hash_stable(hcx, hasher);
for segment in &self.segments {
segment.ident.name.hash_stable(hcx, hasher);
}
}
}
impl Path {
// Convert a span and an identifier to the corresponding
// one-segment path.
pub fn from_ident(ident: Ident) -> Path {
Path { segments: vec![PathSegment::from_ident(ident)], span: ident.span, tokens: None }
}
pub fn is_global(&self) -> bool {
!self.segments.is_empty() && self.segments[0].ident.name == kw::PathRoot
}
}
/// A segment of a path: an identifier, an optional lifetime, and a set of types.
///
/// E.g., `std`, `String` or `Box<T>`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct PathSegment {
/// The identifier portion of this path segment.
pub ident: Ident,
pub id: NodeId,
/// Type/lifetime parameters attached to this path. They come in
/// two flavors: `Path<A,B,C>` and `Path(A,B) -> C`.
/// `None` means that no parameter list is supplied (`Path`),
/// `Some` means that parameter list is supplied (`Path<X, Y>`)
/// but it can be empty (`Path<>`).
/// `P` is used as a size optimization for the common case with no parameters.
pub args: Option<P<GenericArgs>>,
}
impl PathSegment {
pub fn from_ident(ident: Ident) -> Self {
PathSegment { ident, id: DUMMY_NODE_ID, args: None }
}
pub fn path_root(span: Span) -> Self {
PathSegment::from_ident(Ident::new(kw::PathRoot, span))
}
pub fn span(&self) -> Span {
match &self.args {
Some(args) => self.ident.span.to(args.span()),
None => self.ident.span,
}
}
}
/// The arguments of a path segment.
///
/// E.g., `<A, B>` as in `Foo<A, B>` or `(A, B)` as in `Foo(A, B)`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum GenericArgs {
/// The `<'a, A, B, C>` in `foo::bar::baz::<'a, A, B, C>`.
AngleBracketed(AngleBracketedArgs),
/// The `(A, B)` and `C` in `Foo(A, B) -> C`.
Parenthesized(ParenthesizedArgs),
}
impl GenericArgs {
pub fn is_angle_bracketed(&self) -> bool {
matches!(self, AngleBracketed(..))
}
pub fn span(&self) -> Span {
match *self {
AngleBracketed(ref data) => data.span,
Parenthesized(ref data) => data.span,
}
}
}
/// Concrete argument in the sequence of generic args.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum GenericArg {
/// `'a` in `Foo<'a>`
Lifetime(Lifetime),
/// `Bar` in `Foo<Bar>`
Type(P<Ty>),
/// `1` in `Foo<1>`
Const(AnonConst),
}
impl GenericArg {
pub fn span(&self) -> Span {
match self {
GenericArg::Lifetime(lt) => lt.ident.span,
GenericArg::Type(ty) => ty.span,
GenericArg::Const(ct) => ct.value.span,
}
}
}
/// A path like `Foo<'a, T>`.
#[derive(Clone, Encodable, Decodable, Debug, Default)]
pub struct AngleBracketedArgs {
/// The overall span.
pub span: Span,
/// The comma separated parts in the `<...>`.
pub args: Vec<AngleBracketedArg>,
}
/// Either an argument for a parameter e.g., `'a`, `Vec<u8>`, `0`,
/// or a constraint on an associated item, e.g., `Item = String` or `Item: Bound`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum AngleBracketedArg {
/// Argument for a generic parameter.
Arg(GenericArg),
/// Constraint for an associated item.
Constraint(AssocTyConstraint),
}
impl AngleBracketedArg {
pub fn span(&self) -> Span {
match self {
AngleBracketedArg::Arg(arg) => arg.span(),
AngleBracketedArg::Constraint(constraint) => constraint.span,
}
}
}
impl Into<Option<P<GenericArgs>>> for AngleBracketedArgs {
fn into(self) -> Option<P<GenericArgs>> {
Some(P(GenericArgs::AngleBracketed(self)))
}
}
impl Into<Option<P<GenericArgs>>> for ParenthesizedArgs {
fn into(self) -> Option<P<GenericArgs>> {
Some(P(GenericArgs::Parenthesized(self)))
}
}
/// A path like `Foo(A, B) -> C`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct ParenthesizedArgs {
/// ```text
/// Foo(A, B) -> C
/// ^^^^^^^^^^^^^^
/// ```
pub span: Span,
/// `(A, B)`
pub inputs: Vec<P<Ty>>,
/// ```text
/// Foo(A, B) -> C
/// ^^^^^^
/// ```
pub inputs_span: Span,
/// `C`
pub output: FnRetTy,
}
impl ParenthesizedArgs {
pub fn as_angle_bracketed_args(&self) -> AngleBracketedArgs {
let args = self
.inputs
.iter()
.cloned()
.map(|input| AngleBracketedArg::Arg(GenericArg::Type(input)))
.collect();
AngleBracketedArgs { span: self.inputs_span, args }
}
}
pub use crate::node_id::{NodeId, CRATE_NODE_ID, DUMMY_NODE_ID};
/// A modifier on a bound, e.g., `?Sized` or `?const Trait`.
///
/// Negative bounds should also be handled here.
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug)]
pub enum TraitBoundModifier {
/// No modifiers
None,
/// `?Trait`
Maybe,
/// `?const Trait`
MaybeConst,
/// `?const ?Trait`
//
// This parses but will be rejected during AST validation.
MaybeConstMaybe,
}
/// The AST represents all type param bounds as types.
/// `typeck::collect::compute_bounds` matches these against
/// the "special" built-in traits (see `middle::lang_items`) and
/// detects `Copy`, `Send` and `Sync`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum GenericBound {
Trait(PolyTraitRef, TraitBoundModifier),
Outlives(Lifetime),
}
impl GenericBound {
pub fn span(&self) -> Span {
match self {
GenericBound::Trait(ref t, ..) => t.span,
GenericBound::Outlives(ref l) => l.ident.span,
}
}
}
pub type GenericBounds = Vec<GenericBound>;
/// Specifies the enforced ordering for generic parameters. In the future,
/// if we wanted to relax this order, we could override `PartialEq` and
/// `PartialOrd`, to allow the kinds to be unordered.
#[derive(Hash, Clone, Copy)]
pub enum ParamKindOrd {
Lifetime,
Type,
// `unordered` is only `true` if `sess.has_features().const_generics`
// is active. Specifically, if it's only `min_const_generics`, it will still require
// ordering consts after types.
Const { unordered: bool },
}
impl Ord for ParamKindOrd {
fn cmp(&self, other: &Self) -> Ordering {
use ParamKindOrd::*;
let to_int = |v| match v {
Lifetime => 0,
Type | Const { unordered: true } => 1,
// technically both consts should be ordered equally,
// but only one is ever encountered at a time, so this is
// fine.
Const { unordered: false } => 2,
};
to_int(*self).cmp(&to_int(*other))
}
}
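// Illustrative consequence of the mapping above (added note, not from the original
// source): Lifetime sorts before both Type and Const { .. }, and Type compares equal to
// Const { unordered: true }, so a list like `<'a, T, const N: usize>` keeps lifetimes
// first and, without full const_generics, orders consts after types.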
impl PartialOrd for ParamKindOrd {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for ParamKindOrd {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for ParamKindOrd {}
impl fmt::Display for ParamKindOrd {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ParamKindOrd::Lifetime => "lifetime".fmt(f),
ParamKindOrd::Type => "type".fmt(f),
ParamKindOrd::Const { .. } => "const".fmt(f),
}
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum GenericParamKind {
/// A lifetime definition (e.g., `'a: 'b + 'c + 'd`).
Lifetime,
Type {
default: Option<P<Ty>>,
},
Const {
ty: P<Ty>,
/// Span of the `const` keyword.
kw_span: Span,
/// Optional default value for the const generic param
default: Option<AnonConst>,
},
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct GenericParam {
pub id: NodeId,
pub ident: Ident,
pub attrs: AttrVec,
pub bounds: GenericBounds,
pub is_placeholder: bool,
pub kind: GenericParamKind,
}
/// Represents lifetime, type and const parameters attached to a declaration of
/// a function, enum, trait, etc.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Generics {
pub params: Vec<GenericParam>,
pub where_clause: WhereClause,
pub span: Span,
}
impl Default for Generics {
/// Creates an instance of `Generics`.
fn default() -> Generics {
Generics {
params: Vec::new(),
where_clause: WhereClause {
has_where_token: false,
predicates: Vec::new(),
span: DUMMY_SP,
},
span: DUMMY_SP,
}
}
}
/// A where-clause in a definition.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct WhereClause {
/// `true` if we ate a `where` token: this can happen
/// if we parsed no predicates (e.g. `struct Foo where {}`).
/// This allows us to accurately pretty-print
/// in `nt_to_tokenstream`
pub has_where_token: bool,
pub predicates: Vec<WherePredicate>,
pub span: Span,
}
/// A single predicate in a where-clause.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum WherePredicate {
/// A type binding (e.g., `for<'c> Foo: Send + Clone + 'c`).
BoundPredicate(WhereBoundPredicate),
/// A lifetime predicate (e.g., `'a: 'b + 'c`).
RegionPredicate(WhereRegionPredicate),
/// An equality predicate (unsupported).
EqPredicate(WhereEqPredicate),
}
impl WherePredicate {
pub fn span(&self) -> Span {
match self {
WherePredicate::BoundPredicate(p) => p.span,
WherePredicate::RegionPredicate(p) => p.span,
WherePredicate::EqPredicate(p) => p.span,
}
}
}
/// A type bound.
///
/// E.g., `for<'c> Foo: Send + Clone + 'c`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct WhereBoundPredicate {
pub span: Span,
/// Any generics from a `for` binding.
pub bound_generic_params: Vec<GenericParam>,
/// The type being bounded.
pub bounded_ty: P<Ty>,
/// Trait and lifetime bounds (`Clone + Send + 'static`).
pub bounds: GenericBounds,
}
/// A lifetime predicate.
///
/// E.g., `'a: 'b + 'c`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct WhereRegionPredicate {
pub span: Span,
pub lifetime: Lifetime,
pub bounds: GenericBounds,
}
/// An equality predicate (unsupported).
///
/// E.g., `T = int`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct WhereEqPredicate {
pub id: NodeId,
pub span: Span,
pub lhs_ty: P<Ty>,
pub rhs_ty: P<Ty>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Crate {
pub attrs: Vec<Attribute>,
pub items: Vec<P<Item>>,
pub span: Span,
/// The order of items in the HIR is unrelated to the order of
/// items in the AST. However, we generate proc macro harnesses
/// based on the AST order, and later refer to these harnesses
/// from the HIR. This field keeps track of the order in which
/// we generated proc macros harnesses, so that we can map
/// HIR proc macros items back to their harness items.
pub proc_macros: Vec<NodeId>,
}
/// Possible values inside of compile-time attribute lists.
///
/// E.g., the '..' in `#[name(..)]`.
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum NestedMetaItem {
/// A full MetaItem, for recursive meta items.
MetaItem(MetaItem),
/// A literal.
///
/// E.g., `"foo"`, `64`, `true`.
Literal(Lit),
}
/// A spanned compile-time attribute item.
///
/// E.g., `#[test]`, `#[derive(..)]`, `#[rustfmt::skip]` or `#[feature = "foo"]`.
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct MetaItem {
pub path: Path,
pub kind: MetaItemKind,
pub span: Span,
}
/// A compile-time attribute item.
///
/// E.g., `#[test]`, `#[derive(..)]` or `#[feature = "foo"]`.
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum MetaItemKind {
/// Word meta item.
///
/// E.g., `test` as in `#[test]`.
Word,
/// List meta item.
///
/// E.g., `derive(..)` as in `#[derive(..)]`.
List(Vec<NestedMetaItem>),
/// Name value meta item.
///
/// E.g., `feature = "foo"` as in `#[feature = "foo"]`.
NameValue(Lit),
}
/// A block (`{ .. }`).
///
/// E.g., `{ .. }` as in `fn foo() { .. }`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Block {
/// The statements in the block.
pub stmts: Vec<Stmt>,
pub id: NodeId,
/// Distinguishes between `unsafe { ... }` and `{ ... }`.
pub rules: BlockCheckMode,
pub span: Span,
pub tokens: Option<LazyTokenStream>,
}
/// A match pattern.
///
/// Patterns appear in match statements and some other contexts, such as `let` and `if let`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Pat {
pub id: NodeId,
pub kind: PatKind,
pub span: Span,
pub tokens: Option<LazyTokenStream>,
}
impl Pat {
/// Attempt reparsing the pattern as a type.
/// This is intended for use by diagnostics.
pub fn to_ty(&self) -> Option<P<Ty>> {
let kind = match &self.kind {
// In a type expression `_` is an inference variable.
PatKind::Wild => TyKind::Infer,
// An IDENT pattern with no binding mode would be valid as path to a type. E.g. `u32`.
PatKind::Ident(BindingMode::ByValue(Mutability::Not), ident, None) => {
TyKind::Path(None, Path::from_ident(*ident))
}
PatKind::Path(qself, path) => TyKind::Path(qself.clone(), path.clone()),
PatKind::MacCall(mac) => TyKind::MacCall(mac.clone()),
// `&mut? P` can be reinterpreted as `&mut? T` where `T` is `P` reparsed as a type.
PatKind::Ref(pat, mutbl) => {
pat.to_ty().map(|ty| TyKind::Rptr(None, MutTy { ty, mutbl: *mutbl }))?
}
// A slice/array pattern `[P]` can be reparsed as `[T]`, an unsized array,
// when `P` can be reparsed as a type `T`.
PatKind::Slice(pats) if pats.len() == 1 => pats[0].to_ty().map(TyKind::Slice)?,
// A tuple pattern `(P0, .., Pn)` can be reparsed as `(T0, .., Tn)`
// assuming `T0` to `Tn` are all syntactically valid as types.
PatKind::Tuple(pats) => {
let mut tys = Vec::with_capacity(pats.len());
// FIXME(#48994) - could just be collected into an Option<Vec>
for pat in pats {
tys.push(pat.to_ty()?);
}
TyKind::Tup(tys)
}
_ => return None,
};
Some(P(Ty { kind, id: self.id, span: self.span, tokens: None }))
}
/// Walk top-down and call `it` in each place where a pattern occurs
/// starting with the root pattern `walk` is called on. If `it` returns
/// false then we will descend no further but siblings will be processed.
pub fn walk(&self, it: &mut impl FnMut(&Pat) -> bool) {
if !it(self) {
return;
}
match &self.kind {
// Walk into the pattern associated with `Ident` (if any).
PatKind::Ident(_, _, Some(p)) => p.walk(it),
// Walk into each field of struct.
PatKind::Struct(_, _, fields, _) => fields.iter().for_each(|field| field.pat.walk(it)),
// Sequence of patterns.
PatKind::TupleStruct(_, _, s)
| PatKind::Tuple(s)
| PatKind::Slice(s)
| PatKind::Or(s) => s.iter().for_each(|p| p.walk(it)),
// Trivial wrappers over inner patterns.
PatKind::Box(s) | PatKind::Ref(s, _) | PatKind::Paren(s) => s.walk(it),
// These patterns do not contain subpatterns, skip.
PatKind::Wild
| PatKind::Rest
| PatKind::Lit(_)
| PatKind::Range(..)
| PatKind::Ident(..)
| PatKind::Path(..)
| PatKind::MacCall(_) => {}
}
}
/// Is this a `..` pattern?
pub fn is_rest(&self) -> bool {
matches!(self.kind, PatKind::Rest)
}
}
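// Illustrative usage sketch for `Pat::walk` (added example, not from the original
// source); `count_bindings` is a hypothetical helper:
//
//     fn count_bindings(pat: &Pat) -> usize {
//         let mut n = 0;
//         pat.walk(&mut |p| {
//             if let PatKind::Ident(..) = p.kind {
//                 n += 1;
//             }
//             true // keep descending into subpatterns
//         });
//         n
//     }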
/// A single field in a struct pattern.
///
/// Patterns like the fields of `Foo { x, ref y, ref mut z }`
/// are treated the same as `x: x, y: ref y, z: ref mut z`,
/// except when `is_shorthand` is true.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct PatField {
/// The identifier for the field.
pub ident: Ident,
/// The pattern the field is destructured to.
pub pat: P<Pat>,
pub is_shorthand: bool,
pub attrs: AttrVec,
pub id: NodeId,
pub span: Span,
pub is_placeholder: bool,
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
pub enum BindingMode {
ByRef(Mutability),
ByValue(Mutability),
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum RangeEnd {
/// `..=` or `...`
Included(RangeSyntax),
/// `..`
Excluded,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum RangeSyntax {
/// `...`
DotDotDot,
/// `..=`
DotDotEq,
}
/// All the different flavors of pattern that Rust recognizes.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum PatKind {
/// Represents a wildcard pattern (`_`).
Wild,
/// A `PatKind::Ident` may either be a new bound variable (`ref mut binding @ OPT_SUBPATTERN`),
/// or a unit struct/variant pattern, or a const pattern (in the last two cases the third
/// field must be `None`). Disambiguation cannot be done with parser alone, so it happens
/// during name resolution.
Ident(BindingMode, Ident, Option<P<Pat>>),
/// A struct or struct variant pattern (e.g., `Variant {x, y, ..}`).
/// The `bool` is `true` in the presence of a `..`.
Struct(Option<QSelf>, Path, Vec<PatField>, /* recovered */ bool),
/// A tuple struct/variant pattern (`Variant(x, y, .., z)`).
TupleStruct(Option<QSelf>, Path, Vec<P<Pat>>),
/// An or-pattern `A | B | C`.
/// Invariant: `pats.len() >= 2`.
Or(Vec<P<Pat>>),
/// A possibly qualified path pattern.
/// Unqualified path patterns `A::B::C` can legally refer to variants, structs, constants
/// or associated constants. Qualified path patterns `<A>::B::C`/`<A as Trait>::B::C` can
/// only legally refer to associated constants.
Path(Option<QSelf>, Path),
/// A tuple pattern (`(a, b)`).
Tuple(Vec<P<Pat>>),
/// A `box` pattern.
Box(P<Pat>),
/// A reference pattern (e.g., `&mut (a, b)`).
Ref(P<Pat>, Mutability),
/// A literal.
Lit(P<Expr>),
/// A range pattern (e.g., `1...2`, `1..2`, `1..`, `..2`, `1..=2`, `..=2`).
Range(Option<P<Expr>>, Option<P<Expr>>, Spanned<RangeEnd>),
/// A slice pattern `[a, b, c]`.
Slice(Vec<P<Pat>>),
/// A rest pattern `..`.
///
/// Syntactically it is valid anywhere.
///
/// Semantically however, it only has meaning immediately inside:
/// - a slice pattern: `[a, .., b]`,
/// - a binding pattern immediately inside a slice pattern: `[a, r @ ..]`,
/// - a tuple pattern: `(a, .., b)`,
/// - a tuple struct/variant pattern: `$path(a, .., b)`.
///
/// In all of these cases, an additional restriction applies,
/// only one rest pattern may occur in the pattern sequences.
Rest,
/// Parentheses in patterns used for grouping (i.e., `(PAT)`).
Paren(P<Pat>),
/// A macro pattern; pre-expansion.
MacCall(MacCall),
}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Copy)]
#[derive(HashStable_Generic, Encodable, Decodable)]
pub enum Mutability {
Mut,
Not,
}
impl Mutability {
pub fn invert(self) -> Self {
match self {
Mutability::Mut => Mutability::Not,
Mutability::Not => Mutability::Mut,
}
}
pub fn prefix_str(&self) -> &'static str {
match self {
Mutability::Mut => "mut ",
Mutability::Not => "",
}
}
}
/// The kind of borrow in an `AddrOf` expression,
/// e.g., `&place` or `&raw const place`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[derive(Encodable, Decodable, HashStable_Generic)]
pub enum BorrowKind {
/// A normal borrow, `&$expr` or `&mut $expr`.
/// The resulting type is either `&'a T` or `&'a mut T`
/// where `T = typeof($expr)` and `'a` is some lifetime.
Ref,
/// A raw borrow, `&raw const $expr` or `&raw mut $expr`.
/// The resulting type is either `*const T` or `*mut T`
/// where `T = typeof($expr)`.
Raw,
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
pub enum BinOpKind {
/// The `+` operator (addition)
Add,
/// The `-` operator (subtraction)
Sub,
/// The `*` operator (multiplication)
Mul,
/// The `/` operator (division)
Div,
/// The `%` operator (modulus)
Rem,
/// The `&&` operator (logical and)
And,
/// The `||` operator (logical or)
Or,
/// The `^` operator (bitwise xor)
BitXor,
/// The `&` operator (bitwise and)
BitAnd,
/// The `|` operator (bitwise or)
BitOr,
/// The `<<` operator (shift left)
Shl,
/// The `>>` operator (shift right)
Shr,
/// The `==` operator (equality)
Eq,
/// The `<` operator (less than)
Lt,
/// The `<=` operator (less than or equal to)
Le,
/// The `!=` operator (not equal to)
Ne,
/// The `>=` operator (greater than or equal to)
Ge,
/// The `>` operator (greater than)
Gt,
}
impl BinOpKind {
pub fn to_string(&self) -> &'static str {
use BinOpKind::*;
match *self {
Add => "+",
Sub => "-",
Mul => "*",
Div => "/",
Rem => "%",
And => "&&",
Or => "||",
BitXor => "^",
BitAnd => "&",
BitOr => "|",
Shl => "<<",
Shr => ">>",
Eq => "==",
Lt => "<",
Le => "<=",
Ne => "!=",
Ge => ">=",
Gt => ">",
}
}
pub fn lazy(&self) -> bool {
matches!(self, BinOpKind::And | BinOpKind::Or)
}
pub fn is_comparison(&self) -> bool {
use BinOpKind::*;
// Note for developers: please keep this as is;
// we want compilation to fail if another variant is added.
match *self {
Eq | Lt | Le | Ne | Gt | Ge => true,
And | Or | Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Shl | Shr => false,
}
}
}
pub type BinOp = Spanned<BinOpKind>;
/// Unary operator.
///
/// Note that `&data` is not an operator, it's an `AddrOf` expression.
#[derive(Clone, Encodable, Decodable, Debug, Copy)]
pub enum UnOp {
/// The `*` operator for dereferencing
Deref,
/// The `!` operator for logical inversion
Not,
/// The `-` operator for negation
Neg,
}
impl UnOp {
pub fn to_string(op: UnOp) -> &'static str {
match op {
UnOp::Deref => "*",
UnOp::Not => "!",
UnOp::Neg => "-",
}
}
}
/// A statement
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Stmt {
pub id: NodeId,
pub kind: StmtKind,
pub span: Span,
}
impl Stmt {
pub fn tokens(&self) -> Option<&LazyTokenStream> {
match self.kind {
StmtKind::Local(ref local) => local.tokens.as_ref(),
StmtKind::Item(ref item) => item.tokens.as_ref(),
StmtKind::Expr(ref expr) | StmtKind::Semi(ref expr) => expr.tokens.as_ref(),
StmtKind::Empty => None,
StmtKind::MacCall(ref mac) => mac.tokens.as_ref(),
}
}
pub fn has_trailing_semicolon(&self) -> bool {
match &self.kind {
StmtKind::Semi(_) => true,
StmtKind::MacCall(mac) => matches!(mac.style, MacStmtStyle::Semicolon),
_ => false,
}
}
/// Converts a parsed `Stmt` to a `Stmt` with
/// a trailing semicolon.
///
/// This only modifies the parsed AST struct, not the attached
/// `LazyTokenStream`. The parser is responsible for calling
/// `CreateTokenStream::add_trailing_semi` when there is actually
/// a semicolon in the tokenstream.
pub fn add_trailing_semicolon(mut self) -> Self {
self.kind = match self.kind {
StmtKind::Expr(expr) => StmtKind::Semi(expr),
StmtKind::MacCall(mac) => {
StmtKind::MacCall(mac.map(|MacCallStmt { mac, style: _, attrs, tokens }| {
MacCallStmt { mac, style: MacStmtStyle::Semicolon, attrs, tokens }
}))
}
kind => kind,
};
self
}
pub fn is_item(&self) -> bool {
matches!(self.kind, StmtKind::Item(_))
}
pub fn is_expr(&self) -> bool {
matches!(self.kind, StmtKind::Expr(_))
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum StmtKind {
/// A local (let) binding.
Local(P<Local>),
/// An item definition.
Item(P<Item>),
/// Expr without trailing semi-colon.
Expr(P<Expr>),
/// Expr with a trailing semi-colon.
Semi(P<Expr>),
/// Just a trailing semi-colon.
Empty,
/// Macro.
MacCall(P<MacCallStmt>),
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct MacCallStmt {
pub mac: MacCall,
pub style: MacStmtStyle,
pub attrs: AttrVec,
pub tokens: Option<LazyTokenStream>,
}
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug)]
pub enum MacStmtStyle {
/// The macro statement had a trailing semicolon (e.g., `foo! { ... };`
/// `foo!(...);`, `foo![...];`).
Semicolon,
/// The macro statement had braces (e.g., `foo! { ... }`).
Braces,
/// The macro statement had parentheses or brackets and no semicolon (e.g.,
/// `foo!(...)`). All of these will end up being converted into macro
/// expressions.
NoBraces,
}
/// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Local {
pub id: NodeId,
pub pat: P<Pat>,
pub ty: Option<P<Ty>>,
/// Initializer expression to set the value, if any.
pub init: Option<P<Expr>>,
pub span: Span,
pub attrs: AttrVec,
pub tokens: Option<LazyTokenStream>,
}
/// An arm of a 'match'.
///
/// E.g., `0..=10 => { println!("match!") }` as in
///
/// ```
/// match 123 {
/// 0..=10 => { println!("match!") },
/// _ => { println!("no match!") },
/// }
/// ```
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Arm {
pub attrs: AttrVec,
/// Match arm pattern, e.g. `10` in `match foo { 10 => {}, _ => {} }`
pub pat: P<Pat>,
/// Match arm guard, e.g. `n > 10` in `match foo { n if n > 10 => {}, _ => {} }`
pub guard: Option<P<Expr>>,
/// Match arm body.
pub body: P<Expr>,
pub span: Span,
pub id: NodeId,
pub is_placeholder: bool,
}
/// A single field in a struct expression, e.g. `x: value` and `y` in `Foo { x: value, y }`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct ExprField {
pub attrs: AttrVec,
pub id: NodeId,
pub span: Span,
pub ident: Ident,
pub expr: P<Expr>,
pub is_shorthand: bool,
pub is_placeholder: bool,
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
pub enum BlockCheckMode {
Default,
Unsafe(UnsafeSource),
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
pub enum UnsafeSource {
CompilerGenerated,
UserProvided,
}
/// A constant (expression) that's not an item or associated item,
/// but needs its own `DefId` for type-checking, const-eval, etc.
/// These are usually found nested inside types (e.g., array lengths)
/// or expressions (e.g., repeat counts), and also used to define
/// explicit discriminant values for enum variants.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct AnonConst {
pub id: NodeId,
pub value: P<Expr>,
}
/// An expression.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Expr {
pub id: NodeId,
pub kind: ExprKind,
pub span: Span,
pub attrs: AttrVec,
pub tokens: Option<LazyTokenStream>,
}
// `Expr` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Expr, 104);
impl Expr {
/// Returns `true` if this expression would be valid somewhere that expects a value;
/// for example, an `if` condition.
pub fn returns(&self) -> bool {
if let ExprKind::Block(ref block, _) = self.kind {
match block.stmts.last().map(|last_stmt| &last_stmt.kind) {
// Implicit return
Some(StmtKind::Expr(_)) => true,
// Last statement is an explicit return?
Some(StmtKind::Semi(expr)) => matches!(expr.kind, ExprKind::Ret(_)),
// This is a block that doesn't end in either an implicit or explicit return.
_ => false,
}
} else {
// This is not a block, it is a value.
true
}
}
/// Is this expr either `N`, or `{ N }`.
///
/// If this is not the case, name resolution does not resolve `N` when using
/// `min_const_generics` as more complex expressions are not supported.
pub fn is_potential_trivial_const_param(&self) -> bool {
let this = if let ExprKind::Block(ref block, None) = self.kind {
if block.stmts.len() == 1 {
if let StmtKind::Expr(ref expr) = block.stmts[0].kind { expr } else { self }
} else {
self
}
} else {
self
};
if let ExprKind::Path(None, ref path) = this.kind {
if path.segments.len() == 1 && path.segments[0].args.is_none() {
return true;
}
}
false
}
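    // Illustrative examples (added, not from the original file): `N` and `{ N }` are
    // considered potentially trivial const params by the check above, while `N + 1` and
    // a multi-segment path such as `m::N` are not.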
pub fn to_bound(&self) -> Option<GenericBound> {
match &self.kind {
ExprKind::Path(None, path) => Some(GenericBound::Trait(
PolyTraitRef::new(Vec::new(), path.clone(), self.span),
TraitBoundModifier::None,
)),
_ => None,
}
}
pub fn peel_parens(&self) -> &Expr {
let mut expr = self;
while let ExprKind::Paren(inner) = &expr.kind {
expr = &inner;
}
expr
}
/// Attempts to reparse as `Ty` (for diagnostic purposes).
pub fn to_ty(&self) -> Option<P<Ty>> {
let kind = match &self.kind {
// Trivial conversions.
ExprKind::Path(qself, path) => TyKind::Path(qself.clone(), path.clone()),
ExprKind::MacCall(mac) => TyKind::MacCall(mac.clone()),
ExprKind::Paren(expr) => expr.to_ty().map(TyKind::Paren)?,
ExprKind::AddrOf(BorrowKind::Ref, mutbl, expr) => {
expr.to_ty().map(|ty| TyKind::Rptr(None, MutTy { ty, mutbl: *mutbl }))?
}
ExprKind::Repeat(expr, expr_len) => {
expr.to_ty().map(|ty| TyKind::Array(ty, expr_len.clone()))?
}
ExprKind::Array(exprs) if exprs.len() == 1 => exprs[0].to_ty().map(TyKind::Slice)?,
ExprKind::Tup(exprs) => {
let tys = exprs.iter().map(|expr| expr.to_ty()).collect::<Option<Vec<_>>>()?;
TyKind::Tup(tys)
}
// If binary operator is `Add` and both `lhs` and `rhs` are trait bounds,
// then type of result is trait object.
// Otherwise we don't assume the result type.
ExprKind::Binary(binop, lhs, rhs) if binop.node == BinOpKind::Add => {
if let (Some(lhs), Some(rhs)) = (lhs.to_bound(), rhs.to_bound()) {
TyKind::TraitObject(vec![lhs, rhs], TraitObjectSyntax::None)
} else {
return None;
}
}
// This expression doesn't look like a type syntactically.
_ => return None,
};
Some(P(Ty { kind, id: self.id, span: self.span, tokens: None }))
}
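    // Illustrative examples for `to_ty` (added, not from the original file): `(a, b)`
    // reparses as the tuple type `(a, b)`, `&mut x` as the type `&mut x`, and `A + B`
    // (both operands being bare paths) as the trait-object type `A + B`; an expression
    // like `a * b` has no type reading and yields None.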
pub fn precedence(&self) -> ExprPrecedence {
match self.kind {
ExprKind::Box(_) => ExprPrecedence::Box,
ExprKind::Array(_) => ExprPrecedence::Array,
ExprKind::ConstBlock(_) => ExprPrecedence::ConstBlock,
ExprKind::Call(..) => ExprPrecedence::Call,
ExprKind::MethodCall(..) => ExprPrecedence::MethodCall,
ExprKind::Tup(_) => ExprPrecedence::Tup,
ExprKind::Binary(op, ..) => ExprPrecedence::Binary(op.node),
ExprKind::Unary(..) => ExprPrecedence::Unary,
ExprKind::Lit(_) => ExprPrecedence::Lit,
ExprKind::Type(..) | ExprKind::Cast(..) => ExprPrecedence::Cast,
ExprKind::Let(..) => ExprPrecedence::Let,
ExprKind::If(..) => ExprPrecedence::If,
ExprKind::While(..) => ExprPrecedence::While,
ExprKind::ForLoop(..) => ExprPrecedence::ForLoop,
ExprKind::Loop(..) => ExprPrecedence::Loop,
ExprKind::Match(..) => ExprPrecedence::Match,
ExprKind::Closure(..) => ExprPrecedence::Closure,
ExprKind::Block(..) => ExprPrecedence::Block,
ExprKind::TryBlock(..) => ExprPrecedence::TryBlock,
ExprKind::Async(..) => ExprPrecedence::Async,
ExprKind::Await(..) => ExprPrecedence::Await,
ExprKind::Assign(..) => ExprPrecedence::Assign,
ExprKind::AssignOp(..) => ExprPrecedence::AssignOp,
ExprKind::Field(..) => ExprPrecedence::Field,
ExprKind::Index(..) => ExprPrecedence::Index,
ExprKind::Range(..) => ExprPrecedence::Range,
ExprKind::Underscore => ExprPrecedence::Path,
ExprKind::Path(..) => ExprPrecedence::Path,
ExprKind::AddrOf(..) => ExprPrecedence::AddrOf,
ExprKind::Break(..) => ExprPrecedence::Break,
ExprKind::Continue(..) => ExprPrecedence::Continue,
ExprKind::Ret(..) => ExprPrecedence::Ret,
ExprKind::InlineAsm(..) | ExprKind::LlvmInlineAsm(..) => ExprPrecedence::InlineAsm,
ExprKind::MacCall(..) => ExprPrecedence::Mac,
ExprKind::Struct(..) => ExprPrecedence::Struct,
ExprKind::Repeat(..) => ExprPrecedence::Repeat,
ExprKind::Paren(..) => ExprPrecedence::Paren,
ExprKind::Try(..) => ExprPrecedence::Try,
ExprKind::Yield(..) => ExprPrecedence::Yield,
ExprKind::Err => ExprPrecedence::Err,
}
}
}
/// Limit types of a range (inclusive or exclusive)
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug)]
pub enum RangeLimits {
/// Inclusive at the beginning, exclusive at the end
HalfOpen,
/// Inclusive at the beginning and end
Closed,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum StructRest {
/// `..x`.
Base(P<Expr>),
/// `..`.
Rest(Span),
/// No trailing `..` or expression.
None,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct StructExpr {
pub qself: Option<QSelf>,
pub path: Path,
pub fields: Vec<ExprField>,
pub rest: StructRest,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum ExprKind {
/// A `box x` expression.
Box(P<Expr>),
/// An array (`[a, b, c, d]`)
Array(Vec<P<Expr>>),
/// Allow anonymous constants from an inline `const` block
ConstBlock(AnonConst),
/// A function call
///
/// The first field resolves to the function itself,
/// and the second field is the list of arguments.
/// This also represents calling the constructor of
/// tuple-like ADTs such as tuple structs and enum variants.
Call(P<Expr>, Vec<P<Expr>>),
/// A method call (`x.foo::<'static, Bar, Baz>(a, b, c, d)`)
///
/// The `PathSegment` represents the method name and its generic arguments
/// (within the angle brackets).
/// The first element of the vector of an `Expr` is the expression that evaluates
/// to the object on which the method is being called on (the receiver),
/// and the remaining elements are the rest of the arguments.
/// Thus, `x.foo::<Bar, Baz>(a, b, c, d)` is represented as
/// `ExprKind::MethodCall(PathSegment { foo, [Bar, Baz] }, [x, a, b, c, d])`.
/// This `Span` is the span of the function, without the dot and receiver
    /// (e.g. `foo(a, b)` in `x.foo(a, b)`).
MethodCall(PathSegment, Vec<P<Expr>>, Span),
/// A tuple (e.g., `(a, b, c, d)`).
Tup(Vec<P<Expr>>),
/// A binary operation (e.g., `a + b`, `a * b`).
Binary(BinOp, P<Expr>, P<Expr>),
/// A unary operation (e.g., `!x`, `*x`).
Unary(UnOp, P<Expr>),
/// A literal (e.g., `1`, `"foo"`).
Lit(Lit),
/// A cast (e.g., `foo as f64`).
Cast(P<Expr>, P<Ty>),
/// A type ascription (e.g., `42: usize`).
Type(P<Expr>, P<Ty>),
/// A `let pat = expr` expression that is only semantically allowed in the condition
/// of `if` / `while` expressions. (e.g., `if let 0 = x { .. }`).
Let(P<Pat>, P<Expr>),
/// An `if` block, with an optional `else` block.
///
/// `if expr { block } else { expr }`
If(P<Expr>, P<Block>, Option<P<Expr>>),
/// A while loop, with an optional label.
///
/// `'label: while expr { block }`
While(P<Expr>, P<Block>, Option<Label>),
/// A `for` loop, with an optional label.
///
/// `'label: for pat in expr { block }`
///
/// This is desugared to a combination of `loop` and `match` expressions.
ForLoop(P<Pat>, P<Expr>, P<Block>, Option<Label>),
/// Conditionless loop (can be exited with `break`, `continue`, or `return`).
///
/// `'label: loop { block }`
Loop(P<Block>, Option<Label>),
/// A `match` block.
Match(P<Expr>, Vec<Arm>),
/// A closure (e.g., `move |a, b, c| a + b + c`).
///
/// The final span is the span of the argument block `|...|`.
Closure(CaptureBy, Async, Movability, P<FnDecl>, P<Expr>, Span),
/// A block (`'label: { ... }`).
Block(P<Block>, Option<Label>),
/// An async block (`async move { ... }`).
///
/// The `NodeId` is the `NodeId` for the closure that results from
/// desugaring an async block, just like the NodeId field in the
/// `Async::Yes` variant. This is necessary in order to create a def for the
/// closure which can be used as a parent of any child defs. Defs
/// created during lowering cannot be made the parent of any other
/// preexisting defs.
Async(CaptureBy, NodeId, P<Block>),
/// An await expression (`my_future.await`).
Await(P<Expr>),
/// A try block (`try { ... }`).
TryBlock(P<Block>),
/// An assignment (`a = foo()`).
/// The `Span` argument is the span of the `=` token.
Assign(P<Expr>, P<Expr>, Span),
/// An assignment with an operator.
///
/// E.g., `a += 1`.
AssignOp(BinOp, P<Expr>, P<Expr>),
/// Access of a named (e.g., `obj.foo`) or unnamed (e.g., `obj.0`) struct field.
Field(P<Expr>, Ident),
/// An indexing operation (e.g., `foo[2]`).
Index(P<Expr>, P<Expr>),
/// A range (e.g., `1..2`, `1..`, `..2`, `1..=2`, `..=2`; and `..` in destructuring assignment).
Range(Option<P<Expr>>, Option<P<Expr>>, RangeLimits),
/// An underscore, used in destructuring assignment to ignore a value.
Underscore,
/// Variable reference, possibly containing `::` and/or type
/// parameters (e.g., `foo::bar::<baz>`).
///
/// Optionally "qualified" (e.g., `<Vec<T> as SomeTrait>::SomeType`).
Path(Option<QSelf>, Path),
/// A referencing operation (`&a`, `&mut a`, `&raw const a` or `&raw mut a`).
AddrOf(BorrowKind, Mutability, P<Expr>),
/// A `break`, with an optional label to break, and an optional expression.
Break(Option<Label>, Option<P<Expr>>),
/// A `continue`, with an optional label.
Continue(Option<Label>),
/// A `return`, with an optional value to be returned.
Ret(Option<P<Expr>>),
/// Output of the `asm!()` macro.
InlineAsm(P<InlineAsm>),
/// Output of the `llvm_asm!()` macro.
LlvmInlineAsm(P<LlvmInlineAsm>),
/// A macro invocation; pre-expansion.
MacCall(MacCall),
/// A struct literal expression.
///
/// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. rest}`.
Struct(P<StructExpr>),
/// An array literal constructed from one repeated element.
///
/// E.g., `[1; 5]`. The expression is the element to be
/// repeated; the constant is the number of times to repeat it.
Repeat(P<Expr>, AnonConst),
/// No-op: used solely so we can pretty-print faithfully.
Paren(P<Expr>),
/// A try expression (`expr?`).
Try(P<Expr>),
/// A `yield`, with an optional value to be yielded.
Yield(Option<P<Expr>>),
/// Placeholder for an expression that wasn't syntactically well formed in some way.
Err,
}
/// The explicit `Self` type in a "qualified path". The actual
/// path, including the trait and the associated item, is stored
/// separately. `position` represents the index of the associated
/// item qualified with this `Self` type.
///
/// ```ignore (only-for-syntax-highlight)
/// <Vec<T> as a::b::Trait>::AssociatedItem
/// ^~~~~ ~~~~~~~~~~~~~~^
/// ty position = 3
///
/// <Vec<T>>::AssociatedItem
/// ^~~~~ ^
/// ty position = 0
/// ```
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct QSelf {
pub ty: P<Ty>,
/// The span of `a::b::Trait` in a path like `<Vec<T> as
/// a::b::Trait>::AssociatedItem`; in the case where `position ==
/// 0`, this is an empty span.
pub path_span: Span,
pub position: usize,
}
/// A capture clause used in closures and `async` blocks.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum CaptureBy {
/// `move |x| y + x`.
Value,
/// `move` keyword was not specified.
Ref,
}
/// The movability of a generator / closure literal:
/// whether a generator contains self-references, causing it to be `!Unpin`.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable, Debug, Copy)]
#[derive(HashStable_Generic)]
pub enum Movability {
/// May contain self-references, `!Unpin`.
Static,
/// Must not contain self-references, `Unpin`.
Movable,
}
/// Represents a macro invocation. The `path` indicates which macro
/// is being invoked, and the `args` are arguments passed to it.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct MacCall {
pub path: Path,
pub args: P<MacArgs>,
pub prior_type_ascription: Option<(Span, bool)>,
}
impl MacCall {
pub fn span(&self) -> Span {
self.path.span.to(self.args.span().unwrap_or(self.path.span))
}
}
/// Arguments passed to an attribute or a function-like macro.
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum MacArgs {
/// No arguments - `#[attr]`.
Empty,
/// Delimited arguments - `#[attr()/[]/{}]` or `mac!()/[]/{}`.
Delimited(DelimSpan, MacDelimiter, TokenStream),
/// Arguments of a key-value attribute - `#[attr = "value"]`.
Eq(
/// Span of the `=` token.
Span,
/// "value" as a nonterminal token.
Token,
),
}
impl MacArgs {
pub fn delim(&self) -> DelimToken {
match self {
MacArgs::Delimited(_, delim, _) => delim.to_token(),
MacArgs::Empty | MacArgs::Eq(..) => token::NoDelim,
}
}
pub fn span(&self) -> Option<Span> {
match self {
MacArgs::Empty => None,
MacArgs::Delimited(dspan, ..) => Some(dspan.entire()),
MacArgs::Eq(eq_span, token) => Some(eq_span.to(token.span)),
}
}
/// Tokens inside the delimiters or after `=`.
/// Proc macros see these tokens, for example.
pub fn inner_tokens(&self) -> TokenStream {
match self {
MacArgs::Empty => TokenStream::default(),
MacArgs::Delimited(.., tokens) => tokens.clone(),
MacArgs::Eq(.., token) => TokenTree::Token(token.clone()).into(),
}
}
/// Whether a macro with these arguments needs a semicolon
/// when used as a standalone item or statement.
pub fn need_semicolon(&self) -> bool {
!matches!(self, MacArgs::Delimited(_, MacDelimiter::Brace, _))
}
}
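// For example, a parenthesized invocation such as `foo!(a, b)` carries its arguments as
// `MacArgs::Delimited(.., MacDelimiter::Parenthesis, ..)`, so `need_semicolon()` is `true`
// when it appears as a statement, while a braced `foo! { .. }` uses `MacDelimiter::Brace`
// and needs no trailing semicolon.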
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum MacDelimiter {
Parenthesis,
Bracket,
Brace,
}
impl MacDelimiter {
pub fn to_token(self) -> DelimToken {
match self {
MacDelimiter::Parenthesis => DelimToken::Paren,
MacDelimiter::Bracket => DelimToken::Bracket,
MacDelimiter::Brace => DelimToken::Brace,
}
}
pub fn from_token(delim: DelimToken) -> Option<MacDelimiter> {
match delim {
token::Paren => Some(MacDelimiter::Parenthesis),
token::Bracket => Some(MacDelimiter::Bracket),
token::Brace => Some(MacDelimiter::Brace),
token::NoDelim => None,
}
}
}
/// Represents a macro definition.
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct MacroDef {
pub body: P<MacArgs>,
/// `true` if macro was defined with `macro_rules`.
pub macro_rules: bool,
}
#[derive(Clone, Encodable, Decodable, Debug, Copy, Hash, Eq, PartialEq)]
#[derive(HashStable_Generic)]
pub enum StrStyle {
/// A regular string, like `"foo"`.
Cooked,
/// A raw string, like `r##"foo"##`.
///
/// The value is the number of `#` symbols used.
Raw(u16),
}
/// An AST literal.
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct Lit {
/// The original literal token as written in source code.
pub token: token::Lit,
/// The "semantic" representation of the literal lowered from the original tokens.
/// Strings are unescaped, hexadecimal forms are eliminated, etc.
/// FIXME: Remove this and only create the semantic representation during lowering to HIR.
pub kind: LitKind,
pub span: Span,
}
/// Same as `Lit`, but restricted to string literals.
#[derive(Clone, Copy, Encodable, Decodable, Debug)]
pub struct StrLit {
/// The original literal token as written in source code.
pub style: StrStyle,
pub symbol: Symbol,
pub suffix: Option<Symbol>,
pub span: Span,
/// The unescaped "semantic" representation of the literal lowered from the original token.
/// FIXME: Remove this and only create the semantic representation during lowering to HIR.
pub symbol_unescaped: Symbol,
}
impl StrLit {
pub fn as_lit(&self) -> Lit {
let token_kind = match self.style {
StrStyle::Cooked => token::Str,
StrStyle::Raw(n) => token::StrRaw(n),
};
Lit {
token: token::Lit::new(token_kind, self.symbol, self.suffix),
span: self.span,
kind: LitKind::Str(self.symbol_unescaped, self.style),
}
}
}
/// Type of the integer literal based on provided suffix.
#[derive(Clone, Copy, Encodable, Decodable, Debug, Hash, Eq, PartialEq)]
#[derive(HashStable_Generic)]
pub enum LitIntType {
/// e.g. `42_i32`.
Signed(IntTy),
/// e.g. `42_u32`.
Unsigned(UintTy),
/// e.g. `42`.
Unsuffixed,
}
/// Type of the float literal based on provided suffix.
#[derive(Clone, Copy, Encodable, Decodable, Debug, Hash, Eq, PartialEq)]
#[derive(HashStable_Generic)]
pub enum LitFloatType {
/// A float literal with a suffix (`1f32` or `1E10f32`).
Suffixed(FloatTy),
/// A float literal without a suffix (`1.0` or `1.0E10`).
Unsuffixed,
}
/// Literal kind.
///
/// E.g., `"foo"`, `42`, `12.34`, or `bool`.
#[derive(Clone, Encodable, Decodable, Debug, Hash, Eq, PartialEq, HashStable_Generic)]
pub enum LitKind {
/// A string literal (`"foo"`).
Str(Symbol, StrStyle),
/// A byte string (`b"foo"`).
ByteStr(Lrc<[u8]>),
/// A byte char (`b'f'`).
Byte(u8),
/// A character literal (`'a'`).
Char(char),
/// An integer literal (`1`).
Int(u128, LitIntType),
/// A float literal (`1f64` or `1E10f64`).
Float(Symbol, LitFloatType),
/// A boolean literal.
Bool(bool),
/// Placeholder for a literal that wasn't well-formed in some way.
Err(Symbol),
}
impl LitKind {
/// Returns `true` if this literal is a string.
pub fn is_str(&self) -> bool {
matches!(self, LitKind::Str(..))
}
/// Returns `true` if this literal is a byte string literal.
pub fn is_bytestr(&self) -> bool {
matches!(self, LitKind::ByteStr(_))
}
/// Returns `true` if this is a numeric literal.
pub fn is_numeric(&self) -> bool {
matches!(self, LitKind::Int(..) | LitKind::Float(..))
}
/// Returns `true` if this literal has no suffix.
/// Note: this will return true for literals with prefixes such as raw strings and byte strings.
pub fn is_unsuffixed(&self) -> bool {
!self.is_suffixed()
}
/// Returns `true` if this literal has a suffix.
pub fn is_suffixed(&self) -> bool {
match *self {
// suffixed variants
LitKind::Int(_, LitIntType::Signed(..) | LitIntType::Unsigned(..))
| LitKind::Float(_, LitFloatType::Suffixed(..)) => true,
// unsuffixed variants
LitKind::Str(..)
| LitKind::ByteStr(..)
| LitKind::Byte(..)
| LitKind::Char(..)
| LitKind::Int(_, LitIntType::Unsuffixed)
| LitKind::Float(_, LitFloatType::Unsuffixed)
| LitKind::Bool(..)
| LitKind::Err(..) => false,
}
}
}
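// For example, `42_u32` is represented as `LitKind::Int(42, LitIntType::Unsigned(UintTy::U32))`
// and is considered suffixed, while `"foo"` is `LitKind::Str(.., StrStyle::Cooked)` and is not.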
// N.B., If you change this, you'll probably want to change the corresponding
// type structure in `middle/ty.rs` as well.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct MutTy {
pub ty: P<Ty>,
pub mutbl: Mutability,
}
/// Represents a function's signature in a trait declaration,
/// trait implementation, or free function.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct FnSig {
pub header: FnHeader,
pub decl: P<FnDecl>,
pub span: Span,
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[derive(Encodable, Decodable, HashStable_Generic)]
pub enum FloatTy {
F32,
F64,
}
impl FloatTy {
pub fn name_str(self) -> &'static str {
match self {
FloatTy::F32 => "f32",
FloatTy::F64 => "f64",
}
}
pub fn name(self) -> Symbol {
match self {
FloatTy::F32 => sym::f32,
FloatTy::F64 => sym::f64,
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[derive(Encodable, Decodable, HashStable_Generic)]
pub enum IntTy {
Isize,
I8,
I16,
I32,
I64,
I128,
}
impl IntTy {
pub fn name_str(&self) -> &'static str {
match *self {
IntTy::Isize => "isize",
IntTy::I8 => "i8",
IntTy::I16 => "i16",
IntTy::I32 => "i32",
IntTy::I64 => "i64",
IntTy::I128 => "i128",
}
}
pub fn name(&self) -> Symbol {
match *self {
IntTy::Isize => sym::isize,
IntTy::I8 => sym::i8,
IntTy::I16 => sym::i16,
IntTy::I32 => sym::i32,
IntTy::I64 => sym::i64,
IntTy::I128 => sym::i128,
}
}
}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Debug)]
#[derive(Encodable, Decodable, HashStable_Generic)]
pub enum UintTy {
Usize,
U8,
U16,
U32,
U64,
U128,
}
impl UintTy {
pub fn name_str(&self) -> &'static str {
match *self {
UintTy::Usize => "usize",
UintTy::U8 => "u8",
UintTy::U16 => "u16",
UintTy::U32 => "u32",
UintTy::U64 => "u64",
UintTy::U128 => "u128",
}
}
pub fn name(&self) -> Symbol {
match *self {
UintTy::Usize => sym::usize,
UintTy::U8 => sym::u8,
UintTy::U16 => sym::u16,
UintTy::U32 => sym::u32,
UintTy::U64 => sym::u64,
UintTy::U128 => sym::u128,
}
}
}
/// A constraint on an associated type (e.g., `A = Bar` in `Foo<A = Bar>` or
/// `A: TraitA + TraitB` in `Foo<A: TraitA + TraitB>`).
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct AssocTyConstraint {
pub id: NodeId,
pub ident: Ident,
pub gen_args: Option<GenericArgs>,
pub kind: AssocTyConstraintKind,
pub span: Span,
}
/// The kinds of an `AssocTyConstraint`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum AssocTyConstraintKind {
/// E.g., `A = Bar` in `Foo<A = Bar>`.
Equality { ty: P<Ty> },
/// E.g. `A: TraitA + TraitB` in `Foo<A: TraitA + TraitB>`.
Bound { bounds: GenericBounds },
}
#[derive(Encodable, Decodable, Debug)]
pub struct Ty {
pub id: NodeId,
pub kind: TyKind,
pub span: Span,
pub tokens: Option<LazyTokenStream>,
}
impl Clone for Ty {
fn clone(&self) -> Self {
ensure_sufficient_stack(|| Self {
id: self.id,
kind: self.kind.clone(),
span: self.span,
tokens: self.tokens.clone(),
})
}
}
impl Ty {
pub fn peel_refs(&self) -> &Self {
let mut final_ty = self;
while let TyKind::Rptr(_, MutTy { ty, .. }) = &final_ty.kind {
final_ty = &ty;
}
final_ty
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct BareFnTy {
pub unsafety: Unsafe,
pub ext: Extern,
pub generic_params: Vec<GenericParam>,
pub decl: P<FnDecl>,
}
/// The various kinds of type recognized by the compiler.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum TyKind {
/// A variable-length slice (`[T]`).
Slice(P<Ty>),
/// A fixed length array (`[T; n]`).
Array(P<Ty>, AnonConst),
/// A raw pointer (`*const T` or `*mut T`).
Ptr(MutTy),
/// A reference (`&'a T` or `&'a mut T`).
Rptr(Option<Lifetime>, MutTy),
/// A bare function (e.g., `fn(usize) -> bool`).
BareFn(P<BareFnTy>),
/// The never type (`!`).
Never,
/// A tuple (`(A, B, C, D,...)`).
Tup(Vec<P<Ty>>),
/// An anonymous struct type i.e. `struct { foo: Type }`
AnonymousStruct(Vec<FieldDef>, bool),
/// An anonymous union type i.e. `union { bar: Type }`
AnonymousUnion(Vec<FieldDef>, bool),
/// A path (`module::module::...::Type`), optionally
/// "qualified", e.g., `<Vec<T> as SomeTrait>::SomeType`.
///
/// Type parameters are stored in the `Path` itself.
Path(Option<QSelf>, Path),
/// A trait object type `Bound1 + Bound2 + Bound3`
/// where `Bound` is a trait or a lifetime.
TraitObject(GenericBounds, TraitObjectSyntax),
/// An `impl Bound1 + Bound2 + Bound3` type
/// where `Bound` is a trait or a lifetime.
///
/// The `NodeId` exists to prevent lowering from having to
/// generate `NodeId`s on the fly, which would complicate
/// the generation of opaque `type Foo = impl Trait` items significantly.
ImplTrait(NodeId, GenericBounds),
/// No-op; kept solely so that we can pretty-print faithfully.
Paren(P<Ty>),
/// Unused for now.
Typeof(AnonConst),
/// This means the type should be inferred instead of it having been
/// specified. This can appear anywhere in a type.
Infer,
/// Inferred type of a `self` or `&self` argument in a method.
ImplicitSelf,
/// A macro in the type position.
MacCall(MacCall),
/// Placeholder for a kind that has failed to be defined.
Err,
/// Placeholder for a `va_list`.
CVarArgs,
}
impl TyKind {
pub fn is_implicit_self(&self) -> bool {
matches!(self, TyKind::ImplicitSelf)
}
pub fn is_unit(&self) -> bool {
matches!(self, TyKind::Tup(tys) if tys.is_empty())
}
}
/// Syntax used to declare a trait object.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum TraitObjectSyntax {
Dyn,
None,
}
/// Inline assembly operand explicit register or register class.
///
/// E.g., `"eax"` as in `asm!("mov eax, 2", out("eax") result)`.
#[derive(Clone, Copy, Encodable, Decodable, Debug)]
pub enum InlineAsmRegOrRegClass {
Reg(Symbol),
RegClass(Symbol),
}
bitflags::bitflags! {
#[derive(Encodable, Decodable, HashStable_Generic)]
pub struct InlineAsmOptions: u8 {
const PURE = 1 << 0;
const NOMEM = 1 << 1;
const READONLY = 1 << 2;
const PRESERVES_FLAGS = 1 << 3;
const NORETURN = 1 << 4;
const NOSTACK = 1 << 5;
const ATT_SYNTAX = 1 << 6;
const RAW = 1 << 7;
}
}
#[derive(Clone, PartialEq, PartialOrd, Encodable, Decodable, Debug, Hash, HashStable_Generic)]
pub enum InlineAsmTemplatePiece {
String(String),
Placeholder { operand_idx: usize, modifier: Option<char>, span: Span },
}
impl fmt::Display for InlineAsmTemplatePiece {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::String(s) => {
for c in s.chars() {
match c {
'{' => f.write_str("{{")?,
'}' => f.write_str("}}")?,
_ => c.fmt(f)?,
}
}
Ok(())
}
Self::Placeholder { operand_idx, modifier: Some(modifier), .. } => {
write!(f, "{{{}:{}}}", operand_idx, modifier)
}
Self::Placeholder { operand_idx, modifier: None, .. } => {
write!(f, "{{{}}}", operand_idx)
}
}
}
}
impl InlineAsmTemplatePiece {
/// Rebuilds the asm template string from its pieces.
pub fn to_string(s: &[Self]) -> String {
use fmt::Write;
let mut out = String::new();
for p in s.iter() {
let _ = write!(out, "{}", p);
}
out
}
}
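// For example, the pieces `[String("mov "), Placeholder { operand_idx: 0, modifier: None, .. },
// String(", 2")]` render back to the template string `"mov {0}, 2"`, with any literal `{` or `}`
// inside `String` pieces re-escaped as `{{` and `}}`.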
/// Inline assembly operand.
///
/// E.g., `out("eax") result` as in `asm!("mov eax, 2", out("eax") result)`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum InlineAsmOperand {
In {
reg: InlineAsmRegOrRegClass,
expr: P<Expr>,
},
Out {
reg: InlineAsmRegOrRegClass,
late: bool,
expr: Option<P<Expr>>,
},
InOut {
reg: InlineAsmRegOrRegClass,
late: bool,
expr: P<Expr>,
},
SplitInOut {
reg: InlineAsmRegOrRegClass,
late: bool,
in_expr: P<Expr>,
out_expr: Option<P<Expr>>,
},
Const {
anon_const: AnonConst,
},
Sym {
expr: P<Expr>,
},
}
/// Inline assembly.
///
/// E.g., `asm!("NOP");`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct InlineAsm {
pub template: Vec<InlineAsmTemplatePiece>,
pub operands: Vec<(InlineAsmOperand, Span)>,
pub options: InlineAsmOptions,
pub line_spans: Vec<Span>,
}
/// Inline assembly dialect.
///
/// E.g., `"intel"` as in `llvm_asm!("mov eax, 2" : "={eax}"(result) : : : "intel")`.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, Hash, HashStable_Generic)]
pub enum LlvmAsmDialect {
Att,
Intel,
}
/// LLVM-style inline assembly.
///
/// E.g., `"={eax}"(result)` as in `llvm_asm!("mov eax, 2" : "={eax}"(result) : : : "intel")`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct LlvmInlineAsmOutput {
pub constraint: Symbol,
pub expr: P<Expr>,
pub is_rw: bool,
pub is_indirect: bool,
}
/// LLVM-style inline assembly.
///
/// E.g., `llvm_asm!("NOP");`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct LlvmInlineAsm {
pub asm: Symbol,
pub asm_str_style: StrStyle,
pub outputs: Vec<LlvmInlineAsmOutput>,
pub inputs: Vec<(Symbol, P<Expr>)>,
pub clobbers: Vec<Symbol>,
pub volatile: bool,
pub alignstack: bool,
pub dialect: LlvmAsmDialect,
}
/// A parameter in a function header.
///
/// E.g., `bar: usize` as in `fn foo(bar: usize)`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Param {
pub attrs: AttrVec,
pub ty: P<Ty>,
pub pat: P<Pat>,
pub id: NodeId,
pub span: Span,
pub is_placeholder: bool,
}
/// Alternative representation for `Param`s describing the `self` parameter of methods.
///
/// E.g., `&mut self` as in `fn foo(&mut self)`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum SelfKind {
/// `self`, `mut self`
Value(Mutability),
/// `&'lt self`, `&'lt mut self`
Region(Option<Lifetime>, Mutability),
/// `self: TYPE`, `mut self: TYPE`
Explicit(P<Ty>, Mutability),
}
pub type ExplicitSelf = Spanned<SelfKind>;
impl Param {
/// Attempts to cast parameter to `ExplicitSelf`.
pub fn to_self(&self) -> Option<ExplicitSelf> {
if let PatKind::Ident(BindingMode::ByValue(mutbl), ident, _) = self.pat.kind {
if ident.name == kw::SelfLower {
return match self.ty.kind {
TyKind::ImplicitSelf => Some(respan(self.pat.span, SelfKind::Value(mutbl))),
TyKind::Rptr(lt, MutTy { ref ty, mutbl }) if ty.kind.is_implicit_self() => {
Some(respan(self.pat.span, SelfKind::Region(lt, mutbl)))
}
_ => Some(respan(
self.pat.span.to(self.ty.span),
SelfKind::Explicit(self.ty.clone(), mutbl),
)),
};
}
}
None
}
/// Returns `true` if parameter is `self`.
pub fn is_self(&self) -> bool {
if let PatKind::Ident(_, ident, _) = self.pat.kind {
ident.name == kw::SelfLower
} else {
false
}
}
/// Builds a `Param` object from `ExplicitSelf`.
pub fn from_self(attrs: AttrVec, eself: ExplicitSelf, eself_ident: Ident) -> Param {
let span = eself.span.to(eself_ident.span);
let infer_ty = P(Ty { id: DUMMY_NODE_ID, kind: TyKind::ImplicitSelf, span, tokens: None });
let param = |mutbl, ty| Param {
attrs,
pat: P(Pat {
id: DUMMY_NODE_ID,
kind: PatKind::Ident(BindingMode::ByValue(mutbl), eself_ident, None),
span,
tokens: None,
}),
span,
ty,
id: DUMMY_NODE_ID,
is_placeholder: false,
};
match eself.node {
SelfKind::Explicit(ty, mutbl) => param(mutbl, ty),
SelfKind::Value(mutbl) => param(mutbl, infer_ty),
SelfKind::Region(lt, mutbl) => param(
Mutability::Not,
P(Ty {
id: DUMMY_NODE_ID,
kind: TyKind::Rptr(lt, MutTy { ty: infer_ty, mutbl }),
span,
tokens: None,
}),
),
}
}
}
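// For example, a plain `self` parameter maps to `SelfKind::Value(Mutability::Not)`,
// `&'a mut self` maps to `SelfKind::Region(Some('a), Mutability::Mut)`, and an explicitly
// typed `self: SomeType` maps to `SelfKind::Explicit(..)`; `from_self` builds the `Param`
// back from such a description.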
/// A signature (not the body) of a function declaration.
///
/// E.g., `fn foo(bar: baz)`.
///
/// Please note that it's different from `FnHeader` structure
/// which contains metadata about function safety, asyncness, constness and ABI.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct FnDecl {
pub inputs: Vec<Param>,
pub output: FnRetTy,
}
impl FnDecl {
pub fn has_self(&self) -> bool {
self.inputs.get(0).map_or(false, Param::is_self)
}
pub fn c_variadic(&self) -> bool {
self.inputs.last().map_or(false, |arg| matches!(arg.ty.kind, TyKind::CVarArgs))
}
}
/// Is the trait definition an auto trait?
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum IsAuto {
Yes,
No,
}
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable, Debug)]
#[derive(HashStable_Generic)]
pub enum Unsafe {
Yes(Span),
No,
}
#[derive(Copy, Clone, Encodable, Decodable, Debug)]
pub enum Async {
Yes { span: Span, closure_id: NodeId, return_impl_trait_id: NodeId },
No,
}
impl Async {
pub fn is_async(self) -> bool {
matches!(self, Async::Yes { .. })
}
/// If the function is `async`, returns the `NodeId` of the `impl Trait` item generated for its return type.
pub fn opt_return_id(self) -> Option<NodeId> {
match self {
Async::Yes { return_impl_trait_id, .. } => Some(return_impl_trait_id),
Async::No => None,
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Encodable, Decodable, Debug)]
#[derive(HashStable_Generic)]
pub enum Const {
Yes(Span),
No,
}
/// Item defaultness.
/// For details see the [RFC #2532](https://github.com/rust-lang/rfcs/pull/2532).
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum Defaultness {
Default(Span),
Final,
}
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
pub enum ImplPolarity {
/// `impl Trait for Type`
Positive,
/// `impl !Trait for Type`
Negative(Span),
}
impl fmt::Debug for ImplPolarity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
ImplPolarity::Positive => "positive".fmt(f),
ImplPolarity::Negative(_) => "negative".fmt(f),
}
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum FnRetTy {
/// Return type is not specified.
///
/// Functions default to `()` and closures default to inference.
/// Span points to where return type would be inserted.
Default(Span),
/// Everything else.
Ty(P<Ty>),
}
impl FnRetTy {
pub fn span(&self) -> Span {
match *self {
FnRetTy::Default(span) => span,
FnRetTy::Ty(ref ty) => ty.span,
}
}
}
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug)]
pub enum Inline {
Yes,
No,
}
/// Module item kind.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum ModKind {
/// Module with inlined definition `mod foo { ... }`,
/// or with definition outlined to a separate file `mod foo;` and already loaded from it.
/// The inner span runs from the first token after `{` to the last token before `}`,
/// or from the first to the last token in the loaded file.
Loaded(Vec<P<Item>>, Inline, Span),
/// Module with definition outlined to a separate file `mod foo;` but not yet loaded from it.
Unloaded,
}
/// Foreign module declaration.
///
/// E.g., `extern { .. }` or `extern "C" { .. }`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct ForeignMod {
/// `unsafe` keyword accepted syntactically for macro DSLs, but not
/// semantically by Rust.
pub unsafety: Unsafe,
pub abi: Option<StrLit>,
pub items: Vec<P<ForeignItem>>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct EnumDef {
pub variants: Vec<Variant>,
}
/// Enum variant.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Variant {
/// Attributes of the variant.
pub attrs: AttrVec,
/// Id of the variant (not the constructor, see `VariantData::ctor_id()`).
pub id: NodeId,
/// Span of the variant.
pub span: Span,
/// The visibility of the variant. Syntactically accepted but not semantically.
pub vis: Visibility,
/// Name of the variant.
pub ident: Ident,
/// Fields and constructor id of the variant.
pub data: VariantData,
/// Explicit discriminant, e.g., `Foo = 1`.
pub disr_expr: Option<AnonConst>,
/// Is a macro placeholder
pub is_placeholder: bool,
}
/// Part of `use` item to the right of its prefix.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum UseTreeKind {
/// `use prefix` or `use prefix as rename`
///
/// The extra `NodeId`s are for HIR lowering, when additional statements are created for each
/// namespace.
Simple(Option<Ident>, NodeId, NodeId),
/// `use prefix::{...}`
Nested(Vec<(UseTree, NodeId)>),
/// `use prefix::*`
Glob,
}
/// A tree of paths sharing common prefixes.
/// Used in `use` items both at top-level and inside of braces in import groups.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct UseTree {
pub prefix: Path,
pub kind: UseTreeKind,
pub span: Span,
}
impl UseTree {
pub fn ident(&self) -> Ident {
match self.kind {
UseTreeKind::Simple(Some(rename), ..) => rename,
UseTreeKind::Simple(None, ..) => {
self.prefix.segments.last().expect("empty prefix in a simple import").ident
}
_ => panic!("`UseTree::ident` can only be used on a simple import"),
}
}
}
/// Distinguishes between `Attribute`s that decorate items and Attributes that
/// are contained as statements within items. These two cases need to be
/// distinguished for pretty-printing.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, HashStable_Generic)]
pub enum AttrStyle {
Outer,
Inner,
}
rustc_index::newtype_index! {
pub struct AttrId {
ENCODABLE = custom
DEBUG_FORMAT = "AttrId({})"
}
}
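// `AttrId`s are not persisted across serialization: encoding writes a unit value and decoding
// mints a fresh id via `crate::attr::mk_attr_id`, so attribute ids are only meaningful within a
// single session.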
impl<S: Encoder> rustc_serialize::Encodable<S> for AttrId {
fn encode(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_unit()
}
}
impl<D: Decoder> rustc_serialize::Decodable<D> for AttrId {
fn decode(d: &mut D) -> Result<AttrId, D::Error> {
d.read_nil().map(|_| crate::attr::mk_attr_id())
}
}
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct AttrItem {
pub path: Path,
pub args: MacArgs,
pub tokens: Option<LazyTokenStream>,
}
/// A list of attributes.
pub type AttrVec = ThinVec<Attribute>;
/// Metadata associated with an item.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Attribute {
pub kind: AttrKind,
pub id: AttrId,
/// Denotes if the attribute decorates the following construct (outer)
/// or the construct this attribute is contained within (inner).
pub style: AttrStyle,
pub span: Span,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum AttrKind {
/// A normal attribute.
Normal(AttrItem, Option<LazyTokenStream>),
/// A doc comment (e.g. `/// ...`, `//! ...`, `/** ... */`, `/*! ... */`).
/// Doc attributes (e.g. `#[doc="..."]`) are represented with the `Normal`
/// variant (which is much less compact and thus more expensive).
DocComment(CommentKind, Symbol),
}
/// `TraitRef`s appear in impls.
///
/// Resolution maps each `TraitRef`'s `ref_id` to its defining trait; that's all
/// that the `ref_id` is for. The `impl_id` maps to the "self type" of this impl.
/// If this impl is an `ItemKind::Impl`, the `impl_id` is redundant (it could be the
/// same as the impl's `NodeId`).
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct TraitRef {
pub path: Path,
pub ref_id: NodeId,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct PolyTraitRef {
/// The `'a` in `<'a> Foo<&'a T>`.
pub bound_generic_params: Vec<GenericParam>,
/// The `Foo<&'a T>` in `<'a> Foo<&'a T>`.
pub trait_ref: TraitRef,
pub span: Span,
}
impl PolyTraitRef {
pub fn new(generic_params: Vec<GenericParam>, path: Path, span: Span) -> Self {
PolyTraitRef {
bound_generic_params: generic_params,
trait_ref: TraitRef { path, ref_id: DUMMY_NODE_ID },
span,
}
}
}
#[derive(Copy, Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum CrateSugar {
/// Source is `pub(crate)`.
PubCrate,
/// Source is (just) `crate`.
JustCrate,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Visibility {
pub kind: VisibilityKind,
pub span: Span,
pub tokens: Option<LazyTokenStream>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum VisibilityKind {
Public,
Crate(CrateSugar),
Restricted { path: P<Path>, id: NodeId },
Inherited,
}
impl VisibilityKind {
pub fn is_pub(&self) -> bool {
matches!(self, VisibilityKind::Public)
}
}
/// Field definition in a struct, variant or union.
///
/// E.g., `bar: usize` as in `struct Foo { bar: usize }`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct FieldDef {
pub attrs: AttrVec,
pub id: NodeId,
pub span: Span,
pub vis: Visibility,
pub ident: Option<Ident>,
pub ty: P<Ty>,
pub is_placeholder: bool,
}
/// Fields and constructor ids of enum variants and structs.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum VariantData {
/// Struct variant.
///
/// E.g., `Bar { .. }` as in `enum Foo { Bar { .. } }`.
Struct(Vec<FieldDef>, bool),
/// Tuple variant.
///
/// E.g., `Bar(..)` as in `enum Foo { Bar(..) }`.
Tuple(Vec<FieldDef>, NodeId),
/// Unit variant.
///
/// E.g., `Bar = ..` as in `enum Foo { Bar = .. }`.
Unit(NodeId),
}
impl VariantData {
/// Return the fields of this variant.
pub fn fields(&self) -> &[FieldDef] {
match *self {
VariantData::Struct(ref fields, ..) | VariantData::Tuple(ref fields, _) => fields,
_ => &[],
}
}
/// Return the `NodeId` of this variant's constructor, if it has one.
pub fn ctor_id(&self) -> Option<NodeId> {
match *self {
VariantData::Struct(..) => None,
VariantData::Tuple(_, id) | VariantData::Unit(id) => Some(id),
}
}
}
/// An item definition.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Item<K = ItemKind> {
pub attrs: Vec<Attribute>,
pub id: NodeId,
pub span: Span,
pub vis: Visibility,
/// The name of the item.
/// It might be a dummy name in case of anonymous items.
pub ident: Ident,
pub kind: K,
/// Original tokens this item was parsed from. This isn't necessarily
/// available for all items, although over time more and more items should
/// have this be `Some`. Right now this is primarily used for procedural
/// macros, notably custom attributes.
///
/// Note that the tokens here do not include the outer attributes, but will
/// include inner attributes.
pub tokens: Option<LazyTokenStream>,
}
impl Item {
/// Return the span that encompasses the attributes.
pub fn span_with_attributes(&self) -> Span {
self.attrs.iter().fold(self.span, |acc, attr| acc.to(attr.span))
}
}
impl<K: Into<ItemKind>> Item<K> {
pub fn into_item(self) -> Item {
let Item { attrs, id, span, vis, ident, kind, tokens } = self;
Item { attrs, id, span, vis, ident, kind: kind.into(), tokens }
}
}
/// `extern` qualifier on a function item or function type.
#[derive(Clone, Copy, Encodable, Decodable, Debug)]
pub enum Extern {
None,
Implicit,
Explicit(StrLit),
}
impl Extern {
pub fn from_abi(abi: Option<StrLit>) -> Extern {
abi.map_or(Extern::Implicit, Extern::Explicit)
}
}
/// A function header.
///
/// All the information between the visibility and the name of the function is
/// included in this struct (e.g., `async unsafe fn` or `const extern "C" fn`).
#[derive(Clone, Copy, Encodable, Decodable, Debug)]
pub struct FnHeader {
pub unsafety: Unsafe,
pub asyncness: Async,
pub constness: Const,
pub ext: Extern,
}
impl FnHeader {
/// Does this function header have any qualifiers or is it empty?
pub fn has_qualifiers(&self) -> bool {
let Self { unsafety, asyncness, constness, ext } = self;
matches!(unsafety, Unsafe::Yes(_))
|| asyncness.is_async()
|| matches!(constness, Const::Yes(_))
|| !matches!(ext, Extern::None)
}
}
impl Default for FnHeader {
fn default() -> FnHeader {
FnHeader {
unsafety: Unsafe::No,
asyncness: Async::No,
constness: Const::No,
ext: Extern::None,
}
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct TraitKind(
pub IsAuto,
pub Unsafe,
pub Generics,
pub GenericBounds,
pub Vec<P<AssocItem>>,
);
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct TyAliasKind(pub Defaultness, pub Generics, pub GenericBounds, pub Option<P<Ty>>);
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct ImplKind {
pub unsafety: Unsafe,
pub polarity: ImplPolarity,
pub defaultness: Defaultness,
pub constness: Const,
pub generics: Generics,
/// The trait being implemented, if any.
pub of_trait: Option<TraitRef>,
pub self_ty: P<Ty>,
pub items: Vec<P<AssocItem>>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct FnKind(pub Defaultness, pub FnSig, pub Generics, pub Option<P<Block>>);
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum ItemKind {
/// An `extern crate` item, with the optional *original* crate name if the crate was renamed.
///
/// E.g., `extern crate foo` or `extern crate foo_bar as foo`.
ExternCrate(Option<Symbol>),
/// A use declaration item (`use`).
///
/// E.g., `use foo;`, `use foo::bar;` or `use foo::bar as FooBar;`.
Use(UseTree),
/// A static item (`static`).
///
/// E.g., `static FOO: i32 = 42;` or `static FOO: &'static str = "bar";`.
Static(P<Ty>, Mutability, Option<P<Expr>>),
/// A constant item (`const`).
///
/// E.g., `const FOO: i32 = 42;`.
Const(Defaultness, P<Ty>, Option<P<Expr>>),
/// A function declaration (`fn`).
///
/// E.g., `fn foo(bar: usize) -> usize { .. }`.
Fn(Box<FnKind>),
/// A module declaration (`mod`).
///
/// E.g., `mod foo;` or `mod foo { .. }`.
/// `unsafe` keyword on modules is accepted syntactically for macro DSLs, but not
/// semantically by Rust.
Mod(Unsafe, ModKind),
/// An external module (`extern`).
///
/// E.g., `extern {}` or `extern "C" {}`.
ForeignMod(ForeignMod),
/// Module-level inline assembly (from `global_asm!()`).
GlobalAsm(InlineAsm),
/// A type alias (`type`).
///
/// E.g., `type Foo = Bar<u8>;`.
TyAlias(Box<TyAliasKind>),
/// An enum definition (`enum`).
///
/// E.g., `enum Foo<A, B> { C(A), D(B) }`.
Enum(EnumDef, Generics),
/// A struct definition (`struct`).
///
/// E.g., `struct Foo<A> { x: A }`.
Struct(VariantData, Generics),
/// A union definition (`union`).
///
/// E.g., `union Foo<A, B> { x: A, y: B }`.
Union(VariantData, Generics),
/// A trait declaration (`trait`).
///
/// E.g., `trait Foo { .. }`, `trait Foo<T> { .. }` or `auto trait Foo {}`.
Trait(Box<TraitKind>),
/// Trait alias
///
/// E.g., `trait Foo = Bar + Quux;`.
TraitAlias(Generics, GenericBounds),
/// An implementation.
///
/// E.g., `impl<A> Foo<A> { .. }` or `impl<A> Trait for Foo<A> { .. }`.
Impl(Box<ImplKind>),
/// A macro invocation.
///
/// E.g., `foo!(..)`.
MacCall(MacCall),
/// A macro definition.
MacroDef(MacroDef),
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ItemKind, 112);
impl ItemKind {
pub fn article(&self) -> &str {
use ItemKind::*;
match self {
Use(..) | Static(..) | Const(..) | Fn(..) | Mod(..) | GlobalAsm(..) | TyAlias(..)
| Struct(..) | Union(..) | Trait(..) | TraitAlias(..) | MacroDef(..) => "a",
ExternCrate(..) | ForeignMod(..) | MacCall(..) | Enum(..) | Impl { .. } => "an",
}
}
pub fn descr(&self) -> &str {
match self {
ItemKind::ExternCrate(..) => "extern crate",
ItemKind::Use(..) => "`use` import",
ItemKind::Static(..) => "static item",
ItemKind::Const(..) => "constant item",
ItemKind::Fn(..) => "function",
ItemKind::Mod(..) => "module",
ItemKind::ForeignMod(..) => "extern block",
ItemKind::GlobalAsm(..) => "global asm item",
ItemKind::TyAlias(..) => "type alias",
ItemKind::Enum(..) => "enum",
ItemKind::Struct(..) => "struct",
ItemKind::Union(..) => "union",
ItemKind::Trait(..) => "trait",
ItemKind::TraitAlias(..) => "trait alias",
ItemKind::MacCall(..) => "item macro invocation",
ItemKind::MacroDef(..) => "macro definition",
ItemKind::Impl { .. } => "implementation",
}
}
pub fn generics(&self) -> Option<&Generics> {
match self {
Self::Fn(box FnKind(_, _, generics, _))
| Self::TyAlias(box TyAliasKind(_, generics, ..))
| Self::Enum(_, generics)
| Self::Struct(_, generics)
| Self::Union(_, generics)
| Self::Trait(box TraitKind(_, _, generics, ..))
| Self::TraitAlias(generics, _)
| Self::Impl(box ImplKind { generics, .. }) => Some(generics),
_ => None,
}
}
}
/// Represents associated items.
/// These include items in `impl` and `trait` definitions.
pub type AssocItem = Item<AssocItemKind>;
/// Represents associated item kinds.
///
/// The term "provided" in the variants below refers to the item having a default
/// definition / body. Meanwhile, a "required" item lacks a definition / body.
/// In an implementation, all items must be provided.
/// The `Option`s below denote the bodies, where `Some(_)`
/// means "provided" and conversely `None` means "required".
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum AssocItemKind {
/// An associated constant, `const $ident: $ty $def?;` where `def ::= "=" $expr? ;`.
/// If `def` is parsed, then the constant is provided, and otherwise required.
Const(Defaultness, P<Ty>, Option<P<Expr>>),
/// An associated function.
Fn(Box<FnKind>),
/// An associated type.
TyAlias(Box<TyAliasKind>),
/// A macro expanding to associated items.
MacCall(MacCall),
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(AssocItemKind, 72);
impl AssocItemKind {
pub fn defaultness(&self) -> Defaultness {
match *self {
Self::Const(def, ..)
| Self::Fn(box FnKind(def, ..))
| Self::TyAlias(box TyAliasKind(def, ..)) => def,
Self::MacCall(..) => Defaultness::Final,
}
}
}
impl From<AssocItemKind> for ItemKind {
fn from(assoc_item_kind: AssocItemKind) -> ItemKind {
match assoc_item_kind {
AssocItemKind::Const(a, b, c) => ItemKind::Const(a, b, c),
AssocItemKind::Fn(fn_kind) => ItemKind::Fn(fn_kind),
AssocItemKind::TyAlias(ty_alias_kind) => ItemKind::TyAlias(ty_alias_kind),
AssocItemKind::MacCall(a) => ItemKind::MacCall(a),
}
}
}
impl TryFrom<ItemKind> for AssocItemKind {
type Error = ItemKind;
fn try_from(item_kind: ItemKind) -> Result<AssocItemKind, ItemKind> {
Ok(match item_kind {
ItemKind::Const(a, b, c) => AssocItemKind::Const(a, b, c),
ItemKind::Fn(fn_kind) => AssocItemKind::Fn(fn_kind),
ItemKind::TyAlias(ty_alias_kind) => AssocItemKind::TyAlias(ty_alias_kind),
ItemKind::MacCall(a) => AssocItemKind::MacCall(a),
_ => return Err(item_kind),
})
}
}
/// An item in `extern` block.
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum ForeignItemKind {
/// A foreign static item (`static FOO: u8`).
Static(P<Ty>, Mutability, Option<P<Expr>>),
/// A foreign function.
Fn(Box<FnKind>),
/// A foreign type.
TyAlias(Box<TyAliasKind>),
/// A macro expanding to foreign items.
MacCall(MacCall),
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ForeignItemKind, 72);
impl From<ForeignItemKind> for ItemKind {
fn from(foreign_item_kind: ForeignItemKind) -> ItemKind {
match foreign_item_kind {
ForeignItemKind::Static(a, b, c) => ItemKind::Static(a, b, c),
ForeignItemKind::Fn(fn_kind) => ItemKind::Fn(fn_kind),
ForeignItemKind::TyAlias(ty_alias_kind) => ItemKind::TyAlias(ty_alias_kind),
ForeignItemKind::MacCall(a) => ItemKind::MacCall(a),
}
}
}
impl TryFrom<ItemKind> for ForeignItemKind {
type Error = ItemKind;
fn try_from(item_kind: ItemKind) -> Result<ForeignItemKind, ItemKind> {
Ok(match item_kind {
ItemKind::Static(a, b, c) => ForeignItemKind::Static(a, b, c),
ItemKind::Fn(fn_kind) => ForeignItemKind::Fn(fn_kind),
ItemKind::TyAlias(ty_alias_kind) => ForeignItemKind::TyAlias(ty_alias_kind),
ItemKind::MacCall(a) => ForeignItemKind::MacCall(a),
_ => return Err(item_kind),
})
}
}
pub type ForeignItem = Item<ForeignItemKind>;
| 30.987792 | 101 | 0.580604 |
8ab27cfb4ee232d7f9c050dea488469d47084bf4 | 988 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
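// Tests that a trait's default methods can take `self` by value (`mut self`) and as an owned
// pointer (`mut ~self`), mutate it via the required `set_to` method, and return it to the caller.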
struct X {
a: int
}
trait Changer {
fn change(mut self) -> Self {
self.set_to(55);
self
}
fn change_again(mut ~self) -> Box<Self> {
self.set_to(45);
self
}
fn set_to(&mut self, a: int);
}
impl Changer for X {
fn set_to(&mut self, a: int) {
self.a = a;
}
}
pub fn main() {
let x = X { a: 32 };
let new_x = x.change();
assert_eq!(new_x.a, 55);
let x = box new_x;
let new_x = x.change_again();
assert_eq!(new_x.a, 45);
}
| 21.955556 | 68 | 0.615385 |
23188a73f6eabd97a5b2017e299f3d23433ec626 | 4,928 | //! Welcome to `lastfm_rs`, a Rust library for interacting with the Last.fm API.
//!
//! This library aims to be easy to work with, as well as having complete support for the majority of the
//! Last.fm API endpoints. However, at this time, only a few API endpoints are supported by the library. These
//! include a few endpoints relating to retrieving user information, such as recent tracks, or general user
//! information retrieval. For all supported API endpoints, please check the Modules section; all API endpoints
//! are organized by data type and/or category as reflected by the [Last.fm API Documentation].
//!
//! However, it should be noted that the Scrobbling API is not planned to be implemented in the near future,
//! as music / media players written in Rust are fairly uncommon at this time, meaning support for scrobbling
//! is not currently a high priority for the library. This can always change however, but the main focus is
//! being as robust a library as possible for working with the heavily data-driven Last.fm API endpoints.
//!
//! The `error` and `model` modules are only used for error handling and models that are used across several
//! API endpoints, so they can be ignored, unless you intend on implementing error handling in your application,
//! in which case the error module should be looked at as the module has support for all of Last.fm's error types.
//!
//! # Installation
//!
//! lastfm_rs is very easy to install into a project; just add the following to the `[dependencies]`
//! section of your `Cargo.toml` file in your project's main directory.
//!
//! ```toml
//! lastfm_rs = "0.4"
//! ```
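//! # Example
//!
//! A minimal sketch of constructing a client (the API key below is only a placeholder):
//!
//! ```no_run
//! use lastfm_rs::Client;
//!
//! // Build a client with a fresh internal `reqwest` client...
//! let client = Client::new("YOUR_API_KEY");
//!
//! // ...or reuse a `reqwest` client you already have.
//! let reqwest_client = reqwest::Client::new();
//! let client = Client::from_reqwest_client(reqwest_client, "YOUR_API_KEY");
//! ```
//!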
//!
//! [Last.fm API Documentation]: https://www.last.fm/api/intro/
extern crate reqwest;
extern crate serde;
extern crate serde_json;
extern crate url;
use reqwest::{Client as ReqwestClient, Error, Response};
use std::marker::PhantomData;
use url::Url;
pub mod error;
pub mod macros;
pub mod model;
pub mod track;
pub mod user;
pub mod utilities;
const WS_ENDPOINT: &str = "http://ws.audioscrobbler.com/2.0/";
/// The Request Builder.
///
/// This is the main request builder, used for constructing any and all requests to the Last.fm API.
///
/// * `client` - An instance of the Last.fm API client.
/// * `url` - The Last.fm API endpoint URL to feed to the request builder.
/// * `phantom` - An unused parameter, only used to satisfy the type checker.
pub struct RequestBuilder<'a, T: 'a> {
/// An instance of the Last.fm API client.
client: &'a mut Client,
/// The URL containing the Last.fm endpoint to feed to the Request Builder.
url: Url,
/// The type of the data, e.g. UserInfo. Only used to satisfy
/// Rust's type checker.
phantom: PhantomData<&'a T>,
}
/// The Last.fm client.
///
/// The main client, used for interacting with the Last.fm API. This client is where you will use any
/// given API methods / calls, such as when you want to retrieve a user's recent tracks. All of the
/// available methods can be seen below.
///
/// * `api_key` - The API key used to authenticate to the Last.fm API.
/// * `client` - The given `reqwest` client. Used to send API requests.
pub struct Client {
/// The API key used to authenticate with Last.fm.
api_key: String,
/// The `reqwest` client. Used to transmit and receive API requests and responses.
client: ReqwestClient,
}
impl Client {
/// Initializes a new Last.fm API client with a new `reqwest` client set to defaults.
///
/// * `api_key` - The API key used to authenticate with the Last.fm API.
pub fn new(api_key: &str) -> Client {
Client {
api_key: api_key.to_owned(),
client: ReqwestClient::new(),
}
}
/// Initializes a Last.fm API client from a pre-existing reqwest client. This is useful when
/// you already have a `reqwest` client initialized and don't need a brand new one to be
/// created each time.
///
/// * `client` - The reqwest client to hook into.
/// * `api_key` - The API key used to authenticate with the Last.fm API.
pub fn from_reqwest_client(client: ReqwestClient, api_key: &str) -> Client {
Client {
api_key: api_key.to_owned(),
client,
}
}
/// Build a new URL with the given query parameters pointing to a given Last.fm API endpoint.
async fn build_url(&self, params: Vec<(&str, &str)>) -> Url {
let mut url = Url::parse(WS_ENDPOINT).unwrap();
url.query_pairs_mut().clear().append_pair("api_key", &*self.api_key).append_pair("format", "json");
for (key, value) in params {
url.query_pairs_mut().append_pair(key, value);
}
url
}
/// Send a GET request to the provided [`Url`].
///
/// [`Url`]: url::Url
async fn request(&mut self, url: &Url) -> Result<Response, Error> { self.client.get(url.as_str()).send().await }
}
| 41.762712 | 116 | 0.680398 |
ef9cbd794837ebca4dc3eabd67f5257125dd12ae | 701 | /// This binary runs integration tests between Lighthouse and execution engines.
///
/// It will first attempt to build any supported integration clients, then it will run tests.
///
/// A return code of `0` indicates the tests succeeded.
mod build_geth;
mod execution_engine;
mod genesis_json;
mod test_rig;
use execution_engine::Geth;
use test_rig::TestRig;
/// Set to `false` to send logs to the console during tests. Logs are useful when debugging.
const SUPPRESS_LOGS: bool = false;
fn main() {
if cfg!(windows) {
panic!("windows is not supported, only linux");
}
test_geth()
}
fn test_geth() {
build_geth::build();
TestRig::new(Geth).perform_tests_blocking();
}
| 24.172414 | 93 | 0.710414 |
08b10ddbfff2b82dfadcb4f0ce3cc4c1d9262456 | 71,229 | solana_sdk::pubkeys!(
testnet_validators,
[
"123vij84ecQEKUvQ7gYMKxKwKF6PbYSzCzzURYA4xULY",
"12NG4Y7LGq8VQgtDZxn82ok4eGUEqmhzKh3wZtsakjLN",
"12Y25eHzGPaK5R5DjQ1kgJWuVd7zrtQ7cmaMPfmacsJV",
"234u57PuEif5LkTBwS7rHzu1XF5VWg79ddLLDkYBh44Q",
"23SUe5fzmLws1M58AnGnvnUBRUKJmzCpnFQwv4M4b9Er",
"25UM59KCvciYwhjCq7t1rC8ZuvsxQBC2QRcaRNfq7xML",
"27SB7d27xvtBJjgsAV8JBDjQroySmZepiNSepeRbRhe9",
"28LgQ7MeEZVgNJfYRc6UnoAz2SnSjKbyCKM6sntCRotb",
"295DP6WSsiJ3oLNPhZ3oSZUA6gwbGe7KDoHWRnRBZAHu",
"29Xwdi4HpBr1u9EAqDz3tBbMuwqBuczLPuVe2gGkg7ZF",
"2bQyrSEPaQ9BMbu7Ftv7ye1fxtSLW3oZRj4d2U64AJmc",
"2BT25HZHpyzYmTbqyqzxBK7YjjH4a6aZ783TEgTTGYo5",
"2bvUqyatpaDLn9ch9BxNYRwg5SFpq3rDfWJKfohC9iD4",
"2C26iHJcU5dqJJQ6NME3Lq583RT1Js9QDtgfmzknRajc",
"2CGskjnksG9YwAFMJkPDwsKx1iRAXJSzfpAxyoWzGj6M",
"2D1oCLRK6geGhV5RyZ52JD9Qzqt311AEH1XrTjZdzbRh",
"2dm9YbgXtR5yimmgsLkfaMLcNZxhjywW4bLnvChms3tb",
"2DvsPbbKrBaJm7SbdVvRjZL1NGCU3MwciGCoCw42fTMu",
"2dYoJ9T45W6N8bCaccesfJ3i2diazR7YkyeLMX4okKth",
"2eoKP1tzZkkXWexUY7XHLSNbo9DbuFGBssfhp8zCcdwH",
"2EQekczAd7QmnWhmPpbhCCsWeFooqZmQqHNz8qo8S8RA",
"2FCxeG7mBYy2kpYgvgTZLCYXnaqDc4hGwYunhzHmEUD1",
"2GAdxV8QafdRnkTwy9AuX8HvVcNME6JqK2yANaDunhXp",
"2GAtWPFNhEfj6nmXJp2YqPhSY55TvLeZpLRq9t3hqi1F",
"2gV5onEfn8KmtZ3Lck39GrNEZyTxJ1RiNV5s7fRdC3gc",
"2ibbdJtxwzzzhK3zc7FR3cfea2ATHwCJ8ybcG7WzKtBd",
"2iczkZceGZQqimksY8uk6NLrQXoMFZGK1mTWos4QnZ3a",
"2jrM8c8ZhpX9CovseJ2sRsrU9yzsFrP7d3gCi5ESU5Rs",
"2JT1SRSm61vvHKErY2PCnHUtMsumoh69jrC7bojd9f1x",
"2jypS1SoX6MLEfuNvUH23K7UU3BsRu3vBphcd7BVkEpj",
"2jYzt9Ly7dNzNpMV9sbHBNuwMEvVdSi9L8yJCTJT21ki",
"2K2UhSoUNNi7Q78F5QCpcutRaifTjckGYKLpsw7i9noa",
"2khFqurxeMKKfhFJ9dfas1L9LsHwt2qHGW8Ztinzoeob",
"2LsRRgttA1PKXXeKTZP2QhetgM94Dj5uecmTzyQkTvXK",
"2mEvgikTj9SRL59MXogmYfyAR9ze51HV5dCgPUEj6V5t",
"2Pik6jn6yLQVi8jmwvZCibTygPWvhh3pXoGJrGT3eVGf",
"2PSBMBFVykqSc4MFQ3krJVRosjmixSbNeNJfGMmL1DFp",
"2qYB7wEBJ1UstVNZLbWyvTXNfocSsWEh7cFaTLkqsvfR",
"2RLf3RSy1ScBFL5UzVDw3jYKCAuGA9vHpr9dnbQzJt8V",
"2ryEov5c84JLWwt5Ub1xGYWux1LF63j7kaNRc6jiv4As",
"2RYnM1C5XuzWzZu4sD7TyJTgxQTKzFVHG6jNtbK65q2y",
"2tvTYUajoBgeAd66Zhd5Nc2BgKGCgdmasR94fpBokzop",
"2tZoLFgcbeW8Howq8QMRnExvuwHFUeEnx9ZhHq2qX77E",
"2URaCX9G2dKKrtwygpZemfTpDivpSNT31T25vX4YGR4R",
"2VzCLy98rzmvKGo23e1eM4LANCt9JFrtVTBBMZzGT4FW",
"2X5JSTLN9m2wm3ejCxfWRNMieuC2VMtaMWSoqLPbC4Pq",
"2XAHomUvH3LFjYSxzSfcbwS73JgynpQHfapMNMJ8isL9",
"2xFjhfxTKGVvGDXLwroqGiKNEF3KCSFaCRVLHfpsiPgd",
"2XP9MfzWQnX3DiAJQFSKXKyqBr4M7GKhHnKc9P7z519H",
"2yDwZer11v2TTj86WeHzRDpE4HJVbyJ3fJ8H4AkUtWTc",
"2YhQPcbz1NYuu1B7Kpxi2NGrpZ8Jdv9azw8Jf6E9FC71",
"2YLPihCDxqztR5be69jhoNDPMxV6KeTJ2X2LtVBXDgp4",
"2ZeChc7Res7fUVdcGCDTJfRd9N8R21hiBPLAuJsqHHwh",
"2ZETk6Sy3wdrbTRRCFa6u1gzNjg59B5yFJwjiACC6Evc",
"2zHkPFBSxWF4Bc6P7XHaZMJLfBqtSgfDCBqTZ7STXE1a",
"2ZuLSKq6t5nqRLp3GqtRSttu7FE9if2nrMegHLnp5skK",
"2ZZkgKcBfp4tW8qCLj2yjxRYh9CuvEVJWb6e2KKS91Mj",
"31cud34DHkL5zM4ZiHXgsrkAu13Jeck7ahvkPU9i4Jze",
"33kmrfcRXVtWZxmVJ1GTsPNKpBXWTj1tv65wTVTiifyw",
"33LfdA2yKS6m7E8pSanrKTKYMhpYHEGaSWtNNB5s7xnm",
"34D4nS1eywoA1wiwcgrBP8Ewj9NXyaZ3dP9DJKfkvpGn",
"368KipD4nBzVs4AizHj1iU4TErSSqmZaNGVyzHx8TVXM",
"37Gr1zVPr79E3AdPFj8EMyKZYt7Bnz3VWKjdFctQC8fL",
"383CX582368Zao48isK6qp33xBphCvhfgdEowfiXeG1k",
"38zXVD94Hp71ftxT5JqeTYzTGECk1BmiSJVoXzbS7jsR",
"39FH4cnkSawRtr9N2VbUVST4o6ZiixW2K4QCzLqW8tMg",
"39moskfERPLyaspZAZNsbrXgiUxWGGgyjPWu1ZaN2dJv",
"3ANJb42D3pkVtntgT6VtW2cD3icGVyoHi2NGwtXYHQAs",
"3ckQZncmgmS1aZCC7Eot659hoBQ8k85ExddD7fu9qJ2c",
"3Df9iVRoSkX3YZ6GmMexeLSX8vyt8BADpHn571KfQSWa",
"3dVEmuYaJxLdLNGnNxCuLtELp38VKMxJPY6gUJScu6Jc",
"3F6wMGB5C5fB7xtTMmJ8NAfkwmWSaQgxPsBUBgveZvrQ",
"3FhfNWGiqDsy4xAXiS74WUb5GLfK7FVnn6kxt3CYLgvr",
"3Fiu2KFBf3BoT9REvsFbpb7L1vTSs7jnmuDrk4vZ9DNE",
"3g7c1Mufk7Bi8Kk4wKTGQ3eLwfHYqc7ySpP46fqEMsEC",
"3HitRjngqhAgVuNdFwtR1Lp5tQavbJri8MvKUq5Jpw1N",
"3i7sS5McrJ7EzU8nbdA5rcXT9kNiSxLxhwyfuxbsDvBj",
"3iPu9xQ3mCFmqME9ZajuZbFHjwagAxhgfTxnc4pWbEBC",
"3jddze9ZFYxTfVkV7xr78TSSkLahj6Zf1G3TUrhnWQuw",
"3JeLfM9V7CkK1D8W8LFirU65AZsWRo127PumwNc3dFQi",
"3JfoYf6wmQxhpry1L61dnDWYJbL7GYi4yt7mybehuhne",
"3kWT2K2HfxrspLFoJhKUAio3QF85EuTemJKTUcPEjm7m",
"3LtAt3iqmeTgJ3GD8DtCcjkRkJdDKAF42nJytn28syeP",
"3LWv8RrdEyMtePAMCmohBzWAz7fmN7Cf2ctSUxJKEQnS",
"3mx22d1aJLazEutJyHVszdwyLJcrRo26EKB4AWDbRxRc",
"3NchsxHzVUAv6MTGEuAVt8QRdi93uHGNRmS9AEiZkMVh",
"3NdgfZXaj83dKoskxY2LPyQmrjtF3ikCmfCJeKTdFqaB",
"3nL1oAkcW4M88VG4D78dNxHrqaNdKyJqKW3wbhhBjhig",
"3nvAV4PVG2w1F9GDh3YMnhYNvEEzV3LRMJ5e6bMYcULk",
"3NyhrTWkxdLmi1nuW7Xo7tfqSKrsM6dxQkZzhs7Z1JWx",
"3oaf5Y78LHEt38Bcb8bBtabCBySWyyEor7LN8hGwHLLU",
"3oSpYov4ngdWKcZBnxxSPTe2KmeC5s3iKTi518KP9exz",
"3pqniPoVa85STVdvSDpKiaAqvWrMWLgsbJaNos5mvnYk",
"3PVz8crz85wgqgudf6mxws2psgKc4kr51MhfmU6VekEG",
"3pzTi41c6NAbZyTiyPEAQtmi2K5MyWZJMxx6nDvWPgnQ",
"3qaaXFYh389e1Ncboc7qbCWxSQdbaiYuTFrJVYuh7jo2",
"3QK8tbsVSwU6xRzLWhVFJCcnqm9WPxSUdaa7cXzBQZZh",
"3QuBhrNbo47ywuK2TmAbvKp331kNDr1up5HM4J6KKYwC",
"3rFxX6D68YhDpF7c6vDt2yhfp8CXXcjNNga43cCJ8Ww9",
"3SYNAWbZuuWMYKHwL73Gpeo1ySqRSshf5WDoW9vYVz9V",
"3Teu85ACyEKeqaoFt7ZTfGw256kdYGCcJXkMA5AbMfp4",
"3uF82ATbSzKFpAZKQ9LV4BziPsKycMeUZCzaWfL6rSbU",
"3viEMMqkPRBiAKXB3Y7yH5GbzqtRn3NmnLPi8JsZmLQw",
"3W4fe5WTAS4iPzBhjGP8a1LHBTx8vbscqThXT1THqEGC",
"3w6hQh7Ndx93eqbaEMLyR3BwqtRxT2XVumavvU93mcRk",
"3Wyqj2cgKYK2sSSb3wVv3wJ5yD3yigV8iLLttkZfKn8d",
"3wz211BhQAE2n5fjDQSStM2iSizhNRyJDNRkDEc1YwMF",
"3wZB4hqoLXqBuzFun2sBstu72RgYnR37RWk8YnqzEBLa",
"3X5fSxjnJ3Y626cGNosRokUW3nYdTpvZeheGpSjD5uEb",
"3xKsqGgLMNVazzNBsKa9TPG2Vo5fGLr1xkKrTMVXVVkT",
"3xUTkgPKNJZ3dkpDMV8zWV34BkmvKanguKipv6M9x2Mt",
"41NQTqFPCwdiiGMibSExCNJ95ALFBkcxJtvQw2S41oLc",
"42oRgGrFtPHPdw28dw9HiJEKTc7WVJh1ND3dDc2m2UWm",
"45aGtJWVx9xbhp11diPithdQS1E9Hzjm5b5HEpAM68Ax",
"45tyReiehTPZ7rYq35khyF1bnanHPHavNrRj5QH8HnAc",
"473ToSs8wTyGd2DTmwb1zNkr7TweNC1Wfui2FzKNB1JE",
"4ajWybNN1XqaapKEEiz4MPMyCP7Ppuw7FMQwQ57o7gFZ",
"4bLyjRauEjdJGb86g9V9p2ysveMFZTJiDZZmg8Bj29ss",
"4BWRooHWbpeBRmjb8y3HgtkMcK5MWVAKce2dSXnTP5Tr",
"4Bx5bzjmPrU1g74AHfYpTMXvspBt8GnvZVQW3ba9z4Af",
"4Bzp9fzcdjctbdo23SCwCEkPeQzCeyTb3WtwiK3KNVRc",
"4cxKnptRvBHYMqUX5hsbEBcj2GmoAxmJGYoqN5YgfUQp",
"4dT3ySaMTMyG7wssPNYUixRBxfkbWTbnsoaWLzfwUinU",
"4dWYFeMhh2Q6bqXdV7CCd4mJC81im2k6CXCBKVPShXjT",
"4eyn57baA11sgvkQafTcrwJ9qVs6QptXBahf43Li1jKc",
"4fBQr617DmhjekLFckh2JkGWNboKQbpRchNrXwDQdjSv",
"4FZSiJpGgprsVxkzc2F8v3bgnRpk8Ez1Dq7ohXwY1q9V",
"4g5gX1mmFGGragqYQ1AsRpB8ZJvwCoUKVT5LtKTDrNSp",
"4gEKnFpiJ8XC6DdFw4D65uYQeMF8x7KDqMrBPrDVjMPb",
"4GhLBaxr1oEHWpoGnWh3mcRXUkBU5EEQZv3L27c7ohoq",
"4gMboaRFTTxQ6iPoH3NmxLw6Ux3SEAGkQjfrBT1suDZd",
"4hDEtsHXAf6TMBNJHogmN5noitFzxGxKAs5YwsKZzrDd",
"4Jb1YfUUN1xxdYb28wPLT6A52j459uLNBJaetpk3vAKE",
"4jZMrzWGfMHDRkEBqwnx1cPR6uP3i8v2EaKALzi7bYbc",
"4JZsGW4WUSjAjH4joCaAAVnNi5ERfHr93YUDxmHZpDM7",
"4keeN1kQVHQFKBJvZNKjmLHpLvZMEM3rtrVhwM23Sbgr",
"4LyxkXdt9cwze6MkBY71pKdV2S3SxzNBYoUvNkDH9hBR",
"4N8tZu9Yn9AkkpJCYVSH2o7jCHMThkeG8SRNc3ThnrFL",
"4Nh8T1d4YBZHEuQNRmFbLXPT5HbWicqPxGeKZ5SdAr4i",
"4nu5rdaXjhXHniTtVG5ZEZbU3NBZsnbTL6Ug1zcTAfop",
"4oNUWNoSNnwghHBCGsuAaQEuaB6oZEXE2w4VNhRxoaQc",
"4pZjWxF6277CRncZjggHdiDN96juPucZHg537d2km4f9",
"4QNekaDqrLmUENqkVhGCJrgHziPxkX9kridbKwunx9su",
"4rGW4pdnjvMi22PnCdDBGxX5ChTrMcyFSF3T2xHUEQvC",
"4sRKUyYwqmc38TpPGmkbLfjKkyNBGEBaiYJaMCYfkUBh",
"4STBf6muaBxEx8e3wcUT1DwVq5UuXxXYjWPcNXecpdE5",
"4u2qTnf4QVC8PcgNFPBwY2PwdkiMa4jb3KnNZo4zZbtV",
"4uykzcDWW8wnVWMXXgh2RqXaddSVsx8TNvpJV7eACXbz",
"4veSBAABaESW2WpnJzcdNcduopX7X1f63KziC24FhQee",
"4vgoKb76Z2vj9V9z7hoQpZkkwJrkL1z35LWNd9EXSi2o",
"4vXPjSaZfydRqhnM85uFqDWqYcFyA744R2tjZQN8Nff4",
"4WufhXsUhPc7cdHXYxxDrYZVVLKa9jCDGC4ccfmuBvu2",
"4X1qvzrv95EfE6x3i13iokffWBLQGT3t2ZkGaKmERX7r",
"4XWxphAh1Ji9p3dYMNRNtW3sbmr5Z1cvsGyJXJx5Jvfy",
"4YGgmwyqztpJeAi3pzHQ4Gf9cWrMHCjZaWeWoCK6zz6X",
"4ZtE2XX6oQThPpdjwKXVMphTTZctbWwYxmcCV6xR11RT",
"512wm7UysDB8PNwWpjMBmRgYHdQAoj7o6EDJ9CUyK2kb",
"518q2YT5TjpwZM3sLSTk58VVmdYkF86abh7GGyoUaHZ",
"52MERCkzgb4icyneihfLaeaqhWZYPxYH8fyJLEddnYXY",
"55nmQ8gdWpNW5tLPoBPsqDkLm1W24cmY5DbMMXZKSP8U",
"55ofKaF1xdfgC9mB4zUhrffdx7CVoxTbNo7GeQLyj3YL",
"57DPUrAncC4BUY7KBqRMCQUt4eQeMaJWpmLQwsL35ojZ",
"58J9ucd9Qc6gMD8QHh2sHTyJyD8kdjHRQZkEAyAZ72YA",
"58ktQdWmK3D3VxX1smCXhHGKdNNEJ93PGpXtX8RLdLHL",
"58M2W8tybgWy6pJVqk7tT7YF7C3rmUxVM4MWN7LG6m7D",
"59WHuha1QunWmupWhFA4vr3WMUe8BLN7dc8HUsJ4YC86",
"5aGEHgWCyHNxCcNMHP5TDddUkT5uXGpuwBfonE13jnMB",
"5B5zutCiy12JLoi6urb7uX9rZzwUhYH4VmiDobMB42dU",
"5BFP2FY7kdV3ogDrKf9UtKphrRSd3kNGC7p3q17i5rSR",
"5Cf18uw63TPsS8XZ2gHiQKzxPh7i5axu6knFfAXFDEUe",
"5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on",
"5dLMRyPWx6rdPGZpZ7uuZiqry96dUT5yz48u62Gzugi6",
"5DsrdX4xPok2YNHUEtQsRuyAkDcdSBPXM74ezfRgy8Vm",
"5EamRRDR1j78iE2Q1TUmoDQRw59m2GTs8QJWtnTZsKf8",
"5Fa9zxgMJV23ShcXDYnGiUstjzeTNgeDaGDAYWBmEK3R",
"5fcmYHLur87RDZDmdXi6jDEuWKVeg9KcNHBu1UNqPjHU",
"5fnyGEnVu3nyMrUysGQLXz38QH51VNtmYGSA99197xCX",
"5FPQXMJxXKJmuShQCiTRYPzXL9LBfgphprhXR54pr2eu",
"5H3sMCaSJdN2k1hyuFTzq2BrZHUq7CinTa82hJS6EDTf",
"5hwtqBGMzoat2a7dzaPXMLKNPZUcsFm2jAko8Kx6tJLk",
"5idH3j6ugKTqSx6WHZax9LmVyu3MQ3sy1zsNqPv9mEgh",
"5jAKgxnCLVrb5zdDxjnRotwNirVG26Set4ZZ6BWC6Sx",
"5jLw8DGMmjwaCJWbkT3dksXVEdWrXzQtiBd2TfsF1J1H",
"5KFXF9DS2ETQVfUTAfMygw6LNbqiXWXYr4y2k1kYr9bA",
"5KZRD6hDCZd1dgM9rJLweFFRkHu3qAgBhHgQ96Wz1VSn",
"5m28zJcp7CsTrH2szyNQhygvDis3dPwbgrtYsWi3J4jN",
"5Mbpdczvb4nSC33AWXmh6wmDxSZpGRANNcZypdPSGv9y",
"5MGfrpdVsyifhn2x62j6PnBWQk2c5xTUK1o8KFrwPLWG",
"5mwyVeNzQx6CGnNSN6jPMFdui9LDvmFQBHcpHNNvDrMc",
"5NH47Zk9NAzfbtqNpUtn8CQgNZeZE88aa2NRpfe7DyTD",
"5NLjk9HANo3C9kRfxu63h2vZUD1cER2LacWD7idoJtKF",
"5NorYZBbtgbouD3toX3761ZGbaYTWrNSDNci4G4zV8eo",
"5nR5ktqmZufaVuK8N8nNoqVrQqopL6qAnf7YNvsjynhz",
"5nVDe1R4QW8XcaWrDUo88tG1V8CgAV2BqWpCX4mF49TE",
"5ogMBk74DTpRaEahTtBrrsFN5mcZ2cfmZfPsJMhJm31t",
"5oVky3o3pNbZfWndUBJbxH82ZDqaUx7k1CorxfisKWZt",
"5p3Y7UV2oZrSTTSLJzJknEzqQpetmk2NB2hQEKPc43dC",
"5PLDu8auwqtMkHW9zdPsfUcvyESZ45umFc4r8cWUw3Zp",
"5qsT9h2TuTMPLtX6gcD2DG6mcZwechxmWhKRFHbCt6Pu",
"5rxRt2GVpSUFJTqQ5E4urqJCDbcBPakb46t6URyxQ5Za",
"5SAMpCcejTXQMnbrtkNv6nSxqaYgjRbk733QNzc4teJC",
"5sjVVuHD9wgBgXDEWsPajQrJvdTPh9ed9MydCgmUVsec",
"5sjXEuFCerACmhdyhSmxGLD7TfvmXcg2XnPQP2o25kYT",
"5TkrtJfHoX85sti8xSVvfggVV9SDvhjYjiXe9PqMJVN9",
"5TZbMUkDaxxbyhkpgMQHZQCyvHAmsg9ZyDHf4R26qrap",
"5UBdwqhphJ54qRFg6G9iRZzuaSmbFmwy2KyE28JMLbYU",
"5ueaf3XmwPAqk92VvUvQfFvwY1XycV4ZFoznxffUz3Hh",
"5unroM4ZHe4ysnprhGrsHBUMsCbkfAHU1Z4rMtosbL26",
"5uTcsQSrUffYo6RYSWj75SuGMkJ4v9x5RYuQoTc5aWGR",
"5vaCfp7UEpW5qdJYyVH4m93oMzzyzTqXdbr7xLGobY8q",
"5vdpdDS5vvUrPTpGq8zDWmirYheKHq8RWrQfUrbarN29",
"5vfvM4qv8UERxSU4qjKhcyJYgfvBwxM3zotkbyXg5z4z",
"5vKzPeQeveU8qnvgaECkdVdBks6MxTWPWe48ZMeC6fdg",
"5vxoRv2P12q4K4cWPCJkvPjg6jYnuCYxzF3juJZJiwba",
"67VDb2iEdx6XjCfBLXhUgKQQjTuLe9X2eLqTq5nBjUTy",
"68ZxNmvRkaLLeNdqkKWKKSCmFiF28Zcy4xVHN5QZuzY3",
"69k73WLdHRge7E3vCUiDx7Dkm1DQSBBGAu9FqNj4AeJD",
"6AaA8HJGpYK9RDN5NQjDJfHPcqX63hnw3NXEa9rTXbEs",
"6AzAaGnhw5E9Nbkk4e7uhRFEtTVKPYwxWsLhVL8wPDuU",
"6bkTEGxMJf18WBT7QDjs39aEEmN39mQFFQfihyQ2yJkU",
"6C2zZ5hac7V9KzdwoGeRFjVpAgPPbXvxGrcRtq2Pf2CG",
"6DmskQV9ricrKpuGuHzyWmdsJcHPauvbPALdbsJxM9GB",
"6Dr57RWT2ctMt2XiQxj9Nec5mBrfjucfAyh8hWQE9cp9",
"6dr7c5k6SsFRFfmoNqADxZQsvPjPjg4meeEHVX8cn6HU",
"6E5NygCNcfyPHkLbHMckzF25cgQoxN3DfMqH9bwyQRpf",
"6FWhS2CHjtCf81GMsqHRXQqDUh3UKyyWGF15QGCWWb7Q",
"6j7DvYDyFTdrK99apFuuT8w2WaeaezfwLDLk8Em8sB2m",
"6jhfZV63yGmPM4djkHYNZ5eTxmuMxmi55UkJBDeMjUqL",
"6Ku6Cj3Y3FETU6JEuwjpLLu65CnKhi5YGtKdUTRGud7i",
"6Kwr8fUZPmSFNWaXfRL7e7v38itt276DFVu7RiYn8oW5",
"6m8LGKXMT5QrRQdQsQAd2VHpYwJZebbrc48WgkPWeRYc",
"6MEyD2RCk74jqYDrmuoDqor7KQca8B3pyRDw91ynmgki",
"6Mi3Q2eNUCess79xZJwGMhYYwzNo5Tr24Xbw3QhEWyFt",
"6nrkRvzUpTst8teZJawMFFHrmixJ2sxAUxPKrqoGwCB8",
"6oB8HATu5ApWMWhFpE4Ms5XMNcKmjk83VpcND5U1vHof",
"6p9f4oyPCR3iyAW69DjndFuggRt6KFp22RVaywthff9W",
"6PdBqw4p1iaNd3CYg18THHpzDBuophRUk3qSFy3KNTuD",
"6PkpXpQeLMp45TC1PTPUhCpywcCxMcmUXvH3dNPMXQvo",
"6PojfJo3DkFNHsybGRQr3E7DmtUPvnnUdeFFgN2MBkKE",
"6PwxMMGLFnAf9sjMHfVr15z9fntjYTNPxJ7gFhkFxkXi",
"6qJPxxgZHCQKBvbGC9zCsuuPtHMMLszVCoiCvEhVULyJ",
"6rBEm4eWXATTETo1ncXYiAJBLwh1VtMBiRwMLRY75yR7",
"6t8zWy766tsHBVNxhMwsTGiEYkGtjaZncRU3vcSEYtHU",
"6TkKqq15wXjqEjNg9zqTKADwuVATR9dW3rkNnsYme1ea",
"6tptWLfq3o2Q5M74ZJLpGGpA9jCAScHSaun5aQVTtp1h",
"6vx5vGgqAa9dRaJpbViCNDjzxp6EyGV38YMYbNDqTzLr",
"6vZuaLY4n4GP9DVroymfZ4D1oP6xpgF1ExLMqHQbt32L",
"6W2xi4iCGU8eTMCGtG3DQXgMGurXFnd5iVXCY5Sq7AbF",
"6w8Gxzq1AusnWxrnBH49wkWVemp7MPxXftfyUQy67yJZ",
"6WwCWBHYvNXnDswu6qrHbKoXMqtB1ZwRCD2U3oqWbZmB",
"6YpwLjgXcMWAj29govWQr87kaAGKS7CnoqWsEDJE4h8P",
"6zCt5z72rfN9sRk2hTgc1LeFDbEBfXYmW6xtSNmgyama",
"6zdz3xCHLqu9uruKskX24eztqWUchf4P8YjY3pMnwCY9",
"6ZEbKFxTjEKGC9HUqzy9z4ccJ8Aq3ktPKEzHGDosQJo4",
"6ZGwooZQdrHZNjGP7Lrye4JuStqeLJf7wwSgLcZFNciA",
"71bhKKL89U3dNHzuZVZ7KarqV6XtHEgjXjvJTsguD11B",
"721wicnref9s6HiU1ZWmYTWp3ZRonzHWRxAPSc2Ceu9K",
"731Lnc3mbXpquV8FmFnTL8EE36uoZRiDMUKXehwZq8x2",
"74U9LiSPv2gb8bBZSh5uVNf89W4wZ8zs9B8EvRVVwr87",
"75ApR5PzaMtcx2DhzZvkj3MDRQWsDCjcuEzWsygboVix",
"75yzn7njq32yY1ieCZxFNVFZQWtEbQHpaaG6dSZFfmX5",
"77uXenX1Y9T2D1pcnHnYsYiwTTHbnzkyrKX5fQFMGVCR",
"787PK2WaCUZCyYEmuYQSGmoxu7MyqK1usn43FfiVwhcB",
"7AbNcvhBBHeL3LFhULonjidYSKKaZzyoiM6dDv8zQpjo",
"7arfejY2YxX9QrmzHrhu3rG3HofjMqKtfBzQLf8s3Wop",
"7AW5VGSNcaECGKJD2C4rpRuWpcT4kdAHrbahc7KFQM3p",
"7bFx5g3sh5CqupFYtch3J1RdZBZs29HtpXAWyPPyptB3",
"7BS1RfipQ7zwuKAdiUX5CNFCKNEdk82TN2C3CmoXR4ux",
"7C3FrWyhFGc75WgccpnpuuCRSqpZiWpvj6d7U7jScSKU",
"7Cj9XfthjKp4KxccS8tV5RmjZtucBuyyXJDRovLkyFmS",
"7cY1beonNGzrqUk4pNWErm2vYcyw5yyLqwnrEHr6iKmu",
"7dEjSFnrm66CJ7Aj5mC1hsYmMzmGgWPr6iZNhcvANZ1w",
"7DU3QwALuHzaDU5YVWK1BgeFaPo4TJ7QPAifNHCHBDBC",
"7EucomZSKvQdiZLvra8hLszL1kRYiGewxyMJnyyzdbH7",
"7F2vcJca5ewzdJUNcVMKCLVYneq6CX9JFMH1U7JeVG5",
"7FnrBgjPb1y8PNjzRLihQWUvky37F7wkvRb7MUL89Q8P",
"7FVCgatxKrX34VwM4YRhUVdXsJAoB5Kk3EGWW5M2Nqub",
"7hAddyJcvQAS6SsfRKLJzYPuq4h1XykRSJEUmr64p8oF",
"7hifPeGJ4YFHh935XngjGuv67PHruoBxiqeALMqqjDnz",
"7JFfCpPEodnt6SWY41ePBRXR6LUGiKhLSKJNw9ZYjdah",
"7K32uTNK2zJwp5WTt4t57qJMf1JnHBq2HcSkc4oV5sQb",
"7kmwiz4wbzf1kUSZmKKzaJRxybGeDMLSqhR9s2FebhoY",
"7mmRxJNttYcJVNsJmiLjTHYmNDt32EpzUSgLDoBsTKKK",
"7NPcRcHu3jACoQf54nkRBLgdn7zBbUYhnsdC4VHqBQwK",
"7qFWjQLAYeSGf7MBFo9LUYEiFN4LpMagwFu4MwAUueVn",
"7RTTKCWBJ2XwtSHkUfpwBTH7SsdKqHrWfnD9Dv4z2Wyw",
"7RUobwC33EbHaWWR2sbdaJhT8x8PpgUoAbJQYrQqrSgQ",
"7scarR3Z5obfefZr8bPKYoMNipua43K35AJAc1YchQBK",
"7sEpbQB3Dryn5JhQVCGWoGgUfYwNEZzjPNa1Tu9mVa5p",
"7suRNpX7bJsXphHJtBv4ZsLjJZ1dTGeX256pLqJZdEAm",
"7T5ZekSsBSgLNKVzQmCRQ5iqL5ycprREa1tz3GYmb4eT",
"7TcmJn12spW6KQJp4fvvo45d1hpxS8EnLjKMxihtNZ1V",
"7TG3LLqWYn8ybpqAJaiop1bVb5mPWJSDhxPaLzUdF3M2",
"7X3csFXUN2AZph83GC2FZCpkCTZXfVWssaJ72cpwG96w",
"7xhwT3FQqW88unUPfVQat5F3koUTPSujfwaT41bt51qc",
"7yiFPLeyH2B2p3xGrr2Y5nmk8B52nEaa6j55Nk3u6648",
"7yzVecfpWupdJwVQby3inMggGSotFSnSrhvPGPp6JGdU",
"836riBS2E6qfxjnrTkQdzD1JkFAoQDyUjTmzi38Gg84w",
"84BoHfEzq1FccYEMZPeuTwc68DQC7LS6Bm5dRMsKypA5",
"84GZWtzfKYX1yfstmjA9eUEp3RnWys8DmsPjsd1ay7wv",
"8641M19beXr6FB4zaf6GPYdLaV695xikBLYFYTVEBZdm",
"86hxQGfre1vvVsYP7jPBka8ySrECZQewjG3MfxoCoBYf",
"87VQhN7dUfS9wacre7vqRm561bNUk5PwUB8xmroc2yEw",
"896tvc8WPdR33Q5XYZRxPQkRZaqUv4Mtr1kvFhYPFao1",
"8a3rCvLRcSMGqQvzoVcHYuJptd2o9sK2rt2CKfTtbGnE",
"8ASvX43WvDF4LHe2ioNUbn3A4ZRD9iC6rsxyjo5sVGLp",
"8aZ5CJf9qYnQtT2XYuDKASj2pCiPwhWoNsc2rBqc9s1n",
"8aZtHhTNFhVWp4fV3dUfBwsKKBjqzHDwpTZRbpeqo7vo",
"8bn2BhTzfNEQoMyEPJ2q58hFzQY8GxPkLRFWHMWksLet",
"8Bp1GmdCguUrbJbrhFDL11ZddgC83cPcbKEUbPPPmxsj",
"8caQuNVnmywtQnKWv6j8MzzJ8mrLwJkeGcKEtkQkoFZA",
"8dHEsm9aLBt7q6zu3ESfRXkS2eCwkbbzzynfd2QxDzms",
"8Di25FopYs1crYkwwrwuVJhuEkGj2rSCjcGXyZeBGttK",
"8diJdQj3y4QbkjfnXr95SXoktiJ1ad965ZkeFsmutfyz",
"8EqtKHaSgPskksNFSC8oWzSMT2mdSMMtNjGZ7E3KHxSn",
"8FaFEcUFgvJns6RAU4dso3aTm2qfzZMt2xXtSgCh3kn9",
"8fLtWUfZSpAJk7h4XhvM6TqGjXQxiwzWkymxmGtJoGdu",
"8FRFYPcwBan1KBKR6HuPy152L7pr3ePVYVxXXnWzPjEd",
"8hD1AVXPYCCrSfZGQKc56Lt6zuSLrFxejLFRT1SN2oXC",
"8HkuMwe42b1W1xvxhLoBGgtvM49FtxTFjk4JgDpLpbMq",
"8hpUJeGB6BF1JTZcbiNEgw9w9fdQ8dEi8jF4ohapsq3h",
"8hzgFMZG4WqaaMZ9H84J9fvcxkYDPYnTDFznx9mxHWhP",
"8iorF4s4S4NYYEMwGNJZqBbKoMePdoyYyrozanasEWyZ",
"8itTkbGjHRAx3cum5TD7bXaubmEFGxmKxqe6STrVqLdy",
"8iZ1Qk38z15xMW5ATSPbb42pC7FJdFj8NtbG7uosNdXF",
"8KKQ4QJ7JWAosHwL5pmjKpYWMNSxqtQjJVes2hQezNRQ",
"8LSwP5qYbmuUfKLGwi8XaKJnai9HyZAJTnBovyWebRfd",
"8mK7gCU3DhAG3YQrSN2p3HDM9S9vSd6wddhHKsXyKHvv",
"8nGsjSv92c2n35pPcxZUVytPTpD4WqnZ751MeH42whqZ",
"8NndwQsrH4f6xF6DW1tt7ESMEJpKz346AGqURKMXcNhT",
"8noQwzDhpb67yzfREDKvymKWtSdPZtbfjm3pxPYA4bag",
"8nvD1CUE48WdcmRdvbyWcM5LdJKRTNP3tXT6Qp2CSND5",
"8nvhPfZMet1yWpYzrEJcaXjc6AENvSC9AcwRrXJeMxhK",
"8onJp7KyshoMcxVm5CemhPgGvA1hdSnHbcjLCvJidV8o",
"8oRw7qpj6XgLGXYCDuNoTMCqoJnDd6A8LTpNyqApSfkA",
"8PTjAikKoAybKXcEPnDSoy8wSNNikUBJ1iKawJKQwXnB",
"8pUJmSJo9WXWVz3uf9rUmvQAxnbuCF6gWNeebbjGdyD1",
"8pWmLkuR3yio1Kcu1CqciTPmPMTiCf72h9n6Z1DmQNgk",
"8RsYRsi6f3hiK4EhyLS22Cy5KkrNbuidVYmsaYR1Xx78",
"8SQEcP4FaYQySktNQeyxF3w8pvArx3oMEh7fPrzkN9pu",
"8t6UUXRkQTBpanRoMjxNxio1baXXkEdeLniCVJGMdzLJ",
"8tiCet13nwqVRtG1UbW5Lf4uuj33X16JnHPZssfvPXpE",
"8tk7QMWkXBbzw9AJJtLkrdf8ZnEQMiWmgXx2prk4DoQv",
"8uJR7fEaTiriYLe4YMFkeeADdRkexxm8jkFCGRjPBvJD",
"8Vvc6PNQVbGAXiHssGez6a9ovgu6eyEp8PiTijfzE7nX",
"8wFK4fCAuDoAH1fsgou9yKZPqDMFtJUVoDdkZAAMuhyA",
"8WqBgoVXkVggLVuvZuF5wP8taQpzTuKGoK6brU5s5Hh8",
"8XvoJswfYCxWf2WkUmNBjtDWwonmKugYhxBruNpSfS4a",
"8ybtbfJ6rHeU49gtkQUBhAnaXBYGPdMk8dd4VCPmtbGz",
"8yS3Zc45xptsaay9iaUSpfdb5gaKcQaKAShVvEUFKpeN",
"8ZgmpBG5ixt4LVRQEK538hsKTsJBgmFFH5L6X5e9iPTd",
"8zH1mRkic3WDpUkSgtq1geCXXh4CLVfLrEi2TEqdTgFS",
"8ZjS3d1bQihC3p5voM8by2xi5PoBNxzTJtaQ9rvxUbbB",
"92ZDWNRurKikxrCQcfR9jMMYmqWksgTvSFFJ2Pa5FsMv",
"97vF6NK1NgmvMunNw9QL6ne9wxzUQ5RLAJqWxmDSkKuH",
"99NHmMDJeSo1AM8dg32nTokVRXByoJuA2gjDUDfiKHem",
"9A7aYiEPK5ymzPjniabXofR7jyEyihGAANakUf5AALT7",
"9a95Qsywtw2LKK9GkiR9dpTnoPGMVrLBLTAMPnaUwDjY",
"9B4oF52Web2deG8jdbNbBuM8DjiFyLYM6CTid3dmQkQ6",
"9c2aGPBPGbzw1yeweN1TvC24uEV5oUaGvWfFNJif6npa",
"9cZua5prTSEfednQQc9RkEPpbKDCh1AwnTzv3hE1eq3i",
"9DBdgP1ggAWJULBJXBPMzWvA539BhGVgTuTfebXhfynG",
"9dCMmPpfNuWKyZ2D1iRstMCc1rhr2DbHVFrZ9wFncQjp",
"9dSTVY7hXEJsqExDcD8vYMAZpJ5mt9HBMPwRe94nBwny",
"9fMqL641B7nQZ1xktU35qXuFESxMB7pkqmqVANtovzmE",
"9gmnbM2GUVXiTfCg1Pj3ZTJdqyKdS81kjBWwwnZbS4MR",
"9jAhC6dhjVqVA184dVczcBAar2GtXT7D7LwtXxLji3Re",
"9JKQiQqWkkUKHqnR73MmZ3kdiqQt7d3bEvy81Y5rv6k9",
"9kUAkfKvczyRJMn3cRz7SVnbotSdiTVyCFXkX6qeXmXC",
"9mbQ9mrRjBGiUVnz9Tdf7PuAkWomw3GLZ6Sup3R93Gw8",
"9me8oFZvWuc9cjBuXiW8YDGGZgkitk9GTKYTNCHaBQaF",
"9miqenD7FrGa3a4NNP6ygmYbpxtcAmW3AukuTUbAgG59",
"9nwweyAkLSXutqy13N54c4DMyvgehnkoa72EiwtnBqzB",
"9oKrJ9iiEnCC7bewcRFbcdo4LKL2PhUEqcu8gH2eDbVM",
"9oWDUVn41kNZuVCQBr563sgbLXGvZULKuMr74w7NSkz3",
"9Paysbs5evoh9BiWiS77NNutMCG9koUK2xyAsJm89Rfh",
"9pHNBdibr5ukpX28foKK3UfCMeaB5GyAuGcHyJ5DmUAJ",
"9PqR63RosK5siiSNvHtQMyEKr3CvJt1jh2qxoVmghhst",
"9pZZWsvdWsYiWSrt13MrxCuSigDcKfBzmc58HBfoZuwn",
"9Q8xe8KgzVf2tKwdXgjNaYdJwvChihmjhcHdae7c4jPb",
"9qpfxCUAPyaYPchHgRGXmNDDhPiNmJR39Lfx4A49Uh1P",
"9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv",
"9StH5W5oei8tNy8vrFeu2443Bz52Xr1w7NBmes1xnd2N",
"9TA34Aso9JfisCAsdqtpJ6cukxhDdqyE5xSYYvxpCM6H",
"9tJ8BjHCSYxVMJZNvFLRJj39QPssKoTc2h7Fev9xXTPD",
"9U4fqWRd3kcUHEX2jt1kFwF2dSXLnz9RA6B9W656Skbv",
"9UfKWtaruM2whJNqLLcrxKrSuS3VcVssdbTyNvfQCUpg",
"9wAkySUz2KihVRMUs86DcTNqPqtdNYVHMwFxoH7xbbiM",
"9waRyqWAoP68etU17DdWamgpTnPb3skY7Er9kRZMzCfS",
"9xe4rcxYUe6iADdnvLkWn8K26bvyWgfrp9HYbtwR2sPs",
"9yTy5duSRgxznU3mAfQp29qu7Q6sEQ5B54CedveiM8ru",
"9zkU8suQBdhZVax2DSGNAnyEhEzfEELvA25CJhy5uwnW",
"9ZTaH1QLsw9dcdkG2ZbtdbfzrNVyjmM3Vqxp16PDU8yX",
"A1voPbfnmCq8UBNQTBKnZ3Xbhs2x4cS2Gx2b2wJtqCh1",
"A2wYfDhhcoEvm3M3oehAtFdcwdZcxWvWAnk4yrpzbSS9",
"A3zoxWHVyqHui8y3Z4rKyqWJTyr78tusgAEpAtr4ZEfg",
"A5hMwgm8QfooAuCMw9Rw2S9vXbBwCknFMhhUwKKHvYeJ",
"A5mV39Cb5d6gbrssHvaYjGuurWpUnRGfMMtST5EKxx5m",
"A5oH9BPo6PRnEHmLnhtyN2YXELnbTotEUDB8axHVBcY4",
"A5TXyVrR7WwfNf2RjoN1W4Dw5CuuMDiLV9e77pWhmwAP",
"A8Lv2ZPKKSBFiAiepFsmCBvWEBSVGzuKxSLVt9z62Bqt",
"A91g1Y8xXFEvCGg9afjTn222JDuY7iSVmSg4fdbQEB21",
"A9CwddX4BA8AgPCmcHKAEZU4JDFRzruMFytr9oo5ZzPv",
"A9XUvhm5yKVs9Z3tYdyiAYRx9mNr2rqnv2VkY8D1N4uZ",
"AbF88hkkpZ28VaT3vYn4xu5CeNC8G6Dq9cc8ciRR4fY5",
"AbnagVJhwwM4wDuZbvoxWeofdpSWoDMhcmZCdCrxtCkN",
"AcjhWohnu7vYMdu4Yha63XZupqMKVVnrWmt1F57ScXhG",
"ACPgwKgncgFAm8goFj4dJ5e5mcH3tRy646f7zYPaWEzc",
"ACv5dTk7THbmUpHYGhgPzMhWr7oqHSkuPJpa5RfvmG5H",
"AEPNDgaApdcfZEZpww458Az9i2NZrwxsVCdiUih4EaRh",
"Ag51oCkwGy5rkbGEYrcP9GDjvFGMJrEdLxvedLSTSR15",
"AgFQkQe2Em2GUkDD85qPmHrvybnaXKMa7anSNdCunnM4",
"Ah5arzkbkHTMkzUaD5DiCAC1rzxqPgyQDFTnw8Krwz1V",
"AicKhNhJmkdqafRDjKLPgVqLzXLzJ8pS6aVrYrRkq1iq",
"AiPN5MwTHxRjG4eTQ1nrmxERRj4oXJURHPiTcNpVYcmk",
"AiWqv1dqsbvkUMec7G4DmM88ka7SaoqDPkn5U2iuvqnn",
"Aj6cb5bk1JbCYSHzNrV7xCzECWZY6Ys3VRTNw6vx2XWp",
"AK7ZZx2sdo39coZN5FsPdae2xNGqVKHX2TWJixmY4ecX",
"AkkJv1meyo2Ax2XTXEXWpvHTh4F8a68Lja5dx3TaX47K",
"AKqueA5Vfmf6BWTXuPdWxrYCDNPGi5gDLrNpdc1CSEzy",
"AkVMbJq8pqKEe87uFaxjmt35tX2cNhUJTJwv13iioHu7",
"AM6BNu2WZibZhYYHNo9ZWxmEAB7PhjNQBGKAhhN2VrFt",
"AmNRXJSSaGJrXaYBahD1PBoBR2ApkfLBAJ7SiRK4772R",
"AnMbQV5XNUEwC8HYX5wGkfmM5vo6eCkFUH29PvjtUHWM",
"Aom2EwxRjtcCZBDwqvaZiEZDPgmw4AsPQGVrsLa2srCg",
"AoUwfPuiEek2thVRDhMP7HbQb9rguyab4rDiz2NAfwwA",
"Aqh2c1x2AA59pek7pz8PymDXzq62qmNiQ5GXhpWq3rNr",
"AqhSZXj5TePD1E7d7VrQ5JhBdoa67Kz87kLQXPcfbwaw",
"ArpeD4LKYgza1o6aR5xNTQX3hxeik8URxWNQVpA8wirV",
"AS3rwVs9WR8HTzN7GA4aLBs3JjWjt1yKHfSzmwoqp2bY",
"AsvCrBKwz9Lj44Tp5zkhZabzR4bxE3HLTobrRRGQozcn",
"AT2N17bBBtTAu6ombzhiLNLc8JinjMXmGMzFbxt6AvwC",
"AtHoh3UapTWEBsygTDXNQgphTHcqTj9g4RFpbvPTBDND",
"AX44APbDNnr1J2wqa9yRQyuaiJ9NWmuEjyK51LiBWZWt",
"AxBt2PSuKyxj4muiCyNqByX9uwMG95CFGea8LcacBwPN",
"AXwviAZcTWRS2Et6BuFd2dEVhXFWpXi2et7bBN9CABcD",
"AyXMWbdxpvDoeJmueCBA3B4w9VURpiQu6pbjrwM2z3kR",
"AZFkNiUSszcpsTSAmCWFTcLPe7iQf6sGp4ceV72JiCdt",
"AZq4Y8BQYmqfav53bWKhqNRV3X34UJuybPXADVi67AGg",
"B21L2hCrdE4SDhRi2fHKohfSUNAhoLeaWfBp1C9HdF6Y",
"B2UcYy4WiS1fSYKbMPeAKZoCEzgfQKKt5QBAA8NXLvpZ",
"B4xFUYq2TDmz6PsiCj29vFyvmYvqTRAtnhQ3uSmxQVFd",
"B52Da5MCyTcyVJEsR9RUnbf715YuBAJMxCEEPzyZXgvY",
"B53tbis1864ZZqg9BBdXFAkDponmVvKHwHySAY2tC8gy",
"B5FNFrfrrfpBggFBp9h6Js93mrz1Tham1z1GgP3bDgNc",
"B5WTSSyoxCpkp5tqLH4x479aWFiMoumXkbf6UCiUX8N2",
"B6eeWqfF19AGj2HtEk6jzSPEvpnMZjTvbyh3d7HzRBeH",
"B6PJC25oDXqzS1hhnx5RLTp6SAdQvctrHF4Juyi4pCWE",
"B6ZramCQhQWcq4Vxo3H5y1MYvo37wRZiAwKk88qaYNiF",
"B75YnUyemn7ixtnUtq4cDUVKrFwQmn8J2Er85ypcEJ1c",
"B8T2dbvYaM4gJfpLLbuBScuWKFhnG6KspRK72v5D5uoK",
"B8wQDRb5JLuXjJhAtmY1MAQtLjWQySberTN7wLUHmP2B",
"B9FoWJeAVrDGZbSzWYxsbwGJKrwqMu6vqJHr6JmRdCx3",
"B9gJJ4vMLJvnb5geZjU9PqhkyHX4jESMYajfcALQgRry",
"Bbe9EKucmRtJr2J4dd5Eb5ybQmY7Fm7jYxKXxmmkLFsu",
"BBvV71ncxGrMDjmKTkcvcjipzu3bv6ExyVPPuRxAPtrC",
"Bdd4XhquueXBB7aZXVYUn1XBdJ18G7Wx3LUe6aKkmXEV",
"BebUNmLyM62d4BgE8N88YsJPygWrCWSNaCeq5s2U8uzC",
"BF1f26A9FdL6uWSajjTfstnLdCpynXGVrAEzqUyKXJKd",
"BfjmopwTknigm38Rj2synkw7mNTjgmm6hsCCb1hQetAK",
"BhbARoxdh2MT3vb4awXraZFPzSwBdmF9pGgURKNsjBqC",
"BHR2K2tpc1fowNyUf4PfAumc2tfaT2SpvQVqmmpuN6tF",
"BkTtw74AC3rDKUbFboQaRVnhLEhsUrchotzSvuweaUCH",
"BLfpk2WoF8RnerCqjxHe7qFJjpEE427riMkf2fS6vUB6",
"BLZtwHMTMgnZJdhJQxQaksgJgteXFFDBrA13ywWagji4",
"BMZY98zbjg2ey4XNfhBQXhEuvVqzaJ1T3AKD2quL3wnK",
"Bo9T1z62GVKmnttMz4HxPPtRXs2BUkAd7T7yUsKyG4iA",
"Bpq5BM15n4ps9zftpAiJARqVmAfUsmjSKfMQN7yEZARe",
"BQ7mx4ScgjetA378LnL1Nm3xiM9bbLuEsX7UKxPseRCU",
"BqGCBrgYpLv62ebUp7DKfnjvSJ2qBc783kehzKJDERbv",
"BSF2yD9mqzaixDaLEraF1en82EWaXx7wbaCqSuKppqG5",
"BTPUdVrsgfKFPyBmy5ozHvzMk1QCK9vii79wgxtGhjgn",
"BunxTHgkSEyHHMikCe9ofDB5dsgcXKN6nqKC5WQsd1op",
"BV8sS1jn1AvGAptY5TxNZdcm7aa49MZCXSpXQjzjdnYG",
"BVLVnUm2tkgX1sK2f5oVtTMR1opGra72s4qBD9LjMd9Q",
"BvVKJxCQNsFWpB5o1B6To4uZzqAUgNXLcfbS2inyL5XU",
"BWBZDvXHUsm5WUhDBzA65TqMF2CNSCtow2M7tfAtJA2C",
"BWiftESMUsve87rkjU7HsaA7fkiJRAbv3xZLQrmKZtnz",
"BYxEvmSwA1o3vJGhooJoReSjrzou8A9h1TZX9S4xomHP",
"BZCeZyvroBrSq2SbwjQKU41kcxkutUti1A7rzZqCfaHK",
"BzwAtaU5sqkd2wALkiR7A7TimJzN2Df6vr81tMWmiRky",
"C1QUyFjgVeG2mNBHErtmCLz8BUqS38saMUz8KA8r7921",
"C3hD8Q7dLoodUm6E6LTWR3XqJgcRrvrVaMscwMBV6vaV",
"C4eTa4tqvvzpTsp9pa5NKAbeDXJs6sHWS5BfxGB44Xex",
"C4N1bMSzbfDwHGMitxyufNZPaAkNYx8vJxHRnHWrptAT",
"C5HvMeXdHGxi7nVTFPF6KcyK77RSWLLvEEB3ParXoK1F",
"C8VJytJbZM7KFMXHNUdoF4V7V2QbhkxNs1qYybRoqUEK",
"C8x8gRPxVQd1rk9VG7fm5MqtbPkTF7C9R7NUvb8HJ6xo",
"Ca8DQQagVHeUAhWPWxGCmaMuccr6aGsm9HxeedxUKBC7",
"CaT9dSx37Quj1kcAXEVd6ncM6NLvYUSqhtgEnn1JtNKC",
"CbhvvtosVdVwZ8GVrBqgYT3JrXLh8JRqgKpimhnZw31h",
"CcorN3BoG1XMZehZ9Xib9YLo4mcvo7pzeVurC28gYYqX",
"Ccq6zHdtv3DWCP4AccTi4Ya2xPGsEVHSfoPmQ1qffb8H",
"CdgoKJdFPyqLXNmTFjiXSyrUefmUnjhdQr2kpTvDBfe9",
"CfhJ45Zx4Jod7LydzyrDyrywhmreZMWMZaT4p8YtdaWk",
"CFtGf5wQ7jPgJVSk4GiVxvqVZXfkpxzdnkFJGduUKA88",
"CGgiEmA5whBdjKKyJVgFEBe2Z2qDVQQd2rMvAaUJP6Yt",
"ChLMXZ4KXsMpa8W1VymMxim1vdPSK5a1jDwfMbm7cycT",
"ChorusM5BVgnAKbg9PF15285LkqeCoZWK2p9s35T7J2A",
"CiY5RjWPs1XyegKyBLcG7Ue7YMf98eiEnmvqnSuSKbob",
"CjhKCjNC1WUgBjAGst3D2XmSsWHvt3zGasasmogPTY6J",
"Ck3SxoXUShtXfLKfUUXAtCwrFVsEohESJfWGWuSgtTQU",
"CKs5FjmJ8qx2o5gzCJukb9Q6Z4TEJ7ogJjuA1Fch4bwA",
"CKYDvsLjwp6tPXfYxLmBv9LtX3FPpaAPj8kw1Gqy7s8Y",
"CM1c6z3pRNgHFcfZG4z3wE31jaR8c4gCYQBJVEoCUyq8",
"Co7UqfqzXzTjhBwvam3zhNi4p8dKtdSrfh6rQykoNMy7",
"CPPVEbGFbX3XAThetvfveCE1vYLWUwwJGT7DxkPAWb8D",
"CRCp8aHuYiUVfWMn8dG7z1T7SMi448ruuP9n1e6NEMmt",
"CS1Q8yNkw6a8SmY4nJ1jKrqhaDo18Wr4CnNbwsvKoswC",
"CsaAGRau3ZvyMQvJ9CWSqbqeVv9zw2Am8FhnL9sr6jTk",
"Csrv9JCbebTKu1uBWqkfwuPHwVCXsYDrQmeXf19onbsY",
"CtxU5HwVbgspJVtWxwjuP8wXUMdkjYJ4EJwJ3jvZh4zu",
"CUdHsUm5eaCZRctJo4HayvvYeh3AE3wCrNYXvMtVniBc",
"CV3F19YAhoW7DpfHQ5W9t2Zomb9h21NRi8k6hCA36Sk6",
"CVtdVkbhutoU29LZKuDM12EUZmmdVZABmPk2CYnjq7yn",
"CWL6skWfKLDd6SY7NnkjfMgNR1QxHhxCadyFNL1ssNaS",
"CWm26qFBekyTtUYg6ZLfJi2ePMLCdXjGkZkhFkXqJfrn",
"CZWpCTN4rCWer8fm5ZqFdx82CDiCJjZLKZ5Ti2gdmchQ",
"CZY1ZJAUyD2ZfHE5ENChUmhqSVFwPnTm6Aq6N5tbBqaP",
"D22jrsNWZG6qzTURrT5PM6QvUBt4vHLYE7dx4Frt5wNu",
"D23NCAVxinE53BTemguZCheAqCdMGfNTUzWdoWvq4Xj",
"D2NjDkcv8Y1dWGdtWAKPT4em2D3sYzM8AzMTpCG1RVf7",
"D2PNC1USZ1XZ7mZPgdLjAvbDnaWBx59WKKM3CxjAqyu9",
"D4kv7YbigKSHMbCpzLGyP8SywUAPw7Kvn5VdHkx2gNom",
"D4xPTEsWkU3zN5bDyTX9zgqnAhMoMccnCRGRLwSfo1We",
"D52Q6Ap8RVMw1EvJYTdEABP6M5SPg98aToMcqw7KVLD9",
"D5JqF3qkLkeJKKEi145oMseEGc1ym9cWKtBKtg4ZBBnN",
"D6beCFAZeFtXoZKio6JZV1GUmJ99Nz4XhtxMePFvuJWN",
"D6svmbCCUDFYmw8burYWAJwBq3e3Cdp9wiLdfNZ4SLus",
"D71JRzjPpHipt8NAWnWb3yZoXezbkGXqSf7TVCir6wvT",
"D8P3w7GQ4zTYbJfEGgfdQWQ1vrL6umGYAUrMz4hBJjrN",
"D9rCbP5rBrJztzv2EaACNt2LhXVLpPmsNgcWyB6LdfWW",
"D9YkGDRwdQaPXZe6V6WwWUeTWwfSoNmXADf1GXePetpC",
"DA7SNDUGAHwcXxHoUhbPqTv2p8GnncMpRYYoT6eJKmSR",
"DaB3ZwVtGLzSjazk5STQEu3MkJR2nkK3tDdCPAvx9QpM",
"DadnDZbFH5BHHRHD7TaobaSQ7QATXgvWegHUcZ7ZGzmW",
"Db2V7nPHc4sPHne87nYXPGn8Kv8rMsiWCAjgAXmpqcpC",
"Db5FG9D5Z1WWDSvQioKkymwRiTTGcTHbryBniRqYE65G",
"DbzdjE8TFSN1Zb4g3N9NsgFrzJ68G5WKtgSxqVox7Nxr",
"DciwdVV1DXimdsgRGQuQ45zYVjZNaof6a6EZ1JjaCsvx",
"DctYdX8c3qBZ7RUtYE4Ffunjv5SYFxVde4H4BDejPzMG",
"DD89H8QdPyWGtR5QnrfM734G4qrD775HFGMobyrkHjn9",
"DEZAHY54DgLq9Md8CyxBgNCe5hxDQi7fJaSE8jymtazr",
"DF57amFm9aHKYL9pKLSWindiF8o2RRxtReLb6d8DQc38",
"DFb6qaAkd5DTnFVYLDjzJNfsUPygP8GHHebN1CBv25cf",
"DFpi7mgmChYV9whs4uEtioFG1R2WF4TpGd1zcXMjGwF3",
"DfTeDaxk4RufkVbykedVnqa1r9S3z3oKFYL3FFmPdr1o",
"Dg5E8ktH4GWfKL1vuVTdqZJEkAEgtV8LqmSXyLJuZ3q1",
"DGf8USMPMty56BWgwFSUz4orb9smQxsWxufKBXSoX97f",
"DgHhYjwQfSrtyKCHHZ1kV2DB2ce7pNPKQsqEWz1NRsFq",
"Dh1DRj5mLYMeJVGvaPZN7F4XjpX6u2dCDXVnUXrE8rwW",
"DHa2QSwSdf4uVtFUyGeTPJ7XZcKrStoqQhHDa6dugP6R",
"DhMuXF3UqZvi3GhdrAMVyEQ7pW4prM8DkW54scYXo9Ke",
"DHNSHtEZHwPkkfomi5oMmCQy52ACoDpD5Kc6oNGTJTth",
"DJo6wDUWAdAFvuRvy938ze4VkwCBrW2o28pbcZ1qZogo",
"DJRbum8r82ts9uuGg9W7AheFwRE7atSV1touZPn2bEcH",
"DJvMQcb3ZtXC49LsaMvAo4x1rzCxjNfBfZtvkUeR4mAx",
"DKNy6YAPt6zq5jVD5S8EFSXpQmqA4NjrQf8t5v3tHo7h",
"DKnZytVA5wKbNPYW1pvPpoE5YeSsxu12KJFa95gBAGm7",
"DKyon4vSD7mF6uqgEJujABpEdhRbyX9X9EzFjmEz4VBx",
"Dm8Kusyhxmz2NmwF8RivLKembinSL6h7tvh4vrMVNxoR",
"Dq5r3zG6XGBcXNDRSWPSc7DiWwjqcGoiVcEhZ9mXEAaV",
"DqPvDQAjyZ557yaHJJzEekV11WKN5EZ6HQujNXQL3FJL",
"DQSg1PLT3Px71U4LsfBNhg5yT9GgH8FnK7qQAq1aLmk8",
"Drf2oN83THfrUJHA9AGzJZaL9KMKggPoL9HJVttkSCgL",
"Drkj3wbHHmE2iCnqXHKFTmwPkuSc4bsFdgAmqv6eXuWi",
"Drskw4YqMzYRVikgsGPACY1GnE4zTWs4uqSKajKdkU4Z",
"DsaF77cCADh79q7HPfz5TrWPfEmD5Gw1c15zSm4eaFyt",
"DsnqNtwKA817a2VQypWEzaRXY2soq5Jgc68MgFBMR35p",
"DTDiBe1ZLTJGzazmd3k6oHxKGTitNT6swT68MguT8QaS",
"DV78gathrorcpWsWrUkWrWNowLXpizKsPBupStzeAJnL",
"DVnKs7XAL9au7cWrTT335gZ3agJVwrqeSVnSWANo1SJG",
"DwCuwTRTXQWm5M2b55h5oyx1zWkNvTUVGwmUGvBAVbqc",
"Dx4bMuKpGaxAnd53QYDyKhD45PjuFLx16mrgoRK36STf",
"DXCzguRGhTGvFm9hdVsDkFi4S3n6W2yrNeUjrFN8tkvL",
"DxLtXrLUrqja3EFjkR4PXNYCuyVtaQnozonCdf3iZk8X",
"DzxNmWD99qvkPdDR94ojXhUrpzT8VdqB5ktYX7fZr4gc",
"E2cy4hqcUpdyMpx3TuHKpdW2cJZ3cTSthk4jfqJryt6B",
"E43Lu6um98dGLscCuPUobgKC2oLANeByzqdab5KjxV1W",
"E4YYWrsKv9YkBjLRtVNYn792RavzkXL6NPJ5Z4sHXiG5",
"E6MuSSCF5aoBstVcZaD6sk7hkQrxvh7s5ttVt8NzAiNM",
"E9qZxXtwWT5FuwsXHLjA4cjJyyeYb3ixHxBSrJJDzPwx",
"EBDnuJT5USg5HsQSZtWT1q8y5XjgW21b1ebYSahcX9V5",
"EBxhSfAWW2Cfouvj1k242W6U8krZVAxJS47SG8UKb4ch",
"EC1TjttBQaKU1dXuMbv4ZMSFXuPDt7UCMvNXruxCXdA8",
"ED3Y3cuH3wtHXT8TmbDE5toHU6kwQzevWtvkP6rGFNgc",
"EdSaRxfyh1Wjeq331Cr7aSpdentiPyCPXVHvSacovueU",
"EEpAEPJ1MCfZJes2sdCtkFwdMWvRPmDNnynnG2JkhGso",
"Ef5gVy3PFRJA7uQ4UkAD6AufNcZNtHN45k5N3L5mYatU",
"EG8D25QxDJ6nbD3oBpu4tPDvihriy9mFiPq3CxCGFiPF",
"EGknxV4LZM4DNL1Y68iAPQEdLsMZbL82wQbDmsGw6w8",
"EhZmFRvBcYQU5TscYdQV2i66pmmeKrzecbHYbUT9fuyk",
"EjcB41hrq5Ltr9Yvda3jQ8zGkkfFGKadkykTCQnPeCne",
"EkVaQMGB3cbyKdqBwBagGtURjtoXsP6pS7HGyizwhUs2",
"EL3RZmhvLAMMoDip59M3oKgqXXzHAPdZ88KQ2h82mCB8",
"ELsehFqpFHn2hJoJgEuSCpYhTvjCxFz9ToiiCXWUE2Tx",
"ELWMKHPVZpFTwBSzVPF2q4nmvexLxWycjy8fuoC6egBE",
"EMeaA1d3kmoBNtZQNgqEZS9y44gMrA7iSuqS4nZ4qxpB",
"EngVeJ8w7soeVvKwypuSutnXFPSWDLMq3Vw5wuAdSGjf",
"Eo1uPQgv6jHk5etCBX3hJ7Y1iYti7WqhisrtY3krEHCN",
"EoHfz2ybgn3GWN3TbTj2FB3AYAXCzhp4cWxMMnyAo4pi",
"Eq1A6L2ZUpy1KPVni4TPYvedSTiFSNNTiJdECskb2Qrz",
"EqaMZqSjRtm1c2FKLmZoSp7bgzEW9WBYVfT19n74cYo7",
"ErbvzZx2Ss9GxizKyDviybhZPu8noHv4AM5vuzTh1ij6",
"EtemtT8ofMe1puaYitDPxyz7KHXPjsxNwsgBqYYBCr8F",
"ETMbiU7hEt7jkoA8H6ACsfeR7LyGA773k9HA13yJUfex",
"EVCxvddWCFgh9LF4EJKzQb7stsPpSBrvTFUYnE51pX6t",
"EVd8FFVB54svYdZdG6hH4F4hTbqre5mpQ7XyF5rKUmes",
"EvenXreut1ywzoMaY3iXtwbuV4xcY2yz763TSxngja6n",
"EvVrzsxoj118sxxSTrcnc9u3fRdQfCc7d4gRzzX6TSqj",
"EW8YLbq9b6saEKn6K9R1K29CSST1XDTPpJLeXThpkXes",
"EWg9NTC5s7Pa9FktUk6dX8xRYkvJ952peH1z1iznd4nV",
"EXaDpiWsCGQY9d6AUa54XBAs7fvxF1z7V63xN9ikMMvP",
"EyNimNczDewG7Ubov7T2RGKD2R7i54vbeDUnivnR6ePA",
"F12Ah86ymdNPuXya5i3PKG7jeLfSMGpoRTriVTgcXr15",
"F3gsehGvHNXtF8mDbGVfB27Lq1paSgTiqe5nzvbFVREK",
"F4R2g7TnRmr88GY9DjhFo5Ssk9Ji3phBRssrZL5rQxWs",
"F67LCN4eLwqBGbHyT14nJw61x4d4CMtzdcvFq3tZYwki",
"F6ZzyJaFh2XNdDiZSnBHhQpqWH1YMifBxZNJ8JXeFwXB",
"F7FgS6rrWckgC5X4cP5WtRRp3U1u12nnuTRXbWYaKn1u",
"FA7DtFHm4gh7gwtwdRphSq7wFdWBU4inVnrchCmTKLTG",
"Fb5cEcYNgPXKJoEmvPvsU2ENYRVePQtExqgf77AnVX54",
"FbWq9mwUQRNVCAUdcECF5yhdwABmcnsZ6a6zpixeKuQE",
"Fc6NB99bkJQn7JsVSqdJs5fJEzj7KFpe4JHNQCGVCctj",
"FcTYrxp31zVjTW4qjFKkgRcKXbWcBbiRQqJYpufwcJZN",
"FdC2FZ4geXeEh4pTPXWrNH4nqsriB3xD3GME9HPD8CVR",
"FEq9FL3hzRDMtL8DinPAaeJpb28GBZYTpTeRcRyHSrGA",
"FFhtic9yPS8ao7Qg1GKjqyzwhGYK5tsksT9VrLioTgbY",
"FFRanhUkAFoYNPE527F7BKeaHe44Pwi2SaiaCoppkJCz",
"FfwtopRBJWKEJiCmkNUFyaQ2FMubtzMhAzKgHDF7XrLa",
"FHyqLxzvim55sLcM6SY4q99jqe5ifNi8oaj71pAef5gw",
"FiqLajUHAPrdmSZzcuy8gKNU1AweQpCXy8sznWgGEZ4S",
"fish2tcDFRiAM1cNKqF7b96WzQmmDRjsf5XD214MhpV",
"FivGzpupCvU4yr9E3J8RvWtLNWTm6ZRcGS87a51BVHWS",
"FJKaQebiX7qd4fy4dhkXx5Re88B9DVxLT1pQRiMuUTrG",
"Fk1pCDCzbBULB6Kw4cQUdTjvvbLBooW2TPSZjvK6YKA4",
"FkYhpz7HSGQJvA6apj1BKoUfytQvWseLfSUrE3zjvkQb",
"FLcUYvDMd5nh4cyP3oErMHoKnKREmza5rdAZ6XHYU1bd",
"FLjMwfY9ahmyc6VgQpokBrra5Ucf5KSK5YW5x9MMKnpS",
"FLpMRfbSMkBnFXDdGKdzcGP8JgrNVhaYowmtArNughqt",
"FMHjnmeRLszDSDTmHrbqUi7rpXLcrynh9K6jQvjdhqf6",
"FmVd4YWnsiau4JLHYxpRW63uutA2sPKpKYGiwB3hBkwt",
"FNBpvn9cNMmMA8GRfGxaD5P5zkG8m3YAJybgJkVi9bbK",
"FNdoUuKVBigMFGpVvSMLXJB4FC7XQL1RjPUqUiwvPiCS",
"FNH1XmR5WgK7CH7W7YdcfxtdgaKFueKtqaggVr3CnY7M",
"FoDccJmq4PksAoMpRbygVVocdp4NrC8PSwwDd8nfKYzv",
"Fou7Du6KtVb8dVMzKMYW39fuSGpMzJGwpkQ45NbxA3Tx",
"FQa4mYpWL7mNEXe8dWbd2FXxpreFtYJkD2S6hMD1oXHH",
"FQFrdHAhKFP9R5R6JkJPtVJhLDivDia4cNNUcL6Eja6j",
"FqheXr2yJSTRGncTqVFFG5sLaTtXZeQbkQAxbL8mcGru",
"FQPRF1x5eFUdNWvfVwwJkJdqqfstGnCkEFVKEKHp3GVG",
"FR2rptYjvSsYeDur8khPK33q6Byjn4if6Fb4oTiJF3Fh",
"Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN",
"FTUh2jo7GmxFqLy8c9R9jTPapfGjwcaDdozBjhKJz7UN",
"Fue9LZxjhk2DNXWxM3rPKr3z2qntChdeth615m7zUo8v",
"Fv4zJ7RvV8gEYxEtLjnGZAX1qxjqRh56DzBgqvFEVjjM",
"FVnG988wW3uF613QVxmQrkwtdzS8taxjFsuARTzZBwMo",
"Fxv1ymSwB6tdRCbjBURQK6P68XR2njCGfWbnfzVciJsP",
"Fycm3maimWfFWgdLJubtEogL1v4aJEkQapRwWRPxvf5m",
"FYH7U2HPhgxQCsHBGDaC135Rsx4tZx7P6ZjxnGqWHBHn",
"FzAv1TFpCyR65GrxeqBwnEzNVXEeUMPV5rKZGQhPR7mq",
"fzCFpDGUcyX78KEMSfJwczi61td55v133eyTeQtBuW1",
"G1uCu6JrV683QK3kdAzEiiAEBSMk32Ugy56u685cynJ2",
"G47WACh32JUcxyiCna7UYw45tyYSFKQ58yFpUmhmMybm",
"G5r4XSC5D4Rw4NaWjbgBKnj6bNDsSGUvE46w9BYAT79r",
"G7cSi3avELxMLTCossnsooLj6UNhnfER6kpnSx8NKHfM",
"G7gAgJpRHnRvFhrUMA5khWMqHJ3tpWVWdpsBvCq6w6MY",
"G7mFk3fX4xQmBV5je4926SzLCphWFoww8APYxQKfkNxn",
"G7NoACjYzRcrLibw3dbNoDCVoBR67ijm3tYeL5zL5qKY",
"GaTDMHvngmoJhuRYrLGcE2GMCofu7K8SLGwkCkDE1mYh",
"Gb9j79QFprtbY4sieaLZGjQr4a5ifFxGdnU2qkjPbBJQ",
"GbtVg3D6bNFSjem21vrJBJTpUniwwEtmvs8mQkX5XS1V",
"GC41PE9hngtmwzCbTuZQvCLN7C6xSLTC1AxRSAZSPRRf",
"GcCuD1yJFSeDdbogBFrnKRfPcv84wNjzN2MAz5vrv25S",
"GcibmF4zgb6Vr4bpZZYHGDPZNWiLnBDUHdpJZTsTDvwe",
"GCPW2jinG8pk2KfALJA2FYNhLKwCR42Y8ccQPXz2PYg",
"GEBvuMyPAM3Hmsr4UnGMqeeJNPiC6ZqPkCGKW6pADd8h",
"GevceSyTLxHv55phyp2PirpdsdqNFZRZSYViRCrXmneh",
"GFFuGhyHAr2fjH1DL42m9EWpAWXXdZ7R6PyzuMzDodLy",
"Gft346NFxfieeCXCHuwdQ9TN6HyPLr5oyfwGS4DGQWGt",
"GgmneSMKWnEcavporN1vPpyTun2QRBzCjFCecQT5km8y",
"GGQDNAb7hFMu9XnXPMw7NvyS6MA9BfeAmaZX8yp1QXFU",
"GhBd6sozvfR9F2YrKTFwEMqTqhfUjxNUYKxye7ZvTr5Q",
"GikkfYtVZgaUtcmreVpQ1Eamw7mrnf2jnDFGJBnhVQhG",
"GJRLu6i8j4CJukLEQQXe3y3pdk3ynVkt7R7ttcfCZBoA",
"GKEcqfeBS6rP3NUZBV8PWrmmBkPUNRpsV1CqqyqBUkAj",
"GLKsDBjWBaXHkyMihjpU5ZdKyKWtUpJyE4W7PjEFSEHh",
"Gp96C7xbTVp9SU4YDK26zYjnzLHSuHkfcHpzXQXNNvNq",
"GpowxwT8wY9x2uFLWhZtL3ELFdAMnpBxTrpqFgnEukVn",
"GQnaJJu7h53SVVhVpg2ErkSKhYMtqYrqv1qr13MUobuq",
"GqsnwvnnwfvevfovAfRu8XrJwGietC8h8t4dwyQerbfq",
"GRJohz4qkCV6YNjcSb3mRBYbtcTcN7GLG2cm6ffZqmW3",
"GrZcGUJ7baE8r9KSmrNJAKtgYAMiD7p2YfxefkbgTng9",
"GtU7wyz6vwTo7d82qNpFM6zsxWUnN7caxNMaxLwbwCEr",
"GuKn8nEJwUPjBfxpwyq2MXU2JNrSpj7gqnKptCZeEk7j",
"GvB8xRCx7HFxuM675n41mSsC5adFre4Sn7CMq1V6v2MN",
"GvZQ6JUcGiw26huYVv2eDTgrgVh3rKtADPYLfBiznVda",
"Gx6SwGTbYAFrUeBRMgMrgLUKaeGNeCKzkULXdEpwPSwc",
"GXfJaLrWgQbiuutiLyN7ijRgBvAvunJy7bYzaV562VWP",
"GXRPoAxz1gmqyNz3M21cKSJB47yAc3sJLGvenworEJn7",
"H2aCsvmJvG3tQZ58S4dTXBwmqia2GHvctgd5t2p1u2cV",
"H4Dgb3KyCuYWKT8yKtp8qbY49cvaqZcisa2GDnroFsv6",
"H4f9d4Ru1BV2CVDyYsTRShVWk1q99DjY62LxccREKZch",
"H5FVid8iRWjQAo8Cbvc7CJJGMwtTSQgk25sknxnEKkYs",
"H6djbzHAiv46Wxy3iwqD7LA8ART8YbgrWyxQPCNhnLPE",
"H8ACGbhYk4UA2F2nALk4aSia6Z8Vfnk3U3SnNDdENMAh",
"H8kdiUSyvHbxshcFmRqWTB1HZkQHKcQcagQ56TzLe8ib",
"H8MUh74GVNbSqGrYkZviws6xCmdVS3VZF1rbhE3gSESQ",
"HDitfpmCcy8WyJgNCQTrnZ8r71Nn7t7SjmVHnRwumGZi",
"HE35aDYTJHJ6KA7kLEXvENiRBX8c5UG5xHzgeKiXyQno",
"HEbMY624UhDGm1Qhy6neKSyi3bQjQ2RidSTyt7ARK8RW",
"HeeEbBAkuLzqxsFLcbKUfWmeNizywy2uzAfvRg63LFT2",
"HELPwwfg5W9LmXv3axe43EY1YGJjfVf3CcVjA8BZM82P",
"HEVxZQExGLJbpySA7MVumiTv6qbeCzmAQJT42CthZwoX",
"HfxzFiP19ymtxHagP4Zpga2zVo6ZgivpK6VkBKDowHRr",
"HG2CuUSFhdkdcv31qeyR13aS6chRgCFdqZfoFfPmYn2a",
"HGHMEEHCfbVFjqB69Hu9oNW6SviukB8jUheEhYVZJKe2",
"HgmPwzNcY85HfrN3bYiqaypb6Nmf7ayaZEaivGY37913",
"HGthEYmVZTvoGptPHvgD1dPxxKMGXYSrPDcFAPSgdYYK",
"HhjxbH3vLpUNShQB34NuMCL1Qc3xoiNDbvALWrAMCCnb",
"HiJ1pjUGoJ2G1fbrkYSNwPRrJ5ap3Q5usZMQB3pK7Zgs",
"HiVDGAGPSxxydKTY6BkjuLE3CyabGKyEuMMHc1yMw5Qq",
"HJ1hBuwYztBeiPhscJqQ1KKfLuhCcEKcPJQzKuZg14iw",
"HJhcE1XDYTRoHaDWcfkmfGvJuDWPZLEK8YkuMw9FYpP3",
"HjT9tCUEFWrUXFR37ahB383QTF4u53KWx9J29EWRfzdi",
"Hkj4Y4QxFvyoCd2wzAswsDpwW4vD1vyC5vppVyDDhJ8F",
"HKM7CfuNPKeU9crk7SWJCFgvFKvGh3VFgqT1sQVyeDDm",
"HKu753Hd2F1nWLPvcNZHX6RAGSXkg6AtywiVvRqDXxcP",
"HM2hzFLTd5TAhejGFjaXAm8LLjdmnj7bqQrzpRTaawdo",
"HmD9baVLsNVxALt5mo3yzuH85ND625cTDuDnvVo3sf6R",
"HmSU5YJr4XK2SYdF6dxNXtF9PQRzbXXupUXCVEaJZX37",
"HNJTofxjz68XBQxPHoakD5rru3qnLBJRVoRKVTYxN4EL",
"HoMBSLMokd6BUVDT4iGw21Tnxvp2G49MApewzGJr4rfe",
"Hq5o5jjDysktyUQdUxFk3RqbXvjSDyzNn1XtXKexHUu2",
"hQBS6cu8RHkXcCzE6N8mQxhgrtbNy4kivoRjTMzF2cA",
"HsWUiXARLPhYitGMapLYyMdV7k27kW2xzy9Z6L77jKBC",
"HSyQVMDPWiukduwnR4vS2fpuHZYWvK8LSnaY3UtrJoKa",
"Hu7DW7BoXXuKbwaFJaAMEXpBv8pqBJPhfThMD96WHiJR",
"HuJHVhpsf9nF4vbTWjgqgcCf2h97eFf4DnhAe3txLERo",
"HUoud6qywaWj8kZwdHRTbEPkKmskHa6Md1KNvF1JQFYF",
"HUrBwQeq52MqCih9c3yuixfrMWgYh1zuBZC8XNyk8TaR",
"HVD6ZDBgzjqYKyDLNadSkzev3qwSUnYEs6k81JktNuom",
"HVK74e6bD6cwDrK28hijNxiHZkvyXCXS8bd41tH6QwTk",
"HwFvyMbGLkiTUaT66cfL1FnJ26c9VqtpqAg4UbWSXtdq",
"HWvSRgESdWKDccWN91iRVQLN4rRyuCbuAHVWtPR1cJ1C",
"HxBkCVtiYAymCCv4EYakNDSCPgog3vBJMZx54dCceSyS",
"HxnjZ5Qg59nupGGXVo77idUxfsiRXPcBbBt2hw3Nt99c",
"HyCf5LyHfwnpnvwTQkfPWVdkqJJ2R2A8fBKb52m7cunf",
"HYW69eojAvAqiPfebT3S8yUTvTDHnssZbTq1TMCm5LfP",
"HZCUCLqV3P7QqG1oskLLMJW28zuckhxmRzEQ7UWaH2U4",
"HzPFqFKsGRT3Yvd5Wgfng16c8q6e1bDe3W48fZbuuS9Q",
"ibwMFhkkeMTn9746FERTdb7rGuQwVcRXDbNYXB4QB8q",
"J1Avgbk11jXQ7YfXcLiuh7zdLGwYujyGVixHLywWmSuC",
"J2VXfywh2oc5eT1LAtSApAqUVB1zJypFTYKTBdJg7BLW",
"j3z8UNavf2RXACm5fTkfmkhb9SZZDBt9pyYX5ZyyZCj",
"J5RKZBQ21j65Gn3cBBcFFtBaJW25Y3YEA3n1WjkKjNSE",
"J6ypmUL27j46uATh6RCC58m3yPDqbXfpE55oGsnYuSQ",
"J78SNwDW6G86sMmh7djnBKGjewXNpjD74sJTjJ1iNgTH",
"J7Jx6vDZNfCHUzMAXxbdjfNJY2WXcVsd5qMtVN1Pua6t",
"J7v9ndmcoBuo9to2MnHegLnBkC9x3SAVbQBJo5MMJrN1",
"JAgqqfmUxsLzLxbuWpigwJ7Hz38vtgyzzt4eKE5K6nfX",
"JAXyzy7iCvCSTxyd2JwdTooQJAceQyj4Ajy92Nwn8Axy",
"JE8vncP4cHCGhwJZQuzeBTfUVZ5tsFajkqUhcN4Ljk2f",
"JortrVgBnTJ764kQH5HHhv3ozUpGUDPKqWPavUmjEuv",
"KdNhBD4WCm4Gd1fwi7Uf7Z3JD9KrZcnWWm8nSEZ6NEB",
"kffvkDohANNa2rpj8Ti6KWZctCX3Ci6Rj1SnGHx2r63",
"KjkPqUPPsYe4exTrHj6XPKuKcEmXorNLPuPWboQMSaw",
"M7Pcv3j8KpX8ZAkeSsvJnexgKrZbBAaMEcRTvf6t2Em",
"marivpF7DDv9RhGnBYrn7SZFuahFdwHt2pBenWfG49j",
"markiLNTC3FuWYGXKz8h9XpbwJbVQhzuV5U3bfpPc64",
"mETnAkTMdDN41d9wSPYJWDFu7xehfoHyT5py2thcxHB",
"mf1oPU95NbDVo1V4ca3QayhQg7WdS9iLWeEM88ex62B",
"NHtR8X7dmwtCagm1FuuC6ngQ3wv52uJYqFvA79G47MX",
"qzCAEHbjb7AtpTPKaetY47LWNLCxFeHuFozjeXhj1k1",
"reisen5FAkzw9iKy7RWZqyhRq5PrDVTKGAAQSB9McYg",
"TxChgiaHwnkdT18sBnSepLE5sGk7vsQ4CZnhwiHUMQw",
"UdAZ7oz1WshdwyimF6e2VXiy1eSJ6UdHSRng9yRLtgY",
"URnkWZGiuB7jXbfCSuNSwir1qkn7sXjiSPeLPaXys7b",
"VbPq927r2eM2AiRcfibv9qdKtgbbpWJMCJcSSQjNWgL",
"VCRrRTgSjDLHvo6UQKXy8VQbNVG2ioHNUEyS7oB7u3X",
"VKoaYMffGUE92ZPve4wLCypUxoGsKYeo92F7BK1Rh61",
"VTAZqz5HadKsUWyavErx3hhUeaDPerPVDssjB69hP8b",
"VzZqDXSDF8KbfAQUvizj52NTjRg9cfXCAZ1cUJt5614",
"X8UDqnz9DemEwsVTmKcboZERqQXe1gNHRXAmGFQsY5J",
"xfCpo4ouRs5BP3WY5BdWhbr41pQxYGcXxz1sFyzPsZr",
"xViooFwobBZE416moj2yFV6RJwGVaBDbCQJRfCyz2FS",
"YpopmpJ5ryYnLZKD7a2dEbPdPiiSLRARWVj3oAmgWLt",
"YYYYW8eKkmwQFhVGUKdBAnDQPuhMTpG7zwm9nikNndC",
"ZDCJDkoBMTXpf8zsfQzbLeTfAus1qaxiFHnANseQrmA",
"zi35ABZ6Lc8L5s4nVt2qzAHJCAZZV4E2aYXL2Xq9aqF",
]
);
solana_sdk::pubkeys!(
mainnet_beta_validators,
[
"12CUDzb3oe8RBQ4tYGqsuPsCbsVE4KWfktXRihXf8Ggq",
"12oRmi8YDbqpkn326MdjwFeZ1bh3t7zVw8Nra2QK2SnR",
"1EWZm7aZYxfZHbyiELXtTgN1yT2vU1HF9d8DWswX2Tp",
"21REqzFbJJb7itcv2QzkjvHuCR9PyzXV1uw9251JZq1V",
"23SUe5fzmLws1M58AnGnvnUBRUKJmzCpnFQwv4M4b9Er",
"245B9WFHUGuWycSXHagHXwsXGcxDkNYfxWBaeh7vAHDU",
"25andkv98haPs8LeocDfmKwfNnpbNcHtsWoHDSBrWGYb",
"25V3EzQTGCSoLhihuH7f9jjyim1sxH6MR8MUtRLtLuoa",
"2B5wMmBQkMHu9V5JbUyJuf2mJJUU286qKPsZzvQQjTNQ",
"2beSsAmYWPaNJ6GShkAECv617wmGfeH4sSuRFBYYX662",
"2BHDXBCcndLg6oYu7EvSd3TuVJjUQX5xmUiKb8fDMjRt",
"2Ce1dmtdnvgDwD1MiZjP9wwGup6j8H7uXGuGMa9uZo5v",
"2D2v7sMqDuq2ekZnFhaQm4k2ErWHemZQuYf5qaVTPFmg",
"2dxz129YxB1xtf7Mx6HUT5JspexArNNtQt84FYueWZV7",
"2EUh4NtRwhJ69UUca8HuGEcABsn1MbvufqFUDF8XrZEe",
"2Joxdac2pgGA6xqBCChWKZNKhWgnv6vkuEkGp14JFPzT",
"2jS8AX38m8F9C5juToW1FmTufEbb1DfDzZJj9HSJcWwo",
"2KnfYVJiAtxiSPfHYRzTsGGhUAhDTNrJXWn7n6K6giJU",
"2LhJjdjNic2BcsRLN8opwd9KZSsabebQMnxj2PkU8ADQ",
"2mDrrmhSzpSyaF12izGk8hnFjtKCGeCFPwQHpRiJDby2",
"2mKEUVkcttHeMeaZK8jjwkuwGSdCBm23xDgzXgbhTdPe",
"2NJZ1Ajcwtc7hZdVXTXrh2SAiAXnFkVm6MWcGjBZfPkS",
"2nZo1aiuRdTKYCfuoLR8jHQY1TxkTQxCw3d34K3uQkiy",
"2oujYrRmtDDTF3b3JUgsZ34TkcyrozMjgRHBQE9R6K8i",
"2PotfnmMDS2mdzoggBnNhNVu5NxZsQB8RaxjbDRPJhSA",
"2pRZBEB1PX12cBAbgh58VfzTPV4ePJ3txXeDTM84ejbZ",
"2tKR4mX7LzhjfdNsR6HfBaDDh2RM3wdpUrJqUU42aJTc",
"2U8iE8LGdAqep5y3toyrBaZF6DHP2kf3Ev2vLFggJqiW",
"2Ue9zGmDnvYRrJNEjuAdNkbbickw6fKWtbeNM7T2rakg",
"2us4ysyNvYJHkYi7CtRuW413Mwi34kjjFQGhZDch4DEN",
"2UuVd6BMW97MRnj7mCpq8PhYmqmUYuojvDkqWstwASCm",
"2VA3q6DbiLjbrLgnkiZ2fdyuRyVBkYRgqBDwA6qYiSDD",
"2VAofLE2bYNM3ZPGfn6dJh2zqtnmNdD1bGqFet8mVYFN",
"2xte5CBkCBEBLNviyAfvSaTkMy6tvg99Cy3XJj9EJJs2",
"2zAbHUpE4MRgEwq1MWh3i9aJyzazSjUUPrmhNViqQn5W",
"34t5CmGFfVkdpxQgKbAJKje1NLXa1AdWUerSjnomPgxH",
"34viN9UrGJaVabrrSZDs8MnKwVt34nw2wv4Xkwk64shV",
"35AidZv6XxjDg5NNPaKUdzUxv7C2Qi9k1tF9vhMgnHhR",
"3A8F9LjrMgY37qZVAhQ4ccWmrvpQ3oosXfATUtV9ozDA",
"3B2mGaZoFwzAnWCoZ4EAKdps4FbYbDKQ48jo8u1XWynU",
"3BKnjJPKQrC7zw7X3KtTKVtZtjLoLgzeJ9xZoAtZyVci",
"3CxJofVghT3nbqtrSWo3oMQTFynpNyqtytBFawpAXi8E",
"3fA6TU7fQCkNDDKYJeCY4Ag2gCatEsYnYL4SpkSDYfCw",
"3KNGMiXwhy2CAWVNpLoUt25sNngFnX1mZpaiEeVccBA6",
"3LboiLyZ3U1556ZBnNi9384C8Gz1LxFmzRnAojumnCJB",
"3LKjD9Cb8RKKbmwM3LphHEvfZdjEU4rAFGDDUiVnuXhJ",
"3m4ov8U9Gccm8Wmkmuk4xJJajF6dSUQWZFDp36y7zHdW",
"3m5QHq24vbPVhtbcHbaDjvij58WExvfy16UgcCPLfDXP",
"3nRhescC7HMYC8gKti3ENiBe8LnKZUs2gzYPAjYniQCP",
"3nwmifffA54NDWqiZH1oYMKHaS21Evmy7rjPeL9veegs",
"3omj2Ajqtb7ZjCwxaYPXPrERhKDnwQRFr2ti7F23Kd6A",
"3RbsAuNknCTXuLyqmasnvYRpQg3MfWZ5N7WTi7ZGqdms",
"3rqEEEGjHRyndHuduBcjkf17rX3hgmGACpYTQYeZ5Ltk",
"3RXKQBRv7xKTQeNdLSPhCiD4QcUfxEQ12rtgUkMf5LnS",
"3ScqKCyAKGN4B27S1mFNCCna4cf3ZBZf6diuXNPq8pBq",
"3T2nqHFbmexvkhEo25SKnJsbaAjuyUGBjvdttczxDKgs",
"3VRzZfDDnNiSBzW259vKgtq5x1E4fzkvs5SZcVtpJEzj",
"3vYPCtncFxQ1RVtSpB2HRg1udHfeVPWPpWALuJaMcLx3",
"3xGDUST5CKZsiW4L5PPp9E63X3PdqRvCJfDM3q9EYcB4",
"3xubywCu9F3ALaYRKgp6RVUnQZFf4npNcDZCH4Qjpmmd",
"3zaPajeDw8FxutdgFTUSVKp1cxWFqs62dtuCZNQ18TT9",
"41iM1ZT5WYS8HgweopShefLJRfDD3jbB1MMJZiuEemvE",
"42xkjBQvPyicSdCET8eTWwJayTBofFLGWyCiuodd1SoT",
"44Bivyyp6Jv3Wm4RfJdtpCPBmbEXxvyotUoMDg91ZJPo",
"452L4U6HbzT59EP9vLyPxddF99FBBZ7foCrcn5A9HLMK",
"46yVA2WVP6ah5wLREZGHbGvvPaM55rcwDKDDikzn3BVV",
"48Dactjx3zuJujtBHZ5cd2w3maKFFGsKhn1sXzU59VGK",
"4asJHLR6DbifuQK2MRGNx9iHujYDYkQ9pqQXsmbc7fyD",
"4daH8Aotxpk68HsMvws3P5AQL3F1gVTA44jqLaB2GuGx",
"4e2KvSCgot2RGXsExfY48NdfykQSjgozV5FAXv13bUn1",
"4FozAhZhAo8ZTuzNHeAHMDDLqWmRwioWBhFqybZYHamV",
"4h5muqwz35tyPQdAXkZMyVM5cnGN5oXouTZL2AFA1Fjh",
"4i9CE2ewTTWwJkdMvNcia3D4yR5v4AKryae4nxeSULwL",
"4JryygoiM1j324fYkeBzcQDcwRfd2WpgkEzUePFj1rJY",
"4K5SSpWHqTbx5N5Ytjj9iWXNd5zZEZ6fkwjGA2KKafgv",
"4Kbcyn7JVPAWLRLPsNGTPmcNMvCkLTw51ZLRhqsUC6jP",
"4Ko2GufbWc8hhDD9GL46He1Q1Z1s6VFfBZS4RX9yY5Rt",
"4PRFPF7f9ERz9azkDFSFfgpye6yixPENCka994j8mQbj",
"4QNekaDqrLmUENqkVhGCJrgHziPxkX9kridbKwunx9su",
"4R8aXMwJUdE8WWtM7yE7sPFHJBfPBRd5o7ERVeQE7DKB",
"4rhWUqqNbdoVsGGw7P6Hzc81FB8C9jgR3UGF7aUgKAcY",
"4tS3UZfuRHzXuPenvErtRPtnZfY1KHhT96JBCQsLzKqW",
"4ULWSuaNnhQntP3DVxg1xa4yeNLNpDnAw3gTtrhPHzEA",
"4w2p2n83ter1rkk7Z6r63z3uNBKuBXzgV9KFDWBidSFQ",
"4WgsjJxehCavDYXEMTBQX91KKh1szuQtzksuxZKgymq9",
"4xp7K2vkm9LnJh1aCXjJyyF6XxL1u5gmYkrVCxHU6Cnw",
"4XspXDcJy3DWZsVdaXrt8pE1xhcLpXDKkhj9XyjmWWNy",
"4YGgmwyqztpJeAi3pzHQ4Gf9cWrMHCjZaWeWoCK6zz6X",
"4ZToBgveZ5m8NySrDyPA2fiGVRVBioaoMXD31KGidm65",
"55rEq5xznJEMEShWB9GFv5WNX5NfCZckLZbXXDyW2seb",
"5cshESzkc3hmUA3qnp5ridF6WTeiVVLUYkZUaCPYfbid",
"5DhEjMqxWWAagBywA6kL7EczqTnDGvPmDfseottjyps4",
"5GattKwm5oBpDJGgdL7ZZfCMthoETwrZFmQMDK5eq86a",
"5HXxjDZwm7MAZAm2aCgGcGRr3SKiwugcQymoByyd7pfv",
"5hyHG7SnmJAHZmM9shuq6BXxLSG2EeosMH3ZohtaewFM",
"5iJmsXsq49ZqgP9QbP2gNQRU9C8PhwA2gbgNLdgSHbjh",
"5kMJh1BtYFrvr6EP56XjoCZmyxx3by61dEvGewYTMuxE",
"5nU3v9FRnmTT7LrGKRj1reQoWWHHmPj5sfh2mTSti1uU",
"5p8qKVyKthA9DUb1rwQDzjcmTkaZdwN97J3LiaEywUjd",
"5Q1tTkjCtYFAtyZ4fxYYND8Ru65LLPuZSo8or9Keyq1h",
"5rNErxSxMj3WysMx8bC8vHkbrt9QmwMeG9H6aTp71485",
"5SpUnGmQBUYVWJgagPjgyvXW4ivYPi6v3n453Qi81Lox",
"5V32oyrRmsnuVdC4Jv178KVBeehDQUuk7U2D1YmeP6mS",
"5XKJwdKB2Hs7pkEXzifAysjSk6q7Rt6k5KfHwmAMPtoQ",
"5ya8UPujuXN8cys1EaaqMMH3auby3HTHZki73Q4Yfkff",
"63sJMcjh8r7W1oXjrNZrG4nc4UX6cQVonUHp9vGDUNdW",
"67iXZNZ4ytz3A23WueWr5B23WY7yHdESdRPGbVaPYkHw",
"67joanjyAoVmb9nZLyX8p3Gx9tAxzXaUgHDe3kaUH4wf",
"68EWE8SmV58oRG9JJELNDt8Y6MuWBbfraoT67KFw5Ath",
"692qK4TkXnrGfDZfoSq9aFfnoE2WNW5TpMRnN7M3sumF",
"69QvoqgDazuAqD4ygG6YsA31HBV31L497XqmQohF5ajH",
"6BDNnr38moGRQyvx1Ehs9cM6tJDFK7LF6mUvLziyNrzW",
"6cgsK8ph5tNUCiKG5WXLMZFX1CoL4jzuVouTPBwPC8fk",
"6DcjiCqt6E65w8P352JvAN7qDkdQc7YQJHBrGf5aVAze",
"6GRLDLiAtx8ZjYgQgPo7UsYeJ9g1pLX5j3HK97tFmtXb",
"6h9jyRgfpmgXNyaWpbDpbxbCoF56WEbzsruhMwDn2om4",
"6hpMTcU76pCkSZHG1qdfw8ZrM9rFwFCpmKZWqBc5VXr3",
"6iYUKnywYPEZt58hYs6WKA5tZjxL9MFupCxDkygX13dm",
"6kDyGMHbuWekkcquroYNp8VRL5pQiUzEJ11gJ75qJsRy",
"6kfL3zErU3z9iofwdg6iM6UJDFZaVJNguAwy8SiN82db",
"6M6aAEdhcWZGUeaRdc7cd81YFZ9UahgZP5D1x3MzXxmJ",
"6mdtNmZpSycAZKXKy9JkkfdEZCMcnuyuA7fzdnpUohZr",
"6npqjzgFPuL7dKdaUsgLwKokWLDe3wFyHeDaVT7Pro3d",
"6PbhWC1XRHsW3npzCHm374yE72B2mbjBrXdUAhouozqm",
"6poikjtKFzySv2zrfEJCQorTDJWmoqCLPbSXeNLHyvL3",
"6pTM5pLcQFEzCnd2GxLqLm4jL97ZL8L1ThSpR5aVRfc2",
"6RFbeXzEyabceq71fMBLuBVofWedpZAu2nK9vNbZiqei",
"6RmSMaZbyb7SGKLp34MXY2qkFePZh41Frs79xJgEg5R6",
"6TK3XwxN4XMEM9urq1n15CQyF1Z19idts4x5Mfibhtq9",
"6TkKqq15wXjqEjNg9zqTKADwuVATR9dW3rkNnsYme1ea",
"6UFmHMgP4ErpKrdjDgya1gAfHmcC6QhWK9w8wBT7iCNo",
"6uHvWHjBYiMowwKUZ5waAc83x5bnLbmGfxDnXPVQYorh",
"6UvA3z3sKbxGXobTQngyL4iVX7kV9jeGYmCUGRMYZqcK",
"6vi5VzR8KURahz7WheNGRTgGWHnKJQiKkqNbfua1sJiC",
"6wsSvrZPbjWeNNZ92KWtj94pdHj8v8sRbKsu1ZSpztpP",
"6yf57R7U1J1VXszE7CobdYEWQNJMPRfWEgGfaRsVNk32",
"6YPgmQm44UeSEeLU4NRP4fE5HWrEoKupqtRMK72pDjbS",
"6YXp94DjBg2cP3tiVjHhgZYLP7juG62GxzU3yZFE4zoJ",
"6ZomMAu9XSu9fzkRjqM6HbAXagw6Jo9fTsfsAADTZMSp",
"6zxkTupZL736phbbktUkWQSqWTzeYKbTFoKMENoNnJW3",
"72FqmsqNgpgF4XGueM7Ys8X7xd1ZqBzWvUQUq2Rv6bBv",
"72Sifwyudib2XE9nwDamapVEJr4rZLFthLtPH83ScupR",
"73A8PnqdHraWe9W5jiYzUBPEf4giyTxUG1CKZ6HkXbuz",
"768DzddM8MGfosYS5JxjzpY2FcnqeNHUxMerFQHWmwKy",
"776BzpbpsZU1rbCkNHizEP5r8RE8QL12Xqm49krkLPLy",
"77xRWv8Z3kaQpD9K3Den7YWJ7sxsf1KTnw5MdcM7Gtnw",
"79BLeiBD9r6okMwEPW3zHAviUJFwCLjTsK6HTPBwrN6f",
"7aR6AjK87ehUxwEJrPpnoizUKtxJKxqZCQCEGD3h1Xgf",
"7cVfgArCheMR6Cs4t6vz5rfnqd56vZq4ndaBrY5xkxXy",
"7CyNBLaoav9fZhX4D2WGrL5XCuMroSgDut68vtL8NB9p",
"7DBhzr38xGJeALbZB9bpA5bnzVL8N9oGCjFfWuN6t8cG",
"7ek3CDbxpGRdCVTJJpj6WPHmZrJqBCQ2RBQoqLHitx5L",
"7Gjec4iDbTxLvVYNsRbZrrHdtyLByzdDJ1C5BmcMMBks",
"7jmCjQt2D5MVvSz7NhpHLCXGsh8Mu3qeBbA4LJAf3Xsd",
"7N46n4N3tiX4AjVKBDPfAuxhgirAgDFknn5pkjYj3xz2",
"7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2",
"7nYNfGS6VVxzCZmfbLGpsXYFm2LS9XRrva9hZahFmpqh",
"7oGVeZnNWa5yDH6V6ec3Ya1xzSFykc62nXKDGhnbphuY",
"7PwCuKPmGF3ZqWHgn8zXPtsWJ7Ud2q1DFggRkzctwJnJ",
"7R3t8NAU7NDAd64KLz62tj1TaFZLmSyVSMRJX8VT8Agp",
"7VWgBZJCLtuCTGdWoiWPrVqY1TaqiZQzEqmArzGzrwGN",
"7WLzboF5Bu9e26MvLVps4rPbSHBgme4J1w2Hr9G4WW5C",
"7X1kgrfYRwFd94yyE8tvsQF5aTFHdmoLXnG4q79onDAg",
"7XSCAfoJ11zrQxonjbGZHLUL8tqpF7yhkxiieLds9mdH",
"7ySQKwtxuvxQkhW8rKsa4Ch83tdX5fzfpd5hu6Av66ur",
"7zPjirsZ2GLwji87WB9EPyeLXTzigjeVAxH14nB1Ss2E",
"81JyjgUyLbqs2f7iSvrEBwE7Grj3MsQwULMGTxXKnLvS",
"81zNHWkYaQRL3TWHSNwzX6T64yUyVQ7qXzLLxvaaB6b3",
"86fVAWdCDyrSHnv9y9dQCrfQCY5CrXptLP7gL5PyUDxR",
"8c95uL2WKofQGfr3rAMZHmJ4V1UJW5puTmpw96n47MX",
"8CzsSMYcmtFmcGpYYv98kcoQQaWQLVtpQRUzEw6awPHU",
"8D5rQbJD9qLNJm9HyTjFWV93CBc29ozAdGUia4hyMhw",
"8DXdM93UpEfqXezv1QTPhuA7Rci8MZujhsXQHoAsx5cN",
"8ebFZA8NPLBZD91CwsG1HWQsa2B5Ludgdyf5Hi3sYhhs",
"8Ey5FDayWYgJdVoquScT2hJDKWk7nQQfqzfGBt1emJpx",
"8gcMmaEAXfRZKcXHeHV2x8R1ebb78inTr9xhhEuNNoTt",
"8GLRbAstsabZuZUx73AoyfGi1FRCWSUhRgMugFyofEz7",
"8o59jbS8MEqSd1J72ryL7dghg9djh2BAUtpkZvTTYh4H",
"8pyp3vfVPRziYdAYEyqkwytdBbdVbQmHqfQAVDcRV3w",
"8rVubWZEYjy4YfMvUr5CDPg6YgdFdzUfZtfw84cxGjaZ",
"8sv24V7cy7tyFPwn8nqAKXiigeNmgYACoFmG2z2XBTZw",
"8tZuYRKd5kx4tNsZMZHA9ovsjKkQWf56kfe7ycLees9B",
"8VAyNMaz9DFLNxSFrhjiusKCcgQNKgnByiG6KfeCP8vE",
"8vnrJNMDERZRwWgUMSqwLyYHEPkQQg3Ww6BM9AH4uX5R",
"8W6wYH4cQKroY9ymP38DQeC1wV8b92xb4BWT2foXSvGy",
"8waGfnuaHHUgZT19xQo8LWsAY2CSrXiUMeMDUUrvkeu3",
"8X4SDANraPWJYCDB1ZYCucraB1TKki7vTFcJMiSrspTY",
"8ynefSJ7MVkC6VnMdM9Cm3V1F57fiYg8ugkfNV95X8Zx",
"914RFshndUeZaNPjf8UWDCyo49ahQ1XQ2w9BnEMwpHKF",
"93P67u58qai4kT5XoKX4Ti4g4H8nrgJ2PKnKgCuR39S4",
"94HgFvsD8zm7UXJ5KJxN4zW5nsdhZWY2LFREjfaFEHwR",
"97MtLX5ajrR319PH8iLnctBpaLFoT3TNuUAtZfZaEn7U",
"98vNehxpVCPz1t5RjnZZVjxQ549a2VLUbkpNwqj1rSb8",
"9bLEjf8SjumDs28WqUfoSnyh9Eg2VwQZGMrxxrWhvLWo",
"9bxGPEvFjGHqpAHMkm97R5d8euFnpJ3ws83tMkTbcBUJ",
"9CqDvvGvSNVZDo5RskCAv4fTubpFCs9RLTrjUxEYrvNA",
"9cYyjirWYs68YKw39r83qXv3rQB52dyviteRDKrDX2GC",
"9e7XGRqQqEvppx4Lkj6P1S7k65yWQpf3vcNzWecKSzDd",
"9gFxqsXbFyrKXUkqpAatonn47uYZ7sEZSnMxhzQoXrUJ",
"9gJT69qJUibNmfnAPBgsqSmLqVxc3kxXpw1Vk7APDrso",
"9iZV3mMdBh9y17hM3eVXUVuH28pGLERSJBq2z64yfreM",
"9JJQN1WpJ8QvH6XK1xAMbSpgHSiwqBgWaeCh3ViEFmtN",
"9jQ16ZjBN1vmXn3dpRDc5eZkRPU1kUTp5MPnJuSFWb1U",
"9JZXYs5TisLa6gra2PDuNibbi2979AZuXNY9zSVGz4UA",
"9mfDCev5UYzN15XS2AyRPXbcrDJ3HL1jkXj3mdP96Kam",
"9mo138RC4NbMYvWaVUE2cYPpY7KCCNGvgHhi4cVuCyM6",
"9Mo3ap3jpuqQpLi75EsiXLWfTr1cbBhrJNumoq1wnVp6",
"9nEBir5mYz5yAnAtuZsuBxwfC7SayWZTMnyCmmTsUknf",
"9NEsaMNVEEPHCdgxUzeziZdgx85LS5za9LVs1rQDGBdC",
"9nkbwUu8kVZMPSznrEkroG8bXHcgxPUTQxFbVp2oXYQH",
"9of5uVqC4EYZ4LLJzPecynNM4GsQWiT1WtHfA39VqcCR",
"9ppJrpsbbuGNjiMhhD52Ueco4KXUzVfrtNQ6tAcDab4f",
"9q16BB7WGmBxf1nJTdxH5zPnBUhtHqdqXqRFjSjuM4k7",
"9qbqAdFqJK4MhmZPQjbRUxRUi2NTpevAiZ8wj7SiCiTQ",
"9QEY6sXPc88gt1jNEYUC7YWceWV8nRuuUi9WYrDngro4",
"9QsFvmFWH2weMKYivw31wy6qCztnSEAgbvgAA8gMoUWH",
"9rVx9wo6d3Akaq9YBw4sFuwc9oFGtzs8GsTfkHE7EH6t",
"9SUf5VUSACNWW5CgbLs26hfezhUgeR5pNuTB1ZmUFxVg",
"9uAtx66puPQZLaP6fMKNKKyRtGzonhaUEdbVopgeWcfS",
"9UsSQ9m5QemVf9NFmKncDDXGFGzUXMiFay9Fv7rUmjtV",
"9UZ3NXk9jALnrhbWz91PXWya4Hvi94jceq5nojxHiKgL",
"9vpYXvRdqNJD2YKRZ9q6Xm7fh4FdPGuc5PBZSusv8vbi",
"9WuDnZMufE2nsuWsTnHjJsjavuKtswTpCy3i4A5kKT8c",
"9XQyJ5emdYVDB3PjFDCmXFG3G4cAbPHJttF2hJczhZB8",
"9xsgKAU3pyKZZddPXdh5wLRqjdw2Fc93BL41JszhEpZz",
"9XV114Dg7cQF5zbxEoEnTpuYXR9TgdjHSimZtkzh4epF",
"9YDu59tKDysB9zT9P8cgvaogmuy64R1Nxbr57QHztNrW",
"9YGn24qD8ZCU1EKZEsPLSEv7SVAJ4kpbCTmj7fR5TXYv",
"9yWfnPBofnWjXRvfEKLioMb4VrCkwqcYBbWj1uF9P6X8",
"9z7sdEnttp9T9bzoRZumMcKWCU76RdmrFPi42km7Twb8",
"a1phaKk6UbG1P2ZCpfMVFUeRM5E2EZhGvUjqWHRsrip",
"A31PGH4i5xGn7SHWpsQRhpBYUwanRuqNrHBp8bSeCSEr",
"A5dCbrjh492SU7fg31wZd6CNmz2NYqH1gEE219qyXZbe",
"A79u1awz7CqnxmNYEVtzWwSzup3eKPNW6w2Jrd56oZ3y",
"AaDBW2BYPC1cpCM6bYf5hN9MCNFz79fMHbK7VLXwrW5x",
"ABC1U4cf9DZMwqy8ktEr4WJj8VHmVBQibbC57gEJthwY",
"AEx4pxCHFpsVgZ2X9t38bt4CouZ85tXXcghjUkb6ypv7",
"AfDqsQKrpuDEHy65fsHMzVVeh4FcsRomxoe5nDLPERE8",
"AFNiQnEvAqjFJizDtSAh5ypdXvQDRxHN61kcect5wNEx",
"AgG8obWYeVY6nSkPYqDHXfssxdcG68GkuBikkearYRv1",
"AGsrW2sp9mqNp7g9Nqw6g1A9wMyDYysqTN6d8oaw2W4s",
"Agzs33CozCnr3eFvyHcYkb46nUZq4dL9SFMV9CNAxfGR",
"AHg5MDTTPKvfCxYy8Zb3NpRYG7ixsx2uTT1MUs7DwzEu",
"AhL6bJ2BF2qWSP9aE3VnFZFm9DXxWdfc4PjFBKPncsrV",
"AJ6PKuAJmACTpB4tLPm61u5ScygG4c7EtU5HZmCQ7MDA",
"ALiZQ5XboU6qTGKSP7LExokaZcPGVYvmdFqqEw2bCJWp",
"ALp2GdA1eJV8vZHMHazCtTxNXe3BLUSco9LDASgjDs8R",
"AmhQFcGvH2hjkucP78rn6GMKSbstYwyFpCDVKZUwBGrG",
"Anara9qw9KCKMFPX19GUE2drgwiw49u2pQhHXwVRukhn",
"AoUwfPuiEek2thVRDhMP7HbQb9rguyab4rDiz2NAfwwA",
"AqGAaaACTDNGrVNVoiyCGiMZe8pcM1YjGUcUdVwgUtud",
"ArA1pRBW7WFcx1StiRpwhktiXkdQTdoGSj519hzr8hdJ",
"Arvv3uwEyDPKckw3wEFiCq9hgxMw8kawFU6doxFAWGYR",
"ateamuvZX4iy2xYrNrMyGgwtTpWFiurNzHpmJMZwjar",
"ATwrKSHdF1JptZDFd2wUeU8reP6ftp9P695W9ipdB6TQ",
"AU8CBLxah7B3HQE4rtjfkRDmqEGLqu1xKUPLSytQ8tLc",
"AUa3iN7h4c3oSrtP5pmbRcXJv8QSo4HGHPqXT4WnHDnp",
"AUgbNHRcxPqfmq3CXQ5bvy3BpN8KZt5LtXBGSH3gV5B5",
"AvAivvbUcs2ewGpj2TxW39BXmA5t6d9ofQZFzwagqAzb",
"AVUsR5RuxBcZDftuUNQqwUKwx9puBE5N1zmgLQZSxiuH",
"Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM",
"AWHfhBSMN7rX7NEDrKNfjnUyR7W1xpfistTy6w9GqT6U",
"AXXrLQ7bVy92pPxW7heQkAHfg8LuJ4uufUWCQgH12rmR",
"Azg1hC9mg7pkSq6Q5sAESsBsrxHoUuyRnUYSCJMJMdup",
"B3SBGpUurKfrMAh3vykyueokU9vE7EiGDkvGFzxokQoa",
"B5GABybkqGaxxFE6bcN6TEejDF2tuz6yREciLhvvKyMp",
"BAHNFqttxYMMZUDqMm1JQPnxkZoHLnpash1ouM7pnLEA",
"Bb4BP3EvsPyBuqSAABx7KmYAp3mRqAZUYN1vChWsbjDc",
"BcMg5hjgLZF2mCJ6L2ah1p7TxHtH1euPWP1kZs3USRyC",
"BFMufPp4wW276nFzB7FVHgtY8FTahzn53kxxJaNpPGu6",
"BfVzQge1z9dSZ2YiGyRXoPRUvowe3S2kvbLU61zXn1cV",
"BGBxrNWZ9HDBQoXA9Zq6t4auRXa59qrLQkiTGtZK6FU9",
"BJafMGt4t8A9BENBg9EcXEAqUSgBLaQZujQqvrsGMgtL",
"BJevND8rrW5f9AHSysSGTtDWKcARxSpzTyDAvCm26Qms",
"BLPBUAuWmjTKT8rF6nXnHJCcuK4ZonH4xdYSD1gZiET9",
"BPX9WVx1MjVpSWY9fo3hGXfE74yKXcreS2XYKjpiyJfZ",
"BR9JfwDSvtP87kdJxeF5QJPCcj4bdmUNVDPtsK64DCkR",
"BsvtXMu1eGKrAhpP636EnNG8LWddxqmCDq8zcEG8CwY3",
"BT9ZFvsDfX6WpLFqmWEYuLuE5i3SxzdSJ1Vzm9arbRub",
"BvXzmS5rLW89nHD7K2qVjcERKigDdJ8Xhc74MQZumj9J",
"Bx6R7GBNsFCt7KJUZw5yrbvdhL1EYcXPhkUBnS2t3c7E",
"BxN7s13iMFRCZEumBRzUpH9E25V77dL7VgSig18MYKbm",
"BZBKHmW1DhBaAPojxWBQ26vGz42Y7MtNviFZWpc6nGLb",
"BzF3s3AGgYupiL5Mhbzr1tu1ySMZSR3SXU4DajV1Zwv9",
"C219S526JWRzpPUm3FfAzobwZMDuo3DM63phtJpqAz75",
"C5oTsyGA1gBvgWpvS6EaWGAgEKDBkMpQaPeLTiaLuYKi",
"CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S",
"cami5ixFFZD3jLdX8Ef5tu8o21reSGoE3GpGRrQyP4z",
"CBUGET5PnvLc3HvEeFYj64iTvdKhYV6pujTPDdDh785K",
"CcTtRsmLJEjqsv5iyfXSYwjaUJdfrRK7AU9cHMnQfTb3",
"Certusm1sa411sMpV9FPqU5dXAYhmmhygvxJ23S6hJ24",
"CfBJ2yK6hbRD3mV5VoMux6nkki3mYa3NCmWWnfTeKw4B",
"CGoT67sSerk9ckwin6yY4mna3ymLxxKNqdYeQZ9xNdkD",
"CgwKP4kM6B9giquCADDXQxik7BJR5L7m2EC2ntPnQoJy",
"CgX9oSHcpXjKKxUpLbrgNKWrk3ff4cQidfsf96AkMBke",
"Ch2UBdfwRY8UyAKCBzYksu7QYwjCXprkbUo7AY9CSRyS",
"ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n",
"CjmXSapt1ouz3CZzgkRJckBEwMSo5fVdVrizLeRscwYD",
"CLarKkpJDBiJYLXqKLVEL8VJaAxRHnWRyjjxAunCFJ41",
"CmoYHsxUDcReka49CbnJqoQ3y9hHYkCoTBZ59jA6k6LF",
"Cn2GFnp4H4mejHtR4GbV6ga48qoceZefzxNU68PxCcK8",
"CouDacpBtF6mmN5K9aXyVds1XRkmo6J4t8ebFZxAecGh",
"CRextgJEiudn3vzHZauno3a8BpfdweYEdLEjhpZ8C7Cm",
"CsKpHvEjoSkdNduEjb7mfuJbzRKqc5ypde4yUW4iGMS1",
"CsqwHrSUAWsWpN5kUcjcv4Nsq5JtgFiDJTckaY1zuXPf",
"Cu4M3yd2LfMoGhmYxKszhVH18SPgt6TQvqnE4AWjNKwd",
"Cu9Ls6dsTL6cxFHZdStHwVSh1uy2ynXz8qPJMS5FRq86",
"CUPryNhYfF8ChYyz3tahM84ppX5WSpvRxWqP7PEHMX36",
"CVAAQGA8GBzKi4kLdmpDuJnpkSik6PMWSvRk3RDds9K8",
"CVRr5oHCAAooVbYze7CvXtRp4FUtkMCSqBZU7MVu8v8e",
"CwhdMezLucz7bcuWzStpLXgrzKGC2tBBiaVmJZjfprRN",
"CYkkaM5KwoxaFtZcximkm1DFYnABdiUUUvJg1WURDRsh",
"CyyXgFh1cQsrrqUvwDTWFXKFMnSe2H7H4VgX6uEkfBL8",
"Czc6nGs45PPUdm1jA9MkBfdxYRrZbShyN9rNj3P9chSG",
"cZCvgqgrdX2LCScVku8CprMnAUZtSicBDJ8eiowfonn",
"D1KH1UwfTLaBW3NubpvNv4ze9S7SKW6jd5TLdrwZgLP1",
"D2RV1q6FgePVVjrMa7AMzVbvvAeg5oS7TAV7qdNKSDsX",
"D2UwKTrNJGNLM1h66qjoSDTK6CLcPSzmga84MFuiWdiS",
"D2Wa6JtXeyqFMdoacpKMo86Pbr4YpfdVCtAhem8HjqfF",
"D32cBNvo9qmMyMSJzWqDPQ3ujYFuW9HHNjVkwxspezQr",
"D6pUrfgc5ZyXSfgtCBYozydRSz92pse1S7AZP58muEYk",
"DAHJgPKdmncYW8DmY6meaU953a7SktQ7eDGtWduC8W8m",
"DAS6zKbzVe5DFSwyEgo1TisuWt31HjBdZxzgBF2ASiju",
"DdgotKX6oyHpadPbAuiyAodHh3mk72SXBL4aeg4vLzZc",
"DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ",
"DE37cgN2bGR26a1yQPPY42CozC1wXwXnTXDyyURHRsm7",
"DeCcxJU8AqxPRZ27wrKYKRgmNHwCUK7MvW5XPGWh8WZF",
"Dfc73Czi5K3xa6yKMq98LHJx69PDsSzUvSdCSSqr8NDQ",
"DFKj3e6WeQmLxQkRBHq7nqcEXjwkcfJGCt22VySeDiKx",
"Dgwcrprgu1WeCNEVG2stkiABqWjfSFfBuXeMrR7qwJHp",
"DiFeTctQSaNczJNmZ5121kYqLaBe9wDpM9sjCzTELJLE",
"Diman2GphWLwECE3swjrAEAJniezpYLxK1edUydiDZau",
"DJHsoHQvqYjb8G2Ni6XSbBSHxmycMAsZksRDytQ2bntK",
"DkycekX1rxCUr9FtBHfacHEgCTfRdRLaKYTiz7fXHMQ7",
"DL6URBwusvYUFwEYZK4VEaoaynWSduUA9e4N7WDuZawf",
"DM52rUKVkCPDY3MFmSL76XAuhB5ZN7MuogAiQtEe1Uvn",
"DNWsTLvsMixgUcbM93437U8DWmJ9bZikQeBxQLHbeH5L",
"DohaxzeUj6ma9shCykxGxi7FbWnMyW9hzNjwQjZHEDV7",
"Dokia75SVtetShgapUBoVFfYjL99fQyr1twxKKyTZKa3",
"Dq3piY2ZcBvNN84j2EhDLtTzRAw95za7Eau89pNcmSd5",
"DqBvkYXi7HjdaKz78yakiDsaGuq1BKrQi3Z5JV6STctz",
"DSV16KKYW34qjxTBcTzURTyJYgw7qLq4VLKq3UavQKCk",
"DTF5DM36Jc4vkVPJXbSm7wRLe6eeX1UWo72WQhxKJpR2",
"DU4shVYTQz4Pf1RXR3ZMNp7Q9AqLK72sx6b96UxEpcC6",
"DViARWAWKkxAzp4UCgbw5B9pLSrBY3PaztFErcwgVUKX",
"E1SFkvPjU31xWMcvgnX6vhGvfRvb1zXvHFkqmQNEGZKK",
"E6cyDdEH8fiyCTusmWcZVhapAvvp2LK24zMLg4KrrAkt",
"E96CSRSUT1WUGKzDb2Dug3zWRgj6qKffNTUZUihjFc7g",
"E9hD3ikumJx1GVswDjnpCt6Uu4WG5mz1PDWCqdE5uhmo",
"ECd2JogmL8Qr4hox1zUNEn8aHFchE1tAh3JXM4UQBzzo",
"ECULsxDc7pqadpQSQxFaNWAsemLW3wQq1vDKXg5P7GjW",
"EEN4pf92jyVoASZ6pQQMHcKXTF4d5T3cY1a942QhRasc",
"EEP7VNrtLt3tjM8DvqUVEsmwZxLKwHgEFWDtoMx2PQZB",
"Eik59s9go24w5KoyXGoyKSK1rUNYN2MLTv1iW2DFWkS3",
"EJ38tGihX9LtQ3q2eYGxUvLm6pzx8w6rXG2Q8BNhbeiM",
"EjcGVYigv2PA6MeZxRmgFot1P7eGQThFMS2Yh3cj5r2n",
"EKyzkF3pdGDR3Wnhuf3ouMoofnJi3r2XKioUuGqwrqB1",
"EqxCV4fz2unNzt8ydGrVyz24ngkH5n13x2wDSJ8DY6qi",
"EssmJHKwUffWWDBK4mb84QRQjWJEnuDui41T8ufcoqeZ",
"EUVBn58XXTX9RBTm1R7Wd8n8JkvBMQfc9uSn5wPhbdBL",
"Ev8D9dwYdfebkdLgAjwiJtCkqS882Uvrit5qN6NTeHMy",
"EVkRg5yjkmBP5tAmSiM9zBcrfnFE82rv3fdVvTLR2sxL",
"EvnRmnMrd69kFdbLMxWkTn1icZ7DCceRhvmb2SJXqDo4",
"EwHxEEjLrrhzB2TUNMEJh8kNcDSnfBYvuZzDizBHF51J",
"EwUVzgSPe1zy2hfUGZxJAEP7Y1wheNgNsgratbzPELru",
"EXCMwETx5Txcvxt6YYqxFmhSpQKH5BVjdat3NE5eJJ6a",
"ExnG6VdMubFJfrB7qereo3YxUMyeZvTuZUVKfv7R1YZD",
"ExyEA6EegthLVNEjkj37FDGUnokPHqtEEe58ncBAww7u",
"EZUyrtZoBWMMoXTLzDo4RVTXi5XKYCs1kq7oESFLDvCP",
"F84kSGMUy81sHVF7HtZn8nPMRBau2ZC9uqGAZGJtHXYJ",
"f8e1k1Qz7zsv9gp1kVPnzBHCV8kDkfM8Rad3ZfHxFZN",
"Fa17nmHFt62kmerRQNGtgVWDxnuf7UD3PY2eeFfhpz2t",
"FAAvB4WnbSPNT35oUXDvkYj932KFRPi24dNBtrSpPvCY",
"FaGBP8LJrXE5h3cFRtPxz8x8695LQAYnvYn3NGtVtvrw",
"FAwRZwJgi7h81ZphYhLauZKvBHvkr3Dbhh6R8DsaD4Xv",
"FCQTtjeFK7Fj64aVDwdahfyxiG94uLnUjkmmXUq2esuf",
"FcWJ9zuq23C74KzeZtrSZMNXDnKFN9fQXwAyVDTZFpLr",
"FCWkGAHDWK41ANjiaoPudkCZRkvTecaEkoZQugezUnpr",
"Fd7btgySsrjuo25CJCj7oE7VPMyezDhnx7pZkj2v69Nk",
"FdmF6aFAy3pisaTwcQnvLf43z3pd1QnqrNzFMa2vKABA",
"FEpYb3oJbdPf77DPdQagkcmcJ4SqcfRDeCydKVkks4HK",
"FGiEdzde7Fco2WLpNQMat299hUVoykJdaA5hxdmCzHiS",
"fishfishrD9BwrQQiAcG6YeYZVUYVJf3tb9QGQPMJqF",
"FoigPJ6kL6Gth5Er6t9d1Nkh96Skadqw63Ciyjxc1f8H",
"forb5u56XgvzxiKfRt4FVNFQKJrd2LWAfNCsCqL6P7q",
"FtyZ7aQneFyJs6wKsMTt6m9JFjYEkHwZ2DhhtS6gw3Th",
"Fudp7uPDYNYQRxoq1Q4JiwJnzyxhVz37bGqRki3PBzS",
"FuSZq1Xkvkjkj16fGdhDtZb5ATtsZ8FH5a9KGsDhtZQP",
"FUyx2W6wDt7u363QgQRWQYuytE5uJWZLGJpuVh3RDiCa",
"FvBrooVoGfi56ux9L49HZLDHMt4iznYVDAMtyH2ee2LD",
"FVmo2S1GZBRJLS5GM2F5uYF6bcg4rM47dtkxHABKES1b",
"FVsjR8faKFZSisBatLNVo5bSH1jvHz3JvneVbyVTiV9K",
"FvTY9S6vutvfyP1eJG7RLiR1er6nk6cNTSP8q9WRpvWA",
"Fw3khiQmpKLSdNyoVR52WxbTWT9gjFSXFsZewknwLGb5",
"FxE69xVkPAUYh3Y2QCHJVWwVB8x1F3wbHnfKGoUvXn81",
"FZktwGYwu2JfbECgs4ELaXXJtz9oZyJEfiWa234is6By",
"G2ccHPYPic368N1b6dHJBnE3tBmiceHGZefjk2FZuVfT",
"G2TBEh2ahNGS9tGnuBNyDduNjyfUtGhMcssgRb8b6KfH",
"G4BaMbWsp9uX7EX7dtv848xqfxtmoi6d44cNk4irEtvZ",
"G4dd6rLMW5aQEKFcYFHw148T8afjyavMyabsc86zACmq",
"G5rEbYLuVsTiXP1hUWkeJp9PptSujTGa3mFRqrxXgMx4",
"GAvEvUKfVkGjMgFhQjC6WAtzzM6YbHGvfC4PoCtrrfVz",
"GbpomwGXpPBfYpiMz6KWXBa23usJM6Cf426NQGzAKV2W",
"GdnSyH3YtwcxFvQrVVJMm1JhTS4QVX7MFsX56uJLUfiZ",
"Gdo6FaCtTQqGYsmDQrX2icSZeqDCdVizGzBDNbiqCGbJ",
"Ge8M91yymAsZmjCyeAVCpxGA9k62CR8cxf4AhP1c38UP",
"GfCNi6GaCfEKyvpS8fs1D6VUdQcPJg4VrULvFFEBLFW6",
"GhuhvJcGcS4USKAt4JQsiYyZvcAj1MNzBhRiqR87wNv8",
"GinTsSooU4QChoqXFvEWnMLsM6iheLmLFFv681s9CeqY",
"GKu2xfGZopa8C9K11wduQWgP4W4H7EEcaNdsUb7mxhyr",
"GLmkRrNqTSVi9ykTBQVX9M8cV9EwiCABknXVweS4QVxj",
"GmZ7xFQ4GHDbQw4CSnFT9pAHa15u35qTN8e259UrBh9D",
"GosJ8GHbSUunTQPY5xEyjhY2Eg5a9qSuPhNC4Ctztr7y",
"GRmtMtAeSL8HgX1p815ATQjaYU4Sk7XCP21i4yoFd3KS",
"GSXEn8TVBgiqfiLP5c8VmUrdAX8sWDZVE9M6p2kZFehG",
"GUFNuRw9JEAQwrJR71mRa2LbMRyrUfziYUzqY3KQwAXv",
"GUvRSvhhQRA1PhTpMaqW5hELHcPP9QP4W45tHFEbtqRi",
"GVjtyyy9HrgBmQ62TxjyUuaQDvuHNbnnrZgi9gqwpVYv",
"GWccEztr4AFmiMvvvXcib4feDZXCi6U13XrxponUHFWC",
"GwHH8ciFhR8vejWCqmg8FWZUCNtubPY2esALvy5tBvji",
"GwnQsVbbVsMhGqWV3gcVCF1364LRmftggyc5SmsYMLrY",
"Gyxhb2GGcB3s5h1HcHekRJYyAHRXEDP1FfX5fncE9edA",
"GYZdxwPwV3wTFVC8DdZuJQZDDRCg45AtqUai8cpSyAga",
"GZNnph4EvmyjjL5uzF9xNNTHyV46RzbkW4w4HYU8BQCW",
"H2oJUXwghyv6BwZH68jobU8jGutBji4v3WbPA96kc5Yd",
"H3WEH47bHUy9i9rmzdaxmoLagoUhbcJ2shPXcsp8QdJC",
"H7tpxumDErX9DXNTKKBFTDmynXBF32QQ8n3QjiQfLn2i",
"HahmUFR44BXFP7fVLsnd4pyaE7GoN1KKV1hdL2eVUpok",
"heoSbe83UDZA5LNR6F33QHVk9iZXpxPKcNRtVpDczXL",
"Hj2jzpAp57KyM3SmnYwJbDVrQ8tTWizMon2hhzYzwxet",
"Hmube7PkFmvattfygQgEyWe4GfZ9Dskj4Zc9YFfRj9JU",
"HoXANZnWTGeePertqWkMEnnhgXjTjzpfWaT2kja2ZgVU",
"Hp9NMFfFhDQJCkgUHLajoWqTjm1xxdvQEyUnoYn1GWFX",
"HPca8Y8aMnkrTi87MJFHDvMtJVkvyMxFvkuLsoFEoELy",
"HPdVgk7NhewBTKDCyqKYFXLu3pErnUo5W9KLSLd18JJv",
"HPN8ANgpeF5mcViFW6fTCXGWPdQ6LhRWvcx86JVEkFC2",
"HQATLHBJcU7DET6b6NWNMpfsZVZ1PRnbfkCtcFCT5reo",
"Hqc2qT3vXvBSKozmz7Rd7gLF6jUJHuQEGBTnCHzEBnqk",
"HS8DF8wP3A8qSHPnzbE6pSvXub4FeZQ6xMNSJuqArAHS",
"Hsb14Gkhjbp6FinDcPRwrThS8BYsTnrhGY8Ui4f3EY7V",
"HsGWhcLqVsue45i4vMxku3YEVGF5cYxUSTaa6gwieSGN",
"HtzxUabNfYNJR43FUmcpkgmtANZahbq5iASB5oiboXzF",
"HU8uKcH8b2GRD5T2LYKDzV82H7CiRLTQLwYTNxAD7b62",
"HVWzcQZ12pvZN726XXgA2jBfDuj6wDFwSFVdzxooi7qx",
"HW4zorvt6xDwhU36RqjcWNwU8YMj9tiqnAafBKW4cqV",
"HW7ntfUHapD5o7McDuPfGvkfzrPcmuPSbZMMoe2gksKQ",
"HwdfNWCqP2vXRvaHqQhoVUM2uPndaY8DDJzzBxCoPNHU",
"HzrEstnLfzsijhaD6z5frkSE2vWZEH5EUfn3bU9swo1f",
"HzvGtvXFzMeJwNYcUu5pw8yyRxF2tLEvDSSFsAEBcBK2",
"HZzEML2w5Qs6cccMd7o57Vx2sQUuuTaiecEcMU2xceSK",
"J2jV3gQsvX2htBXHeNStAVvMJaPe3RgNotwfav9pyS6y",
"J5MroSHMzcPUTXcnpXygeCFnXJ39NBG5RRQRYD29ZwNy",
"J711fLahrgkEnPaCrbGUCpGemmeVg2LsaZhmC6HNPbXk",
"J78SNwDW6G86sMmh7djnBKGjewXNpjD74sJTjJ1iNgTH",
"J7jT9mGjhfXdGFdp9XT97Sd8ynFMe5e2L8DqXJQr8qgs",
"JCmdhNCyzypryaQUGLGxbCM366dScTD4tVy5ooSWyaBZ",
"JCZNdppKyur55JqY5VBctkVjxEcgoQq3RmBuVbQxSCEq",
"JD6R8sK3cu7tphNMyhzxSGnSf9DcFUacXBgqycvGAjai",
"JdJWLp5edqUHQB7K9mA3L1gFbFcrS8aeaar6hQ6M2Su",
"JDScHrbfa4DEmqA4j5JTVc3wtwqXXeDCrjntRo2pWoLP",
"JokerEfTSznB2aTmowy4QPqjyajLMuYM6Jd4TDnKPNc",
"LunaowJnt875WWoqDkhHhE93SNYHa6tfFNVn1rqc57c",
"MCFmmmXdzTKjBEoMggi8JGFJmd856uYSowuH2sCU5kx",
"N55CrumveNQrXFn9oL4xkFDB9K8q5PxwrYaAomrxemz",
"narPxmKTwkUxvcXhueccHT8xbE8og2Vb7NrLBm8kcrh",
"nVmiYamBpwzEqxykaGBWvY9W4R7rmK1JudonPRhmkAw",
"Pidptxt5SEqgeK9HgqacrY2KEEnMqtUFSmAucLwnTPG",
"PR3GtaLUjL8rSGcAQtV9NYGTWhpys5kYRDUSCMF5N4j",
"pZ2nxiW1M78Ez9Tk5DcQyw1nWpmPFVZk8KDGKWbvvUV",
"RBFiUqjYuy4mupzZaU96ctXJBy23sRBRsL3KivDAsFM",
"rusx3KV69WGvsEbWa2HxjXp9GfHpjojM94BqsnfxKhx",
"SFundNVpuWk89g211WKUZGkuu4BsKSp7PbnmRsPZLos",
"siriXy5CcarNiz4XL8ssBQGiy2PwReVLny3Bcxq6Ymb",
"SoLiDDAQBEK1LK8apZcZqY7FunqeqDY6nYXMabQZvPB",
"spcti6GQVvinbtHU9UAkbXhjTcBJaba1NVx4tmK4M5F",
"uEhHSnCXvWgtgvVaYscPHjG13G3peMmngQQ2ghC54i3",
"WJguyKr593U9gDSdUu9Rcr1TMKo6pTse9LFZfCArR5K",
"wpd8e4dzFVnCQY3jPuB9J7pKYQd6Mstz5tjWzF97Kxb",
"XkCriyrNwS3G4rzAXtG5B1nnvb5Ka1JtCku93VqeKAr",
"Zb8KTJXwFvBTnbEDbyxWs8AVWjjipbXNFAfc1Jsbhey",
"zeroT6PTAEjipvZuACTh1mbGCqTHgA6i1ped9DcuidX",
]
);
| 55.517537 | 55 | 0.78513 |
482435b4406301d24b1576db8a249209e17e1b58 | 1,588 | //! This file has been automatically generated. Any changes made to it will be overwritten upon subsequent runs!
#[allow(unused_imports)]
use crate::Modifier;
#[allow(unused_imports)]
use nom::{
branch::alt,
bytes::complete::{tag, tag_no_case, take_until, take_while1},
character::complete::{alphanumeric1, one_of, space0, space1},
combinator::{into, map, not, opt, value},
multi::{many1, separated_list1},
sequence::{preceded, tuple},
IResult,
};
#[allow(unused_imports)]
use std::borrow::Cow;
/// Specifies whether to automatically destroy the user's ticket cache file on logout.
///
/// The default is **yes**.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum KerberosTicketCleanup {
#[doc = "yes"]
Yes,
#[doc = "no"]
No,
}
impl<'a> crate::ParseDirective<'a> for KerberosTicketCleanup {
type Output = KerberosTicketCleanup;
fn parse(input: &'a str) -> IResult<&'a str, Self::Output> {
preceded(
tag("KerberosTicketCleanup"),
preceded(
space1,
preceded(
space0,
alt((
value(KerberosTicketCleanup::Yes, tag_no_case("yes")),
value(KerberosTicketCleanup::No, tag_no_case("no")),
)),
),
),
)(input)
}
}
impl<'a> From<KerberosTicketCleanup> for crate::Directive<'a> {
fn from(directive: KerberosTicketCleanup) -> Self {
crate::directive::Directive::KerberosTicketCleanup(directive)
}
}
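// A minimal usage sketch; the directive line below is illustrative and not taken from
// this file, and `parse` is the `ParseDirective` implementation defined above (the
// trait must be in scope to call it this way):
//
//     let (_rest, parsed) = KerberosTicketCleanup::parse("KerberosTicketCleanup yes").unwrap();
//     assert_eq!(parsed, KerberosTicketCleanup::Yes);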
| 30.538462 | 112 | 0.599496 |
bb6167c7e1c55a314d7660947cad6ca8e8db6e70 | 651 | /*!
This module contains predefined types of series.
A series in Plotters is actually an iterator of elements, which
can be consumed by the `ChartContext::draw_series` function.
This module defines several "iterator transformers", which turn a data
iterator into an element iterator.
Any type that implements an iterator emitting drawable elements is an acceptable series.
So iterator combinators such as `map`, `zip`, etc. can also be used.
*/
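// A minimal sketch of the intended usage, assuming a configured `chart: ChartContext<..>`
// and the `RED` style from the crate prelude (neither is defined in this module); any of
// the series types re-exported below can be passed to `draw_series` the same way:
//
//     chart.draw_series(LineSeries::new(
//         (0..10).map(|x| (x, x * x)), // any iterator of drawable elements works
//         &RED,
//     ))?;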
mod area_series;
mod histogram;
mod line_series;
mod point_series;
pub use area_series::AreaSeries;
pub use histogram::Histogram;
pub use line_series::LineSeries;
pub use point_series::PointSeries;
| 29.590909 | 85 | 0.781874 |
e50fbbceecd1ef92e602d388b52a3dc383444399 | 3,053 | #[doc = r"Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r"Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::PREMAC {
#[doc = r"Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
self.register.set(f(&R { bits }, &mut W { bits }).bits);
}
#[doc = r"Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r"Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
self.register.set(
f(&mut W {
bits: Self::reset_value(),
})
.bits,
);
}
#[doc = r"Reset value of the register"]
#[inline(always)]
pub const fn reset_value() -> u32 {
0
}
#[doc = r"Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.register.set(Self::reset_value())
}
}
#[doc = r"Value of the field"]
pub struct SYSCTL_PREMAC_R0R {
bits: bool,
}
impl SYSCTL_PREMAC_R0R {
#[doc = r"Value of the field as raw bits"]
#[inline(always)]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r"Returns `true` if the bit is clear (0)"]
#[inline(always)]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r"Returns `true` if the bit is set (1)"]
#[inline(always)]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r"Proxy"]
pub struct _SYSCTL_PREMAC_R0W<'a> {
w: &'a mut W,
}
impl<'a> _SYSCTL_PREMAC_R0W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits &= !(1 << 0);
self.w.bits |= ((value as u32) & 1) << 0;
self.w
}
}
impl R {
#[doc = r"Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 0 - Ethernet MAC Module 0 Peripheral Ready"]
#[inline(always)]
pub fn sysctl_premac_r0(&self) -> SYSCTL_PREMAC_R0R {
let bits = ((self.bits >> 0) & 1) != 0;
SYSCTL_PREMAC_R0R { bits }
}
}
impl W {
#[doc = r"Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0 - Ethernet MAC Module 0 Peripheral Ready"]
#[inline(always)]
pub fn sysctl_premac_r0(&mut self) -> _SYSCTL_PREMAC_R0W {
_SYSCTL_PREMAC_R0W { w: self }
}
}
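// A usage sketch under the assumption of a peripheral handle `sysctl` exposing this
// `PREMAC` register (the handle is not defined in this file): spin until the Ethernet
// MAC module reports ready through the generated read API.
//
//     while !sysctl.premac.read().sysctl_premac_r0().bit_is_set() {}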
| 25.441667 | 64 | 0.528333 |
162f4e6789b7780ff97f15b548055b50cd3e3256 | 1,415 | #![no_main]
#![no_std]
use cortex_m_rt::entry;
#[macro_use]
mod utilities;
use log::info;
use stm32h7xx_hal::{pac, prelude::*, serial::config::Config};
use core::fmt::Write;
#[entry]
fn main() -> ! {
utilities::logger::init();
let dp = pac::Peripherals::take().unwrap();
// Constrain and Freeze power
info!("Setup PWR... ");
let pwr = dp.PWR.constrain();
let pwrcfg = example_power!(pwr).freeze();
// Constrain and Freeze clock
info!("Setup RCC... ");
let rcc = dp.RCC.constrain();
let ccdr = rcc.sys_ck(160.mhz()).freeze(pwrcfg, &dp.SYSCFG);
// Acquire the GPIOC peripheral. This also enables the clock for
// GPIOC in the RCC register.
let gpioc = dp.GPIOC.split(ccdr.peripheral.GPIOC);
let tx = gpioc.pc10.into_alternate();
let rx = gpioc.pc11.into_alternate();
let clk = gpioc.pc12.into_alternate();
info!("");
info!("stm32h7xx-hal example - USART Advanced");
info!("");
// Configure the serial peripheral in synchronous mode
let config = Config::new(115_200.bps()).lastbitclockpulse(true);
let serial = dp
.USART3
.serial((tx, rx, clk), config, ccdr.peripheral.USART3, &ccdr.clocks)
.unwrap();
let (mut tx, _rx) = serial.split();
loop {
// core::fmt::Write is implemented for tx.
writeln!(tx, "Hello, world!").unwrap();
}
}
| 26.203704 | 76 | 0.601413 |
6929e3b5bf4231d495ecdb57c89b84952d50f9bb | 2,907 | enum Op {
Add,
Mul,
}
impl Op {
fn apply(&self, left: u64, right: u64) -> u64 {
match self {
Op::Add => left + right,
Op::Mul => left * right,
}
}
}
impl From<char> for Op {
fn from(c: char) -> Self {
match c {
'+' => Op::Add,
'*' => Op::Mul,
_ => unreachable!(),
}
}
}
fn inner_eval<I: Iterator<Item = char>>(iter: &mut I) -> u64 {
let mut acc = 0;
let mut op = Op::Add;
while let Some(c) = iter.next() {
match c {
            '0'..='9' => {
                let right = (c as u64) - ('0' as u64); // convert the ASCII digit to its numeric value
acc = op.apply(acc, right);
}
'*' | '+' => {
op = c.into();
}
            // A ')' closes the current sub-expression; return its accumulated value
')' => return acc,
            // A '(' opens a nested sub-expression; evaluate it recursively
'(' => {
let right = inner_eval(iter);
                // apply the pending operator to the sub-expression's result
acc = op.apply(acc, right);
}
_ => {
                // ignore any other character (e.g. whitespace)
}
}
}
acc
}
fn eval(s: &str) -> u64 {
let mut it = s.chars();
inner_eval(&mut it)
    // TODO: check that `it` has been fully consumed (no trailing input)
}
pub fn part1(input: &str) -> u64 {
input.lines().map(eval).sum()
// 701339185745
}
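/// Evaluates one expression under part 2 precedence: innermost parentheses are
/// reduced first via recursion, then `+` binds tighter than `*`, which is why
/// the flat case splits on '*' and sums each '+'-separated chunk.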
fn math_with_priority(input: &str) -> u64 {
match input.find(')') {
Some(i) => {
let j = input[..i].rfind('(').unwrap();
let subresult = math_with_priority(&input[(j + 1)..i]);
let new_string = format!("{}{}{}", &input[..j], subresult, &input[(i + 1)..]);
math_with_priority(&new_string)
}
_ => input
.split('*')
.map(|expr| {
expr.split('+')
.map(|x| x.parse::<u64>().unwrap())
.sum::<u64>()
})
.product(),
}
}
pub fn part2(input: &str) -> u64 {
input.replace(" ", "").lines().map(math_with_priority).sum()
// 4208490449905
}
#[cfg(test)]
mod ex18_tests {
use super::*;
#[test]
fn test_eval() {
assert_eq!(eval("2 + 3 * 9"), (2 + 3) * 9);
assert_eq!(eval("2 + 3 * 9 + 4"), (2 + 3) * 9 + 4);
assert_eq!(eval("2 + (3 * 9) + 4"), 2 + 3 * 9 + 4);
assert_eq!(eval("2 + (3 * (9 + 2)) + 4"), 2 + 3 * (9 + 2) + 4);
}
#[test]
fn test_math_with_priority() {
assert_eq!(math_with_priority("2+3*9"), 45);
assert_eq!(math_with_priority("9*2+3"), 45);
}
#[test]
fn part_1() {
let input = include_str!("../input.txt");
assert_eq!(part1(input), 701339185745);
}
#[test]
fn part_2() {
let input = include_str!("../input.txt");
assert_eq!(part2(input), 4208490449905);
}
}
| 24.225 | 96 | 0.429309 |
2142776e65c2faf5f1d9069516bea7abb1b5a01c | 4,117 | /* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
impl Foo {
pub const Bar: Foo = Foo(2);
}
impl Foo {
pub const Baz: Foo = Foo(4);
}
impl Foo {
pub const Duplicated: Foo = Foo(4);
}
impl Foo {
pub const Negative: Foo = Foo(-3);
}
impl ::std::ops::BitOr<Foo> for Foo {
type Output = Self;
#[inline]
fn bitor(self, other: Self) -> Self {
Foo(self.0 | other.0)
}
}
impl ::std::ops::BitOrAssign for Foo {
#[inline]
fn bitor_assign(&mut self, rhs: Foo) {
self.0 |= rhs.0;
}
}
impl ::std::ops::BitAnd<Foo> for Foo {
type Output = Self;
#[inline]
fn bitand(self, other: Self) -> Self {
Foo(self.0 & other.0)
}
}
impl ::std::ops::BitAndAssign for Foo {
#[inline]
fn bitand_assign(&mut self, rhs: Foo) {
self.0 &= rhs.0;
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Foo(pub i32);
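// bindgen emits `Foo` as a newtype over its underlying integer instead of a
// Rust `enum`, so combined flag values such as `Foo::Bar | Foo::Baz` remain
// representable.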
impl Buz {
pub const Bar: Buz = Buz(2);
}
impl Buz {
pub const Baz: Buz = Buz(4);
}
impl Buz {
pub const Duplicated: Buz = Buz(4);
}
impl Buz {
pub const Negative: Buz = Buz(-3);
}
impl ::std::ops::BitOr<Buz> for Buz {
type Output = Self;
#[inline]
fn bitor(self, other: Self) -> Self {
Buz(self.0 | other.0)
}
}
impl ::std::ops::BitOrAssign for Buz {
#[inline]
fn bitor_assign(&mut self, rhs: Buz) {
self.0 |= rhs.0;
}
}
impl ::std::ops::BitAnd<Buz> for Buz {
type Output = Self;
#[inline]
fn bitand(self, other: Self) -> Self {
Buz(self.0 & other.0)
}
}
impl ::std::ops::BitAndAssign for Buz {
#[inline]
fn bitand_assign(&mut self, rhs: Buz) {
self.0 &= rhs.0;
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Buz(pub i8);
pub const NS_FOO: _bindgen_ty_1 = _bindgen_ty_1(1);
pub const NS_BAR: _bindgen_ty_1 = _bindgen_ty_1(2);
impl ::std::ops::BitOr<_bindgen_ty_1> for _bindgen_ty_1 {
type Output = Self;
#[inline]
fn bitor(self, other: Self) -> Self {
_bindgen_ty_1(self.0 | other.0)
}
}
impl ::std::ops::BitOrAssign for _bindgen_ty_1 {
#[inline]
fn bitor_assign(&mut self, rhs: _bindgen_ty_1) {
self.0 |= rhs.0;
}
}
impl ::std::ops::BitAnd<_bindgen_ty_1> for _bindgen_ty_1 {
type Output = Self;
#[inline]
fn bitand(self, other: Self) -> Self {
_bindgen_ty_1(self.0 & other.0)
}
}
impl ::std::ops::BitAndAssign for _bindgen_ty_1 {
#[inline]
fn bitand_assign(&mut self, rhs: _bindgen_ty_1) {
self.0 &= rhs.0;
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct _bindgen_ty_1(pub u32);
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct Dummy {
pub _address: u8,
}
pub const Dummy_DUMMY_FOO: Dummy__bindgen_ty_1 = Dummy__bindgen_ty_1(1);
pub const Dummy_DUMMY_BAR: Dummy__bindgen_ty_1 = Dummy__bindgen_ty_1(2);
impl ::std::ops::BitOr<Dummy__bindgen_ty_1> for Dummy__bindgen_ty_1 {
type Output = Self;
#[inline]
fn bitor(self, other: Self) -> Self {
Dummy__bindgen_ty_1(self.0 | other.0)
}
}
impl ::std::ops::BitOrAssign for Dummy__bindgen_ty_1 {
#[inline]
fn bitor_assign(&mut self, rhs: Dummy__bindgen_ty_1) {
self.0 |= rhs.0;
}
}
impl ::std::ops::BitAnd<Dummy__bindgen_ty_1> for Dummy__bindgen_ty_1 {
type Output = Self;
#[inline]
fn bitand(self, other: Self) -> Self {
Dummy__bindgen_ty_1(self.0 & other.0)
}
}
impl ::std::ops::BitAndAssign for Dummy__bindgen_ty_1 {
#[inline]
fn bitand_assign(&mut self, rhs: Dummy__bindgen_ty_1) {
self.0 &= rhs.0;
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Dummy__bindgen_ty_1(pub u32);
#[test]
fn bindgen_test_layout_Dummy() {
assert_eq!(
::std::mem::size_of::<Dummy>(),
1usize,
concat!("Size of: ", stringify!(Dummy))
);
assert_eq!(
::std::mem::align_of::<Dummy>(),
1usize,
concat!("Alignment of ", stringify!(Dummy))
);
}
| 23.936047 | 72 | 0.604809 |
ff14abc07ae80d084b0ef7f081782869036d4e3e | 3,293 | use std::fs::File;
use std::io::{self, BufReader};
use dialoguer::theme::ColorfulTheme;
use dialoguer::Select;
use log::{debug, info};
use nmk::gcs::{download_file, ObjectMeta};
use nmk::path::NmkPath;
use nmk::vendor::{extract_vendor_files, prepare_vendor_dir};
use crate::build::Target;
use crate::cmdline::CmdOpt;
use crate::os_release::OsReleaseId;
const LIST_OBJECTS_URL: &str =
"https://storage.googleapis.com/storage/v1/b/nmk.nuimk.com/o?delimiter=/&prefix=nmk-vendor/";
const TAG: &str = "vendor";
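/// Lists the vendor archives in the GCS bucket, optionally filters them by
/// os-release and CPU architecture, lets the user pick one, then downloads and
/// extracts it into the vendor directory.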
pub fn install(cmd_opt: &CmdOpt, nmk_path: &NmkPath) -> nmk::Result<()> {
let mut objects: Vec<_> = nmk::gcs::list_objects(LIST_OBJECTS_URL)?;
objects.retain(|obj| obj.name.ends_with(".tar.xz"));
if !cmd_opt.no_filter {
objects.retain(filter_by_os_release());
objects.retain(filter_by_arch());
}
let obj_meta = select_vendor_files(&objects)?;
let download_url = obj_meta.media_link.as_str();
info!("{}: Download url {}", TAG, download_url);
debug!("{}: Getting data.", TAG);
let tar_xz_data = BufReader::new(download_file(download_url)?);
debug!("{}: Received data.", TAG);
let vendor_dir = nmk_path.vendor();
prepare_vendor_dir(&vendor_dir)?;
debug!("{}: Extracting data.", TAG);
extract_vendor_files(tar_xz_data, &vendor_dir)?;
info!("{}: Done.", TAG);
Ok(())
}
fn filter_by_os_release() -> impl FnMut(&ObjectMeta) -> bool {
use crate::os_release::OsReleaseId::*;
let pattern = OsReleaseId::parse_os_release().map(|id| match id {
Amazon => "amazon",
CentOs => "centos",
Debian => "debian",
Ubuntu => "ubuntu",
});
move |item: &ObjectMeta| {
        // Try to filter by os-release data; if we can't determine os-release, don't filter at all.
pattern.map_or(true, |pat| item.name.contains(pat))
}
}
fn filter_by_arch() -> impl FnMut(&ObjectMeta) -> bool {
let target = Target::detect().expect("unsupported target");
const ARM64_TAG: &str = "arm64";
move |item| {
let found_tag = item.name.to_lowercase().contains(ARM64_TAG);
match target {
Target::Amd64Linux => !found_tag,
Target::Arm64Linux => found_tag,
_ => panic!("unsupported arch"),
}
}
}
fn get_display_name(objects: &[ObjectMeta]) -> Vec<&str> {
objects
.iter()
.flat_map(|obj| obj.name.split('/').last())
.collect()
}
fn select_vendor_files(objects: &[ObjectMeta]) -> nmk::Result<&ObjectMeta> {
assert!(!objects.is_empty(), "Not found any vendor data to select");
let display_names = get_display_name(objects);
display_some_os_info()?;
let index = Select::with_theme(&ColorfulTheme::default())
.with_prompt("Pick vendor files to use?")
.default(0)
.items(&display_names)
.interact()?;
Ok(&objects[index])
}
/// Show OS information to help select the correct vendor files
///
/// On CentOS, /etc/os-release doesn't show the CentOS minor version
fn display_some_os_info() -> io::Result<()> {
info!("Displaying os information..");
let infos = ["/etc/centos-release", "/etc/os-release"].iter();
if let Some(mut f) = infos.flat_map(File::open).next() {
io::copy(&mut f, &mut io::stdout())?;
}
Ok(())
}
| 33.602041 | 99 | 0.633161 |
22f4859891d4a971b8bbcd8f0d314bd9089246b0 | 4,700 | #[doc = "Register `ENABLE` reader"]
pub struct R(crate::R<ENABLE_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<ENABLE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<ENABLE_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<ENABLE_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `ENABLE` writer"]
pub struct W(crate::W<ENABLE_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<ENABLE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<ENABLE_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<ENABLE_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Enable or disable QSPI\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ENABLE_A {
#[doc = "0: Disable QSPI"]
DISABLED = 0,
#[doc = "1: Enable QSPI"]
ENABLED = 1,
}
impl From<ENABLE_A> for bool {
#[inline(always)]
fn from(variant: ENABLE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `ENABLE` reader - Enable or disable QSPI"]
pub struct ENABLE_R(crate::FieldReader<bool, ENABLE_A>);
impl ENABLE_R {
pub(crate) fn new(bits: bool) -> Self {
ENABLE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ENABLE_A {
match self.bits {
false => ENABLE_A::DISABLED,
true => ENABLE_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == ENABLE_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == ENABLE_A::ENABLED
}
}
impl core::ops::Deref for ENABLE_R {
type Target = crate::FieldReader<bool, ENABLE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `ENABLE` writer - Enable or disable QSPI"]
pub struct ENABLE_W<'a> {
w: &'a mut W,
}
impl<'a> ENABLE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: ENABLE_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Disable QSPI"]
#[inline(always)]
pub fn disabled(self) -> &'a mut W {
self.variant(ENABLE_A::DISABLED)
}
#[doc = "Enable QSPI"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(ENABLE_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0 - Enable or disable QSPI"]
#[inline(always)]
pub fn enable(&self) -> ENABLE_R {
ENABLE_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Enable or disable QSPI"]
#[inline(always)]
pub fn enable(&mut self) -> ENABLE_W {
ENABLE_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Enable QSPI peripheral and acquire the pins selected in PSELn registers\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [enable](index.html) module"]
pub struct ENABLE_SPEC;
impl crate::RegisterSpec for ENABLE_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [enable::R](R) reader structure"]
impl crate::Readable for ENABLE_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [enable::W](W) writer structure"]
impl crate::Writable for ENABLE_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets ENABLE to value 0"]
impl crate::Resettable for ENABLE_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
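// Sketch of typical access through the svd2rust read/write API (the peripheral
// handle and register path depend on the generated device crate):
//
//     qspi.enable.write(|w| w.enable().enabled());
//     let is_on = qspi.enable.read().enable().is_enabled();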
| 29.375 | 458 | 0.584894 |
e5aae214429d9c0e398cd9794d75268cbba1b201 | 2,776 | // SPDX-License-Identifier: Apache-2.0
//! Everything needed for working with AMD SEV certificate chains.
pub mod builtin;
pub mod ca;
mod chain;
pub mod sev;
#[cfg(feature = "openssl")]
mod util;
#[cfg(feature = "openssl")]
mod crypto;
use std::convert::*;
use std::io::{Error, ErrorKind, Read, Result, Write};
pub use chain::Chain;
use crate::util::*;
#[cfg(feature = "openssl")]
use util::*;
#[cfg(feature = "openssl")]
use openssl::*;
#[cfg(feature = "openssl")]
struct Body;
#[cfg(feature = "openssl")]
/// An interface for types that may contain entities such as
/// signatures that must be verified.
pub trait Verifiable {
/// An output type for successful verification.
type Output;
/// Self-verifies signatures.
fn verify(self) -> Result<Self::Output>;
}
#[cfg(feature = "openssl")]
/// An interface for types that can sign another type (i.e., a certificate).
pub trait Signer<T> {
/// The now-signed type.
type Output;
/// Signs the target.
fn sign(&self, target: &mut T) -> Result<Self::Output>;
}
#[cfg(feature = "openssl")]
struct Signature {
id: Option<[u8; 16]>,
sig: Vec<u8>,
kind: pkey::Id,
hash: hash::MessageDigest,
usage: Usage,
}
#[cfg(feature = "openssl")]
/// Represents a private key.
pub struct PrivateKey<U> {
id: Option<[u8; 16]>,
key: pkey::PKey<pkey::Private>,
hash: hash::MessageDigest,
usage: U,
}
#[cfg(feature = "openssl")]
struct PublicKey<U> {
id: Option<[u8; 16]>,
key: pkey::PKey<pkey::Public>,
hash: hash::MessageDigest,
usage: U,
}
/// Denotes a certificate's usage.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Usage(u32);
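// The wrapped value is kept in little-endian byte order (note the `to_le()`
// calls below), so the `#[repr(C)]` layout matches the serialized certificate
// format regardless of host endianness.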
impl Usage {
/// Owner Certificate Authority.
pub const OCA: Usage = Usage(0x1001u32.to_le());
/// AMD Root Key.
pub const ARK: Usage = Usage(0x0000u32.to_le());
/// AMD Signing Key.
pub const ASK: Usage = Usage(0x0013u32.to_le());
/// Chip Endorsement Key.
pub const CEK: Usage = Usage(0x1004u32.to_le());
/// Platform Endorsement Key.
pub const PEK: Usage = Usage(0x1002u32.to_le());
/// Platform Diffie-Hellman.
pub const PDH: Usage = Usage(0x1003u32.to_le());
const INV: Usage = Usage(0x1000u32.to_le());
}
impl std::fmt::Display for Usage {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"{}",
match *self {
Usage::OCA => "OCA",
Usage::PEK => "PEK",
Usage::PDH => "PDH",
Usage::CEK => "CEK",
Usage::ARK => "ARK",
Usage::ASK => "ASK",
Usage::INV => "INV",
_ => return Err(std::fmt::Error),
}
)
}
}
| 22.754098 | 76 | 0.582493 |
4bce961472337a540dc83fe13177bc23205e9073 | 108,832 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
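// Sketch of building and using a client; `credential` is any caller-supplied
// `std::sync::Arc<dyn azure_core::auth::TokenCredential>`:
//
//     let client = ClientBuilder::new(credential)
//         .endpoint(DEFAULT_ENDPOINT)
//         .build();
//     let rules = client
//         .action_rules()
//         .list_by_subscription(subscription_id)
//         .into_future()
//         .await?;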
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(
endpoint: impl Into<String>,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn action_rules(&self) -> action_rules::Client {
action_rules::Client(self.clone())
}
pub fn alerts(&self) -> alerts::Client {
alerts::Client(self.clone())
}
pub fn operations(&self) -> operations::Client {
operations::Client(self.clone())
}
pub fn smart_groups(&self) -> smart_groups::Client {
smart_groups::Client(self.clone())
}
}
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
#[error(transparent)]
ActionRules_ListBySubscription(#[from] action_rules::list_by_subscription::Error),
#[error(transparent)]
ActionRules_ListByResourceGroup(#[from] action_rules::list_by_resource_group::Error),
#[error(transparent)]
ActionRules_GetByName(#[from] action_rules::get_by_name::Error),
#[error(transparent)]
ActionRules_CreateUpdate(#[from] action_rules::create_update::Error),
#[error(transparent)]
ActionRules_Update(#[from] action_rules::update::Error),
#[error(transparent)]
ActionRules_Delete(#[from] action_rules::delete::Error),
#[error(transparent)]
Operations_List(#[from] operations::list::Error),
#[error(transparent)]
Alerts_MetaData(#[from] alerts::meta_data::Error),
#[error(transparent)]
Alerts_GetAll(#[from] alerts::get_all::Error),
#[error(transparent)]
Alerts_GetById(#[from] alerts::get_by_id::Error),
#[error(transparent)]
Alerts_ChangeState(#[from] alerts::change_state::Error),
#[error(transparent)]
Alerts_GetHistory(#[from] alerts::get_history::Error),
#[error(transparent)]
Alerts_GetSummary(#[from] alerts::get_summary::Error),
#[error(transparent)]
SmartGroups_GetAll(#[from] smart_groups::get_all::Error),
#[error(transparent)]
SmartGroups_GetById(#[from] smart_groups::get_by_id::Error),
#[error(transparent)]
SmartGroups_ChangeState(#[from] smart_groups::change_state::Error),
#[error(transparent)]
SmartGroups_GetHistory(#[from] smart_groups::get_history::Error),
}
pub mod action_rules {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Get all action rule in a given subscription"]
pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder {
list_by_subscription::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
target_resource_group: None,
target_resource_type: None,
target_resource: None,
severity: None,
monitor_service: None,
impacted_scope: None,
description: None,
alert_rule_id: None,
action_group: None,
name: None,
}
}
#[doc = "Get all action rules created in a resource group"]
pub fn list_by_resource_group(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
target_resource_group: None,
target_resource_type: None,
target_resource: None,
severity: None,
monitor_service: None,
impacted_scope: None,
description: None,
alert_rule_id: None,
action_group: None,
name: None,
}
}
#[doc = "Get action rule by name"]
pub fn get_by_name(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
action_rule_name: impl Into<String>,
) -> get_by_name::Builder {
get_by_name::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
action_rule_name: action_rule_name.into(),
}
}
#[doc = "Create/update an action rule"]
pub fn create_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
action_rule_name: impl Into<String>,
action_rule: impl Into<models::ActionRule>,
) -> create_update::Builder {
create_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
action_rule_name: action_rule_name.into(),
action_rule: action_rule.into(),
}
}
#[doc = "Patch action rule"]
pub fn update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
action_rule_name: impl Into<String>,
action_rule_patch: impl Into<models::PatchObject>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
action_rule_name: action_rule_name.into(),
action_rule_patch: action_rule_patch.into(),
}
}
#[doc = "Delete action rule"]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
action_rule_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
action_rule_name: action_rule_name.into(),
}
}
}
pub mod list_by_subscription {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) target_resource_group: Option<String>,
pub(crate) target_resource_type: Option<String>,
pub(crate) target_resource: Option<String>,
pub(crate) severity: Option<String>,
pub(crate) monitor_service: Option<String>,
pub(crate) impacted_scope: Option<String>,
pub(crate) description: Option<String>,
pub(crate) alert_rule_id: Option<String>,
pub(crate) action_group: Option<String>,
pub(crate) name: Option<String>,
}
impl Builder {
pub fn target_resource_group(mut self, target_resource_group: impl Into<String>) -> Self {
self.target_resource_group = Some(target_resource_group.into());
self
}
pub fn target_resource_type(mut self, target_resource_type: impl Into<String>) -> Self {
self.target_resource_type = Some(target_resource_type.into());
self
}
pub fn target_resource(mut self, target_resource: impl Into<String>) -> Self {
self.target_resource = Some(target_resource.into());
self
}
pub fn severity(mut self, severity: impl Into<String>) -> Self {
self.severity = Some(severity.into());
self
}
pub fn monitor_service(mut self, monitor_service: impl Into<String>) -> Self {
self.monitor_service = Some(monitor_service.into());
self
}
pub fn impacted_scope(mut self, impacted_scope: impl Into<String>) -> Self {
self.impacted_scope = Some(impacted_scope.into());
self
}
pub fn description(mut self, description: impl Into<String>) -> Self {
self.description = Some(description.into());
self
}
pub fn alert_rule_id(mut self, alert_rule_id: impl Into<String>) -> Self {
self.alert_rule_id = Some(alert_rule_id.into());
self
}
pub fn action_group(mut self, action_group: impl Into<String>) -> Self {
self.action_group = Some(action_group.into());
self
}
pub fn name(mut self, name: impl Into<String>) -> Self {
self.name = Some(name.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ActionRulesList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/actionRules",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(target_resource_group) = &self.target_resource_group {
url.query_pairs_mut().append_pair("targetResourceGroup", target_resource_group);
}
if let Some(target_resource_type) = &self.target_resource_type {
url.query_pairs_mut().append_pair("targetResourceType", target_resource_type);
}
if let Some(target_resource) = &self.target_resource {
url.query_pairs_mut().append_pair("targetResource", target_resource);
}
if let Some(severity) = &self.severity {
url.query_pairs_mut().append_pair("severity", severity);
}
if let Some(monitor_service) = &self.monitor_service {
url.query_pairs_mut().append_pair("monitorService", monitor_service);
}
if let Some(impacted_scope) = &self.impacted_scope {
url.query_pairs_mut().append_pair("impactedScope", impacted_scope);
}
if let Some(description) = &self.description {
url.query_pairs_mut().append_pair("description", description);
}
if let Some(alert_rule_id) = &self.alert_rule_id {
url.query_pairs_mut().append_pair("alertRuleId", alert_rule_id);
}
if let Some(action_group) = &self.action_group {
url.query_pairs_mut().append_pair("actionGroup", action_group);
}
if let Some(name) = &self.name {
url.query_pairs_mut().append_pair("name", name);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ActionRulesList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) target_resource_group: Option<String>,
pub(crate) target_resource_type: Option<String>,
pub(crate) target_resource: Option<String>,
pub(crate) severity: Option<String>,
pub(crate) monitor_service: Option<String>,
pub(crate) impacted_scope: Option<String>,
pub(crate) description: Option<String>,
pub(crate) alert_rule_id: Option<String>,
pub(crate) action_group: Option<String>,
pub(crate) name: Option<String>,
}
impl Builder {
pub fn target_resource_group(mut self, target_resource_group: impl Into<String>) -> Self {
self.target_resource_group = Some(target_resource_group.into());
self
}
pub fn target_resource_type(mut self, target_resource_type: impl Into<String>) -> Self {
self.target_resource_type = Some(target_resource_type.into());
self
}
pub fn target_resource(mut self, target_resource: impl Into<String>) -> Self {
self.target_resource = Some(target_resource.into());
self
}
pub fn severity(mut self, severity: impl Into<String>) -> Self {
self.severity = Some(severity.into());
self
}
pub fn monitor_service(mut self, monitor_service: impl Into<String>) -> Self {
self.monitor_service = Some(monitor_service.into());
self
}
pub fn impacted_scope(mut self, impacted_scope: impl Into<String>) -> Self {
self.impacted_scope = Some(impacted_scope.into());
self
}
pub fn description(mut self, description: impl Into<String>) -> Self {
self.description = Some(description.into());
self
}
pub fn alert_rule_id(mut self, alert_rule_id: impl Into<String>) -> Self {
self.alert_rule_id = Some(alert_rule_id.into());
self
}
pub fn action_group(mut self, action_group: impl Into<String>) -> Self {
self.action_group = Some(action_group.into());
self
}
pub fn name(mut self, name: impl Into<String>) -> Self {
self.name = Some(name.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ActionRulesList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AlertsManagement/actionRules",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(target_resource_group) = &self.target_resource_group {
url.query_pairs_mut().append_pair("targetResourceGroup", target_resource_group);
}
if let Some(target_resource_type) = &self.target_resource_type {
url.query_pairs_mut().append_pair("targetResourceType", target_resource_type);
}
if let Some(target_resource) = &self.target_resource {
url.query_pairs_mut().append_pair("targetResource", target_resource);
}
if let Some(severity) = &self.severity {
url.query_pairs_mut().append_pair("severity", severity);
}
if let Some(monitor_service) = &self.monitor_service {
url.query_pairs_mut().append_pair("monitorService", monitor_service);
}
if let Some(impacted_scope) = &self.impacted_scope {
url.query_pairs_mut().append_pair("impactedScope", impacted_scope);
}
if let Some(description) = &self.description {
url.query_pairs_mut().append_pair("description", description);
}
if let Some(alert_rule_id) = &self.alert_rule_id {
url.query_pairs_mut().append_pair("alertRuleId", alert_rule_id);
}
if let Some(action_group) = &self.action_group {
url.query_pairs_mut().append_pair("actionGroup", action_group);
}
if let Some(name) = &self.name {
url.query_pairs_mut().append_pair("name", name);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ActionRulesList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_by_name {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) action_rule_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ActionRule, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AlertsManagement/actionRules/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.action_rule_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ActionRule =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_update {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) action_rule_name: String,
pub(crate) action_rule: models::ActionRule,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ActionRule, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AlertsManagement/actionRules/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.action_rule_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.action_rule).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ActionRule =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) action_rule_name: String,
pub(crate) action_rule_patch: models::PatchObject,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ActionRule, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AlertsManagement/actionRules/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.action_rule_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.action_rule_patch).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ActionRule =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) action_rule_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<bool, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.AlertsManagement/actionRules/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.action_rule_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: bool =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod operations {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationsList, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.AlertsManagement/operations", self.client.endpoint(),);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationsList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod alerts {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn meta_data(&self, identifier: impl Into<String>) -> meta_data::Builder {
meta_data::Builder {
client: self.0.clone(),
identifier: identifier.into(),
}
}
pub fn get_all(&self, subscription_id: impl Into<String>) -> get_all::Builder {
get_all::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
target_resource: None,
target_resource_type: None,
target_resource_group: None,
monitor_service: None,
monitor_condition: None,
severity: None,
alert_state: None,
alert_rule: None,
smart_group_id: None,
include_context: None,
include_egress_config: None,
page_count: None,
sort_by: None,
sort_order: None,
select: None,
time_range: None,
custom_time_range: None,
}
}
#[doc = "Get a specific alert."]
pub fn get_by_id(&self, subscription_id: impl Into<String>, alert_id: impl Into<String>) -> get_by_id::Builder {
get_by_id::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
alert_id: alert_id.into(),
}
}
pub fn change_state(
&self,
subscription_id: impl Into<String>,
alert_id: impl Into<String>,
new_state: impl Into<String>,
) -> change_state::Builder {
change_state::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
alert_id: alert_id.into(),
new_state: new_state.into(),
}
}
pub fn get_history(&self, subscription_id: impl Into<String>, alert_id: impl Into<String>) -> get_history::Builder {
get_history::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
alert_id: alert_id.into(),
}
}
pub fn get_summary(&self, subscription_id: impl Into<String>, groupby: impl Into<String>) -> get_summary::Builder {
get_summary::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
groupby: groupby.into(),
include_smart_groups_count: None,
target_resource: None,
target_resource_type: None,
target_resource_group: None,
monitor_service: None,
monitor_condition: None,
severity: None,
alert_state: None,
alert_rule: None,
time_range: None,
custom_time_range: None,
}
}
}
pub mod meta_data {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) identifier: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AlertsMetaData, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.AlertsManagement/alertsMetaData", self.client.endpoint(),);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let identifier = &self.identifier;
url.query_pairs_mut().append_pair("identifier", identifier);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AlertsMetaData =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_all {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) target_resource: Option<String>,
pub(crate) target_resource_type: Option<String>,
pub(crate) target_resource_group: Option<String>,
pub(crate) monitor_service: Option<String>,
pub(crate) monitor_condition: Option<String>,
pub(crate) severity: Option<String>,
pub(crate) alert_state: Option<String>,
pub(crate) alert_rule: Option<String>,
pub(crate) smart_group_id: Option<String>,
pub(crate) include_context: Option<bool>,
pub(crate) include_egress_config: Option<bool>,
pub(crate) page_count: Option<i64>,
pub(crate) sort_by: Option<String>,
pub(crate) sort_order: Option<String>,
pub(crate) select: Option<String>,
pub(crate) time_range: Option<String>,
pub(crate) custom_time_range: Option<String>,
}
impl Builder {
pub fn target_resource(mut self, target_resource: impl Into<String>) -> Self {
self.target_resource = Some(target_resource.into());
self
}
pub fn target_resource_type(mut self, target_resource_type: impl Into<String>) -> Self {
self.target_resource_type = Some(target_resource_type.into());
self
}
pub fn target_resource_group(mut self, target_resource_group: impl Into<String>) -> Self {
self.target_resource_group = Some(target_resource_group.into());
self
}
pub fn monitor_service(mut self, monitor_service: impl Into<String>) -> Self {
self.monitor_service = Some(monitor_service.into());
self
}
pub fn monitor_condition(mut self, monitor_condition: impl Into<String>) -> Self {
self.monitor_condition = Some(monitor_condition.into());
self
}
pub fn severity(mut self, severity: impl Into<String>) -> Self {
self.severity = Some(severity.into());
self
}
pub fn alert_state(mut self, alert_state: impl Into<String>) -> Self {
self.alert_state = Some(alert_state.into());
self
}
pub fn alert_rule(mut self, alert_rule: impl Into<String>) -> Self {
self.alert_rule = Some(alert_rule.into());
self
}
pub fn smart_group_id(mut self, smart_group_id: impl Into<String>) -> Self {
self.smart_group_id = Some(smart_group_id.into());
self
}
pub fn include_context(mut self, include_context: bool) -> Self {
self.include_context = Some(include_context);
self
}
pub fn include_egress_config(mut self, include_egress_config: bool) -> Self {
self.include_egress_config = Some(include_egress_config);
self
}
pub fn page_count(mut self, page_count: i64) -> Self {
self.page_count = Some(page_count);
self
}
pub fn sort_by(mut self, sort_by: impl Into<String>) -> Self {
self.sort_by = Some(sort_by.into());
self
}
pub fn sort_order(mut self, sort_order: impl Into<String>) -> Self {
self.sort_order = Some(sort_order.into());
self
}
pub fn select(mut self, select: impl Into<String>) -> Self {
self.select = Some(select.into());
self
}
pub fn time_range(mut self, time_range: impl Into<String>) -> Self {
self.time_range = Some(time_range.into());
self
}
pub fn custom_time_range(mut self, custom_time_range: impl Into<String>) -> Self {
self.custom_time_range = Some(custom_time_range.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AlertsList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/alerts",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(target_resource) = &self.target_resource {
url.query_pairs_mut().append_pair("targetResource", target_resource);
}
if let Some(target_resource_type) = &self.target_resource_type {
url.query_pairs_mut().append_pair("targetResourceType", target_resource_type);
}
if let Some(target_resource_group) = &self.target_resource_group {
url.query_pairs_mut().append_pair("targetResourceGroup", target_resource_group);
}
if let Some(monitor_service) = &self.monitor_service {
url.query_pairs_mut().append_pair("monitorService", monitor_service);
}
if let Some(monitor_condition) = &self.monitor_condition {
url.query_pairs_mut().append_pair("monitorCondition", monitor_condition);
}
if let Some(severity) = &self.severity {
url.query_pairs_mut().append_pair("severity", severity);
}
if let Some(alert_state) = &self.alert_state {
url.query_pairs_mut().append_pair("alertState", alert_state);
}
if let Some(alert_rule) = &self.alert_rule {
url.query_pairs_mut().append_pair("alertRule", alert_rule);
}
if let Some(smart_group_id) = &self.smart_group_id {
url.query_pairs_mut().append_pair("smartGroupId", smart_group_id);
}
if let Some(include_context) = &self.include_context {
url.query_pairs_mut().append_pair("includeContext", &include_context.to_string());
}
if let Some(include_egress_config) = &self.include_egress_config {
url.query_pairs_mut()
.append_pair("includeEgressConfig", &include_egress_config.to_string());
}
if let Some(page_count) = &self.page_count {
url.query_pairs_mut().append_pair("pageCount", &page_count.to_string());
}
if let Some(sort_by) = &self.sort_by {
url.query_pairs_mut().append_pair("sortBy", sort_by);
}
if let Some(sort_order) = &self.sort_order {
url.query_pairs_mut().append_pair("sortOrder", sort_order);
}
if let Some(select) = &self.select {
url.query_pairs_mut().append_pair("select", select);
}
if let Some(time_range) = &self.time_range {
url.query_pairs_mut().append_pair("timeRange", time_range);
}
if let Some(custom_time_range) = &self.custom_time_range {
url.query_pairs_mut().append_pair("customTimeRange", custom_time_range);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AlertsList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
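        // Hypothetical usage sketch (not part of the generated sources): a typical
        // call chain for this list builder, assuming the crate's top-level `Client`
        // exposes an `alerts()` client group and the caller is inside an async fn.
        //
        //     let alerts = client
        //         .alerts()
        //         .get_all(subscription_id)
        //         .severity("Sev3")
        //         .alert_state("New")
        //         .page_count(25)
        //         .into_future()
        //         .await?;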
}
pub mod get_by_id {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) alert_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Alert, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/alerts/{}",
self.client.endpoint(),
&self.subscription_id,
&self.alert_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Alert =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod change_state {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) alert_id: String,
pub(crate) new_state: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Alert, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/alerts/{}/changestate",
self.client.endpoint(),
&self.subscription_id,
&self.alert_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let new_state = &self.new_state;
url.query_pairs_mut().append_pair("newState", new_state);
let req_body = azure_core::EMPTY_BODY;
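                // The request body is empty; the Content-Length header is set to 0 explicitly below.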
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Alert =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_history {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) alert_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AlertModification, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/alerts/{}/history",
self.client.endpoint(),
&self.subscription_id,
&self.alert_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AlertModification =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_summary {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) groupby: String,
pub(crate) include_smart_groups_count: Option<bool>,
pub(crate) target_resource: Option<String>,
pub(crate) target_resource_type: Option<String>,
pub(crate) target_resource_group: Option<String>,
pub(crate) monitor_service: Option<String>,
pub(crate) monitor_condition: Option<String>,
pub(crate) severity: Option<String>,
pub(crate) alert_state: Option<String>,
pub(crate) alert_rule: Option<String>,
pub(crate) time_range: Option<String>,
pub(crate) custom_time_range: Option<String>,
}
impl Builder {
pub fn include_smart_groups_count(mut self, include_smart_groups_count: bool) -> Self {
self.include_smart_groups_count = Some(include_smart_groups_count);
self
}
pub fn target_resource(mut self, target_resource: impl Into<String>) -> Self {
self.target_resource = Some(target_resource.into());
self
}
pub fn target_resource_type(mut self, target_resource_type: impl Into<String>) -> Self {
self.target_resource_type = Some(target_resource_type.into());
self
}
pub fn target_resource_group(mut self, target_resource_group: impl Into<String>) -> Self {
self.target_resource_group = Some(target_resource_group.into());
self
}
pub fn monitor_service(mut self, monitor_service: impl Into<String>) -> Self {
self.monitor_service = Some(monitor_service.into());
self
}
pub fn monitor_condition(mut self, monitor_condition: impl Into<String>) -> Self {
self.monitor_condition = Some(monitor_condition.into());
self
}
pub fn severity(mut self, severity: impl Into<String>) -> Self {
self.severity = Some(severity.into());
self
}
pub fn alert_state(mut self, alert_state: impl Into<String>) -> Self {
self.alert_state = Some(alert_state.into());
self
}
pub fn alert_rule(mut self, alert_rule: impl Into<String>) -> Self {
self.alert_rule = Some(alert_rule.into());
self
}
pub fn time_range(mut self, time_range: impl Into<String>) -> Self {
self.time_range = Some(time_range.into());
self
}
pub fn custom_time_range(mut self, custom_time_range: impl Into<String>) -> Self {
self.custom_time_range = Some(custom_time_range.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AlertsSummary, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/alertsSummary",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let groupby = &self.groupby;
url.query_pairs_mut().append_pair("groupby", groupby);
if let Some(include_smart_groups_count) = &self.include_smart_groups_count {
url.query_pairs_mut()
.append_pair("includeSmartGroupsCount", &include_smart_groups_count.to_string());
}
if let Some(target_resource) = &self.target_resource {
url.query_pairs_mut().append_pair("targetResource", target_resource);
}
if let Some(target_resource_type) = &self.target_resource_type {
url.query_pairs_mut().append_pair("targetResourceType", target_resource_type);
}
if let Some(target_resource_group) = &self.target_resource_group {
url.query_pairs_mut().append_pair("targetResourceGroup", target_resource_group);
}
if let Some(monitor_service) = &self.monitor_service {
url.query_pairs_mut().append_pair("monitorService", monitor_service);
}
if let Some(monitor_condition) = &self.monitor_condition {
url.query_pairs_mut().append_pair("monitorCondition", monitor_condition);
}
if let Some(severity) = &self.severity {
url.query_pairs_mut().append_pair("severity", severity);
}
if let Some(alert_state) = &self.alert_state {
url.query_pairs_mut().append_pair("alertState", alert_state);
}
if let Some(alert_rule) = &self.alert_rule {
url.query_pairs_mut().append_pair("alertRule", alert_rule);
}
if let Some(time_range) = &self.time_range {
url.query_pairs_mut().append_pair("timeRange", time_range);
}
if let Some(custom_time_range) = &self.custom_time_range {
url.query_pairs_mut().append_pair("customTimeRange", custom_time_range);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::AlertsSummary =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod smart_groups {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Get all Smart Groups within a specified subscription"]
pub fn get_all(&self, subscription_id: impl Into<String>) -> get_all::Builder {
get_all::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
target_resource: None,
target_resource_group: None,
target_resource_type: None,
monitor_service: None,
monitor_condition: None,
severity: None,
smart_group_state: None,
time_range: None,
page_count: None,
sort_by: None,
sort_order: None,
}
}
#[doc = "Get information related to a specific Smart Group."]
pub fn get_by_id(&self, subscription_id: impl Into<String>, smart_group_id: impl Into<String>) -> get_by_id::Builder {
get_by_id::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
smart_group_id: smart_group_id.into(),
}
}
pub fn change_state(
&self,
subscription_id: impl Into<String>,
smart_group_id: impl Into<String>,
new_state: impl Into<String>,
) -> change_state::Builder {
change_state::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
smart_group_id: smart_group_id.into(),
new_state: new_state.into(),
}
}
pub fn get_history(&self, subscription_id: impl Into<String>, smart_group_id: impl Into<String>) -> get_history::Builder {
get_history::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
smart_group_id: smart_group_id.into(),
}
}
}
pub mod get_all {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) target_resource: Option<String>,
pub(crate) target_resource_group: Option<String>,
pub(crate) target_resource_type: Option<String>,
pub(crate) monitor_service: Option<String>,
pub(crate) monitor_condition: Option<String>,
pub(crate) severity: Option<String>,
pub(crate) smart_group_state: Option<String>,
pub(crate) time_range: Option<String>,
pub(crate) page_count: Option<i64>,
pub(crate) sort_by: Option<String>,
pub(crate) sort_order: Option<String>,
}
impl Builder {
pub fn target_resource(mut self, target_resource: impl Into<String>) -> Self {
self.target_resource = Some(target_resource.into());
self
}
pub fn target_resource_group(mut self, target_resource_group: impl Into<String>) -> Self {
self.target_resource_group = Some(target_resource_group.into());
self
}
pub fn target_resource_type(mut self, target_resource_type: impl Into<String>) -> Self {
self.target_resource_type = Some(target_resource_type.into());
self
}
pub fn monitor_service(mut self, monitor_service: impl Into<String>) -> Self {
self.monitor_service = Some(monitor_service.into());
self
}
pub fn monitor_condition(mut self, monitor_condition: impl Into<String>) -> Self {
self.monitor_condition = Some(monitor_condition.into());
self
}
pub fn severity(mut self, severity: impl Into<String>) -> Self {
self.severity = Some(severity.into());
self
}
pub fn smart_group_state(mut self, smart_group_state: impl Into<String>) -> Self {
self.smart_group_state = Some(smart_group_state.into());
self
}
pub fn time_range(mut self, time_range: impl Into<String>) -> Self {
self.time_range = Some(time_range.into());
self
}
pub fn page_count(mut self, page_count: i64) -> Self {
self.page_count = Some(page_count);
self
}
pub fn sort_by(mut self, sort_by: impl Into<String>) -> Self {
self.sort_by = Some(sort_by.into());
self
}
pub fn sort_order(mut self, sort_order: impl Into<String>) -> Self {
self.sort_order = Some(sort_order.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::SmartGroupsList, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/smartGroups",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(target_resource) = &self.target_resource {
url.query_pairs_mut().append_pair("targetResource", target_resource);
}
if let Some(target_resource_group) = &self.target_resource_group {
url.query_pairs_mut().append_pair("targetResourceGroup", target_resource_group);
}
if let Some(target_resource_type) = &self.target_resource_type {
url.query_pairs_mut().append_pair("targetResourceType", target_resource_type);
}
if let Some(monitor_service) = &self.monitor_service {
url.query_pairs_mut().append_pair("monitorService", monitor_service);
}
if let Some(monitor_condition) = &self.monitor_condition {
url.query_pairs_mut().append_pair("monitorCondition", monitor_condition);
}
if let Some(severity) = &self.severity {
url.query_pairs_mut().append_pair("severity", severity);
}
if let Some(smart_group_state) = &self.smart_group_state {
url.query_pairs_mut().append_pair("smartGroupState", smart_group_state);
}
if let Some(time_range) = &self.time_range {
url.query_pairs_mut().append_pair("timeRange", time_range);
}
if let Some(page_count) = &self.page_count {
url.query_pairs_mut().append_pair("pageCount", &page_count.to_string());
}
if let Some(sort_by) = &self.sort_by {
url.query_pairs_mut().append_pair("sortBy", sort_by);
}
if let Some(sort_order) = &self.sort_order {
url.query_pairs_mut().append_pair("sortOrder", sort_order);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SmartGroupsList =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_by_id {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) smart_group_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::SmartGroup, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/smartGroups/{}",
self.client.endpoint(),
&self.subscription_id,
&self.smart_group_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SmartGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod change_state {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) smart_group_id: String,
pub(crate) new_state: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::SmartGroup, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/smartGroups/{}/changeState",
self.client.endpoint(),
&self.subscription_id,
&self.smart_group_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let new_state = &self.new_state;
url.query_pairs_mut().append_pair("newState", new_state);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SmartGroup =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_history {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) smart_group_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::SmartGroupModification, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.AlertsManagement/smartGroups/{}/history",
self.client.endpoint(),
&self.subscription_id,
&self.smart_group_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::SmartGroupModification =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorResponse =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
| 51.118835 | 137 | 0.518643 |
728dd993484dd233b85a4b325086b83424d863e2 | 3,518 | //! CDC-ACM serial port example using polling in a busy loop.
//! Target board: any STM32F7 with an OTG FS/HS peripheral and a 25MHz HSE generator
//! This example works on the 32F723EDISCOVERY board.
//!
//! For FS operation:
//! > cargo run --example usb_serial --features "stm32f723, rt, usb_fs" --release
//!
//! For HS operation:
//! > cargo run --example usb_serial --features "stm32f723, rt, usb_hs" --release
//!
//! Note that the `usbd-serial` library used in this example doesn't support
//! HighSpeed mode properly at the moment. See
//! https://github.com/mvirkkunen/usbd-serial/pull/14 for a potential workaround.
#![no_std]
#![no_main]
use panic_semihosting as _;
use cortex_m_rt::entry;
#[cfg(feature = "usb_fs")]
use stm32f7xx_hal::otg_fs::{UsbBus, USB};
#[cfg(feature = "usb_hs")]
use stm32f7xx_hal::otg_hs::{UsbBus, USB};
use stm32f7xx_hal::pac;
use stm32f7xx_hal::prelude::*;
use stm32f7xx_hal::rcc::{HSEClock, HSEClockMode};
use usb_device::prelude::*;
#[entry]
fn main() -> ! {
let dp = pac::Peripherals::take().unwrap();
let rcc = dp.RCC.constrain();
let clocks = rcc
.cfgr
.hse(HSEClock::new(25_000_000.Hz(), HSEClockMode::Bypass))
.use_pll()
.use_pll48clk()
.sysclk(216_000_000.Hz())
.freeze();
#[cfg(feature = "usb_fs")]
let gpioa = dp.GPIOA.split();
#[cfg(feature = "usb_hs")]
let gpiob = dp.GPIOB.split();
#[cfg(feature = "usb_fs")]
let usb = USB::new(
dp.OTG_FS_GLOBAL,
dp.OTG_FS_DEVICE,
dp.OTG_FS_PWRCLK,
(gpioa.pa11.into_alternate(), gpioa.pa12.into_alternate()),
clocks,
);
#[cfg(all(feature = "usb_hs", not(feature = "usb_hs_phy")))]
let usb = USB::new(
dp.OTG_HS_GLOBAL,
dp.OTG_HS_DEVICE,
dp.OTG_HS_PWRCLK,
(gpiob.pb14.into_alternate(), gpiob.pb15.into_alternate()),
clocks,
);
#[cfg(all(feature = "usb_hs", feature = "usb_hs_phy"))]
let usb = USB::new_with_internal_hs_phy(
dp.OTG_HS_GLOBAL,
dp.OTG_HS_DEVICE,
dp.OTG_HS_PWRCLK,
dp.USBPHYC,
(gpiob.pb14.into_alternate(), gpiob.pb15.into_alternate()),
clocks,
);
static mut EP_MEMORY: [u32; 1024] = [0; 1024];
let usb_bus = UsbBus::new(usb, unsafe { &mut EP_MEMORY });
let mut serial = usbd_serial::SerialPort::new(&usb_bus);
let mut usb_dev = UsbDeviceBuilder::new(&usb_bus, UsbVidPid(0x16c0, 0x27dd))
.manufacturer("Fake company")
.product("Serial port")
.serial_number("TEST")
.device_class(usbd_serial::USB_CLASS_CDC)
.max_packet_size_0(64) // Size required for HS, and ok for FS
.build();
loop {
if !usb_dev.poll(&mut [&mut serial]) {
continue;
}
let mut buf = [0u8; 512];
match serial.read(&mut buf) {
Ok(count) if count > 0 => {
// Echo back in upper case
for c in buf[0..count].iter_mut() {
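                    // Clearing bit 0x20 maps ASCII 'a'..='z' onto 'A'..='Z'; other bytes are left unchanged.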
if 0x61 <= *c && *c <= 0x7a {
*c &= !0x20;
}
}
let mut write_offset = 0;
while write_offset < count {
match serial.write(&buf[write_offset..count]) {
Ok(len) if len > 0 => {
write_offset += len;
}
_ => {}
}
}
}
_ => {}
}
}
}
| 30.068376 | 84 | 0.554576 |
5bbbd1828ff3409113b39ef09c16d2ce7b4fcbb3 | 424 | const FAKE_GENESIS_HASH: &str = "19c9852ca0a68f15d0f7de5d1a26acd67a3a3251640c6066bdb91d22e2000193";
#[test]
pub fn test_add_account_for_utxo_delegation_address_fails() {
let sender = startup::create_new_delegation_address();
JCLITransactionWrapper::new_transaction(FAKE_GENESIS_HASH).assert_add_account_fail(
&sender.address,
&100,
"Invalid input account, this is a UTxO address",
);
}
| 32.615385 | 99 | 0.761792 |
79d8cdafa7ad5cf9ef187d41e341ee4b6cdd023e | 5,420 | use amethyst::{
assets::{AssetStorage, Loader, Handle},
core::{
transform::Transform,
},
ecs::prelude::{Component, DenseVecStorage, Entity},
prelude::*,
renderer::{Camera, ImageFormat, SpriteRender, SpriteSheet, SpriteSheetFormat, Texture},
ui::{Anchor, TtfFormat, UiText, UiTransform},
};
use crate::audio::initialize_audio;
pub const PADDLE_WIDTH: f32 = 4.0;
pub const PADDLE_HEIGHT: f32 = 16.0;
pub const PADDLE_SPEED: f32 = 70.0;
#[derive(PartialEq, Eq)]
pub enum Side {
Left,
Right,
}
pub struct Paddle {
pub side: Side,
pub width: f32,
pub height: f32,
}
impl Paddle {
fn new(side: Side) -> Paddle {
Paddle {
side,
width: PADDLE_WIDTH,
height: PADDLE_HEIGHT,
}
}
}
impl Component for Paddle {
type Storage = DenseVecStorage<Self>;
}
fn initialize_paddles(world: &mut World, sprite_sheet: Handle<SpriteSheet>) {
let mut left_transform = Transform::default();
let mut right_transform = Transform::default();
let y = ARENA_HEIGHT * 0.5;
left_transform.set_translation_xyz(PADDLE_WIDTH * 0.5, y, 0.0);
right_transform.set_translation_xyz(ARENA_WIDTH - PADDLE_WIDTH * 0.5, y, 0.0);
let sprite_render = SpriteRender {
sprite_sheet: sprite_sheet.clone(),
sprite_number: 0,
};
world.create_entity()
.with(sprite_render.clone())
.with(Paddle::new(Side::Left))
.with(left_transform)
.build();
world.create_entity()
.with(sprite_render.clone())
.with(Paddle::new(Side::Right))
.with(right_transform)
.build();
}
fn load_sprite_sheet(world: &mut World) -> Handle<SpriteSheet> {
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(
"texture/pong_spritesheet.png",
ImageFormat::default(),
(),
&texture_storage,
)
};
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
"texture/pong_spritesheet.ron",
SpriteSheetFormat(texture_handle),
(),
&sprite_sheet_store,
)
}
pub const ARENA_WIDTH: f32 = 100.0;
pub const ARENA_HEIGHT: f32 = 100.0;
fn initialize_camera(world: &mut World) {
let mut transform = Transform::default();
transform.set_translation_xyz(ARENA_WIDTH * 0.5, ARENA_HEIGHT * 0.5, 1.0);
world.create_entity()
.with(Camera::standard_2d(ARENA_WIDTH, ARENA_HEIGHT))
.with(transform)
.build();
}
pub const BALL_VELOCITY_X: f32 = 75.0;
pub const BALL_VELOCITY_Y: f32 = 50.0;
pub const BALL_RADIUS: f32 = 2.0;
pub const BALL_SLOWDOWN_TIME: f32 = 1.0;
pub const BALL_SLOWDOWN_FACTOR: f32 = 5.0;
pub struct Ball {
pub velocity: [f32; 2],
pub radius: f32,
pub slowdown_timer: Option<f32>,
}
impl Component for Ball {
type Storage = DenseVecStorage<Self>;
}
fn initialize_ball(world: &mut World, sprite_sheet_handle: Handle<SpriteSheet>) {
let mut local_transform = Transform::default();
local_transform.set_translation_xyz(ARENA_WIDTH * 0.5, ARENA_HEIGHT * 0.5, 0.0);
let sprite_render = SpriteRender {
sprite_sheet: sprite_sheet_handle,
sprite_number: 1,
};
world.create_entity()
.with(sprite_render)
.with(Ball {
radius: BALL_RADIUS,
velocity: [BALL_VELOCITY_X, BALL_VELOCITY_Y],
slowdown_timer: Some(BALL_SLOWDOWN_TIME),
})
.with(local_transform)
.build();
}
#[derive(Default)]
pub struct ScoreBoard {
pub score_left: i32,
pub score_right: i32,
}
pub struct ScoreText {
pub p1_score: Entity,
pub p2_score: Entity,
}
fn initialize_scoreboard(world: &mut World) {
let font = world.read_resource::<Loader>().load(
"font/square.ttf",
TtfFormat,
(),
&world.read_resource(),
);
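    // UiTransform::new arguments below: (id, anchor, pivot, x, y, z, width, height).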
let p1_transform = UiTransform::new(
"P1".to_string(), Anchor::TopMiddle, Anchor::TopMiddle,
-50., -50., 1., 200., 50.,
);
let p2_transform = UiTransform::new(
"P2".to_string(), Anchor::TopMiddle, Anchor::TopMiddle,
50., -50., 1., 200., 50.,
);
let p1_score = world
.create_entity()
.with(p1_transform)
.with(UiText::new(
font.clone(),
"0".to_string(),
[1., 1., 1., 1.],
50.,
)).build();
let p2_score = world
.create_entity()
.with(p2_transform)
.with(UiText::new(
font.clone(),
"0".to_string(),
[1., 1., 1., 1.],
50.,
)).build();
world.insert(ScoreText { p1_score, p2_score });
}
#[derive(Default)]
pub struct Pong {
ball_respawn_timer: Option<f32>,
}
impl SimpleState for Pong {
fn on_start(&mut self, data: StateData<'_, GameData<'_, '_>>) {
let world = data.world;
self.ball_respawn_timer.replace(1.0);
let sprite_sheet_handle = load_sprite_sheet(world);
initialize_paddles(world, sprite_sheet_handle.clone());
initialize_ball(world, sprite_sheet_handle);
initialize_camera(world);
initialize_scoreboard(world);
initialize_audio(world);
}
} | 25.933014 | 91 | 0.612546 |
141820cb99151ca2a80c27dffcf569978f6c193f | 4,326 | use super::*;
use memo::Memo;
use std::str::FromStr;
#[derive(Debug, StructOpt)]
/// Burn HNT to Data Credits (DC) from this wallet to the given payee's wallet.
pub struct Cmd {
/// Account address to send the resulting DC to.
#[structopt(long)]
payee: PublicKey,
/// Memo field to include. Provide as a base64 encoded string
#[structopt(long, default_value)]
memo: Memo,
/// Amount of HNT to burn to DC
#[structopt(long)]
amount: Hnt,
/// Manually set the nonce to use for the transaction
#[structopt(long)]
nonce: Option<u64>,
/// Manually set the DC fee to pay for the transaction
#[structopt(long)]
fee: Option<u64>,
}
impl Cmd {
pub(crate) async fn run(
self,
opts: Opts,
version: Version,
) -> Result<Option<(String, Network)>> {
        if (version.major < 2 || (version.major == 2 && version.minor < 2)) && opts.account != 0 {
panic!("Upgrade the Helium Ledger App to use additional wallet accounts");
};
match ledger(opts, self).await? {
Response::Txn(_txn, hash, network) => Ok(Some((hash, network))),
Response::InsufficientBalance(balance, send_request) => {
println!(
"Account balance insufficient. {} HNT on account but attempting to burn {}",
balance, send_request,
);
Err(Error::txn())
}
Response::UserDeniedTransaction => {
println!("Transaction not confirmed");
Err(Error::txn())
}
}
}
}
async fn ledger(opts: Opts, cmd: Cmd) -> Result<Response<BlockchainTxnTokenBurnV1>> {
let ledger_transport = get_ledger_transport(&opts).await?;
let amount = cmd.amount;
let payee = cmd.payee;
// get nonce
let pubkey = get_pubkey(opts.account, &ledger_transport, PubkeyDisplay::Off).await?;
let client = new_client(pubkey.network);
let account = accounts::get(&client, &pubkey.to_string()).await?;
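    // Use the caller-supplied nonce if given; otherwise take the next nonce after the
    // account's speculative nonce, which should already reflect pending transactions.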
let nonce: u64 = if let Some(nonce) = cmd.nonce {
nonce
} else {
account.speculative_nonce + 1
};
if account.balance.get_decimal() < amount.get_decimal() {
return Ok(Response::InsufficientBalance(account.balance, amount));
}
    // parse the payer public key from the account address
let payer = PublicKey::from_str(&account.address)?;
let mut txn = BlockchainTxnTokenBurnV1 {
payee: payee.to_vec(),
payer: payer.to_vec(),
amount: u64::from(amount),
memo: u64::from(&cmd.memo),
nonce,
fee: 0,
signature: vec![],
};
txn.fee = if let Some(fee) = cmd.fee {
fee
} else {
txn.txn_fee(
&get_txn_fees(&client)
.await
.map_err(|_| Error::getting_fees())?,
)
.map_err(|_| Error::getting_fees())?
};
print_proposed_txn(&txn)?;
let adpu_cmd = txn.apdu_serialize(opts.account)?;
let exchange_pay_tx_result = read_from_ledger(&ledger_transport, adpu_cmd).await?;
if exchange_pay_tx_result.data.len() == 1 {
return Ok(Response::UserDeniedTransaction);
}
let data = exchange_pay_tx_result.data;
println!("{:?}", data);
let txn = BlockchainTxnTokenBurnV1::decode(data.as_slice())?;
let envelope = txn.in_envelope();
    // submit the signed transaction to the API
let pending_txn_status = submit_txn(&client, &envelope).await?;
Ok(Response::Txn(txn, pending_txn_status.hash, payer.network))
}
pub fn print_proposed_txn(txn: &BlockchainTxnTokenBurnV1) -> Result {
let payee = PublicKey::try_from(txn.payee.clone())?;
let units = match payee.network {
Network::TestNet => "TNT",
Network::MainNet => "HNT",
};
let mut table = Table::new();
println!("Creating the following transaction:");
table.add_row(row![
"Payee",
&format!("Burn Amount {}", units),
"Memo",
"Nonce",
"DC Fee"
]);
table.add_row(row![
payee,
Hnt::from(txn.amount),
Memo::from(txn.memo),
txn.nonce,
txn.fee
]);
table.printstd();
println!(
"WARNING: do not use this output as the source of truth. Instead, rely \
on the Ledger Display"
);
Ok(())
}
| 29.22973 | 96 | 0.586685 |
896767b4e872fc6e783bcd2156619d85d7602e36 | 9,655 | use crate::internal::*;
#[derive(Debug, Clone, new, Default)]
pub struct LayerHardmax {
axis: isize,
}
impl LayerHardmax {
fn eval_t<D: Datum + ::num_traits::Float + ::num_traits::FromPrimitive>(
&self,
input: Arc<Tensor>,
) -> TractResult<TVec<Arc<Tensor>>> {
let array = input.into_tensor().into_array::<D>()?;
let shape = array.shape().to_vec();
let axis =
if self.axis < 0 { shape.len() as isize + self.axis } else { self.axis } as usize;
let first_dim: usize = array.shape()[0..axis].iter().product();
let second_dim: usize = array.len() / first_dim;
let mut array = array.into_shape((first_dim, second_dim))?;
array.outer_iter_mut().for_each(|mut layer| {
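            // Find the index of the row maximum; ties are broken towards the lowest index.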
let max = layer
.iter()
.enumerate()
.rev()
.max_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(b.0.cmp(&a.0)))
.map(|(ix, _)| ix)
.unwrap_or(0);
layer
.iter_mut()
.enumerate()
.for_each(|(ix, r)| *r = D::from_usize((ix == max) as usize).unwrap());
});
Ok(tvec!(array.into_shape(shape)?.into_arc_tensor()))
}
}
impl Op for LayerHardmax {
fn name(&self) -> Cow<str> {
"LayerHardmax".into()
}
fn info(&self) -> TractResult<Vec<String>> {
Ok(vec![format!("axis: {}", self.axis)])
}
canonic!();
op_as_typed_op!();
op_as_pulsed_op!();
}
impl StatelessOp for LayerHardmax {
fn eval(&self, mut inputs: TVec<Arc<Tensor>>) -> TractResult<TVec<Arc<Tensor>>> {
let input = args_1!(inputs);
dispatch_floatlike!(Self::eval_t(input.datum_type())(self, input))
}
}
impl InferenceRulesOp for LayerHardmax {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p [TensorProxy],
outputs: &'p [TensorProxy],
) -> InferenceResult {
rules(solver, inputs, outputs)
}
inference_op_as_op!();
to_typed!();
}
impl TypedOp for LayerHardmax {
fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
Ok(tvec!(inputs[0].clone()))
}
fn pulsify(
&self,
_source: &NormalizedModel,
node: &NormalizedNode,
target: &mut PulsedModel,
mapping: &HashMap<OutletId, OutletId>,
_pulse: usize,
) -> TractResult<TVec<OutletId>> {
pulsify(self, self.axis, node, target, mapping)
}
typed_op_as_op!();
}
impl PulsedOp for LayerHardmax {
fn pulsed_output_facts(&self, inputs: &[&PulsedFact]) -> TractResult<TVec<PulsedFact>> {
Ok(tvec!(inputs[0].clone()))
}
pulsed_op_as_op!();
pulsed_op_to_typed_op!();
}
#[derive(Debug, Clone, new, Default)]
pub struct LayerLogSoftmax {
axis: isize,
}
impl LayerLogSoftmax {
fn eval_t<D: Datum + ::num_traits::Float + ::num_traits::FromPrimitive + ::std::iter::Sum>(
&self,
input: Arc<Tensor>,
) -> TractResult<TVec<Arc<Tensor>>> {
let array = input.into_tensor().into_array::<D>()?;
let shape = array.shape().to_vec();
let axis =
if self.axis < 0 { shape.len() as isize + self.axis } else { self.axis } as usize;
let first_dim: usize = array.shape()[0..axis].iter().product();
let second_dim: usize = array.len() / first_dim;
let mut array = array.into_shape((first_dim, second_dim))?;
array.outer_iter_mut().for_each(|mut layer| {
// https://jamesmccaffrey.wordpress.com/2016/03/04/the-max-trick-when-computing-softmax/
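            // Subtracting the per-row maximum before exponentiation prevents exp() overflow
            // and leaves the final (log-)softmax values unchanged.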
let max: Option<D> = layer
.iter()
.max_by(|a, b| a.partial_cmp(&b).unwrap_or(::std::cmp::Ordering::Equal))
.cloned();
layer.mapv_inplace(|x| (x - max.unwrap()).exp());
let divisor = layer.iter().cloned().sum();
layer.mapv_inplace(|x| (x / divisor).ln());
});
Ok(tvec!(array.into_shape(shape)?.into_arc_tensor()))
}
}
impl Op for LayerLogSoftmax {
fn name(&self) -> Cow<str> {
"LayerLogSoftmax".into()
}
fn info(&self) -> TractResult<Vec<String>> {
Ok(vec![format!("axis: {}", self.axis)])
}
fn validation(&self) -> Validation {
Validation::Rounding
}
canonic!();
op_as_typed_op!();
op_as_pulsed_op!();
}
impl StatelessOp for LayerLogSoftmax {
fn eval(&self, mut inputs: TVec<Arc<Tensor>>) -> TractResult<TVec<Arc<Tensor>>> {
let input = args_1!(inputs);
dispatch_floatlike!(Self::eval_t(input.datum_type())(self, input))
}
}
impl InferenceRulesOp for LayerLogSoftmax {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p [TensorProxy],
outputs: &'p [TensorProxy],
) -> InferenceResult {
rules(solver, inputs, outputs)
}
inference_op_as_op!();
to_typed!();
}
impl TypedOp for LayerLogSoftmax {
fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
Ok(tvec!(inputs[0].clone()))
}
fn pulsify(
&self,
_source: &NormalizedModel,
node: &NormalizedNode,
target: &mut PulsedModel,
mapping: &HashMap<OutletId, OutletId>,
_pulse: usize,
) -> TractResult<TVec<OutletId>> {
pulsify(self, self.axis, node, target, mapping)
}
typed_op_as_op!();
}
impl PulsedOp for LayerLogSoftmax {
fn pulsed_output_facts(&self, inputs: &[&PulsedFact]) -> TractResult<TVec<PulsedFact>> {
Ok(tvec!(inputs[0].clone()))
}
pulsed_op_as_op!();
pulsed_op_to_typed_op!();
}
#[derive(Debug, Clone, new, Default)]
pub struct LayerSoftmax {
axis: isize,
}
impl LayerSoftmax {
fn eval_t<D: Datum + ::num_traits::Float + ::num_traits::FromPrimitive + ::std::iter::Sum>(
&self,
input: Arc<Tensor>,
) -> TractResult<TVec<Arc<Tensor>>> {
let array = input.into_tensor().into_array::<D>()?;
let shape = array.shape().to_vec();
let axis =
if self.axis < 0 { shape.len() as isize + self.axis } else { self.axis } as usize;
let first_dim: usize = array.shape()[0..axis].iter().product();
let second_dim: usize = array.len() / first_dim;
let mut array = array.into_shape((first_dim, second_dim))?;
array.outer_iter_mut().for_each(|mut layer| {
// https://jamesmccaffrey.wordpress.com/2016/03/04/the-max-trick-when-computing-softmax/
let max: Option<D> = layer
.iter()
.max_by(|a, b| a.partial_cmp(&b).unwrap_or(::std::cmp::Ordering::Equal))
.cloned();
layer.mapv_inplace(|x| (x - max.unwrap()).exp());
let divisor = layer.iter().cloned().sum();
layer.mapv_inplace(|x| x / divisor);
});
Ok(tvec!(array.into_shape(shape)?.into_arc_tensor()))
}
}
impl Op for LayerSoftmax {
fn name(&self) -> Cow<str> {
"LayerSoftmax".into()
}
fn info(&self) -> TractResult<Vec<String>> {
Ok(vec![format!("axis: {}", self.axis)])
}
fn validation(&self) -> Validation {
Validation::Rounding
}
canonic!();
op_as_typed_op!();
op_as_pulsed_op!();
}
impl StatelessOp for LayerSoftmax {
fn eval(&self, mut inputs: TVec<Arc<Tensor>>) -> TractResult<TVec<Arc<Tensor>>> {
let input = args_1!(inputs);
dispatch_floatlike!(Self::eval_t(input.datum_type())(self, input))
}
}
impl InferenceRulesOp for LayerSoftmax {
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
solver: &mut Solver<'r>,
inputs: &'p [TensorProxy],
outputs: &'p [TensorProxy],
) -> InferenceResult {
rules(solver, inputs, outputs)
}
inference_op_as_op!();
to_typed!();
}
impl TypedOp for LayerSoftmax {
fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
Ok(tvec!(inputs[0].clone()))
}
fn pulsify(
&self,
_source: &NormalizedModel,
node: &NormalizedNode,
target: &mut PulsedModel,
mapping: &HashMap<OutletId, OutletId>,
_pulse: usize,
) -> TractResult<TVec<OutletId>> {
pulsify(self, self.axis, node, target, mapping)
}
typed_op_as_op!();
}
impl PulsedOp for LayerSoftmax {
fn pulsed_output_facts(&self, inputs: &[&PulsedFact]) -> TractResult<TVec<PulsedFact>> {
Ok(tvec!(inputs[0].clone()))
}
pulsed_op_as_op!();
pulsed_op_to_typed_op!();
}
fn rules<'r, 'p: 'r, 's: 'r>(
s: &mut Solver<'r>,
inputs: &'p [TensorProxy],
outputs: &'p [TensorProxy],
) -> InferenceResult {
check_output_arity(&outputs, 1)?;
s.equals(&outputs[0].datum_type, &inputs[0].datum_type)?;
s.equals(&outputs[0].rank, &inputs[0].rank)?;
s.equals(&outputs[0].shape, &inputs[0].shape)?;
Ok(())
}
fn pulsify(
op: &dyn PulsedOp,
axis: isize,
node: &NormalizedNode,
target: &mut PulsedModel,
mapping: &HashMap<OutletId, OutletId>,
) -> TractResult<TVec<OutletId>> {
let input_fact = target.outlet_fact(mapping[&node.inputs[0]])?.clone();
let axis = if axis < 0 { input_fact.shape.len() as isize + axis } else { axis } as usize;
if input_fact.axis != axis {
let id = target.add_node(&*node.name, objekt::clone_box(op), tvec!(input_fact))?;
target.add_edge(mapping[&node.inputs[0]], InletId::new(id, 0))?;
return Ok(tvec!(OutletId::new(id, 0)));
} else {
bail!("No pulsification on max axis");
}
}
| 30.457413 | 100 | 0.574728 |
dd048504d7390e8714316deddb1f39dd15aebe23 | 3,200 | macro_rules! create_options {
(
$(#[$attr:meta])*
pub struct Options {
$(
$(#[$field_attr:meta])*
pub $name:ident: $ty:ty,
)+
}
) => {
#[cfg_attr(feature = "schema", derive(JsonSchema))]
$(#[$attr])*
pub struct Options {
$(
$(#[$field_attr])*
pub $name: $ty,
)+
}
impl Options {
#[doc(hidden)]
pub fn update(&mut self, incomplete: OptionsIncomplete) {
$(
if let Some(v) = incomplete.$name {
self.$name = v;
}
)+
}
#[doc(hidden)]
pub fn update_camel(&mut self, incomplete: OptionsIncompleteCamel) {
$(
if let Some(v) = incomplete.$name {
self.$name = v;
}
)+
}
pub fn update_from_str<S: AsRef<str>, I: Iterator<Item = (S, S)>>(
&mut self,
values: I,
) -> Result<(), OptionParseError> {
for (key, val) in values {
$(
if key.as_ref() == stringify!($name) {
self.$name =
val.as_ref()
.parse()
.map_err(|error| OptionParseError::InvalidValue {
key: key.as_ref().into(),
error: Box::new(error),
})?;
continue;
}
)+
return Err(OptionParseError::InvalidOption(key.as_ref().into()));
}
Ok(())
}
}
#[cfg_attr(feature = "schema", derive(JsonSchema))]
$(#[$attr])*
#[doc(hidden)]
#[derive(Default)]
pub struct OptionsIncomplete {
$(
$(#[$field_attr])*
pub $name: Option<$ty>,
)+
}
impl OptionsIncomplete {
#[doc(hidden)]
pub fn from_options(opts: Options) -> Self {
let mut o = Self::default();
$(
o.$name = Some(opts.$name);
)+
o
}
}
#[cfg_attr(feature = "schema", derive(JsonSchema))]
$(#[$attr])*
#[doc(hidden)]
#[derive(Default)]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub struct OptionsIncompleteCamel {
$(
$(#[$field_attr])*
pub $name: Option<$ty>,
)+
}
impl OptionsIncompleteCamel {
#[doc(hidden)]
pub fn from_options(opts: Options) -> Self {
let mut o = Self::default();
$(
o.$name = Some(opts.$name);
)+
o
}
}
};
}
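// Hypothetical invocation sketch (not part of this file): the macro expects optional
// outer attributes followed by a `pub struct Options` whose field types implement
// `FromStr` (needed by `update_from_str`), e.g.
//
//     create_options!(
//         #[derive(Debug, Clone, PartialEq)]
//         #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
//         pub struct Options {
//             /// Align entries vertically.
//             pub align_entries: bool,
//             /// Target maximum line width.
//             pub column_width: usize,
//         }
//     );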
| 27.586207 | 85 | 0.345313 |
8a82f876fbb6c5113693f1db29e2c89771e66d9f | 6,611 | //! This module contains the Streamlet structure.
//!
//! A streamlet is a component where every [Interface] has a [LogicalType].
use crate::logical::LogicalType;
use crate::traits::Identify;
use crate::util::UniquelyNamedBuilder;
use crate::{Document, Error, Name, Result};
use std::convert::TryInto;
use std::str::FromStr;
/// Streamlet interface mode.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Mode {
/// The interface is an output of the streamlet.
Out,
/// The interface is an input of the streamlet.
In,
}
impl FromStr for Mode {
type Err = Error;
fn from_str(input: &str) -> Result<Self> {
match input {
"in" => Ok(Mode::In),
"out" => Ok(Mode::Out),
_ => Err(Error::InvalidArgument(format!(
"{} is not a valid interface Mode. Expected \"in\" or \"out\"",
input
))),
}
}
}
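// A small added sanity check (sketch): `Mode` parses only from "in" and "out".
#[cfg(test)]
mod mode_parse_tests {
    use super::*;

    #[test]
    fn parses_in_and_out() {
        assert_eq!("in".parse::<Mode>().unwrap(), Mode::In);
        assert_eq!("out".parse::<Mode>().unwrap(), Mode::Out);
        assert!("sideways".parse::<Mode>().is_err());
    }
}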
/// A Streamlet interface.
///
/// The names "clk" and "rst" are reserved.
#[derive(Clone, Debug, PartialEq)]
pub struct Interface {
/// The name of the interface.
name: Name,
/// The mode of the interface.
mode: Mode,
/// The type of the interface.
typ: LogicalType,
/// The documentation string of the interface, if any.
doc: Option<String>,
}
impl Interface {
/// Return the [Mode] of the interface.
pub fn mode(&self) -> Mode {
self.mode
}
    /// Return the [LogicalType] of the interface.
pub fn typ(&self) -> LogicalType {
self.typ.clone()
}
}
impl Identify for Interface {
fn identifier(&self) -> &str {
self.name.as_ref()
}
}
impl Interface {
/// Try to construct a new interface.
///
/// # Example:
/// ```
/// use tydi::logical::LogicalType;
/// use tydi::design::{Interface, Mode};
///
/// // Define a type.
/// let a_type = LogicalType::try_new_bits(3);
/// assert!(a_type.is_ok());
///
/// // Attempt to construct an interface.
/// let dolphins = Interface::try_new("dolphins",
/// Mode::In,
/// a_type.unwrap(),
/// Some("Look at them swim!"));
/// assert!(dolphins.is_ok());
///
/// // The names "clk" and "rst" are reserved!
/// let clk_type = LogicalType::try_new_bits(1);
/// assert!(clk_type.is_ok());
/// assert!(Interface::try_new("clk", Mode::In, clk_type.unwrap(), None).is_err());
/// ```
pub fn try_new(
name: impl TryInto<Name, Error = impl Into<Box<dyn std::error::Error>>>,
mode: Mode,
typ: impl TryInto<LogicalType, Error = impl Into<Box<dyn std::error::Error>>>,
doc: Option<&str>,
) -> Result<Self> {
let n: Name = name
.try_into()
.map_err(|e| Error::InterfaceError(e.into().to_string()))?;
let t: LogicalType = typ
.try_into()
.map_err(|e| Error::InterfaceError(e.into().to_string()))?;
match n.to_string().as_str() {
"clk" | "rst" => Err(Error::InterfaceError(format!("Name {} forbidden.", n))),
_ => Ok(Interface {
name: n,
mode,
typ: t,
doc: doc.map(|d| d.to_string()),
}),
}
}
pub fn with_doc(mut self, doc: impl Into<String>) -> Self {
self.doc = Some(doc.into());
self
}
}
impl Document for Interface {
fn doc(&self) -> Option<String> {
self.doc.clone()
}
}
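// A minimal sketch of attaching a documentation string to an interface with
// `with_doc` and reading it back through the `Document` trait; the interface
// name and bit width are illustrative only.
#[cfg(test)]
mod interface_doc_sketch {
    use super::*;
    #[test]
    fn doc_round_trip() {
        let typ = LogicalType::try_new_bits(8).unwrap();
        let iface = Interface::try_new("data", Mode::Out, typ, None)
            .unwrap()
            .with_doc("eight-bit output");
        assert_eq!(iface.doc(), Some("eight-bit output".to_string()));
    }
}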
/// Streamlet interface definition.
#[derive(Clone, Debug, PartialEq)]
pub struct Streamlet {
/// The name of the streamlet.
name: Name,
/// The interfaces of the streamlet.
interfaces: Vec<Interface>,
/// An optional documentation string for the streamlet to be used by back-ends.
doc: Option<String>,
/// Placeholder for future implementation of the streamlet. If this is None, it is a primitive.
implementation: Option<()>,
}
impl Streamlet {
/// Return an iterator over the interfaces of this Streamlet.
pub fn interfaces(&self) -> impl Iterator<Item = &Interface> {
self.interfaces.iter()
}
/// Construct a new streamlet from an interface builder that makes sure all interface names
/// are unique.
///
/// # Example
/// ```
/// use tydi::{Name, UniquelyNamedBuilder};
/// use tydi::logical::LogicalType;
/// use tydi::design::{Mode, Interface, Streamlet};
///
/// let dough_type = LogicalType::try_new_bits(3);
/// assert!(dough_type.is_ok());
/// let dough = Interface::try_new("dough", Mode::In, dough_type.unwrap(), None);
/// assert!(dough.is_ok());
/// let cookies_type = LogicalType::try_new_bits(1);
/// assert!(cookies_type.is_ok());
/// let cookies = Interface::try_new("cookies", Mode::In, cookies_type.unwrap(), None);
/// assert!(cookies.is_ok());
///
/// let my_streamlet = Streamlet::from_builder(
/// Name::try_new("baker").unwrap(),
/// UniquelyNamedBuilder::new().with_items(vec![dough.unwrap(), cookies.unwrap()]),
/// Some("I bake cookies")
/// );
/// assert!(my_streamlet.is_ok());
/// ```
pub fn from_builder(
name: Name,
builder: UniquelyNamedBuilder<Interface>,
doc: Option<&str>,
) -> Result<Self> {
Ok(Streamlet {
name,
interfaces: builder.finish()?,
doc: doc.map(|d| d.to_string()),
implementation: None,
})
}
/// Return this streamlet with documentation added.
pub fn with_doc(mut self, doc: impl Into<String>) -> Self {
self.doc = Some(doc.into());
self
}
}
impl Document for Streamlet {
fn doc(&self) -> Option<String> {
self.doc.clone()
}
}
impl Identify for Streamlet {
fn identifier(&self) -> &str {
self.name.as_ref()
}
}
#[cfg(test)]
pub mod tests {
use super::*;
/// Streamlets that can be used throughout tests.
pub mod streamlets {
use super::*;
pub(crate) fn nulls_streamlet(name: impl Into<String>) -> Streamlet {
Streamlet::from_builder(
Name::try_new(name).unwrap(),
UniquelyNamedBuilder::new().with_items(vec![
Interface::try_new("a", Mode::In, LogicalType::Null, None).unwrap(),
Interface::try_new("b", Mode::Out, LogicalType::Null, None).unwrap(),
]),
None,
)
.unwrap()
}
}
}
| 29.382222 | 99 | 0.553623 |
9b42307acf5be91c824c96afa27c13573c77f6f8 | 4,377 | //! This shows the implementation of a singly-linked queue with dequeue and enqueue. There are two
//! peek implementations, one returns an immutable reference, the other returns a mutable one. This
//! implementation also shows iteration over the Queue by value (consumes queue), immutable
//! reference, and mutable reference.
use std::ptr;
pub struct Queue<T> {
head: Link<T>,
    /// Raw, C-like pointer to the last item in the queue. The compiler cannot
    /// verify it, so it must be kept in sync with `head` manually.
tail: *mut Item<T>,
}
type Link<T> = Option<Box<Item<T>>>;
struct Item<T> {
elem: T,
next: Link<T>,
}
pub struct IntoIter<T>(Queue<T>);
pub struct Iter<'a, T: 'a> {
next: Option<&'a Item<T>>,
}
pub struct IterMut<'a, T: 'a> {
next: Option<&'a mut Item<T>>,
}
impl<T> Queue<T> {
pub fn new() -> Self {
Queue {
head: None,
tail: ptr::null_mut(),
}
}
    pub fn enqueue(&mut self, elem: T) {
        let mut new_tail = Box::new(Item { elem, next: None });
        // Keep a raw pointer to the new item so `tail` can point at it after the
        // box has been moved into the list.
        let raw_tail: *mut _ = &mut *new_tail;
        if !self.tail.is_null() {
            // Non-empty queue: link the current last item to the new one.
            unsafe {
                (*self.tail).next = Some(new_tail);
            }
        } else {
            // Empty queue: the new item is also the head.
            self.head = Some(new_tail);
        }
        self.tail = raw_tail;
    }
    pub fn dequeue(&mut self) -> Option<T> {
        self.head.take().map(|head| {
            let head = *head;
            self.head = head.next;
            // If the queue is now empty, the raw `tail` pointer would dangle;
            // reset it to null so the next `enqueue` starts a fresh list.
            if self.head.is_none() {
                self.tail = ptr::null_mut();
            }
            head.elem
        })
    }
pub fn peek(&self) -> Option<&T> {
self.head.as_ref().map(|item| &item.elem)
}
pub fn peek_mut(&mut self) -> Option<&mut T> {
self.head.as_mut().map(|item| &mut item.elem)
}
pub fn iter(&self) -> Iter<T> {
Iter {
next: self.head.as_deref(),
}
}
pub fn iter_mut(&mut self) -> IterMut<T> {
IterMut {
next: self.head.as_deref_mut(),
}
}
}
impl<T> Default for Queue<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> Drop for Queue<T> {
fn drop(&mut self) {
let mut cur_link = self.head.take();
while let Some(mut boxed_item) = cur_link {
cur_link = boxed_item.next.take();
}
}
}
impl<T> IntoIterator for Queue<T> {
type Item = T;
type IntoIter = IntoIter<T>;
fn into_iter(self) -> Self::IntoIter {
IntoIter(self)
}
}
impl<T> Iterator for IntoIter<T> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
self.0.dequeue()
}
}
impl<'a, T> Iterator for Iter<'a, T> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
self.next.map(|item| {
self.next = item.next.as_deref();
&item.elem
})
}
}
impl<'a, T> Iterator for IterMut<'a, T> {
type Item = &'a mut T;
fn next(&mut self) -> Option<Self::Item> {
self.next.take().map(|item| {
self.next = item.next.as_deref_mut();
&mut item.elem
})
}
}
fn main() {
// The standard library has a double-ended queue implementation
// (VecDeque<T>) which will work here.
use std::collections::VecDeque;
let mut deque = VecDeque::new();
deque.push_back("Element1");
deque.push_back("Element2");
deque.push_back("Element3");
assert_eq!(Some(&"Element1"), deque.front());
assert_eq!(Some("Element1"), deque.pop_front());
assert_eq!(Some("Element2"), deque.pop_front());
assert_eq!(Some("Element3"), deque.pop_front());
assert_eq!(None, deque.pop_front());
let mut queue = Queue::new();
queue.enqueue("Element1");
queue.enqueue("Element2");
queue.enqueue("Element3");
assert_eq!(Some(&"Element1"), queue.peek());
assert_eq!(Some("Element1"), queue.dequeue());
assert_eq!(Some("Element2"), queue.dequeue());
assert_eq!(Some("Element3"), queue.dequeue());
assert_eq!(None, queue.dequeue());
}
#[test]
fn test_queue() {
let mut queue = Queue::new();
queue.enqueue("Element1");
queue.enqueue("Element2");
queue.enqueue("Element3");
assert_eq!(Some(&"Element1"), queue.peek());
assert_eq!(Some("Element1"), queue.dequeue());
assert_eq!(Some("Element2"), queue.dequeue());
assert_eq!(Some("Element3"), queue.dequeue());
assert_eq!(None, queue.dequeue());
}
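// A minimal sketch exercising the three iteration forms described in the module
// docs: by immutable reference, by mutable reference, and by value.
#[test]
fn test_queue_iteration() {
    let mut queue = Queue::new();
    queue.enqueue(1);
    queue.enqueue(2);
    queue.enqueue(3);
    // Immutable iteration leaves the queue intact.
    assert_eq!(queue.iter().copied().collect::<Vec<_>>(), vec![1, 2, 3]);
    // Mutable iteration can update elements in place.
    for elem in queue.iter_mut() {
        *elem += 10;
    }
    assert_eq!(queue.peek(), Some(&11));
    // Iteration by value consumes the queue.
    assert_eq!(queue.into_iter().collect::<Vec<_>>(), vec![11, 12, 13]);
}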
| 23.659459 | 99 | 0.555632 |
48dbaf0aa1348b3d7936ed8e8d72d3a39c68f7ef | 1,335 | // structs2.rs
// Address all the TODOs to make the tests pass!
#[derive(Debug)]
struct Order {
name: String,
year: u32,
made_by_phone: bool,
made_by_mobile: bool,
made_by_email: bool,
item_number: u32,
count: u32,
}
fn create_order_template() -> Order {
Order {
name: String::from("Bob"),
year: 2019,
made_by_phone: false,
made_by_mobile: false,
made_by_email: true,
item_number: 123,
count: 0,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn your_order() {
let order_template = create_order_template();
// TODO: Create your own order using the update syntax and template above!
        let your_order = Order {
            name: String::from("Hacker in Rust"),
            count: 1,
            ..order_template
        };
assert_eq!(your_order.name, "Hacker in Rust");
assert_eq!(your_order.year, order_template.year);
assert_eq!(your_order.made_by_phone, order_template.made_by_phone);
assert_eq!(your_order.made_by_mobile, order_template.made_by_mobile);
assert_eq!(your_order.made_by_email, order_template.made_by_email);
assert_eq!(your_order.item_number, order_template.item_number);
assert_eq!(your_order.count, 1);
}
}
| 28.404255 | 82 | 0.64794 |
e917a65c9c5ad51c8c7e6576245a0f7c65ae28c0 | 98,563 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A double-ended queue implemented with a growable ring buffer.
//!
//! This queue has `O(1)` amortized inserts and removals from both ends of the
//! container. It also has `O(1)` indexing like a vector. The contained elements
//! are not required to be copyable, and the queue will be sendable if the
//! contained type is sendable.
#![stable(feature = "rust1", since = "1.0.0")]
use core::cmp::Ordering;
use core::fmt;
use core::iter::{repeat, FromIterator, FusedIterator};
use core::mem;
use core::ops::Bound::{Excluded, Included, Unbounded};
use core::ops::{Index, IndexMut, RangeBounds};
use core::ptr;
use core::ptr::NonNull;
use core::slice;
use core::hash::{Hash, Hasher};
use core::cmp;
use alloc::CollectionAllocErr;
use raw_vec::RawVec;
use vec::Vec;
const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
const MINIMUM_CAPACITY: usize = 1; // 2 - 1
#[cfg(target_pointer_width = "32")]
const MAXIMUM_ZST_CAPACITY: usize = 1 << (32 - 1); // Largest possible power of two
#[cfg(target_pointer_width = "64")]
const MAXIMUM_ZST_CAPACITY: usize = 1 << (64 - 1); // Largest possible power of two
/// A double-ended queue implemented with a growable ring buffer.
///
/// The "default" usage of this type as a queue is to use [`push_back`] to add to
/// the queue, and [`pop_front`] to remove from the queue. [`extend`] and [`append`]
/// push onto the back in this manner, and iterating over `VecDeque` goes front
/// to back.
///
/// [`push_back`]: #method.push_back
/// [`pop_front`]: #method.pop_front
/// [`extend`]: #method.extend
/// [`append`]: #method.append
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VecDeque<T> {
// tail and head are pointers into the buffer. Tail always points
// to the first element that could be read, Head always points
// to where data should be written.
// If tail == head the buffer is empty. The length of the ringbuffer
// is defined as the distance between the two.
tail: usize,
head: usize,
buf: RawVec<T>,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for VecDeque<T> {
fn clone(&self) -> VecDeque<T> {
self.iter().cloned().collect()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T> Drop for VecDeque<T> {
fn drop(&mut self) {
let (front, back) = self.as_mut_slices();
unsafe {
// use drop for [T]
ptr::drop_in_place(front);
ptr::drop_in_place(back);
}
// RawVec handles deallocation
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for VecDeque<T> {
/// Creates an empty `VecDeque<T>`.
#[inline]
fn default() -> VecDeque<T> {
VecDeque::new()
}
}
impl<T> VecDeque<T> {
/// Marginally more convenient
#[inline]
fn ptr(&self) -> *mut T {
self.buf.ptr()
}
/// Marginally more convenient
#[inline]
fn cap(&self) -> usize {
if mem::size_of::<T>() == 0 {
// For zero sized types, we are always at maximum capacity
MAXIMUM_ZST_CAPACITY
} else {
self.buf.cap()
}
}
/// Turn ptr into a slice
#[inline]
unsafe fn buffer_as_slice(&self) -> &[T] {
slice::from_raw_parts(self.ptr(), self.cap())
}
/// Turn ptr into a mut slice
#[inline]
unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] {
slice::from_raw_parts_mut(self.ptr(), self.cap())
}
/// Moves an element out of the buffer
#[inline]
unsafe fn buffer_read(&mut self, off: usize) -> T {
ptr::read(self.ptr().offset(off as isize))
}
/// Writes an element into the buffer, moving it.
#[inline]
unsafe fn buffer_write(&mut self, off: usize, value: T) {
ptr::write(self.ptr().offset(off as isize), value);
}
/// Returns `true` if and only if the buffer is at full capacity.
#[inline]
fn is_full(&self) -> bool {
self.cap() - self.len() == 1
}
/// Returns the index in the underlying buffer for a given logical element
/// index.
#[inline]
fn wrap_index(&self, idx: usize) -> usize {
wrap_index(idx, self.cap())
}
/// Returns the index in the underlying buffer for a given logical element
/// index + addend.
#[inline]
fn wrap_add(&self, idx: usize, addend: usize) -> usize {
wrap_index(idx.wrapping_add(addend), self.cap())
}
/// Returns the index in the underlying buffer for a given logical element
/// index - subtrahend.
#[inline]
fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
wrap_index(idx.wrapping_sub(subtrahend), self.cap())
}
/// Copies a contiguous block of memory len long from src to dst
#[inline]
unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
debug_assert!(dst + len <= self.cap(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
self.cap());
debug_assert!(src + len <= self.cap(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
self.cap());
ptr::copy(self.ptr().offset(src as isize),
self.ptr().offset(dst as isize),
len);
}
/// Copies a contiguous block of memory len long from src to dst
#[inline]
unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
debug_assert!(dst + len <= self.cap(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
self.cap());
debug_assert!(src + len <= self.cap(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
self.cap());
ptr::copy_nonoverlapping(self.ptr().offset(src as isize),
self.ptr().offset(dst as isize),
len);
}
/// Copies a potentially wrapping block of memory len long from src to dest.
/// (abs(dst - src) + len) must be no larger than cap() (There must be at
/// most one continuous overlapping region between src and dest).
unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) {
#[allow(dead_code)]
fn diff(a: usize, b: usize) -> usize {
if a <= b { b - a } else { a - b }
}
debug_assert!(cmp::min(diff(dst, src), self.cap() - diff(dst, src)) + len <= self.cap(),
"wrc dst={} src={} len={} cap={}",
dst,
src,
len,
self.cap());
if src == dst || len == 0 {
return;
}
let dst_after_src = self.wrap_sub(dst, src) < len;
let src_pre_wrap_len = self.cap() - src;
let dst_pre_wrap_len = self.cap() - dst;
let src_wraps = src_pre_wrap_len < len;
let dst_wraps = dst_pre_wrap_len < len;
match (dst_after_src, src_wraps, dst_wraps) {
(_, false, false) => {
// src doesn't wrap, dst doesn't wrap
//
// S . . .
// 1 [_ _ A A B B C C _]
// 2 [_ _ A A A A B B _]
// D . . .
//
self.copy(dst, src, len);
}
(false, false, true) => {
// dst before src, src doesn't wrap, dst wraps
//
// S . . .
// 1 [A A B B _ _ _ C C]
// 2 [A A B B _ _ _ A A]
// 3 [B B B B _ _ _ A A]
// . . D .
//
self.copy(dst, src, dst_pre_wrap_len);
self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
}
(true, false, true) => {
// src before dst, src doesn't wrap, dst wraps
//
// S . . .
// 1 [C C _ _ _ A A B B]
// 2 [B B _ _ _ A A B B]
// 3 [B B _ _ _ A A A A]
// . . D .
//
self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
self.copy(dst, src, dst_pre_wrap_len);
}
(false, true, false) => {
// dst before src, src wraps, dst doesn't wrap
//
// . . S .
// 1 [C C _ _ _ A A B B]
// 2 [C C _ _ _ B B B B]
// 3 [C C _ _ _ B B C C]
// D . . .
//
self.copy(dst, src, src_pre_wrap_len);
self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
}
(true, true, false) => {
// src before dst, src wraps, dst doesn't wrap
//
// . . S .
// 1 [A A B B _ _ _ C C]
// 2 [A A A A _ _ _ C C]
// 3 [C C A A _ _ _ C C]
// D . . .
//
self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
self.copy(dst, src, src_pre_wrap_len);
}
(false, true, true) => {
// dst before src, src wraps, dst wraps
//
// . . . S .
// 1 [A B C D _ E F G H]
// 2 [A B C D _ E G H H]
// 3 [A B C D _ E G H A]
// 4 [B C C D _ E G H A]
// . . D . .
//
debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
let delta = dst_pre_wrap_len - src_pre_wrap_len;
self.copy(dst, src, src_pre_wrap_len);
self.copy(dst + src_pre_wrap_len, 0, delta);
self.copy(0, delta, len - dst_pre_wrap_len);
}
(true, true, true) => {
// src before dst, src wraps, dst wraps
//
// . . S . .
// 1 [A B C D _ E F G H]
// 2 [A A B D _ E F G H]
// 3 [H A B D _ E F G H]
// 4 [H A B D _ E F F G]
// . . . D .
//
debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
let delta = src_pre_wrap_len - dst_pre_wrap_len;
self.copy(delta, 0, len - src_pre_wrap_len);
self.copy(0, self.cap() - delta, delta);
self.copy(dst, src, dst_pre_wrap_len);
}
}
}
/// Frobs the head and tail sections around to handle the fact that we
/// just reallocated. Unsafe because it trusts old_cap.
#[inline]
unsafe fn handle_cap_increase(&mut self, old_cap: usize) {
let new_cap = self.cap();
// Move the shortest contiguous section of the ring buffer
// T H
// [o o o o o o o . ]
// T H
// A [o o o o o o o . . . . . . . . . ]
// H T
// [o o . o o o o o ]
// T H
// B [. . . o o o o o o o . . . . . . ]
// H T
// [o o o o o . o o ]
// H T
// C [o o o o o . . . . . . . . . o o ]
if self.tail <= self.head {
// A
// Nop
} else if self.head < old_cap - self.tail {
// B
self.copy_nonoverlapping(old_cap, 0, self.head);
self.head += old_cap;
debug_assert!(self.head > self.tail);
} else {
// C
let new_tail = new_cap - (old_cap - self.tail);
self.copy_nonoverlapping(new_tail, self.tail, old_cap - self.tail);
self.tail = new_tail;
debug_assert!(self.head < self.tail);
}
debug_assert!(self.head < self.cap());
debug_assert!(self.tail < self.cap());
debug_assert!(self.cap().count_ones() == 1);
}
}
impl<T> VecDeque<T> {
/// Creates an empty `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let vector: VecDeque<u32> = VecDeque::new();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> VecDeque<T> {
VecDeque::with_capacity(INITIAL_CAPACITY)
}
/// Creates an empty `VecDeque` with space for at least `n` elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let vector: VecDeque<u32> = VecDeque::with_capacity(10);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(n: usize) -> VecDeque<T> {
// +1 since the ringbuffer always leaves one space empty
let cap = cmp::max(n + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
assert!(cap > n, "capacity overflow");
VecDeque {
tail: 0,
head: 0,
buf: RawVec::with_capacity(cap),
}
}
/// Retrieves an element in the `VecDeque` by index.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// assert_eq!(buf.get(1), Some(&4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self, index: usize) -> Option<&T> {
if index < self.len() {
let idx = self.wrap_add(self.tail, index);
unsafe { Some(&*self.ptr().offset(idx as isize)) }
} else {
None
}
}
/// Retrieves an element in the `VecDeque` mutably by index.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// if let Some(elem) = buf.get_mut(1) {
/// *elem = 7;
/// }
///
/// assert_eq!(buf[1], 7);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
if index < self.len() {
let idx = self.wrap_add(self.tail, index);
unsafe { Some(&mut *self.ptr().offset(idx as isize)) }
} else {
None
}
}
/// Swaps elements at indices `i` and `j`.
///
/// `i` and `j` may be equal.
///
/// Element at index 0 is the front of the queue.
///
/// # Panics
///
/// Panics if either index is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// assert_eq!(buf, [3, 4, 5]);
/// buf.swap(0, 2);
/// assert_eq!(buf, [5, 4, 3]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap(&mut self, i: usize, j: usize) {
assert!(i < self.len());
assert!(j < self.len());
let ri = self.wrap_add(self.tail, i);
let rj = self.wrap_add(self.tail, j);
unsafe {
ptr::swap(self.ptr().offset(ri as isize),
self.ptr().offset(rj as isize))
}
}
/// Returns the number of elements the `VecDeque` can hold without
/// reallocating.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let buf: VecDeque<i32> = VecDeque::with_capacity(10);
/// assert!(buf.capacity() >= 10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
self.cap() - 1
}
/// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
/// given `VecDeque`. Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it requests. Therefore
/// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future
/// insertions are expected.
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
/// buf.reserve_exact(10);
/// assert!(buf.capacity() >= 11);
/// ```
///
/// [`reserve`]: #method.reserve
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
self.reserve(additional);
}
/// Reserves capacity for at least `additional` more elements to be inserted in the given
/// `VecDeque`. The collection may reserve more space to avoid frequent reallocations.
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
/// buf.reserve(10);
/// assert!(buf.capacity() >= 11);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
let old_cap = self.cap();
let used_cap = self.len() + 1;
let new_cap = used_cap.checked_add(additional)
.and_then(|needed_cap| needed_cap.checked_next_power_of_two())
.expect("capacity overflow");
if new_cap > old_cap {
self.buf.reserve_exact(used_cap, new_cap - used_cap);
unsafe {
self.handle_cap_increase(old_cap);
}
}
}
    /// Tries to reserve the minimum capacity for exactly `additional` more elements to
/// be inserted in the given `VecDeque<T>`. After calling `reserve_exact`,
/// capacity will be greater than or equal to `self.len() + additional`.
/// Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it
/// requests. Therefore capacity can not be relied upon to be precisely
/// minimal. Prefer `reserve` if future insertions are expected.
///
/// # Errors
///
/// If the capacity overflows, or the allocator reports a failure, then an error
/// is returned.
///
/// # Examples
///
/// ```
/// #![feature(try_reserve)]
/// use std::collections::CollectionAllocErr;
/// use std::collections::VecDeque;
///
/// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, CollectionAllocErr> {
/// let mut output = VecDeque::new();
///
/// // Pre-reserve the memory, exiting if we can't
/// output.try_reserve_exact(data.len())?;
///
/// // Now we know this can't OOM in the middle of our complex work
/// output.extend(data.iter().map(|&val| {
/// val * 2 + 5 // very complicated
/// }));
///
/// Ok(output)
/// }
/// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
/// ```
#[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
self.try_reserve(additional)
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
/// in the given `VecDeque<T>`. The collection may reserve more space to avoid
/// frequent reallocations. After calling `reserve`, capacity will be
/// greater than or equal to `self.len() + additional`. Does nothing if
/// capacity is already sufficient.
///
/// # Errors
///
/// If the capacity overflows, or the allocator reports a failure, then an error
/// is returned.
///
/// # Examples
///
/// ```
/// #![feature(try_reserve)]
/// use std::collections::CollectionAllocErr;
/// use std::collections::VecDeque;
///
/// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, CollectionAllocErr> {
/// let mut output = VecDeque::new();
///
/// // Pre-reserve the memory, exiting if we can't
/// output.try_reserve(data.len())?;
///
/// // Now we know this can't OOM in the middle of our complex work
/// output.extend(data.iter().map(|&val| {
/// val * 2 + 5 // very complicated
/// }));
///
/// Ok(output)
/// }
/// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
/// ```
#[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
let old_cap = self.cap();
let used_cap = self.len() + 1;
let new_cap = used_cap.checked_add(additional)
.and_then(|needed_cap| needed_cap.checked_next_power_of_two())
.ok_or(CollectionAllocErr::CapacityOverflow)?;
if new_cap > old_cap {
self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?;
unsafe {
self.handle_cap_increase(old_cap);
}
}
Ok(())
}
/// Shrinks the capacity of the `VecDeque` as much as possible.
///
/// It will drop down as close as possible to the length but the allocator may still inform the
/// `VecDeque` that there is space for a few more elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::with_capacity(15);
/// buf.extend(0..4);
/// assert_eq!(buf.capacity(), 15);
/// buf.shrink_to_fit();
/// assert!(buf.capacity() >= 4);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn shrink_to_fit(&mut self) {
self.shrink_to(0);
}
/// Shrinks the capacity of the `VecDeque` with a lower bound.
///
/// The capacity will remain at least as large as both the length
/// and the supplied value.
///
/// Panics if the current capacity is smaller than the supplied
/// minimum capacity.
///
/// # Examples
///
/// ```
/// #![feature(shrink_to)]
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::with_capacity(15);
/// buf.extend(0..4);
/// assert_eq!(buf.capacity(), 15);
/// buf.shrink_to(6);
/// assert!(buf.capacity() >= 6);
/// buf.shrink_to(0);
/// assert!(buf.capacity() >= 4);
/// ```
#[unstable(feature = "shrink_to", reason = "new API", issue="0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
assert!(self.capacity() >= min_capacity, "Tried to shrink to a larger capacity");
// +1 since the ringbuffer always leaves one space empty
// len + 1 can't overflow for an existing, well-formed ringbuffer.
let target_cap = cmp::max(
cmp::max(min_capacity, self.len()) + 1,
MINIMUM_CAPACITY + 1
).next_power_of_two();
if target_cap < self.cap() {
// There are three cases of interest:
// All elements are out of desired bounds
// Elements are contiguous, and head is out of desired bounds
// Elements are discontiguous, and tail is out of desired bounds
//
// At all other times, element positions are unaffected.
//
// Indicates that elements at the head should be moved.
let head_outside = self.head == 0 || self.head >= target_cap;
// Move elements from out of desired bounds (positions after target_cap)
if self.tail >= target_cap && head_outside {
// T H
// [. . . . . . . . o o o o o o o . ]
// T H
// [o o o o o o o . ]
unsafe {
self.copy_nonoverlapping(0, self.tail, self.len());
}
self.head = self.len();
self.tail = 0;
} else if self.tail != 0 && self.tail < target_cap && head_outside {
// T H
// [. . . o o o o o o o . . . . . . ]
// H T
// [o o . o o o o o ]
let len = self.wrap_sub(self.head, target_cap);
unsafe {
self.copy_nonoverlapping(0, target_cap, len);
}
self.head = len;
debug_assert!(self.head < self.tail);
} else if self.tail >= target_cap {
// H T
// [o o o o o . . . . . . . . . o o ]
// H T
// [o o o o o . o o ]
debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
let len = self.cap() - self.tail;
let new_tail = target_cap - len;
unsafe {
self.copy_nonoverlapping(new_tail, self.tail, len);
}
self.tail = new_tail;
debug_assert!(self.head < self.tail);
}
self.buf.shrink_to_fit(target_cap);
debug_assert!(self.head < self.cap());
debug_assert!(self.tail < self.cap());
debug_assert!(self.cap().count_ones() == 1);
}
}
/// Shortens the `VecDeque`, dropping excess elements from the back.
///
/// If `len` is greater than the `VecDeque`'s current length, this has no
/// effect.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(10);
/// buf.push_back(15);
/// assert_eq!(buf, [5, 10, 15]);
/// buf.truncate(1);
/// assert_eq!(buf, [5]);
/// ```
#[stable(feature = "deque_extras", since = "1.16.0")]
pub fn truncate(&mut self, len: usize) {
for _ in len..self.len() {
self.pop_back();
}
}
/// Returns a front-to-back iterator.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// let b: &[_] = &[&5, &3, &4];
/// let c: Vec<&i32> = buf.iter().collect();
/// assert_eq!(&c[..], b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<T> {
Iter {
tail: self.tail,
head: self.head,
ring: unsafe { self.buffer_as_slice() },
}
}
/// Returns a front-to-back iterator that returns mutable references.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// for num in buf.iter_mut() {
/// *num = *num - 2;
/// }
/// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
/// assert_eq!(&buf.iter_mut().collect::<Vec<&mut i32>>()[..], b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<T> {
IterMut {
tail: self.tail,
head: self.head,
ring: unsafe { self.buffer_as_mut_slice() },
}
}
/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vector = VecDeque::new();
///
/// vector.push_back(0);
/// vector.push_back(1);
/// vector.push_back(2);
///
/// assert_eq!(vector.as_slices(), (&[0, 1, 2][..], &[][..]));
///
/// vector.push_front(10);
/// vector.push_front(9);
///
/// assert_eq!(vector.as_slices(), (&[9, 10][..], &[0, 1, 2][..]));
/// ```
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_slices(&self) -> (&[T], &[T]) {
unsafe {
let buf = self.buffer_as_slice();
RingSlices::ring_slices(buf, self.head, self.tail)
}
}
/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vector = VecDeque::new();
///
/// vector.push_back(0);
/// vector.push_back(1);
///
/// vector.push_front(10);
/// vector.push_front(9);
///
/// vector.as_mut_slices().0[0] = 42;
/// vector.as_mut_slices().1[0] = 24;
/// assert_eq!(vector.as_slices(), (&[42, 10][..], &[24, 1][..]));
/// ```
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
unsafe {
let head = self.head;
let tail = self.tail;
let buf = self.buffer_as_mut_slice();
RingSlices::ring_slices(buf, head, tail)
}
}
/// Returns the number of elements in the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert_eq!(v.len(), 0);
/// v.push_back(1);
/// assert_eq!(v.len(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
count(self.tail, self.head, self.cap())
}
/// Returns `true` if the `VecDeque` is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert!(v.is_empty());
/// v.push_front(1);
/// assert!(!v.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
self.tail == self.head
}
/// Create a draining iterator that removes the specified range in the
/// `VecDeque` and yields the removed items.
///
/// Note 1: The element range is removed even if the iterator is not
/// consumed until the end.
///
/// Note 2: It is unspecified how many elements are removed from the deque,
/// if the `Drain` value is not dropped, but the borrow it holds expires
    /// (e.g. due to mem::forget).
///
/// # Panics
///
/// Panics if the starting point is greater than the end point or if
/// the end point is greater than the length of the vector.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
/// let drained = v.drain(2..).collect::<VecDeque<_>>();
/// assert_eq!(drained, [3]);
/// assert_eq!(v, [1, 2]);
///
/// // A full range clears all contents
/// v.drain(..);
/// assert!(v.is_empty());
/// ```
#[inline]
#[stable(feature = "drain", since = "1.6.0")]
pub fn drain<R>(&mut self, range: R) -> Drain<T>
where R: RangeBounds<usize>
{
// Memory safety
//
// When the Drain is first created, the source deque is shortened to
// make sure no uninitialized or moved-from elements are accessible at
// all if the Drain's destructor never gets to run.
//
// Drain will ptr::read out the values to remove.
// When finished, the remaining data will be copied back to cover the hole,
// and the head/tail values will be restored correctly.
//
let len = self.len();
let start = match range.start_bound() {
Included(&n) => n,
Excluded(&n) => n + 1,
Unbounded => 0,
};
let end = match range.end_bound() {
Included(&n) => n + 1,
Excluded(&n) => n,
Unbounded => len,
};
assert!(start <= end, "drain lower bound was too large");
assert!(end <= len, "drain upper bound was too large");
// The deque's elements are parted into three segments:
// * self.tail -> drain_tail
// * drain_tail -> drain_head
// * drain_head -> self.head
//
// T = self.tail; H = self.head; t = drain_tail; h = drain_head
//
// We store drain_tail as self.head, and drain_head and self.head as
// after_tail and after_head respectively on the Drain. This also
// truncates the effective array such that if the Drain is leaked, we
// have forgotten about the potentially moved values after the start of
// the drain.
//
// T t h H
// [. . . o o x x o o . . .]
//
let drain_tail = self.wrap_add(self.tail, start);
let drain_head = self.wrap_add(self.tail, end);
let head = self.head;
// "forget" about the values after the start of the drain until after
// the drain is complete and the Drain destructor is run.
self.head = drain_tail;
Drain {
deque: NonNull::from(&mut *self),
after_tail: drain_head,
after_head: head,
iter: Iter {
tail: drain_tail,
head: drain_head,
ring: unsafe { self.buffer_as_mut_slice() },
},
}
}
/// Clears the `VecDeque`, removing all values.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// v.push_back(1);
/// v.clear();
/// assert!(v.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn clear(&mut self) {
self.drain(..);
}
/// Returns `true` if the `VecDeque` contains an element equal to the
/// given value.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vector: VecDeque<u32> = VecDeque::new();
///
/// vector.push_back(0);
/// vector.push_back(1);
///
/// assert_eq!(vector.contains(&1), true);
/// assert_eq!(vector.contains(&10), false);
/// ```
#[stable(feature = "vec_deque_contains", since = "1.12.0")]
pub fn contains(&self, x: &T) -> bool
where T: PartialEq<T>
{
let (a, b) = self.as_slices();
a.contains(x) || b.contains(x)
}
/// Provides a reference to the front element, or `None` if the `VecDeque` is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.front(), Some(&1));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front(&self) -> Option<&T> {
if !self.is_empty() {
Some(&self[0])
} else {
None
}
}
/// Provides a mutable reference to the front element, or `None` if the
/// `VecDeque` is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front_mut(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.front_mut() {
/// Some(x) => *x = 9,
/// None => (),
/// }
/// assert_eq!(d.front(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front_mut(&mut self) -> Option<&mut T> {
if !self.is_empty() {
Some(&mut self[0])
} else {
None
}
}
/// Provides a reference to the back element, or `None` if the `VecDeque` is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.back(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back(&self) -> Option<&T> {
if !self.is_empty() {
Some(&self[self.len() - 1])
} else {
None
}
}
/// Provides a mutable reference to the back element, or `None` if the
/// `VecDeque` is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.back_mut() {
/// Some(x) => *x = 9,
/// None => (),
/// }
/// assert_eq!(d.back(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back_mut(&mut self) -> Option<&mut T> {
let len = self.len();
if !self.is_empty() {
Some(&mut self[len - 1])
} else {
None
}
}
/// Removes the first element and returns it, or `None` if the `VecDeque` is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_back(1);
/// d.push_back(2);
///
/// assert_eq!(d.pop_front(), Some(1));
/// assert_eq!(d.pop_front(), Some(2));
/// assert_eq!(d.pop_front(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_front(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
let tail = self.tail;
self.tail = self.wrap_add(self.tail, 1);
unsafe { Some(self.buffer_read(tail)) }
}
}
/// Prepends an element to the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_front(1);
/// d.push_front(2);
/// assert_eq!(d.front(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_front(&mut self, value: T) {
self.grow_if_necessary();
self.tail = self.wrap_sub(self.tail, 1);
let tail = self.tail;
unsafe {
self.buffer_write(tail, value);
}
}
/// Appends an element to the back of the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(3, *buf.back().unwrap());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_back(&mut self, value: T) {
self.grow_if_necessary();
let head = self.head;
self.head = self.wrap_add(self.head, 1);
unsafe { self.buffer_write(head, value) }
}
/// Removes the last element from the `VecDeque` and returns it, or `None` if
/// it is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.pop_back(), None);
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(buf.pop_back(), Some(3));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_back(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
self.head = self.wrap_sub(self.head, 1);
let head = self.head;
unsafe { Some(self.buffer_read(head)) }
}
}
#[inline]
fn is_contiguous(&self) -> bool {
self.tail <= self.head
}
/// Removes an element from anywhere in the `VecDeque` and returns it, replacing it with the
/// last element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_remove_back(0), None);
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
/// assert_eq!(buf, [1, 2, 3]);
///
/// assert_eq!(buf.swap_remove_back(0), Some(1));
/// assert_eq!(buf, [3, 2]);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
let length = self.len();
if length > 0 && index < length - 1 {
self.swap(index, length - 1);
} else if index >= length {
return None;
}
self.pop_back()
}
/// Removes an element from anywhere in the `VecDeque` and returns it,
/// replacing it with the first element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_remove_front(0), None);
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
/// assert_eq!(buf, [1, 2, 3]);
///
/// assert_eq!(buf.swap_remove_front(2), Some(3));
/// assert_eq!(buf, [2, 1]);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
let length = self.len();
if length > 0 && index < length && index != 0 {
self.swap(index, 0);
} else if index >= length {
return None;
}
self.pop_front()
}
/// Inserts an element at `index` within the `VecDeque`, shifting all elements with indices
/// greater than or equal to `index` towards the back.
///
/// Element at index 0 is the front of the queue.
///
/// # Panics
///
/// Panics if `index` is greater than `VecDeque`'s length
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vec_deque = VecDeque::new();
/// vec_deque.push_back('a');
/// vec_deque.push_back('b');
/// vec_deque.push_back('c');
/// assert_eq!(vec_deque, &['a', 'b', 'c']);
///
/// vec_deque.insert(1, 'd');
/// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn insert(&mut self, index: usize, value: T) {
assert!(index <= self.len(), "index out of bounds");
self.grow_if_necessary();
// Move the least number of elements in the ring buffer and insert
// the given object
//
        // At most len/2 - 1 elements will be moved. O(min(i, n - i))
//
// There are three main cases:
// Elements are contiguous
// - special case when tail is 0
// Elements are discontiguous and the insert is in the tail section
// Elements are discontiguous and the insert is in the head section
//
// For each of those there are two more cases:
// Insert is closer to tail
// Insert is closer to head
//
// Key: H - self.head
// T - self.tail
// o - Valid element
// I - Insertion element
// A - The element that should be after the insertion point
// M - Indicates element was moved
let idx = self.wrap_add(self.tail, index);
let distance_to_tail = index;
let distance_to_head = self.len() - index;
let contiguous = self.is_contiguous();
match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
(true, true, _) if index == 0 => {
// push_front
//
// T
// I H
// [A o o o o o o . . . . . . . . .]
//
// H T
// [A o o o o o o o . . . . . I]
//
self.tail = self.wrap_sub(self.tail, 1);
}
(true, true, _) => {
unsafe {
// contiguous, insert closer to tail:
//
// T I H
// [. . . o o A o o o o . . . . . .]
//
// T H
// [. . o o I A o o o o . . . . . .]
// M M
//
// contiguous, insert closer to tail and tail is 0:
//
//
// T I H
// [o o A o o o o . . . . . . . . .]
//
// H T
// [o I A o o o o o . . . . . . . o]
// M M
let new_tail = self.wrap_sub(self.tail, 1);
self.copy(new_tail, self.tail, 1);
// Already moved the tail, so we only copy `index - 1` elements.
self.copy(self.tail, self.tail + 1, index - 1);
self.tail = new_tail;
}
}
(true, false, _) => {
unsafe {
// contiguous, insert closer to head:
//
// T I H
// [. . . o o o o A o o . . . . . .]
//
// T H
// [. . . o o o o I A o o . . . . .]
// M M M
self.copy(idx + 1, idx, self.head - idx);
self.head = self.wrap_add(self.head, 1);
}
}
(false, true, true) => {
unsafe {
// discontiguous, insert closer to tail, tail section:
//
// H T I
// [o o o o o o . . . . . o o A o o]
//
// H T
// [o o o o o o . . . . o o I A o o]
// M M
self.copy(self.tail - 1, self.tail, index);
self.tail -= 1;
}
}
(false, false, true) => {
unsafe {
// discontiguous, insert closer to head, tail section:
//
// H T I
// [o o . . . . . . . o o o o o A o]
//
// H T
// [o o o . . . . . . o o o o o I A]
// M M M M
// copy elements up to new head
self.copy(1, 0, self.head);
// copy last element into empty spot at bottom of buffer
self.copy(0, self.cap() - 1, 1);
// move elements from idx to end forward not including ^ element
self.copy(idx + 1, idx, self.cap() - 1 - idx);
self.head += 1;
}
}
(false, true, false) if idx == 0 => {
unsafe {
// discontiguous, insert is closer to tail, head section,
// and is at index zero in the internal buffer:
//
// I H T
// [A o o o o o o o o o . . . o o o]
//
// H T
// [A o o o o o o o o o . . o o o I]
// M M M
// copy elements up to new tail
self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
// copy last element into empty spot at bottom of buffer
self.copy(self.cap() - 1, 0, 1);
self.tail -= 1;
}
}
(false, true, false) => {
unsafe {
// discontiguous, insert closer to tail, head section:
//
// I H T
// [o o o A o o o o o o . . . o o o]
//
// H T
// [o o I A o o o o o o . . o o o o]
// M M M M M M
// copy elements up to new tail
self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
// copy last element into empty spot at bottom of buffer
self.copy(self.cap() - 1, 0, 1);
// move elements from idx-1 to end forward not including ^ element
self.copy(0, 1, idx - 1);
self.tail -= 1;
}
}
(false, false, false) => {
unsafe {
// discontiguous, insert closer to head, head section:
//
// I H T
// [o o o o A o o . . . . . . o o o]
//
// H T
// [o o o o I A o o . . . . . o o o]
// M M M
self.copy(idx + 1, idx, self.head - idx);
self.head += 1;
}
}
}
// tail might've been changed so we need to recalculate
let new_idx = self.wrap_add(self.tail, index);
unsafe {
self.buffer_write(new_idx, value);
}
}
/// Removes and returns the element at `index` from the `VecDeque`.
/// Whichever end is closer to the removal point will be moved to make
/// room, and all the affected elements will be moved to new positions.
/// Returns `None` if `index` is out of bounds.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
/// assert_eq!(buf, [1, 2, 3]);
///
/// assert_eq!(buf.remove(1), Some(2));
/// assert_eq!(buf, [1, 3]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(&mut self, index: usize) -> Option<T> {
if self.is_empty() || self.len() <= index {
return None;
}
// There are three main cases:
// Elements are contiguous
// Elements are discontiguous and the removal is in the tail section
// Elements are discontiguous and the removal is in the head section
// - special case when elements are technically contiguous,
// but self.head = 0
//
// For each of those there are two more cases:
// Insert is closer to tail
// Insert is closer to head
//
// Key: H - self.head
// T - self.tail
// o - Valid element
// x - Element marked for removal
// R - Indicates element that is being removed
// M - Indicates element was moved
let idx = self.wrap_add(self.tail, index);
let elem = unsafe { Some(self.buffer_read(idx)) };
let distance_to_tail = index;
let distance_to_head = self.len() - index;
let contiguous = self.is_contiguous();
match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
(true, true, _) => {
unsafe {
// contiguous, remove closer to tail:
//
// T R H
// [. . . o o x o o o o . . . . . .]
//
// T H
// [. . . . o o o o o o . . . . . .]
// M M
self.copy(self.tail + 1, self.tail, index);
self.tail += 1;
}
}
(true, false, _) => {
unsafe {
// contiguous, remove closer to head:
//
// T R H
// [. . . o o o o x o o . . . . . .]
//
// T H
// [. . . o o o o o o . . . . . . .]
// M M
self.copy(idx, idx + 1, self.head - idx - 1);
self.head -= 1;
}
}
(false, true, true) => {
unsafe {
// discontiguous, remove closer to tail, tail section:
//
// H T R
// [o o o o o o . . . . . o o x o o]
//
// H T
// [o o o o o o . . . . . . o o o o]
// M M
self.copy(self.tail + 1, self.tail, index);
self.tail = self.wrap_add(self.tail, 1);
}
}
(false, false, false) => {
unsafe {
// discontiguous, remove closer to head, head section:
//
// R H T
// [o o o o x o o . . . . . . o o o]
//
// H T
// [o o o o o o . . . . . . . o o o]
// M M
self.copy(idx, idx + 1, self.head - idx - 1);
self.head -= 1;
}
}
(false, false, true) => {
unsafe {
// discontiguous, remove closer to head, tail section:
//
// H T R
// [o o o . . . . . . o o o o o x o]
//
// H T
// [o o . . . . . . . o o o o o o o]
// M M M M
//
// or quasi-discontiguous, remove next to head, tail section:
//
// H T R
// [. . . . . . . . . o o o o o x o]
//
// T H
// [. . . . . . . . . o o o o o o .]
// M
// draw in elements in the tail section
self.copy(idx, idx + 1, self.cap() - idx - 1);
// Prevents underflow.
if self.head != 0 {
// copy first element into empty spot
self.copy(self.cap() - 1, 0, 1);
// move elements in the head section backwards
self.copy(0, 1, self.head - 1);
}
self.head = self.wrap_sub(self.head, 1);
}
}
(false, true, false) => {
unsafe {
// discontiguous, remove closer to tail, head section:
//
// R H T
// [o o x o o o o o o o . . . o o o]
//
// H T
// [o o o o o o o o o o . . . . o o]
// M M M M M
// draw in elements up to idx
self.copy(1, 0, idx);
// copy last element into empty spot
self.copy(0, self.cap() - 1, 1);
// move elements from tail to end forward, excluding the last one
self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
self.tail = self.wrap_add(self.tail, 1);
}
}
}
return elem;
}
/// Splits the `VecDeque` into two at the given index.
///
/// Returns a newly allocated `VecDeque`. `self` contains elements `[0, at)`,
/// and the returned `VecDeque` contains elements `[at, len)`.
///
/// Note that the capacity of `self` does not change.
///
/// Element at index 0 is the front of the queue.
///
/// # Panics
///
/// Panics if `at > len`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1,2,3].into_iter().collect();
/// let buf2 = buf.split_off(1);
/// assert_eq!(buf, [1]);
/// assert_eq!(buf2, [2, 3]);
/// ```
#[inline]
#[stable(feature = "split_off", since = "1.4.0")]
pub fn split_off(&mut self, at: usize) -> Self {
let len = self.len();
assert!(at <= len, "`at` out of bounds");
let other_len = len - at;
let mut other = VecDeque::with_capacity(other_len);
unsafe {
let (first_half, second_half) = self.as_slices();
let first_len = first_half.len();
let second_len = second_half.len();
if at < first_len {
// `at` lies in the first half.
let amount_in_first = first_len - at;
ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
other.ptr(),
amount_in_first);
// just take all of the second half.
ptr::copy_nonoverlapping(second_half.as_ptr(),
other.ptr().offset(amount_in_first as isize),
second_len);
} else {
// `at` lies in the second half, need to factor in the elements we skipped
// in the first half.
let offset = at - first_len;
let amount_in_second = second_len - offset;
ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
other.ptr(),
amount_in_second);
}
}
// Cleanup where the ends of the buffers are
self.head = self.wrap_sub(self.head, other_len);
other.head = other.wrap_index(other_len);
other
}
/// Moves all the elements of `other` into `Self`, leaving `other` empty.
///
/// # Panics
///
/// Panics if the new number of elements in self overflows a `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1, 2].into_iter().collect();
/// let mut buf2: VecDeque<_> = vec![3, 4].into_iter().collect();
/// buf.append(&mut buf2);
/// assert_eq!(buf, [1, 2, 3, 4]);
/// assert_eq!(buf2, []);
/// ```
#[inline]
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
// naive impl
self.extend(other.drain(..));
}
/// Retains only the elements specified by the predicate.
///
/// In other words, remove all elements `e` such that `f(&e)` returns false.
/// This method operates in place and preserves the order of the retained
/// elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.extend(1..5);
/// buf.retain(|&x| x%2 == 0);
/// assert_eq!(buf, [2, 4]);
/// ```
#[stable(feature = "vec_deque_retain", since = "1.4.0")]
pub fn retain<F>(&mut self, mut f: F)
where F: FnMut(&T) -> bool
{
let len = self.len();
let mut del = 0;
for i in 0..len {
if !f(&self[i]) {
del += 1;
} else if del > 0 {
self.swap(i - del, i);
}
}
if del > 0 {
self.truncate(len - del);
}
}
// This may panic or abort
#[inline]
fn grow_if_necessary(&mut self) {
if self.is_full() {
let old_cap = self.cap();
self.buf.double();
unsafe {
self.handle_cap_increase(old_cap);
}
debug_assert!(!self.is_full());
}
}
}
impl<T: Clone> VecDeque<T> {
/// Modifies the `VecDeque` in-place so that `len()` is equal to new_len,
/// either by removing excess elements from the back or by appending clones of `value`
/// to the back.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(10);
/// buf.push_back(15);
/// assert_eq!(buf, [5, 10, 15]);
///
/// buf.resize(2, 0);
/// assert_eq!(buf, [5, 10]);
///
/// buf.resize(5, 20);
/// assert_eq!(buf, [5, 10, 20, 20, 20]);
/// ```
#[stable(feature = "deque_extras", since = "1.16.0")]
pub fn resize(&mut self, new_len: usize, value: T) {
let len = self.len();
if new_len > len {
self.extend(repeat(value).take(new_len - len))
} else {
self.truncate(new_len);
}
}
}
/// Returns the index in the underlying buffer for a given logical element index.
#[inline]
fn wrap_index(index: usize, size: usize) -> usize {
// size is always a power of 2
debug_assert!(size.is_power_of_two());
index & (size - 1)
}
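// For example, with size = 8 the mask is 0b111, so a logical index of 9 maps to
// buffer slot 9 & 7 == 1; masking is equivalent to `index % size` whenever `size`
// is a power of two.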
/// Returns the two slices that cover the `VecDeque`'s valid range
trait RingSlices: Sized {
fn slice(self, from: usize, to: usize) -> Self;
fn split_at(self, i: usize) -> (Self, Self);
fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
let contiguous = tail <= head;
if contiguous {
let (empty, buf) = buf.split_at(0);
(buf.slice(tail, head), empty)
} else {
let (mid, right) = buf.split_at(tail);
let (left, _) = mid.split_at(head);
(right, left)
}
}
}
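// For example, a buffer laid out as [C, ., ., A, B] with tail = 3 and head = 1
// is discontiguous, so `ring_slices` yields (&[A, B], &[C]); a contiguous layout
// such as tail = 1, head = 4 yields (&buf[1..4], &[]).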
impl<'a, T> RingSlices for &'a [T] {
fn slice(self, from: usize, to: usize) -> Self {
&self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at(i)
}
}
impl<'a, T> RingSlices for &'a mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
&mut self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at_mut(i)
}
}
/// Calculate the number of elements left to be read in the buffer
#[inline]
fn count(tail: usize, head: usize, size: usize) -> usize {
// size is always a power of 2
(head.wrapping_sub(tail)) & (size - 1)
}
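// For example, with size = 8, tail = 6 and head = 2 the occupied region wraps
// around the end of the buffer; the wrapping subtraction plus mask gives
// 2usize.wrapping_sub(6) & 7 == 4, the number of stored elements.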
/// An iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`iter`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`iter`]: struct.VecDeque.html#method.iter
/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
ring: &'a [T],
tail: usize,
head: usize,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("Iter")
.field(&self.ring)
.field(&self.tail)
.field(&self.head)
.finish()
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Iter<'a, T> {
fn clone(&self) -> Iter<'a, T> {
Iter {
ring: self.ring,
tail: self.tail,
head: self.head,
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
type Item = &'a T;
#[inline]
fn next(&mut self) -> Option<&'a T> {
if self.tail == self.head {
return None;
}
let tail = self.tail;
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
unsafe { Some(self.ring.get_unchecked(tail)) }
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = count(self.tail, self.head, self.ring.len());
(len, Some(len))
}
fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
where F: FnMut(Acc, Self::Item) -> Acc
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
accum = front.iter().fold(accum, &mut f);
back.iter().fold(accum, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a T> {
if self.tail == self.head {
return None;
}
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
unsafe { Some(self.ring.get_unchecked(self.head)) }
}
fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
where F: FnMut(Acc, Self::Item) -> Acc
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
accum = back.iter().rfold(accum, &mut f);
front.iter().rfold(accum, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {
fn is_empty(&self) -> bool {
self.head == self.tail
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, T> FusedIterator for Iter<'a, T> {}
/// A mutable iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`iter_mut`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`iter_mut`]: struct.VecDeque.html#method.iter_mut
/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
ring: &'a mut [T],
tail: usize,
head: usize,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IterMut")
.field(&self.ring)
.field(&self.tail)
.field(&self.head)
.finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
type Item = &'a mut T;
#[inline]
fn next(&mut self) -> Option<&'a mut T> {
if self.tail == self.head {
return None;
}
let tail = self.tail;
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
unsafe {
let elem = self.ring.get_unchecked_mut(tail);
Some(&mut *(elem as *mut _))
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = count(self.tail, self.head, self.ring.len());
(len, Some(len))
}
fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
where F: FnMut(Acc, Self::Item) -> Acc
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
accum = front.iter_mut().fold(accum, &mut f);
back.iter_mut().fold(accum, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut T> {
if self.tail == self.head {
return None;
}
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
unsafe {
let elem = self.ring.get_unchecked_mut(self.head);
Some(&mut *(elem as *mut _))
}
}
fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
where F: FnMut(Acc, Self::Item) -> Acc
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
accum = back.iter_mut().rfold(accum, &mut f);
front.iter_mut().rfold(accum, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {
fn is_empty(&self) -> bool {
self.head == self.tail
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, T> FusedIterator for IterMut<'a, T> {}
/// An owning iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`into_iter`] method on [`VecDeque`][`VecDeque`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.VecDeque.html#method.into_iter
/// [`VecDeque`]: struct.VecDeque.html
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
inner: VecDeque<T>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IntoIter")
.field(&self.inner)
.finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
self.inner.pop_front()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.inner.len();
(len, Some(len))
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
#[inline]
fn next_back(&mut self) -> Option<T> {
self.inner.pop_back()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {
fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
/// A draining iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`drain`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`drain`]: struct.VecDeque.html#method.drain
/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, T: 'a> {
after_tail: usize,
after_head: usize,
iter: Iter<'a, T>,
deque: NonNull<VecDeque<T>>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for Drain<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("Drain")
.field(&self.after_tail)
.field(&self.after_head)
.field(&self.iter)
.finish()
}
}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Send> Send for Drain<'a, T> {}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> Drop for Drain<'a, T> {
fn drop(&mut self) {
self.for_each(drop);
let source_deque = unsafe { self.deque.as_mut() };
// T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
//
// T t h H
// [. . . o o x x o o . . .]
//
let orig_tail = source_deque.tail;
let drain_tail = source_deque.head;
let drain_head = self.after_tail;
let orig_head = self.after_head;
let tail_len = count(orig_tail, drain_tail, source_deque.cap());
let head_len = count(drain_head, orig_head, source_deque.cap());
// Restore the original head value
source_deque.head = orig_head;
match (tail_len, head_len) {
(0, 0) => {
source_deque.head = 0;
source_deque.tail = 0;
}
(0, _) => {
source_deque.tail = drain_head;
}
(_, 0) => {
source_deque.head = drain_tail;
}
_ => unsafe {
if tail_len <= head_len {
source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
} else {
source_deque.head = source_deque.wrap_add(drain_tail, head_len);
source_deque.wrap_copy(drain_tail, drain_head, head_len);
}
},
}
}
}
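// Illustrative sketch (not part of the original source) of the repair logic
// above: once the `Drain` iterator is dropped, the retained front and back
// blocks are stitched back together in their original order.
#[cfg(test)]
mod drain_drop_sketch {
    use super::VecDeque;
    #[test]
    fn retained_elements_are_stitched_back_together() {
        let mut deque: VecDeque<i32> = (0..8).collect();
        // Dropping the iterator returned by `drain` runs the repair above.
        let removed: VecDeque<i32> = deque.drain(2..6).collect();
        assert_eq!(removed, [2, 3, 4, 5]);
        assert_eq!(deque, [0, 1, 6, 7]);
    }
}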
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> Iterator for Drain<'a, T> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
self.iter.next().map(|elt| unsafe { ptr::read(elt) })
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<T> {
self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, T: 'a> FusedIterator for Drain<'a, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialEq> PartialEq for VecDeque<A> {
fn eq(&self, other: &VecDeque<A>) -> bool {
if self.len() != other.len() {
return false;
}
let (sa, sb) = self.as_slices();
let (oa, ob) = other.as_slices();
if sa.len() == oa.len() {
sa == oa && sb == ob
} else if sa.len() < oa.len() {
// Always divisible in three sections, for example:
// self: [a b c|d e f]
// other: [0 1 2 3|4 5]
// front = 3, mid = 1,
// [a b c] == [0 1 2] && [d] == [3] && [e f] == [4 5]
let front = sa.len();
let mid = oa.len() - front;
let (oa_front, oa_mid) = oa.split_at(front);
let (sb_mid, sb_back) = sb.split_at(mid);
debug_assert_eq!(sa.len(), oa_front.len());
debug_assert_eq!(sb_mid.len(), oa_mid.len());
debug_assert_eq!(sb_back.len(), ob.len());
sa == oa_front && sb_mid == oa_mid && sb_back == ob
} else {
let front = oa.len();
let mid = sa.len() - front;
let (sa_front, sa_mid) = sa.split_at(front);
let (ob_mid, ob_back) = ob.split_at(mid);
debug_assert_eq!(sa_front.len(), oa.len());
debug_assert_eq!(sa_mid.len(), ob_mid.len());
debug_assert_eq!(sb.len(), ob_back.len());
sa_front == oa && sa_mid == ob_mid && sb == ob_back
}
}
}
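// Illustrative sketch (not part of the original source): two deques holding the
// same logical contents but split differently across the internal buffer still
// compare equal, which is exactly the case the three-section comparison above
// handles.
#[cfg(test)]
mod vec_deque_eq_sketch {
    use super::VecDeque;
    #[test]
    fn equal_despite_different_split_points() {
        let a: VecDeque<i32> = (0..6).collect();
        let mut b: VecDeque<i32> = VecDeque::with_capacity(7);
        // Advance `b`'s internal tail so that its contents wrap around.
        for _ in 0..5 {
            b.push_back(0);
            b.pop_front();
        }
        b.extend(0..6);
        assert_eq!(a, b);
    }
}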
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Eq> Eq for VecDeque<A> {}
macro_rules! __impl_slice_eq1 {
($Lhs: ty, $Rhs: ty) => {
__impl_slice_eq1! { $Lhs, $Rhs, Sized }
};
($Lhs: ty, $Rhs: ty, $Bound: ident) => {
#[stable(feature = "vec-deque-partial-eq-slice", since = "1.17.0")]
impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq<B> {
fn eq(&self, other: &$Rhs) -> bool {
if self.len() != other.len() {
return false;
}
let (sa, sb) = self.as_slices();
let (oa, ob) = other[..].split_at(sa.len());
sa == oa && sb == ob
}
}
}
}
__impl_slice_eq1! { VecDeque<A>, Vec<B> }
__impl_slice_eq1! { VecDeque<A>, &'b [B] }
__impl_slice_eq1! { VecDeque<A>, &'b mut [B] }
macro_rules! array_impls {
($($N: expr)+) => {
$(
__impl_slice_eq1! { VecDeque<A>, [B; $N] }
__impl_slice_eq1! { VecDeque<A>, &'b [B; $N] }
__impl_slice_eq1! { VecDeque<A>, &'b mut [B; $N] }
)+
}
}
array_impls! {
0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 28 29
30 31 32
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialOrd> PartialOrd for VecDeque<A> {
fn partial_cmp(&self, other: &VecDeque<A>) -> Option<Ordering> {
self.iter().partial_cmp(other.iter())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Ord> Ord for VecDeque<A> {
#[inline]
fn cmp(&self, other: &VecDeque<A>) -> Ordering {
self.iter().cmp(other.iter())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Hash> Hash for VecDeque<A> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.len().hash(state);
let (a, b) = self.as_slices();
Hash::hash_slice(a, state);
Hash::hash_slice(b, state);
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Index<usize> for VecDeque<A> {
type Output = A;
#[inline]
fn index(&self, index: usize) -> &A {
self.get(index).expect("Out of bounds access")
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> IndexMut<usize> for VecDeque<A> {
#[inline]
fn index_mut(&mut self, index: usize) -> &mut A {
self.get_mut(index).expect("Out of bounds access")
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> FromIterator<A> for VecDeque<A> {
fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> VecDeque<A> {
let iterator = iter.into_iter();
let (lower, _) = iterator.size_hint();
let mut deq = VecDeque::with_capacity(lower);
deq.extend(iterator);
deq
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for VecDeque<T> {
type Item = T;
type IntoIter = IntoIter<T>;
/// Consumes the `VecDeque` into a front-to-back iterator yielding elements by
/// value.
fn into_iter(self) -> IntoIter<T> {
IntoIter { inner: self }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a VecDeque<T> {
type Item = &'a T;
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut VecDeque<T> {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T>;
fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Extend<A> for VecDeque<A> {
fn extend<T: IntoIterator<Item = A>>(&mut self, iter: T) {
for elt in iter {
self.push_back(elt);
}
}
}
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Copy> Extend<&'a T> for VecDeque<T> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
self.extend(iter.into_iter().cloned());
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for VecDeque<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self).finish()
}
}
#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
impl<T> From<Vec<T>> for VecDeque<T> {
fn from(mut other: Vec<T>) -> Self {
unsafe {
let other_buf = other.as_mut_ptr();
let mut buf = RawVec::from_raw_parts(other_buf, other.capacity());
let len = other.len();
mem::forget(other);
            // We need to extend the buf if its capacity is not a power of two, is too
            // small, or doesn't have at least one free slot
if !buf.cap().is_power_of_two() || (buf.cap() < (MINIMUM_CAPACITY + 1)) ||
(buf.cap() == len) {
let cap = cmp::max(buf.cap() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
buf.reserve_exact(len, cap - len);
}
VecDeque {
tail: 0,
head: len,
buf,
}
}
}
}
#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
impl<T> From<VecDeque<T>> for Vec<T> {
fn from(other: VecDeque<T>) -> Self {
unsafe {
let buf = other.buf.ptr();
let len = other.len();
let tail = other.tail;
let head = other.head;
let cap = other.cap();
// Need to move the ring to the front of the buffer, as vec will expect this.
if other.is_contiguous() {
ptr::copy(buf.offset(tail as isize), buf, len);
} else {
if (tail - head) >= cmp::min(cap - tail, head) {
// There is enough free space in the centre for the shortest block so we can
// do this in at most three copy moves.
if (cap - tail) > head {
// right hand block is the long one; move that enough for the left
ptr::copy(buf.offset(tail as isize),
buf.offset((tail - head) as isize),
cap - tail);
// copy left in the end
ptr::copy(buf, buf.offset((cap - head) as isize), head);
// shift the new thing to the start
ptr::copy(buf.offset((tail - head) as isize), buf, len);
} else {
// left hand block is the long one, we can do it in two!
ptr::copy(buf, buf.offset((cap - tail) as isize), head);
ptr::copy(buf.offset(tail as isize), buf, cap - tail);
}
} else {
// Need to use N swaps to move the ring
// We can use the space at the end of the ring as a temp store
let mut left_edge: usize = 0;
let mut right_edge: usize = tail;
// The general problem looks like this
// GHIJKLM...ABCDEF - before any swaps
// ABCDEFM...GHIJKL - after 1 pass of swaps
// ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
// - then restart the algorithm with a new (smaller) store
// Sometimes the temp store is reached when the right edge is at the end
// of the buffer - this means we've hit the right order with fewer swaps!
// E.g
// EF..ABCD
                    // ABCDEF.. - after only four swaps we've finished
while left_edge < len && right_edge != cap {
let mut right_offset = 0;
for i in left_edge..right_edge {
right_offset = (i - left_edge) % (cap - right_edge);
let src: isize = (right_edge + right_offset) as isize;
ptr::swap(buf.offset(i as isize), buf.offset(src));
}
let n_ops = right_edge - left_edge;
left_edge += n_ops;
right_edge += right_offset + 1;
}
}
}
let out = Vec::from_raw_parts(buf, len, cap);
mem::forget(other);
out
}
}
}
#[cfg(test)]
mod tests {
use test;
use super::VecDeque;
#[bench]
fn bench_push_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
for i in 0..100 {
deq.push_back(i);
}
deq.head = 0;
deq.tail = 0;
})
}
#[bench]
fn bench_push_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
for i in 0..100 {
deq.push_front(i);
}
deq.head = 0;
deq.tail = 0;
})
}
#[bench]
fn bench_pop_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);
b.iter(|| {
deq.head = 100;
deq.tail = 0;
while !deq.is_empty() {
test::black_box(deq.pop_back());
}
})
}
#[bench]
fn bench_pop_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);
b.iter(|| {
deq.head = 100;
deq.tail = 0;
while !deq.is_empty() {
test::black_box(deq.pop_front());
}
})
}
#[test]
fn test_swap_front_back_remove() {
fn test(back: bool) {
            // This test exercises every single combination of tail position and length.
            // Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
let usable_cap = tester.capacity();
let final_len = usable_cap / 2;
for len in 0..final_len {
let expected: VecDeque<_> = if back {
(0..len).collect()
} else {
(0..len).rev().collect()
};
for tail_pos in 0..usable_cap {
tester.tail = tail_pos;
tester.head = tail_pos;
if back {
for i in 0..len * 2 {
tester.push_front(i);
}
for i in 0..len {
assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i));
}
} else {
for i in 0..len * 2 {
tester.push_back(i);
}
for i in 0..len {
let idx = tester.len() - 1 - i;
assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
}
}
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
test(true);
test(false);
}
#[test]
fn test_insert() {
        // This test exercises every single combination of tail position, length, and
        // insertion position. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// can't guarantee we got 15, so have to get what we got.
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
// this test isn't covering what it wants to
let cap = tester.capacity();
// len is the length *after* insertion
for len in 1..cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
for to_insert in 0..len {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
if i != to_insert {
tester.push_back(i);
}
}
tester.insert(to_insert, to_insert);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
}
#[test]
fn test_remove() {
        // This test exercises every single combination of tail position, length, and
        // removal position. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// can't guarantee we got 15, so have to get what we got.
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
// this test isn't covering what it wants to
let cap = tester.capacity();
// len is the length *after* removal
for len in 0..cap - 1 {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
for to_remove in 0..len + 1 {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
if i == to_remove {
tester.push_back(1234);
}
tester.push_back(i);
}
if to_remove == len {
tester.push_back(1234);
}
tester.remove(to_remove);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
}
#[test]
fn test_drain() {
let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
let cap = tester.capacity();
for len in 0..cap + 1 {
for tail in 0..cap + 1 {
for drain_start in 0..len + 1 {
for drain_end in drain_start..len + 1 {
tester.tail = tail;
tester.head = tail;
for i in 0..len {
tester.push_back(i);
}
// Check that we drain the correct values
let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect();
let drained_expected: VecDeque<_> = (drain_start..drain_end).collect();
assert_eq!(drained, drained_expected);
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
// We should see the correct values in the VecDeque
let expected: VecDeque<_> = (0..drain_start)
.chain(drain_end..len)
.collect();
assert_eq!(expected, tester);
}
}
}
}
}
#[test]
fn test_shrink_to_fit() {
        // This test exercises every single combination of head and tail position.
        // Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// can't guarantee we got 15, so have to get what we got.
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
// this test isn't covering what it wants to
let cap = tester.capacity();
tester.reserve(63);
let max_cap = tester.capacity();
for len in 0..cap + 1 {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..max_cap + 1 {
tester.tail = tail_pos;
tester.head = tail_pos;
tester.reserve(63);
for i in 0..len {
tester.push_back(i);
}
tester.shrink_to_fit();
assert!(tester.capacity() <= cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
#[test]
fn test_split_off() {
        // This test exercises every single combination of tail position, length, and
        // split position. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// can't guarantee we got 15, so have to get what we got.
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
// this test isn't covering what it wants to
let cap = tester.capacity();
// len is the length *before* splitting
for len in 0..cap {
// index to split at
for at in 0..len + 1 {
// 0, 1, 2, .., at - 1 (may be empty)
let expected_self = (0..).take(at).collect::<VecDeque<_>>();
// at, at + 1, .., len - 1 (may be empty)
let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
tester.push_back(i);
}
let result = tester.split_off(at);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(result.tail < result.cap());
assert!(result.head < result.cap());
assert_eq!(tester, expected_self);
assert_eq!(result, expected_other);
}
}
}
}
#[test]
fn test_from_vec() {
use super::super::vec::Vec;
for cap in 0..35 {
for len in 0..cap + 1 {
let mut vec = Vec::with_capacity(cap);
vec.extend(0..len);
let vd = VecDeque::from(vec.clone());
assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
assert!(vd.into_iter().eq(vec));
}
}
}
#[test]
fn test_vec_from_vecdeque() {
use super::super::vec::Vec;
fn create_vec_and_test_convert(cap: usize, offset: usize, len: usize) {
let mut vd = VecDeque::with_capacity(cap);
for _ in 0..offset {
vd.push_back(0);
vd.pop_front();
}
vd.extend(0..len);
let vec: Vec<_> = Vec::from(vd.clone());
assert_eq!(vec.len(), vd.len());
assert!(vec.into_iter().eq(vd));
}
for cap_pwr in 0..7 {
            // Make the capacity 2^x - 1, so that the ring size is 2^x
let cap = (2i32.pow(cap_pwr) - 1) as usize;
// In these cases there is enough free space to solve it with copies
for len in 0..((cap + 1) / 2) {
// Test contiguous cases
for offset in 0..(cap - len) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at end of buffer is bigger than block at start
for offset in (cap - len)..(cap - (len / 2)) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at start of buffer is bigger than block at end
for offset in (cap - (len / 2))..cap {
create_vec_and_test_convert(cap, offset, len)
}
}
// Now there's not (necessarily) space to straighten the ring with simple copies,
// the ring will use swapping when:
// (cap + 1 - offset) > (cap + 1 - len) && (len - (cap + 1 - offset)) > (cap + 1 - len))
// right block size > free space && left block size > free space
for len in ((cap + 1) / 2)..cap {
// Test contiguous cases
for offset in 0..(cap - len) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at end of buffer is bigger than block at start
for offset in (cap - len)..(cap - (len / 2)) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at start of buffer is bigger than block at end
for offset in (cap - (len / 2))..cap {
create_vec_and_test_convert(cap, offset, len)
}
}
}
}
}
| 33.175025 | 100 | 0.4704 |
0a2b848dbcff05003e2695322c2cb783c4610225 | 12,151 | // Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use crate::device::DeviceOwned;
use crate::format::ClearValue;
use crate::framebuffer::FramebufferSys;
use crate::framebuffer::RenderPassDesc;
use crate::framebuffer::RenderPassSys;
use crate::image::view::ImageViewAbstract;
use crate::pipeline::shader::ShaderInterfaceDef;
use crate::SafeDeref;
/// Trait for objects that contain a Vulkan framebuffer object.
///
/// Any `Framebuffer` object implements this trait. You can therefore turn an `Arc<Framebuffer<_>>`
/// into an `Arc<FramebufferAbstract + Send + Sync>` for easier storage.
pub unsafe trait FramebufferAbstract: RenderPassAbstract {
/// Returns an opaque struct that represents the framebuffer's internals.
fn inner(&self) -> FramebufferSys;
/// Returns the width, height and array layers of the framebuffer.
fn dimensions(&self) -> [u32; 3];
/// Returns the attachment of the framebuffer with the given index.
///
/// If the `index` is not between `0` and `num_attachments`, then `None` should be returned.
fn attached_image_view(&self, index: usize) -> Option<&dyn ImageViewAbstract>;
/// Returns the width of the framebuffer in pixels.
#[inline]
fn width(&self) -> u32 {
self.dimensions()[0]
}
/// Returns the height of the framebuffer in pixels.
#[inline]
fn height(&self) -> u32 {
self.dimensions()[1]
}
/// Returns the number of layers (or depth) of the framebuffer.
#[inline]
fn layers(&self) -> u32 {
self.dimensions()[2]
}
}
unsafe impl<T> FramebufferAbstract for T
where
T: SafeDeref,
T::Target: FramebufferAbstract,
{
#[inline]
fn inner(&self) -> FramebufferSys {
FramebufferAbstract::inner(&**self)
}
#[inline]
fn dimensions(&self) -> [u32; 3] {
(**self).dimensions()
}
#[inline]
fn attached_image_view(&self, index: usize) -> Option<&dyn ImageViewAbstract> {
(**self).attached_image_view(index)
}
}
/// Trait for objects that contain a Vulkan render pass object.
///
/// Any `RenderPass` object implements this trait. You can therefore turn an `Arc<RenderPass<_>>`
/// into an `Arc<RenderPassAbstract + Send + Sync>` for easier storage.
///
/// The `Arc<RenderPassAbstract + Send + Sync>` accepts a `Vec<ClearValue>` for clear values and a
/// `Vec<Arc<ImageView + Send + Sync>>` for the list of attachments.
///
/// # Example
///
/// ```
/// use std::sync::Arc;
/// use vulkano::framebuffer::EmptySinglePassRenderPassDesc;
/// use vulkano::framebuffer::RenderPass;
/// use vulkano::framebuffer::RenderPassAbstract;
///
/// # let device: Arc<vulkano::device::Device> = return;
/// let render_pass = RenderPass::new(device.clone(), EmptySinglePassRenderPassDesc).unwrap();
///
/// // For easier storage, turn this render pass into an `Arc<RenderPassAbstract + Send + Sync>`.
/// let stored_rp = Arc::new(render_pass) as Arc<RenderPassAbstract + Send + Sync>;
/// ```
pub unsafe trait RenderPassAbstract: DeviceOwned + RenderPassDesc {
/// Returns an opaque object representing the render pass' internals.
///
/// # Safety
///
/// The trait implementation must return the same value every time.
fn inner(&self) -> RenderPassSys;
}
unsafe impl<T> RenderPassAbstract for T
where
T: SafeDeref,
T::Target: RenderPassAbstract,
{
#[inline]
fn inner(&self) -> RenderPassSys {
(**self).inner()
}
}
/// Extension trait for `RenderPassDesc`. Defines which types are allowed as a list of clear values.
///
/// When the user enters a render pass, they need to pass a list of clear values to apply to
/// the attachments of the framebuffer. To do so, the render pass object or the framebuffer
/// (depending on the function you use) must implement `RenderPassDescClearValues<C>` where `C` is
/// the parameter that the user passed. The trait method is then responsible for checking the
/// correctness of these values and turning them into a list that can be processed by vulkano.
pub unsafe trait RenderPassDescClearValues<C> {
/// Decodes a `C` into a list of clear values where each element corresponds
/// to an attachment. The size of the returned iterator must be the same as the number of
/// attachments.
///
/// The format of the clear value **must** match the format of the attachment. Attachments
/// that are not loaded with `LoadOp::Clear` must have an entry equal to `ClearValue::None`.
///
/// # Safety
///
/// This trait is unsafe because vulkano doesn't check whether the clear value is in a format
/// that matches the attachment.
///
// TODO: meh for boxing
fn convert_clear_values(&self, vals: C) -> Box<dyn Iterator<Item = ClearValue>>;
}
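// A minimal sketch (added for illustration, not part of the original source): a
// hypothetical single-attachment description that accepts exactly one clear
// value and forwards it unchanged.
#[cfg(test)]
mod clear_values_sketch {
    use super::{ClearValue, RenderPassDescClearValues};
    struct SingleAttachmentDesc;
    unsafe impl RenderPassDescClearValues<ClearValue> for SingleAttachmentDesc {
        fn convert_clear_values(&self, val: ClearValue) -> Box<dyn Iterator<Item = ClearValue>> {
            Box::new(std::iter::once(val))
        }
    }
}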
unsafe impl<T, C> RenderPassDescClearValues<C> for T
where
T: SafeDeref,
T::Target: RenderPassDescClearValues<C>,
{
#[inline]
fn convert_clear_values(&self, vals: C) -> Box<dyn Iterator<Item = ClearValue>> {
(**self).convert_clear_values(vals)
}
}
/// Extension trait for `RenderPassDesc` that checks whether a subpass of this render pass accepts
/// the output of a fragment shader.
///
/// The trait is automatically implemented for all types that implement `RenderPassDesc`, for any
/// fragment output definition that implements `ShaderInterfaceDef`.
///
/// > **Note**: This trait exists so that you can specialize it once specialization lands in Rust.
// TODO: once specialization lands, this trait can be specialized for pairs that are known to
// always be compatible
pub unsafe trait RenderPassSubpassInterface<Other: ?Sized>: RenderPassDesc
where
Other: ShaderInterfaceDef,
{
/// Returns `true` if this subpass is compatible with the fragment output definition.
/// Also returns `false` if the subpass is out of range.
// TODO: return proper error
fn is_compatible_with(&self, subpass: u32, other: &Other) -> bool;
}
unsafe impl<A, B: ?Sized> RenderPassSubpassInterface<B> for A
where
A: RenderPassDesc,
B: ShaderInterfaceDef,
{
fn is_compatible_with(&self, subpass: u32, other: &B) -> bool {
let pass_descr = match RenderPassDesc::subpass_descs(self)
.skip(subpass as usize)
.next()
{
Some(s) => s,
None => return false,
};
for element in other.elements() {
for location in element.location.clone() {
let attachment_id = match pass_descr.color_attachments.get(location as usize) {
Some(a) => a.0,
None => return false,
};
let attachment_desc = (&self)
.attachment_descs()
.skip(attachment_id)
.next()
.unwrap();
// FIXME: compare formats depending on the number of components and data type
/*if attachment_desc.format != element.format {
return false;
}*/
}
}
true
}
}
/// Trait implemented on render pass objects to check whether they are compatible
/// with another render pass.
///
/// The trait is automatically implemented for all types that implement `RenderPassDesc`.
///
/// > **Note**: This trait exists so that you can specialize it once specialization lands in Rust.
// TODO: once specialization lands, this trait can be specialized for pairs that are known to
// always be compatible
// TODO: maybe this can be unimplemented on some pairs, to provide compile-time checks?
pub unsafe trait RenderPassCompatible<Other: ?Sized>: RenderPassDesc
where
Other: RenderPassDesc,
{
/// Returns `true` if this layout is compatible with the other layout, as defined in the
/// `Render Pass Compatibility` section of the Vulkan specs.
// TODO: return proper error
fn is_compatible_with(&self, other: &Other) -> bool;
}
unsafe impl<A: ?Sized, B: ?Sized> RenderPassCompatible<B> for A
where
A: RenderPassDesc,
B: RenderPassDesc,
{
fn is_compatible_with(&self, other: &B) -> bool {
if self.num_attachments() != other.num_attachments() {
return false;
}
for atch_num in 0..self.num_attachments() {
let my_atch = self.attachment_desc(atch_num).unwrap();
let other_atch = other.attachment_desc(atch_num).unwrap();
if !my_atch.is_compatible_with(&other_atch) {
return false;
}
}
return true;
// FIXME: finish
}
}
/// Represents a subpass within a `RenderPassAbstract` object.
///
/// This struct doesn't correspond to anything in Vulkan. It is simply equivalent to a tuple of
/// a render pass and a subpass index. Contrary to a tuple, however, the existence of the
/// subpass is checked when the object is created. When you have a `Subpass` you are guaranteed
/// that the given subpass does exist.
#[derive(Debug, Copy, Clone)]
pub struct Subpass<L> {
render_pass: L,
subpass_id: u32,
}
impl<L> Subpass<L>
where
L: RenderPassDesc,
{
/// Returns a handle that represents a subpass of a render pass.
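    ///
    /// # Example
    ///
    /// A minimal sketch; any value whose type implements `RenderPassDesc` can be passed here.
    ///
    /// ```
    /// use std::sync::Arc;
    /// use vulkano::framebuffer::RenderPassAbstract;
    /// use vulkano::framebuffer::Subpass;
    ///
    /// # let render_pass: Arc<RenderPassAbstract + Send + Sync> = return;
    /// let subpass = Subpass::from(render_pass, 0).expect("render pass has no subpass #0");
    /// assert_eq!(subpass.index(), 0);
    /// ```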
#[inline]
pub fn from(render_pass: L, id: u32) -> Option<Subpass<L>> {
if (id as usize) < render_pass.num_subpasses() {
Some(Subpass {
render_pass,
subpass_id: id,
})
} else {
None
}
}
/// Returns the number of color attachments in this subpass.
#[inline]
pub fn num_color_attachments(&self) -> u32 {
self.render_pass
.num_color_attachments(self.subpass_id)
.unwrap()
}
/// Returns true if the subpass has a depth attachment or a depth-stencil attachment.
#[inline]
pub fn has_depth(&self) -> bool {
self.render_pass.has_depth(self.subpass_id).unwrap()
}
/// Returns true if the subpass has a depth attachment or a depth-stencil attachment whose
/// layout is not `DepthStencilReadOnlyOptimal`.
#[inline]
pub fn has_writable_depth(&self) -> bool {
self.render_pass
.has_writable_depth(self.subpass_id)
.unwrap()
}
/// Returns true if the subpass has a stencil attachment or a depth-stencil attachment.
#[inline]
pub fn has_stencil(&self) -> bool {
self.render_pass.has_stencil(self.subpass_id).unwrap()
}
/// Returns true if the subpass has a stencil attachment or a depth-stencil attachment whose
/// layout is not `DepthStencilReadOnlyOptimal`.
#[inline]
pub fn has_writable_stencil(&self) -> bool {
self.render_pass
.has_writable_stencil(self.subpass_id)
.unwrap()
}
/// Returns true if the subpass has any color or depth/stencil attachment.
#[inline]
pub fn has_color_or_depth_stencil_attachment(&self) -> bool {
self.num_color_attachments() >= 1
|| self
.render_pass
.has_depth_stencil_attachment(self.subpass_id)
.unwrap()
!= (false, false)
}
/// Returns the number of samples in the color and/or depth/stencil attachments. Returns `None`
/// if there is no such attachment in this subpass.
#[inline]
pub fn num_samples(&self) -> Option<u32> {
self.render_pass.num_samples(self.subpass_id)
}
}
impl<L> Subpass<L> {
/// Returns the render pass of this subpass.
#[inline]
pub fn render_pass(&self) -> &L {
&self.render_pass
}
    /// Returns the index of this subpass within the render pass.
#[inline]
pub fn index(&self) -> u32 {
self.subpass_id
}
}
impl<L> Into<(L, u32)> for Subpass<L> {
#[inline]
fn into(self) -> (L, u32) {
(self.render_pass, self.subpass_id)
}
}
| 33.752778 | 100 | 0.649165 |
e4ea8099d51202baf98e494bf5630de0115466b4 | 5,599 | //! The [glutin] windowing implementation for [luminance-windowing].
//!
//! [glutin]: https://crates.io/crates/glutin
//! [luminance-windowing]: https://crates.io/crates/luminance-windowing
#![deny(missing_docs)]
use gl;
use glutin::{
event_loop::EventLoop, window::WindowBuilder, Api, ContextBuilder, ContextError, CreationError,
GlProfile, GlRequest, NotCurrent, PossiblyCurrent, WindowedContext,
};
use luminance::context::GraphicsContext;
use luminance::framebuffer::{Framebuffer, FramebufferError};
use luminance::texture::Dim2;
pub use luminance_gl::gl33::StateQueryError;
use luminance_gl::GL33;
use std::error;
use std::fmt;
use std::os::raw::c_void;
/// Error that might occur when creating a Glutin surface.
#[derive(Debug)]
pub enum GlutinError {
/// Something went wrong when creating the Glutin surface. The carried [`CreationError`] provides
/// more information.
CreationError(CreationError),
/// OpenGL context error.
ContextError(ContextError),
/// Graphics state error that might occur when querying the initial state.
GraphicsStateError(StateQueryError),
}
impl fmt::Display for GlutinError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
GlutinError::CreationError(ref e) => write!(f, "Glutin surface creation error: {}", e),
GlutinError::ContextError(ref e) => write!(f, "Glutin OpenGL context creation error: {}", e),
GlutinError::GraphicsStateError(ref e) => {
write!(f, "OpenGL graphics state initialization error: {}", e)
}
}
}
}
impl error::Error for GlutinError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
GlutinError::CreationError(e) => Some(e),
GlutinError::ContextError(e) => Some(e),
GlutinError::GraphicsStateError(e) => Some(e),
}
}
}
impl From<CreationError> for GlutinError {
fn from(e: CreationError) -> Self {
GlutinError::CreationError(e)
}
}
impl From<ContextError> for GlutinError {
fn from(e: ContextError) -> Self {
GlutinError::ContextError(e)
}
}
impl From<StateQueryError> for GlutinError {
fn from(e: StateQueryError) -> Self {
GlutinError::GraphicsStateError(e)
}
}
/// The Glutin surface.
///
/// You want to create such an object in order to use any [luminance] construct.
///
/// [luminance]: https://crates.io/crates/luminance
pub struct GlutinSurface {
/// The windowed context.
pub ctx: WindowedContext<PossiblyCurrent>,
/// OpenGL 3.3 state.
gl: GL33,
}
unsafe impl GraphicsContext for GlutinSurface {
type Backend = GL33;
fn backend(&mut self) -> &mut Self::Backend {
&mut self.gl
}
}
impl GlutinSurface {
/// Create a new [`GlutinSurface`] by consuming a [`WindowBuilder`].
///
  /// This is an alternative to [`new_gl33`] that is more flexible, as it gives you access to
  /// the underlying `glutin` builder types.
///
  /// Your `window_builder` closure receives a default `WindowBuilder`, and your `ctx_builder`
  /// closure receives a `ContextBuilder` already initialized for the OpenGL context (you’re not
  /// supposed to change its GL settings!).
pub fn new_gl33_from_builders<'a, WB, CB>(
window_builder: WB,
ctx_builder: CB,
) -> Result<(Self, EventLoop<()>), GlutinError>
where
WB: FnOnce(&mut EventLoop<()>, WindowBuilder) -> WindowBuilder,
CB:
FnOnce(&mut EventLoop<()>, ContextBuilder<'a, NotCurrent>) -> ContextBuilder<'a, NotCurrent>,
{
let mut event_loop = EventLoop::new();
let window_builder = window_builder(&mut event_loop, WindowBuilder::new());
let windowed_ctx = ctx_builder(
&mut event_loop,
ContextBuilder::new()
.with_gl(GlRequest::Specific(Api::OpenGl, (3, 3)))
.with_gl_profile(GlProfile::Core),
)
.build_windowed(window_builder, &event_loop)?;
let ctx = unsafe { windowed_ctx.make_current().map_err(|(_, e)| e)? };
// init OpenGL
gl::load_with(|s| ctx.get_proc_address(s) as *const c_void);
ctx.window().set_visible(true);
let gl = GL33::new().map_err(GlutinError::GraphicsStateError)?;
let surface = GlutinSurface { ctx, gl };
Ok((surface, event_loop))
}
/// Create a new [`GlutinSurface`] from scratch.
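  ///
  /// # Example
  ///
  /// A minimal sketch (window title and sample count are arbitrary):
  ///
  /// ```no_run
  /// use glutin::window::WindowBuilder;
  /// use luminance_glutin::GlutinSurface;
  ///
  /// let (mut surface, _event_loop) =
  ///   GlutinSurface::new_gl33(WindowBuilder::new().with_title("example"), 4)
  ///     .expect("could not create the glutin surface");
  /// let _back_buffer = surface.back_buffer().expect("back buffer");
  /// ```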
pub fn new_gl33(
window_builder: WindowBuilder,
samples: u16,
) -> Result<(Self, EventLoop<()>), GlutinError> {
let event_loop = EventLoop::new();
let windowed_ctx = ContextBuilder::new()
.with_gl(GlRequest::Specific(Api::OpenGl, (3, 3)))
.with_gl_profile(GlProfile::Core)
.with_multisampling(samples)
.with_double_buffer(Some(true))
.build_windowed(window_builder, &event_loop)?;
let ctx = unsafe { windowed_ctx.make_current().map_err(|(_, e)| e)? };
// init OpenGL
gl::load_with(|s| ctx.get_proc_address(s) as *const c_void);
ctx.window().set_visible(true);
let gl = GL33::new().map_err(GlutinError::GraphicsStateError)?;
let surface = GlutinSurface { ctx, gl };
Ok((surface, event_loop))
}
/// Get the underlying size (in physical pixels) of the surface.
///
  /// This is the inner size of the windowed context, which is already expressed in physical
  /// pixels.
pub fn size(&self) -> [u32; 2] {
let size = self.ctx.window().inner_size();
[size.width, size.height]
}
/// Get access to the back buffer.
pub fn back_buffer(&mut self) -> Result<Framebuffer<GL33, Dim2, (), ()>, FramebufferError> {
Framebuffer::back_buffer(self, self.size())
}
/// Swap the back and front buffers.
pub fn swap_buffers(&mut self) {
let _ = self.ctx.swap_buffers();
}
}
| 30.763736 | 99 | 0.675656 |
1efc6e6ea60bf30406085fc7f7f70d70e9876f82 | 1,647 | use crate::net::graphql::volatile_village_info_query;
use crate::prelude::*;
use paddlers_shared_lib::api::shop::*;
#[derive(Default, Debug, Clone, Copy)]
pub struct TownResources {
feathers: i64,
sticks: i64,
logs: i64,
}
impl TownResources {
pub fn read(&self, rt: ResourceType) -> i64 {
match rt {
ResourceType::Feathers => self.feathers,
ResourceType::Sticks => self.sticks,
ResourceType::Logs => self.logs,
}
}
fn write(&mut self, rt: ResourceType) -> &mut i64 {
match rt {
ResourceType::Feathers => &mut self.feathers,
ResourceType::Sticks => &mut self.sticks,
ResourceType::Logs => &mut self.logs,
}
}
pub fn update(&mut self, data: volatile_village_info_query::ResponseData) {
self.feathers = data.village.feathers;
self.sticks = data.village.sticks;
self.logs = data.village.logs;
}
pub fn non_zero_resources(&self) -> Vec<(ResourceType, i64)> {
use paddlers_shared_lib::strum::IntoEnumIterator;
ResourceType::iter()
.map(|rt| (rt, self.read(rt)))
.filter(|t| t.1 > 0)
.collect()
}
fn spend_res(&mut self, rt: ResourceType, amount: i64) {
*self.write(rt) -= amount;
}
pub fn spend(&mut self, p: &Price) {
for (rt, n) in p.0.iter() {
self.spend_res(*rt, *n);
}
}
pub fn can_afford(&self, p: &Price) -> bool {
for (rt, n) in p.0.iter() {
if self.read(*rt) < *n {
return false;
}
}
true
}
}
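// Illustrative sketch (not part of the original source). It assumes `Price`
// wraps a list of `(ResourceType, i64)` pairs, which matches how `p.0` is
// iterated above; the concrete constructor may differ.
#[cfg(test)]
mod town_resources_sketch {
    use super::*;
    #[test]
    fn affordability_and_spending() {
        let mut res = TownResources::default();
        res.feathers = 10;
        res.sticks = 2;
        let price = Price(vec![(ResourceType::Feathers, 5), (ResourceType::Sticks, 2)]);
        assert!(res.can_afford(&price));
        res.spend(&price);
        assert_eq!(res.read(ResourceType::Feathers), 5);
        assert_eq!(res.read(ResourceType::Sticks), 0);
        assert!(!res.can_afford(&price));
    }
}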
| 29.410714 | 79 | 0.545841 |
217abe17eada227968778f3a17f1d5925b5bed85 | 35,472 | #[derive(Clone, PartialEq, ::prost::Message)]
pub struct PubResponse {
#[prost(string, tag="1")]
pub r#pub: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RootFingerprintRequest {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RootFingerprintResponse {
#[prost(bytes="vec", tag="1")]
pub fingerprint: ::prost::alloc::vec::Vec<u8>,
}
/// See https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki.
/// The version field is dropped as it will be set dynamically based on the context (xpub, ypub, etc.).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct XPub {
#[prost(bytes="vec", tag="1")]
pub depth: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes="vec", tag="2")]
pub parent_fingerprint: ::prost::alloc::vec::Vec<u8>,
#[prost(uint32, tag="3")]
pub child_num: u32,
#[prost(bytes="vec", tag="4")]
pub chain_code: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes="vec", tag="5")]
pub public_key: ::prost::alloc::vec::Vec<u8>,
}
/// This message exists for use in oneof or repeated fields, where one can't inline `repeated uint32` due to protobuf rules.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Keypath {
#[prost(uint32, repeated, tag="1")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CheckBackupRequest {
#[prost(bool, tag="1")]
pub silent: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CheckBackupResponse {
#[prost(string, tag="1")]
pub id: ::prost::alloc::string::String,
}
/// Timestamp must be in UTC
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateBackupRequest {
#[prost(uint32, tag="1")]
pub timestamp: u32,
#[prost(int32, tag="2")]
pub timezone_offset: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListBackupsRequest {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BackupInfo {
#[prost(string, tag="1")]
pub id: ::prost::alloc::string::String,
#[prost(uint32, tag="2")]
pub timestamp: u32,
/// uint32 timezone_offset = 3;
#[prost(string, tag="4")]
pub name: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListBackupsResponse {
#[prost(message, repeated, tag="1")]
pub info: ::prost::alloc::vec::Vec<BackupInfo>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RestoreBackupRequest {
#[prost(string, tag="1")]
pub id: ::prost::alloc::string::String,
#[prost(uint32, tag="2")]
pub timestamp: u32,
#[prost(int32, tag="3")]
pub timezone_offset: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CheckSdCardRequest {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CheckSdCardResponse {
#[prost(bool, tag="1")]
pub inserted: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeviceInfoRequest {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeviceInfoResponse {
#[prost(string, tag="1")]
pub name: ::prost::alloc::string::String,
#[prost(bool, tag="2")]
pub initialized: bool,
#[prost(string, tag="3")]
pub version: ::prost::alloc::string::String,
#[prost(bool, tag="4")]
pub mnemonic_passphrase_enabled: bool,
#[prost(uint32, tag="5")]
pub monotonic_increments_remaining: u32,
/// From v9.6.0: "ATECC608A" or "ATECC608B".
#[prost(string, tag="6")]
pub securechip_model: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InsertRemoveSdCardRequest {
#[prost(enumeration="insert_remove_sd_card_request::SdCardAction", tag="1")]
pub action: i32,
}
/// Nested message and enum types in `InsertRemoveSDCardRequest`.
pub mod insert_remove_sd_card_request {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum SdCardAction {
RemoveCard = 0,
InsertCard = 1,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ResetRequest {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SetDeviceLanguageRequest {
#[prost(string, tag="1")]
pub language: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SetDeviceNameRequest {
#[prost(string, tag="1")]
pub name: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SetPasswordRequest {
#[prost(bytes="vec", tag="1")]
pub entropy: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AntiKleptoHostNonceCommitment {
#[prost(bytes="vec", tag="1")]
pub commitment: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AntiKleptoSignerCommitment {
#[prost(bytes="vec", tag="1")]
pub commitment: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AntiKleptoSignatureRequest {
#[prost(bytes="vec", tag="1")]
pub host_nonce: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcScriptConfig {
#[prost(oneof="btc_script_config::Config", tags="1, 2")]
pub config: ::core::option::Option<btc_script_config::Config>,
}
/// Nested message and enum types in `BTCScriptConfig`.
pub mod btc_script_config {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Multisig {
#[prost(uint32, tag="1")]
pub threshold: u32,
        /// xpubs are account-level xpubs. Addresses are going to be derived from them using: m/<change>/<receive>.
/// The number of xpubs defines the number of cosigners.
#[prost(message, repeated, tag="2")]
pub xpubs: ::prost::alloc::vec::Vec<super::XPub>,
/// Index to the xpub of our keystore in xpubs. The keypath to it is provided via
/// BTCPubRequest/BTCSignInit.
#[prost(uint32, tag="3")]
pub our_xpub_index: u32,
#[prost(enumeration="multisig::ScriptType", tag="4")]
pub script_type: i32,
}
/// Nested message and enum types in `Multisig`.
pub mod multisig {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ScriptType {
/// native segwit v0 multisig (bech32 addresses)
P2wsh = 0,
/// wrapped segwit for legacy address compatibility
P2wshP2sh = 1,
}
}
/// SimpleType is a "simple" script: one public key, no additional inputs.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum SimpleType {
P2wpkhP2sh = 0,
P2wpkh = 1,
}
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Config {
#[prost(enumeration="SimpleType", tag="1")]
SimpleType(i32),
#[prost(message, tag="2")]
Multisig(Multisig),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcPubRequest {
#[prost(enumeration="BtcCoin", tag="1")]
pub coin: i32,
#[prost(uint32, repeated, tag="2")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
#[prost(bool, tag="5")]
pub display: bool,
#[prost(oneof="btc_pub_request::Output", tags="3, 4")]
pub output: ::core::option::Option<btc_pub_request::Output>,
}
/// Nested message and enum types in `BTCPubRequest`.
pub mod btc_pub_request {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum XPubType {
Tpub = 0,
Xpub = 1,
Ypub = 2,
/// zpub
Zpub = 3,
/// vpub
Vpub = 4,
Upub = 5,
/// Vpub
CapitalVpub = 6,
/// Zpub
CapitalZpub = 7,
/// Upub
CapitalUpub = 8,
/// Ypub
CapitalYpub = 9,
}
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Output {
#[prost(enumeration="XPubType", tag="3")]
XpubType(i32),
#[prost(message, tag="4")]
ScriptConfig(super::BtcScriptConfig),
}
}
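// Illustrative sketch (not part of the generated code): assembling a request
// for a BIP-84 style keypath. `HARDENED` is a local helper constant, not
// something defined in this file.
#[cfg(test)]
mod btc_pub_request_sketch {
    use super::*;
    #[test]
    fn build_zpub_request() {
        const HARDENED: u32 = 0x8000_0000;
        let req = BtcPubRequest {
            coin: BtcCoin::Btc as i32,
            keypath: vec![84 + HARDENED, HARDENED, HARDENED],
            display: false,
            output: Some(btc_pub_request::Output::XpubType(
                btc_pub_request::XPubType::Zpub as i32,
            )),
        };
        assert_eq!(req.keypath.len(), 3);
    }
}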
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcScriptConfigWithKeypath {
#[prost(message, optional, tag="2")]
pub script_config: ::core::option::Option<BtcScriptConfig>,
#[prost(uint32, repeated, tag="3")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcSignInitRequest {
#[prost(enumeration="BtcCoin", tag="1")]
pub coin: i32,
/// used script configs in inputs and changes
#[prost(message, repeated, tag="2")]
pub script_configs: ::prost::alloc::vec::Vec<BtcScriptConfigWithKeypath>,
/// must be 1 or 2
#[prost(uint32, tag="4")]
pub version: u32,
#[prost(uint32, tag="5")]
pub num_inputs: u32,
#[prost(uint32, tag="6")]
pub num_outputs: u32,
/// must be <500000000
#[prost(uint32, tag="7")]
pub locktime: u32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcSignNextResponse {
#[prost(enumeration="btc_sign_next_response::Type", tag="1")]
pub r#type: i32,
/// index of the current input or output
#[prost(uint32, tag="2")]
pub index: u32,
/// only as a response to BTCSignInputRequest
#[prost(bool, tag="3")]
pub has_signature: bool,
/// 64 bytes (32 bytes big endian R, 32 bytes big endian S). Only if has_signature is true.
#[prost(bytes="vec", tag="4")]
pub signature: ::prost::alloc::vec::Vec<u8>,
/// Previous tx's input/output index in case of PREV_INPUT or PREV_OUTPUT, for the input at `index`.
#[prost(uint32, tag="5")]
pub prev_index: u32,
#[prost(message, optional, tag="6")]
pub anti_klepto_signer_commitment: ::core::option::Option<AntiKleptoSignerCommitment>,
}
/// Nested message and enum types in `BTCSignNextResponse`.
pub mod btc_sign_next_response {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Type {
Input = 0,
Output = 1,
Done = 2,
/// For the previous transaction at input `index`.
PrevtxInit = 3,
PrevtxInput = 4,
PrevtxOutput = 5,
HostNonce = 6,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcSignInputRequest {
#[prost(bytes="vec", tag="1")]
pub prev_out_hash: ::prost::alloc::vec::Vec<u8>,
#[prost(uint32, tag="2")]
pub prev_out_index: u32,
#[prost(uint64, tag="3")]
pub prev_out_value: u64,
/// must be 0xffffffff-2, 0xffffffff-1 or 0xffffffff
#[prost(uint32, tag="4")]
pub sequence: u32,
/// all inputs must be ours.
#[prost(uint32, repeated, tag="6")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
/// References a script config from BTCSignInitRequest
#[prost(uint32, tag="7")]
pub script_config_index: u32,
#[prost(message, optional, tag="8")]
pub host_nonce_commitment: ::core::option::Option<AntiKleptoHostNonceCommitment>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcSignOutputRequest {
#[prost(bool, tag="1")]
pub ours: bool,
/// if ours is false
#[prost(enumeration="BtcOutputType", tag="2")]
pub r#type: i32,
    /// 20 bytes for p2pkh, p2sh, p2wpkh. 32 bytes for p2wsh.
#[prost(uint64, tag="3")]
pub value: u64,
/// if ours is false
#[prost(bytes="vec", tag="4")]
pub hash: ::prost::alloc::vec::Vec<u8>,
/// if ours is true
#[prost(uint32, repeated, tag="5")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
/// If ours is true. References a script config from BTCSignInitRequest
#[prost(uint32, tag="6")]
pub script_config_index: u32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcScriptConfigRegistration {
#[prost(enumeration="BtcCoin", tag="1")]
pub coin: i32,
#[prost(message, optional, tag="2")]
pub script_config: ::core::option::Option<BtcScriptConfig>,
#[prost(uint32, repeated, tag="3")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcSuccess {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcIsScriptConfigRegisteredRequest {
#[prost(message, optional, tag="1")]
pub registration: ::core::option::Option<BtcScriptConfigRegistration>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcIsScriptConfigRegisteredResponse {
#[prost(bool, tag="1")]
pub is_registered: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcRegisterScriptConfigRequest {
#[prost(message, optional, tag="1")]
pub registration: ::core::option::Option<BtcScriptConfigRegistration>,
/// If empty, the name is entered on the device instead.
#[prost(string, tag="2")]
pub name: ::prost::alloc::string::String,
#[prost(enumeration="btc_register_script_config_request::XPubType", tag="3")]
pub xpub_type: i32,
}
/// Nested message and enum types in `BTCRegisterScriptConfigRequest`.
pub mod btc_register_script_config_request {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum XPubType {
/// Automatically choose to match Electrum's xpub format (e.g. Zpub/Vpub for p2wsh multisig mainnet/testnet).
AutoElectrum = 0,
/// Always xpub for mainnets, tpub for testnets.
AutoXpubTpub = 1,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcPrevTxInitRequest {
#[prost(uint32, tag="1")]
pub version: u32,
#[prost(uint32, tag="2")]
pub num_inputs: u32,
#[prost(uint32, tag="3")]
pub num_outputs: u32,
#[prost(uint32, tag="4")]
pub locktime: u32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcPrevTxInputRequest {
#[prost(bytes="vec", tag="1")]
pub prev_out_hash: ::prost::alloc::vec::Vec<u8>,
#[prost(uint32, tag="2")]
pub prev_out_index: u32,
#[prost(bytes="vec", tag="3")]
pub signature_script: ::prost::alloc::vec::Vec<u8>,
#[prost(uint32, tag="4")]
pub sequence: u32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcPrevTxOutputRequest {
#[prost(uint64, tag="1")]
pub value: u64,
#[prost(bytes="vec", tag="2")]
pub pubkey_script: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcSignMessageRequest {
#[prost(enumeration="BtcCoin", tag="1")]
pub coin: i32,
#[prost(message, optional, tag="2")]
pub script_config: ::core::option::Option<BtcScriptConfigWithKeypath>,
#[prost(bytes="vec", tag="3")]
pub msg: ::prost::alloc::vec::Vec<u8>,
#[prost(message, optional, tag="4")]
pub host_nonce_commitment: ::core::option::Option<AntiKleptoHostNonceCommitment>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcSignMessageResponse {
/// 65 bytes (32 bytes big endian R, 32 bytes big endian S, 1 recid).
#[prost(bytes="vec", tag="1")]
pub signature: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcRequest {
#[prost(oneof="btc_request::Request", tags="1, 2, 3, 4, 5, 6, 7")]
pub request: ::core::option::Option<btc_request::Request>,
}
/// Nested message and enum types in `BTCRequest`.
pub mod btc_request {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Request {
#[prost(message, tag="1")]
IsScriptConfigRegistered(super::BtcIsScriptConfigRegisteredRequest),
#[prost(message, tag="2")]
RegisterScriptConfig(super::BtcRegisterScriptConfigRequest),
#[prost(message, tag="3")]
PrevtxInit(super::BtcPrevTxInitRequest),
#[prost(message, tag="4")]
PrevtxInput(super::BtcPrevTxInputRequest),
#[prost(message, tag="5")]
PrevtxOutput(super::BtcPrevTxOutputRequest),
#[prost(message, tag="6")]
SignMessage(super::BtcSignMessageRequest),
#[prost(message, tag="7")]
AntikleptoSignature(super::AntiKleptoSignatureRequest),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BtcResponse {
#[prost(oneof="btc_response::Response", tags="1, 2, 3, 4, 5")]
pub response: ::core::option::Option<btc_response::Response>,
}
/// Nested message and enum types in `BTCResponse`.
pub mod btc_response {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Response {
#[prost(message, tag="1")]
Success(super::BtcSuccess),
#[prost(message, tag="2")]
IsScriptConfigRegistered(super::BtcIsScriptConfigRegisteredResponse),
#[prost(message, tag="3")]
SignNext(super::BtcSignNextResponse),
#[prost(message, tag="4")]
SignMessage(super::BtcSignMessageResponse),
#[prost(message, tag="5")]
AntikleptoSignerCommitment(super::AntiKleptoSignerCommitment),
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum BtcCoin {
Btc = 0,
Tbtc = 1,
Ltc = 2,
Tltc = 3,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum BtcOutputType {
Unknown = 0,
P2pkh = 1,
P2sh = 2,
P2wpkh = 3,
P2wsh = 4,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CardanoXpubsRequest {
#[prost(message, repeated, tag="1")]
pub keypaths: ::prost::alloc::vec::Vec<Keypath>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CardanoXpubsResponse {
#[prost(bytes="vec", repeated, tag="1")]
pub xpubs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CardanoScriptConfig {
/// Entries correspond to address types as described in:
/// https://github.com/cardano-foundation/CIPs/blob/6c249ef48f8f5b32efc0ec768fadf4321f3173f2/CIP-0019/CIP-0019.md
/// See also:
/// https://github.com/input-output-hk/cardano-ledger-specs/blob/d0aa86ded0b973b09b629e5aa62aa1e71364d088/eras/alonzo/test-suite/cddl-files/alonzo.cddl#L137
#[prost(oneof="cardano_script_config::Config", tags="1")]
pub config: ::core::option::Option<cardano_script_config::Config>,
}
/// Nested message and enum types in `CardanoScriptConfig`.
pub mod cardano_script_config {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PkhSkh {
#[prost(uint32, repeated, tag="1")]
pub keypath_payment: ::prost::alloc::vec::Vec<u32>,
#[prost(uint32, repeated, tag="2")]
pub keypath_stake: ::prost::alloc::vec::Vec<u32>,
}
/// Entries correspond to address types as described in:
/// https://github.com/cardano-foundation/CIPs/blob/6c249ef48f8f5b32efc0ec768fadf4321f3173f2/CIP-0019/CIP-0019.md
/// See also:
/// https://github.com/input-output-hk/cardano-ledger-specs/blob/d0aa86ded0b973b09b629e5aa62aa1e71364d088/eras/alonzo/test-suite/cddl-files/alonzo.cddl#L137
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Config {
/// Shelley PaymentKeyHash & StakeKeyHash
#[prost(message, tag="1")]
PkhSkh(PkhSkh),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CardanoAddressRequest {
#[prost(enumeration="CardanoNetwork", tag="1")]
pub network: i32,
#[prost(bool, tag="2")]
pub display: bool,
#[prost(message, optional, tag="3")]
pub script_config: ::core::option::Option<CardanoScriptConfig>,
}
/// Max allowed transaction size is 16384 bytes according to
/// https://github.com/cardano-foundation/CIPs/blob/master/CIP-0009/CIP-0009.md. Unlike with BTC, we
/// can fit the whole request in RAM and don't need to stream.
///
/// See also: https://github.com/input-output-hk/cardano-ledger-specs/blob/d0aa86ded0b973b09b629e5aa62aa1e71364d088/eras/alonzo/test-suite/cddl-files/alonzo.cddl#L50
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CardanoSignTransactionRequest {
#[prost(enumeration="CardanoNetwork", tag="1")]
pub network: i32,
#[prost(message, repeated, tag="2")]
pub inputs: ::prost::alloc::vec::Vec<cardano_sign_transaction_request::Input>,
#[prost(message, repeated, tag="3")]
pub outputs: ::prost::alloc::vec::Vec<cardano_sign_transaction_request::Output>,
#[prost(uint64, tag="4")]
pub fee: u64,
#[prost(uint64, tag="5")]
pub ttl: u64,
#[prost(message, repeated, tag="6")]
pub certificates: ::prost::alloc::vec::Vec<cardano_sign_transaction_request::Certificate>,
#[prost(message, repeated, tag="7")]
pub withdrawals: ::prost::alloc::vec::Vec<cardano_sign_transaction_request::Withdrawal>,
#[prost(uint64, tag="8")]
pub validity_interval_start: u64,
}
/// Nested message and enum types in `CardanoSignTransactionRequest`.
pub mod cardano_sign_transaction_request {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Input {
#[prost(uint32, repeated, tag="1")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
#[prost(bytes="vec", tag="2")]
pub prev_out_hash: ::prost::alloc::vec::Vec<u8>,
#[prost(uint32, tag="3")]
pub prev_out_index: u32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Output {
#[prost(string, tag="1")]
pub encoded_address: ::prost::alloc::string::String,
#[prost(uint64, tag="2")]
pub value: u64,
/// Optional. If provided, this is validated as a change output.
#[prost(message, optional, tag="3")]
pub script_config: ::core::option::Option<super::CardanoScriptConfig>,
}
/// See https://github.com/input-output-hk/cardano-ledger-specs/blob/d0aa86ded0b973b09b629e5aa62aa1e71364d088/eras/alonzo/test-suite/cddl-files/alonzo.cddl#L150
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Certificate {
#[prost(oneof="certificate::Cert", tags="1, 2, 3")]
pub cert: ::core::option::Option<certificate::Cert>,
}
/// Nested message and enum types in `Certificate`.
pub mod certificate {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StakeDelegation {
#[prost(uint32, repeated, tag="1")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
#[prost(bytes="vec", tag="2")]
pub pool_keyhash: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Cert {
#[prost(message, tag="1")]
StakeRegistration(super::super::Keypath),
#[prost(message, tag="2")]
StakeDeregistration(super::super::Keypath),
#[prost(message, tag="3")]
StakeDelegation(StakeDelegation),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Withdrawal {
#[prost(uint32, repeated, tag="1")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
#[prost(uint64, tag="2")]
pub value: u64,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CardanoSignTransactionResponse {
#[prost(message, repeated, tag="1")]
pub shelley_witnesses: ::prost::alloc::vec::Vec<cardano_sign_transaction_response::ShelleyWitness>,
}
/// Nested message and enum types in `CardanoSignTransactionResponse`.
pub mod cardano_sign_transaction_response {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ShelleyWitness {
#[prost(bytes="vec", tag="1")]
pub public_key: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes="vec", tag="2")]
pub signature: ::prost::alloc::vec::Vec<u8>,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CardanoRequest {
#[prost(oneof="cardano_request::Request", tags="1, 2, 3")]
pub request: ::core::option::Option<cardano_request::Request>,
}
/// Nested message and enum types in `CardanoRequest`.
pub mod cardano_request {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Request {
#[prost(message, tag="1")]
Xpubs(super::CardanoXpubsRequest),
#[prost(message, tag="2")]
Address(super::CardanoAddressRequest),
#[prost(message, tag="3")]
SignTransaction(super::CardanoSignTransactionRequest),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CardanoResponse {
#[prost(oneof="cardano_response::Response", tags="1, 2, 3")]
pub response: ::core::option::Option<cardano_response::Response>,
}
/// Nested message and enum types in `CardanoResponse`.
pub mod cardano_response {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Response {
#[prost(message, tag="1")]
Xpubs(super::CardanoXpubsResponse),
#[prost(message, tag="2")]
Pub(super::PubResponse),
#[prost(message, tag="3")]
SignTransaction(super::CardanoSignTransactionResponse),
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum CardanoNetwork {
CardanoMainnet = 0,
CardanoTestnet = 1,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EthPubRequest {
#[prost(uint32, repeated, tag="1")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
#[prost(enumeration="EthCoin", tag="2")]
pub coin: i32,
#[prost(enumeration="eth_pub_request::OutputType", tag="3")]
pub output_type: i32,
#[prost(bool, tag="4")]
pub display: bool,
#[prost(bytes="vec", tag="5")]
pub contract_address: ::prost::alloc::vec::Vec<u8>,
}
/// Nested message and enum types in `ETHPubRequest`.
pub mod eth_pub_request {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum OutputType {
Address = 0,
Xpub = 1,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EthSignRequest {
#[prost(enumeration="EthCoin", tag="1")]
pub coin: i32,
#[prost(uint32, repeated, tag="2")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
/// smallest big endian serialization, max. 16 bytes
#[prost(bytes="vec", tag="3")]
pub nonce: ::prost::alloc::vec::Vec<u8>,
/// smallest big endian serialization, max. 16 bytes
#[prost(bytes="vec", tag="4")]
pub gas_price: ::prost::alloc::vec::Vec<u8>,
/// smallest big endian serialization, max. 16 bytes
#[prost(bytes="vec", tag="5")]
pub gas_limit: ::prost::alloc::vec::Vec<u8>,
/// 20 byte recipient
#[prost(bytes="vec", tag="6")]
pub recipient: ::prost::alloc::vec::Vec<u8>,
/// smallest big endian serialization, max. 32 bytes
#[prost(bytes="vec", tag="7")]
pub value: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes="vec", tag="8")]
pub data: ::prost::alloc::vec::Vec<u8>,
#[prost(message, optional, tag="9")]
pub host_nonce_commitment: ::core::option::Option<AntiKleptoHostNonceCommitment>,
}
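// Illustrative sketch (not part of the generated prost output): the `nonce`,
// `gas_price`, `gas_limit` and `value` fields above carry the *smallest*
// big-endian encoding of an integer, i.e. presumably with leading zero bytes
// stripped before the value is placed in the field. `smallest_be` is a helper
// invented for this sketch only.
#[cfg(test)]
mod eth_sign_request_encoding_sketch {
    /// Strip leading zero bytes to get the smallest big-endian encoding.
    fn smallest_be(value: u64) -> Vec<u8> {
        value
            .to_be_bytes()
            .iter()
            .copied()
            .skip_while(|b| *b == 0)
            .collect()
    }
    #[test]
    fn gas_limit_is_two_bytes() {
        // 21000 (the gas limit of a plain ETH transfer) is 0x5208, so only two
        // bytes are serialized; this stays well under the documented 16-byte cap.
        assert_eq!(smallest_be(21_000), [0x52, 0x08]);
        assert!(smallest_be(u64::MAX).len() <= 16);
    }
}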
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EthSignMessageRequest {
#[prost(enumeration="EthCoin", tag="1")]
pub coin: i32,
#[prost(uint32, repeated, tag="2")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
#[prost(bytes="vec", tag="3")]
pub msg: ::prost::alloc::vec::Vec<u8>,
#[prost(message, optional, tag="4")]
pub host_nonce_commitment: ::core::option::Option<AntiKleptoHostNonceCommitment>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EthSignResponse {
/// 65 bytes, last byte is the recid
#[prost(bytes="vec", tag="1")]
pub signature: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EthRequest {
#[prost(oneof="eth_request::Request", tags="1, 2, 3, 4")]
pub request: ::core::option::Option<eth_request::Request>,
}
/// Nested message and enum types in `ETHRequest`.
pub mod eth_request {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Request {
#[prost(message, tag="1")]
Pub(super::EthPubRequest),
#[prost(message, tag="2")]
Sign(super::EthSignRequest),
#[prost(message, tag="3")]
SignMsg(super::EthSignMessageRequest),
#[prost(message, tag="4")]
AntikleptoSignature(super::AntiKleptoSignatureRequest),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EthResponse {
#[prost(oneof="eth_response::Response", tags="1, 2, 3")]
pub response: ::core::option::Option<eth_response::Response>,
}
/// Nested message and enum types in `ETHResponse`.
pub mod eth_response {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Response {
#[prost(message, tag="1")]
Pub(super::PubResponse),
#[prost(message, tag="2")]
Sign(super::EthSignResponse),
#[prost(message, tag="3")]
AntikleptoSignerCommitment(super::AntiKleptoSignerCommitment),
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum EthCoin {
Eth = 0,
RopstenEth = 1,
RinkebyEth = 2,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ElectrumEncryptionKeyRequest {
#[prost(uint32, repeated, tag="1")]
pub keypath: ::prost::alloc::vec::Vec<u32>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ElectrumEncryptionKeyResponse {
#[prost(string, tag="1")]
pub key: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ShowMnemonicRequest {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RestoreFromMnemonicRequest {
#[prost(uint32, tag="1")]
pub timestamp: u32,
#[prost(int32, tag="2")]
pub timezone_offset: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SetMnemonicPassphraseEnabledRequest {
#[prost(bool, tag="1")]
pub enabled: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RebootRequest {
#[prost(enumeration="reboot_request::Purpose", tag="1")]
pub purpose: i32,
}
/// Nested message and enum types in `RebootRequest`.
pub mod reboot_request {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Purpose {
Upgrade = 0,
Settings = 1,
}
}
/// Deprecated, last used in v1.0.0
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PerformAttestationRequest {
/// 32 bytes challenge.
#[prost(bytes="vec", tag="1")]
pub challenge: ::prost::alloc::vec::Vec<u8>,
}
/// Deprecated, last used in v1.0.0
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PerformAttestationResponse {
#[prost(bytes="vec", tag="1")]
pub bootloader_hash: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes="vec", tag="2")]
pub device_pubkey: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes="vec", tag="3")]
pub certificate: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes="vec", tag="4")]
pub root_pubkey_identifier: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes="vec", tag="5")]
pub challenge_signature: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
#[prost(int32, tag="1")]
pub code: i32,
#[prost(string, tag="2")]
pub message: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Success {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Request {
#[prost(oneof="request::Request", tags="2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 27")]
pub request: ::core::option::Option<request::Request>,
}
/// Nested message and enum types in `Request`.
pub mod request {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Request {
/// removed: RandomNumberRequest random_number = 1;
#[prost(message, tag="2")]
DeviceName(super::SetDeviceNameRequest),
#[prost(message, tag="3")]
DeviceLanguage(super::SetDeviceLanguageRequest),
#[prost(message, tag="4")]
DeviceInfo(super::DeviceInfoRequest),
#[prost(message, tag="5")]
SetPassword(super::SetPasswordRequest),
#[prost(message, tag="6")]
CreateBackup(super::CreateBackupRequest),
#[prost(message, tag="7")]
ShowMnemonic(super::ShowMnemonicRequest),
#[prost(message, tag="8")]
BtcPub(super::BtcPubRequest),
#[prost(message, tag="9")]
BtcSignInit(super::BtcSignInitRequest),
#[prost(message, tag="10")]
BtcSignInput(super::BtcSignInputRequest),
#[prost(message, tag="11")]
BtcSignOutput(super::BtcSignOutputRequest),
#[prost(message, tag="12")]
InsertRemoveSdcard(super::InsertRemoveSdCardRequest),
#[prost(message, tag="13")]
CheckSdcard(super::CheckSdCardRequest),
#[prost(message, tag="14")]
SetMnemonicPassphraseEnabled(super::SetMnemonicPassphraseEnabledRequest),
#[prost(message, tag="15")]
ListBackups(super::ListBackupsRequest),
#[prost(message, tag="16")]
RestoreBackup(super::RestoreBackupRequest),
#[prost(message, tag="17")]
PerformAttestation(super::PerformAttestationRequest),
#[prost(message, tag="18")]
Reboot(super::RebootRequest),
#[prost(message, tag="19")]
CheckBackup(super::CheckBackupRequest),
#[prost(message, tag="20")]
Eth(super::EthRequest),
#[prost(message, tag="21")]
Reset(super::ResetRequest),
#[prost(message, tag="22")]
RestoreFromMnemonic(super::RestoreFromMnemonicRequest),
/// removed: BitBoxBaseRequest bitboxbase = 23;
#[prost(message, tag="24")]
Fingerprint(super::RootFingerprintRequest),
#[prost(message, tag="25")]
Btc(super::BtcRequest),
#[prost(message, tag="26")]
ElectrumEncryptionKey(super::ElectrumEncryptionKeyRequest),
#[prost(message, tag="27")]
Cardano(super::CardanoRequest),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Response {
#[prost(oneof="response::Response", tags="1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15")]
pub response: ::core::option::Option<response::Response>,
}
/// Nested message and enum types in `Response`.
pub mod response {
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Response {
#[prost(message, tag="1")]
Success(super::Success),
#[prost(message, tag="2")]
Error(super::Error),
/// removed: RandomNumberResponse random_number = 3;
#[prost(message, tag="4")]
DeviceInfo(super::DeviceInfoResponse),
#[prost(message, tag="5")]
Pub(super::PubResponse),
#[prost(message, tag="6")]
BtcSignNext(super::BtcSignNextResponse),
#[prost(message, tag="7")]
ListBackups(super::ListBackupsResponse),
#[prost(message, tag="8")]
CheckBackup(super::CheckBackupResponse),
#[prost(message, tag="9")]
PerformAttestation(super::PerformAttestationResponse),
#[prost(message, tag="10")]
CheckSdcard(super::CheckSdCardResponse),
#[prost(message, tag="11")]
Eth(super::EthResponse),
#[prost(message, tag="12")]
Fingerprint(super::RootFingerprintResponse),
#[prost(message, tag="13")]
Btc(super::BtcResponse),
#[prost(message, tag="14")]
ElectrumEncryptionKey(super::ElectrumEncryptionKeyResponse),
#[prost(message, tag="15")]
Cardano(super::CardanoResponse),
}
}
| 37.104603 | 165 | 0.638475 |
2345a86595e2da0223b60d29f68147c5964c3bdc | 778 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a by-ref closure used where an `Fn` bound is required gets an
// error when it tries to mutate a captured value.
fn call<F>(f: F) where F : Fn() {
f();
}
fn main() {
let mut counter = 0_u32;
call(|| {
counter += 1;
//~^ ERROR cannot assign to data in a captured outer variable in an `Fn` closure
});
}
| 31.12 | 88 | 0.673522 |
76385c94b0240f912822ea926ec6633315136576 | 1,029 | #[doc = r"Register block"]
#[repr(C)]
pub struct CONFIG {
#[doc = "0x00 - Description cluster: Destination address where content of the key value registers (KEYSLOT.KEYn.VALUE\\[0-3\\]) will be pushed by KMU. Note that this address MUST match that of a peripherals APB mapped write-only key registers, else the KMU can push this key value into an address range which the CPU can potentially read!"]
pub dest: crate::Reg<self::config::dest::DEST_SPEC>,
#[doc = "0x04 - Description cluster: Define permissions for the key slot. Bits 0-15 and 16-31 can only be written when equal to 0xFFFF."]
pub perm: crate::Reg<self::config::perm::PERM_SPEC>,
}
#[doc = r"Register block"]
#[doc = "Unspecified"]
pub mod config;
#[doc = r"Register block"]
#[repr(C)]
pub struct KEY {
#[doc = "0x00..0x10 - Description collection: Define bits \\[31+o*32:0+o*32\\]
of value assigned to KMU key slot."]
pub value: [crate::Reg<self::key::value::VALUE_SPEC>; 4],
}
#[doc = r"Register block"]
#[doc = "Unspecified"]
pub mod key;
| 46.772727 | 344 | 0.692906 |
870d684e90899e64d208251f1a88d4636c9fef2f | 784 | use std::process::Command;
pub struct VoteCommand {
command: Command,
}
impl VoteCommand {
pub fn new(command: Command) -> Self {
Self { command }
}
pub fn active_committees<S: Into<String>>(mut self, host: S) -> Self {
self.command
.arg("active")
.arg("committees")
.arg("get")
.arg("--host")
.arg(host.into());
self
}
pub fn active_vote_plans<S: Into<String>>(mut self, host: S) -> Self {
self.command
.arg("active")
.arg("plans")
.arg("get")
.arg("--host")
.arg(host.into());
self
}
pub fn build(self) -> Command {
println!("{:?}", self.command);
self.command
}
}
| 21.189189 | 74 | 0.473214 |
48b2dcbbb3712e882de811adb968e3609f2e9e6b | 5,685 | use lyon::lyon_algorithms::path::Path;
use ruffle_core::shape_utils::DrawCommand;
use ruffle_core::swf;
use std::borrow::Cow;
use std::mem::size_of;
use swf::{GradientSpread, Twips};
use wgpu::util::DeviceExt;
macro_rules! create_debug_label {
($($arg:tt)*) => (
if cfg!(feature = "render_debug_labels") {
Some(format!($($arg)*))
} else {
None
}
)
}
pub fn format_list<'a>(values: &[&'a str], connector: &'a str) -> Cow<'a, str> {
match values.len() {
0 => Cow::Borrowed(""),
1 => Cow::Borrowed(values[0]),
_ => Cow::Owned(
values[0..values.len() - 1].join(", ")
+ " "
+ connector
+ " "
+ values[values.len() - 1],
),
}
}
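// Small illustrative test (not in the original file): `format_list` joins all
// but the last value with commas and splices the connector before the final
// value, so it reads naturally for zero, one, two, or many items.
#[cfg(test)]
mod format_list_sketch {
    use super::format_list;
    #[test]
    fn joins_with_connector() {
        assert_eq!(format_list(&[], "or"), "");
        assert_eq!(format_list(&["Vulkan"], "or"), "Vulkan");
        assert_eq!(format_list(&["Vulkan", "Metal"], "or"), "Vulkan or Metal");
        assert_eq!(
            format_list(&["Vulkan", "DirectX 12", "Metal"], "and"),
            "Vulkan, DirectX 12 and Metal"
        );
    }
}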
pub fn get_backend_names(backends: wgpu::BackendBit) -> Vec<&'static str> {
let mut names = Vec::new();
if backends.contains(wgpu::BackendBit::VULKAN) {
names.push("Vulkan");
}
if backends.contains(wgpu::BackendBit::DX12) {
names.push("DirectX 12");
}
if backends.contains(wgpu::BackendBit::DX11) {
names.push("DirectX 11");
}
if backends.contains(wgpu::BackendBit::METAL) {
names.push("Metal");
}
if backends.contains(wgpu::BackendBit::GL) {
names.push("Open GL");
}
if backends.contains(wgpu::BackendBit::BROWSER_WEBGPU) {
names.push("Web GPU");
}
names
}
pub fn create_buffer_with_data(
device: &wgpu::Device,
data: &[u8],
usage: wgpu::BufferUsage,
label: Option<String>,
) -> wgpu::Buffer {
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
usage,
label: label.as_deref(),
contents: data,
})
}
pub fn point(x: Twips, y: Twips) -> lyon::math::Point {
lyon::math::Point::new(x.to_pixels() as f32, y.to_pixels() as f32)
}
pub fn ruffle_path_to_lyon_path(commands: Vec<DrawCommand>, is_closed: bool) -> Path {
let mut builder = Path::builder();
let mut cmds = commands.into_iter().peekable();
while let Some(cmd) = cmds.next() {
match cmd {
DrawCommand::MoveTo { x, y } => {
// Lyon (incorrectly?) will make a 0-length line segment if you have consecutive MoveTos.
// Filter out consecutive MoveTos, only committing the last one.
let mut cursor_pos = (x, y);
while let Some(DrawCommand::MoveTo { x, y }) = cmds.peek() {
cursor_pos = (*x, *y);
cmds.next();
}
if cmds.peek().is_some() {
builder.move_to(point(cursor_pos.0, cursor_pos.1));
}
}
DrawCommand::LineTo { x, y } => {
builder.line_to(point(x, y));
}
DrawCommand::CurveTo { x1, y1, x2, y2 } => {
builder.quadratic_bezier_to(point(x1, y1), point(x2, y2));
}
}
}
if is_closed {
builder.close();
}
builder.build()
}
#[allow(clippy::many_single_char_names)]
pub fn swf_to_gl_matrix(m: swf::Matrix) -> [[f32; 4]; 4] {
let tx = m.tx.get() as f32;
let ty = m.ty.get() as f32;
let det = m.a * m.d - m.c * m.b;
let mut a = m.d / det;
let mut b = -m.c / det;
let mut c = -(tx * m.d - m.c * ty) / det;
let mut d = -m.b / det;
let mut e = m.a / det;
let mut f = (tx * m.b - m.a * ty) / det;
a *= 20.0 / 32768.0;
b *= 20.0 / 32768.0;
d *= 20.0 / 32768.0;
e *= 20.0 / 32768.0;
c /= 32768.0;
f /= 32768.0;
c += 0.5;
f += 0.5;
[
[a, d, 0.0, 0.0],
[b, e, 0., 0.0],
[c, f, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
}
#[allow(clippy::many_single_char_names)]
pub fn swf_bitmap_to_gl_matrix(
m: swf::Matrix,
bitmap_width: u32,
bitmap_height: u32,
) -> [[f32; 4]; 4] {
let bitmap_width = bitmap_width as f32;
let bitmap_height = bitmap_height as f32;
let tx = m.tx.get() as f32;
let ty = m.ty.get() as f32;
let det = m.a * m.d - m.c * m.b;
let mut a = m.d / det;
let mut b = -m.c / det;
let mut c = -(tx * m.d - m.c * ty) / det;
let mut d = -m.b / det;
let mut e = m.a / det;
let mut f = (tx * m.b - m.a * ty) / det;
a *= 20.0 / bitmap_width;
b *= 20.0 / bitmap_width;
d *= 20.0 / bitmap_height;
e *= 20.0 / bitmap_height;
c /= bitmap_width;
f /= bitmap_height;
[
[a, d, 0.0, 0.0],
[b, e, 0.0, 0.0],
[c, f, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
}
/// Maps a SWF gradient spread mode to the uniform value used by the gradient shader.
pub fn gradient_spread_mode_index(spread: GradientSpread) -> i32 {
match spread {
GradientSpread::Pad => 0,
GradientSpread::Repeat => 1,
GradientSpread::Reflect => 2,
}
}
// Based off wgpu example 'capture'
#[derive(Debug)]
pub struct BufferDimensions {
pub width: usize,
pub height: usize,
pub unpadded_bytes_per_row: usize,
pub padded_bytes_per_row: usize,
}
impl BufferDimensions {
pub fn new(width: usize, height: usize) -> Self {
let bytes_per_pixel = size_of::<u32>();
let unpadded_bytes_per_row = width * bytes_per_pixel;
let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize;
let padded_bytes_per_row_padding = (align - unpadded_bytes_per_row % align) % align;
let padded_bytes_per_row = unpadded_bytes_per_row + padded_bytes_per_row_padding;
Self {
width,
height,
unpadded_bytes_per_row,
padded_bytes_per_row,
}
}
}
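// Worked example (not from the original file) of the padding arithmetic above:
// assuming wgpu's COPY_BYTES_PER_ROW_ALIGNMENT of 256, a 300-pixel RGBA row is
// 300 * 4 = 1200 unpadded bytes, the padding is (256 - 1200 % 256) % 256 = 80,
// and the padded row is 1280 bytes (exactly 5 * 256).
#[cfg(test)]
mod buffer_dimensions_sketch {
    use super::BufferDimensions;
    #[test]
    fn pads_rows_to_copy_alignment() {
        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize;
        let dims = BufferDimensions::new(300, 2);
        assert_eq!(dims.unpadded_bytes_per_row, 1200);
        // The padded row is the smallest multiple of the alignment that fits the row.
        assert_eq!(dims.padded_bytes_per_row % align, 0);
        assert!(dims.padded_bytes_per_row - dims.unpadded_bytes_per_row < align);
    }
}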
| 27.731707 | 105 | 0.536675 |
87c417bebb088fce1ab679b4a7a15b21aa4daeed | 41 | org.jfree.data.xy.VectorSeriesCollection
| 20.5 | 40 | 0.878049 |
9051c3633051db67ea1ec804045b4cd05f68fb6f | 2,955 | use cosmwasm_std::{Env, MessageInfo, Response, Uint128, SubMsg, CosmosMsg, WasmMsg, to_binary, Decimal};
use valkyrie::common::ContractResult;
use valkyrie::mock_querier::{custom_deps, CustomDeps};
use valkyrie::test_constants::{default_sender, VALKYRIE_TOKEN};
use crate::executions::spend_fee;
use cw20::Cw20ExecuteMsg;
use valkyrie::test_constants::campaign_manager::{campaign_manager_env, FEE_RECIPIENT, CAMPAIGN_MANAGER};
pub fn exec(
deps: &mut CustomDeps,
env: Env,
info: MessageInfo,
amount: Option<Uint128>,
) -> ContractResult<Response> {
spend_fee(deps.as_mut(), env, info, amount)
}
pub fn will_success(deps: &mut CustomDeps, amount: Option<Uint128>) -> (Env, MessageInfo, Response) {
let env = campaign_manager_env();
let info = default_sender();
let response = exec(deps, env.clone(), info.clone(), amount).unwrap();
(env, info, response)
}
#[test]
fn succeed() {
let mut deps = custom_deps();
super::instantiate::default(&mut deps);
deps.querier.plus_token_balances(&[
(VALKYRIE_TOKEN, &[
(CAMPAIGN_MANAGER, &Uint128::new(50))
])
]);
let (_, _, response) = will_success(&mut deps, None);
assert_eq!(response.messages, vec![
SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: VALKYRIE_TOKEN.to_string(),
funds: vec![],
msg: to_binary(&Cw20ExecuteMsg::Transfer {
recipient: FEE_RECIPIENT.to_string(),
amount: Uint128::new(25),
}).unwrap(),
})),
SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: VALKYRIE_TOKEN.to_string(),
funds: vec![],
msg: to_binary(&Cw20ExecuteMsg::Burn {
amount: Uint128::new(25),
}).unwrap(),
})),
]);
super::update_config::will_success(
&mut deps,
None,
None,
None,
None,
None,
None,
None,
Some(Decimal::percent(10)),
None,
None,
None,
None,
);
deps.querier.plus_token_balances(&[
(VALKYRIE_TOKEN, &[
(CAMPAIGN_MANAGER, &Uint128::new(100))
])
]);
let (_, _, response) = will_success(&mut deps, Some(Uint128::new(50)));
assert_eq!(response.messages, vec![
SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: VALKYRIE_TOKEN.to_string(),
funds: vec![],
msg: to_binary(&Cw20ExecuteMsg::Transfer {
recipient: FEE_RECIPIENT.to_string(),
amount: Uint128::new(45),
}).unwrap(),
})),
SubMsg::new(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: VALKYRIE_TOKEN.to_string(),
funds: vec![],
msg: to_binary(&Cw20ExecuteMsg::Burn {
amount: Uint128::new(5),
}).unwrap(),
})),
]);
}
| 29.257426 | 104 | 0.578003 |
6a204313277ed9132ba8f65a94af2a8eb01634dd | 3,437 | /// These tests are intended to provide coverage and
/// check panic-safety of relatively unused values.
use super::codec::{Codec, decode_u8, encode_u8, decode_u16, encode_u16};
use super::enums::*;
fn get8<T: Codec>(enum_value: &T) -> u8 {
let enc = enum_value.get_encoding();
assert_eq!(enc.len(), 1);
decode_u8(&enc).unwrap()
}
fn get16<T: Codec>(enum_value: &T) -> u16 {
let enc = enum_value.get_encoding();
assert_eq!(enc.len(), 2);
decode_u16(&enc).unwrap()
}
fn test_enum16<T: Codec>(first: T, last: T) {
let first_v = get16(&first);
let last_v = get16(&last);
for val in first_v..last_v + 1 {
let mut buf = Vec::new();
encode_u16(val, &mut buf);
assert_eq!(buf.len(), 2);
let t = T::read_bytes(&buf).unwrap();
assert_eq!(val, get16(&t));
}
}
fn test_enum8<T: Codec>(first: T, last: T) {
let first_v = get8(&first);
let last_v = get8(&last);
for val in first_v..last_v + 1 {
let mut buf = Vec::new();
encode_u8(val, &mut buf);
assert_eq!(buf.len(), 1);
let t = T::read_bytes(&buf).unwrap();
assert_eq!(val, get8(&t));
}
}
#[test]
fn test_enums() {
test_enum16::<ProtocolVersion>(ProtocolVersion::SSLv2, ProtocolVersion::TLSv1_3);
test_enum8::<HashAlgorithm>(HashAlgorithm::NONE, HashAlgorithm::SHA512);
test_enum8::<SignatureAlgorithm>(SignatureAlgorithm::Anonymous, SignatureAlgorithm::ECDSA);
test_enum8::<ClientCertificateType>(ClientCertificateType::RSASign,
ClientCertificateType::ECDSAFixedECDH);
test_enum8::<Compression>(Compression::Null, Compression::LSZ);
test_enum8::<ContentType>(ContentType::ChangeCipherSpec, ContentType::Heartbeat);
test_enum8::<HandshakeType>(HandshakeType::HelloRequest, HandshakeType::KeyUpdate);
test_enum8::<AlertLevel>(AlertLevel::Warning, AlertLevel::Fatal);
test_enum8::<AlertDescription>(AlertDescription::CloseNotify,
AlertDescription::NoApplicationProtocol);
test_enum8::<HeartbeatMessageType>(HeartbeatMessageType::Request,
HeartbeatMessageType::Response);
test_enum16::<ExtensionType>(ExtensionType::ServerName, ExtensionType::RenegotiationInfo);
test_enum8::<ServerNameType>(ServerNameType::HostName, ServerNameType::HostName);
test_enum16::<NamedCurve>(NamedCurve::sect163k1,
NamedCurve::arbitrary_explicit_char2_curves);
test_enum16::<NamedGroup>(NamedGroup::secp256r1, NamedGroup::FFDHE8192);
test_enum16::<CipherSuite>(CipherSuite::TLS_NULL_WITH_NULL_NULL,
CipherSuite::SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA);
test_enum8::<ECPointFormat>(ECPointFormat::Uncompressed,
ECPointFormat::ANSIX962CompressedChar2);
test_enum8::<HeartbeatMode>(HeartbeatMode::PeerAllowedToSend,
HeartbeatMode::PeerNotAllowedToSend);
test_enum8::<ECCurveType>(ECCurveType::ExplicitPrime, ECCurveType::NamedCurve);
test_enum16::<SignatureScheme>(SignatureScheme::RSA_PKCS1_SHA1, SignatureScheme::ED448);
test_enum8::<PSKKeyExchangeMode>(PSKKeyExchangeMode::PSK_KE, PSKKeyExchangeMode::PSK_DHE_KE);
test_enum8::<KeyUpdateRequest>(KeyUpdateRequest::UpdateNotRequested,
KeyUpdateRequest::UpdateRequested);
}
| 43.506329 | 97 | 0.664533 |
d7c8086f2683a749f14efd6bb4d7c86c7acd0505 | 5,061 | #![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! Independent watchdog
//!
//! Used by: stm32f730, stm32f7x2, stm32f7x3
use crate::{RORegister, RWRegister, WORegister};
#[cfg(not(feature = "nosync"))]
use core::marker::PhantomData;
/// Key register
pub mod KR {
/// Key value (write only, read 0000h)
pub mod KEY {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (16 bits: 0xffff << 0)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0101010101010101: Enable access to PR, RLR and WINR registers (0x5555)
pub const Enable: u32 = 0b0101010101010101;
/// 0b1010101010101010: Reset the watchdog value (0xAAAA)
pub const Reset: u32 = 0b1010101010101010;
/// 0b1100110011001100: Start the watchdog (0xCCCC)
pub const Start: u32 = 0b1100110011001100;
}
}
}
/// Prescaler register
pub mod PR {
/// Prescaler divider
pub mod PR {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (3 bits: 0b111 << 0)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: Divider /4
pub const DivideBy4: u32 = 0b000;
/// 0b001: Divider /8
pub const DivideBy8: u32 = 0b001;
/// 0b010: Divider /16
pub const DivideBy16: u32 = 0b010;
/// 0b011: Divider /32
pub const DivideBy32: u32 = 0b011;
/// 0b100: Divider /64
pub const DivideBy64: u32 = 0b100;
/// 0b101: Divider /128
pub const DivideBy128: u32 = 0b101;
/// 0b110: Divider /256
pub const DivideBy256: u32 = 0b110;
/// 0b111: Divider /256
pub const DivideBy256bis: u32 = 0b111;
}
}
}
/// Reload register
pub mod RLR {
/// Watchdog counter reload value
pub mod RL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (12 bits: 0xfff << 0)
pub const mask: u32 = 0xfff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// Status register
pub mod SR {
/// Watchdog counter reload value update
pub mod RVU {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Watchdog prescaler value update
pub mod PVU {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Watchdog counter window value update
pub mod WVU {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// Window register
pub mod WINR {
/// Watchdog counter window value
pub mod WIN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (12 bits: 0xfff << 0)
pub const mask: u32 = 0xfff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
#[repr(C)]
pub struct RegisterBlock {
/// Key register
pub KR: WORegister<u32>,
/// Prescaler register
pub PR: RWRegister<u32>,
/// Reload register
pub RLR: RWRegister<u32>,
/// Status register
pub SR: RORegister<u32>,
/// Window register
pub WINR: RWRegister<u32>,
}
pub struct ResetValues {
pub KR: u32,
pub PR: u32,
pub RLR: u32,
pub SR: u32,
pub WINR: u32,
}
#[cfg(not(feature = "nosync"))]
pub struct Instance {
pub(crate) addr: u32,
pub(crate) _marker: PhantomData<*const RegisterBlock>,
}
#[cfg(not(feature = "nosync"))]
impl ::core::ops::Deref for Instance {
type Target = RegisterBlock;
#[inline(always)]
fn deref(&self) -> &RegisterBlock {
unsafe { &*(self.addr as *const _) }
}
}
#[cfg(feature = "rtic")]
unsafe impl Send for Instance {}
| 25.054455 | 88 | 0.527564 |
1ad40eca93b69e8c56c9648b3c44a95c9ac7aa9a | 738 | use super::*;
extern crate test;
use crate::boxed::Box;
use test::Bencher;
#[test]
fn allocate_zeroed() {
unsafe {
let layout = Layout::from_size_align(1024, 1).unwrap();
let memory = Global
.alloc(layout.clone(), AllocInit::Zeroed)
.unwrap_or_else(|_| handle_alloc_error(layout));
let mut i = memory.ptr.cast::<u8>().as_ptr();
let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
i = i.offset(1);
}
Global.dealloc(memory.ptr, layout);
}
}
#[bench]
#[cfg_attr(miri, ignore)] // Miri does not support benchmarks
fn alloc_owned_small(b: &mut Bencher) {
b.iter(|| {
let _: Box<_> = box 10;
})
}
| 23.0625 | 63 | 0.555556 |
d798d83d1012c16405e6338152c45e437b801d7a | 1,644 | use std::io;
use super::super::payload;
use crate::constants;
pub fn gen_buf_for_rpc(msg: super::MsgType, payload: Vec<u8>) -> Vec<u8> {
let mut buf = constants::PROTOCOL_IDENTIFIER_V1.to_vec();
buf.extend(msg.id().to_be_bytes().iter());
buf.extend(
((payload.len()) as super::MsgPayloadLen)
.to_be_bytes()
.iter(),
);
buf.extend(payload);
buf
}
pub fn new_message_buffer<'de, P, D>(msg: super::MsgType, pld: P) -> Result<Vec<u8>, io::Error>
where
P: payload::Payload<'de, D>,
D: serde::Serialize + serde::Deserialize<'de>,
{
let pld_buf = pld.as_vec()?;
let mut buf = constants::PROTOCOL_IDENTIFIER_V1.to_vec();
buf.extend(msg.id().to_be_bytes().iter());
buf.extend(
((pld_buf.len()) as super::MsgPayloadLen)
.to_be_bytes()
.iter(),
);
buf.extend(pld_buf);
Ok(buf)
}
pub fn message_buffer_from_payload<'de, P, D>(
msg: super::MsgType,
pld: P,
) -> Result<Vec<u8>, io::Error>
where
P: payload::Payload<'de, D>,
D: serde::Serialize + serde::Deserialize<'de>,
{
let pld_buf = pld.as_vec()?;
let mut buf = constants::PROTOCOL_IDENTIFIER_V1.to_vec();
buf.extend(msg.id().to_be_bytes().iter());
buf.extend(
((pld_buf.len()) as super::MsgPayloadLen)
.to_be_bytes()
.iter(),
);
buf.extend(pld_buf);
Ok(buf)
}
pub fn message_buffer_from_payload_buf(msg: super::MsgType, pld_buf: Vec<u8>) -> Vec<u8> {
let mut buf = constants::PROTOCOL_IDENTIFIER_V1.to_vec();
buf.extend(msg.id().to_be_bytes().iter());
buf.extend(
((pld_buf.len()) as super::MsgPayloadLen)
.to_be_bytes()
.iter(),
);
buf.extend(pld_buf);
buf
}
| 21.92 | 95 | 0.63747 |
643bf580c87f3742c7e518aaa196d32a4730f368 | 65,981 | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use std::cmp;
use std::collections::HashMap;
use std::convert::{TryFrom, TryInto};
use std::future::Future;
use std::iter;
use std::mem;
use byteorder::{ByteOrder, NetworkEndian};
use expr::GlobalId;
use futures::future::{BoxFuture, FutureExt};
use itertools::izip;
use openssl::nid::Nid;
use postgres::error::SqlState;
use tokio::io::{self, AsyncRead, AsyncWrite, Interest};
use tokio::sync::mpsc::unbounded_channel;
use tokio::time::{self, Duration, Instant};
use tracing::debug;
use coord::session::{
EndTransactionAction, InProgressRows, Portal, PortalState, RowBatchStream, Session,
TransactionStatus,
};
use coord::ExecuteResponse;
use dataflow_types::PeekResponse;
use ore::cast::CastFrom;
use ore::netio::AsyncReady;
use ore::str::StrExt;
use repr::{Datum, RelationDesc, RelationType, Row, RowArena};
use sql::ast::display::AstDisplay;
use sql::ast::{FetchDirection, Ident, Raw, Statement};
use sql::plan::{CopyFormat, CopyParams, ExecuteTimeout, StatementDesc};
use crate::codec::FramedConn;
use crate::message::{
self, BackendMessage, ErrorResponse, FrontendMessage, Severity, VERSIONS, VERSION_3,
};
use crate::metrics::Metrics;
use crate::server::{Conn, TlsMode};
use pgcopy::CopyFormatParams;
/// Reports whether the given stream begins with a pgwire handshake.
///
/// To avoid false negatives, there must be at least eight bytes in `buf`.
pub fn match_handshake(buf: &[u8]) -> bool {
// The pgwire StartupMessage looks like this:
//
// i32 - Length of entire message.
// i32 - Protocol version number.
// [String] - Arbitrary key-value parameters of any length.
//
// Since arbitrary parameters can be included in the StartupMessage, the
// first Int32 is worthless, since the message could have any length.
// Instead, we sniff the protocol version number.
if buf.len() < 8 {
return false;
}
let version = NetworkEndian::read_i32(&buf[4..8]);
VERSIONS.contains(&version)
}
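// Minimal sketch (not part of the original file) of the sniffing described
// above: eight bytes are enough to check the protocol version field, which for
// a v3.0 StartupMessage is the `VERSION_3` constant imported here. The length
// word in the first four bytes is ignored by the sniffer.
#[cfg(test)]
mod match_handshake_sketch {
    use super::*;
    #[test]
    fn sniffs_protocol_version() {
        let mut buf = vec![0, 0, 0, 8]; // message length; not inspected
        buf.extend_from_slice(&VERSION_3.to_be_bytes());
        assert!(match_handshake(&buf));
        assert!(!match_handshake(b"GET / HT")); // not a pgwire startup packet
        assert!(!match_handshake(&buf[..7])); // fewer than eight bytes
    }
}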
/// Parameters for the [`run`] function.
pub struct RunParams<'a, A> {
/// The TLS mode of the pgwire server.
pub tls_mode: Option<TlsMode>,
/// A client for the coordinator.
pub coord_client: coord::ConnClient,
/// The connection to the client.
pub conn: &'a mut FramedConn<A>,
/// The protocol version that the client provided in the startup message.
pub version: i32,
/// The parameters that the client provided in the startup message.
pub params: HashMap<String, String>,
/// The server's metrics.
pub metrics: &'a Metrics,
}
/// Runs a pgwire connection to completion.
///
/// This involves responding to `FrontendMessage::StartupMessage` and all future
/// requests until the client terminates the connection or a fatal error occurs.
///
/// Note that this function returns successfully even upon delivering a fatal
/// error to the client. It only returns `Err` if an unexpected I/O error occurs
/// while communicating with the client, e.g., if the connection is severed in
/// the middle of a request.
pub async fn run<'a, A>(
RunParams {
tls_mode,
coord_client,
conn,
version,
mut params,
metrics,
}: RunParams<'a, A>,
) -> Result<(), io::Error>
where
A: AsyncRead + AsyncWrite + AsyncReady + Send + Sync + Unpin,
{
if version != VERSION_3 {
return conn
.send(ErrorResponse::fatal(
SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION,
"server does not support the client's requested protocol version",
))
.await;
}
let user = params.remove("user").unwrap_or_else(String::new);
// Validate that the connection is compatible with the TLS mode.
//
// The match here explicitly spells out all cases to be resilient to
// future changes to TlsMode.
match (tls_mode, conn.inner()) {
(None, Conn::Unencrypted(_)) => (),
(None, Conn::Ssl(_)) => unreachable!(),
(Some(TlsMode::Require), Conn::Ssl(_)) => (),
(Some(TlsMode::Require), Conn::Unencrypted(_))
| (Some(TlsMode::VerifyUser), Conn::Unencrypted(_)) => {
return conn
.send(ErrorResponse::fatal(
SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION,
"TLS encryption is required",
))
.await;
}
(Some(TlsMode::VerifyUser), Conn::Ssl(inner_conn)) => {
let cn_matches = match inner_conn.ssl().peer_certificate() {
None => false,
Some(cert) => cert
.subject_name()
.entries_by_nid(Nid::COMMONNAME)
.any(|n| n.data().as_slice() == user.as_bytes()),
};
if !cn_matches {
let msg = format!(
"certificate authentication failed for user {}",
user.quoted()
);
return conn
.send(ErrorResponse::fatal(
SqlState::INVALID_AUTHORIZATION_SPECIFICATION,
msg,
))
.await;
}
}
}
// Construct session.
let mut session = Session::new(conn.id(), user);
for (name, value) in params {
let local = false;
let _ = session.vars_mut().set(&name, &value, local);
}
// Register session with coordinator.
let (mut coord_client, startup) = match coord_client.startup(session).await {
Ok(startup) => startup,
Err(e) => {
return conn
.send(ErrorResponse::from_coord(Severity::Fatal, e))
.await
}
};
// From this point forward we must not fail without calling `coord_client.terminate`!
let res = async {
let session = coord_client.session();
let mut buf = vec![BackendMessage::AuthenticationOk];
for var in session.vars().notify_set() {
buf.push(BackendMessage::ParameterStatus(var.name(), var.value()));
}
buf.push(BackendMessage::BackendKeyData {
conn_id: session.conn_id(),
secret_key: startup.secret_key,
});
for startup_message in startup.messages {
buf.push(ErrorResponse::from_startup_message(startup_message).into());
}
buf.push(BackendMessage::ReadyForQuery(session.transaction().into()));
conn.send_all(buf).await?;
conn.flush().await?;
let machine = StateMachine {
metrics,
conn,
coord_client: &mut coord_client,
};
machine.run().await
}
.await;
coord_client.terminate().await;
res
}
#[derive(Debug)]
enum State {
Ready,
Drain,
Done,
}
struct StateMachine<'a, A> {
conn: &'a mut FramedConn<A>,
coord_client: &'a mut coord::SessionClient,
metrics: &'a Metrics,
}
impl<'a, A> StateMachine<'a, A>
where
A: AsyncRead + AsyncWrite + AsyncReady + Send + Sync + Unpin + 'a,
{
// Manually desugar this (don't use `async fn run`) here because a much better
// error message is produced if there are problems with Send or other traits
// somewhere within the Future.
#[allow(clippy::manual_async_fn)]
fn run(mut self) -> impl Future<Output = Result<(), io::Error>> + Send + 'a {
async move {
let mut state = State::Ready;
loop {
state = match state {
State::Ready => self.advance_ready().await?,
State::Drain => self.advance_drain().await?,
State::Done => return Ok(()),
}
}
}
}
async fn advance_ready(&mut self) -> Result<State, io::Error> {
let message = self.conn.recv().await?;
let timer = Instant::now();
let name = match &message {
Some(message) => message.name(),
None => "eof",
};
self.coord_client.reset_canceled();
let next_state = match message {
Some(FrontendMessage::Query { sql }) => self.query(sql).await?,
Some(FrontendMessage::Parse {
name,
sql,
param_types,
}) => self.parse(name, sql, param_types).await?,
Some(FrontendMessage::Bind {
portal_name,
statement_name,
param_formats,
raw_params,
result_formats,
}) => {
self.bind(
portal_name,
statement_name,
param_formats,
raw_params,
result_formats,
)
.await?
}
Some(FrontendMessage::Execute {
portal_name,
max_rows,
}) => {
self.metrics.query_count.inc();
let max_rows = match usize::try_from(max_rows) {
Ok(0) | Err(_) => ExecuteCount::All, // If `max_rows < 0`, no limit.
Ok(n) => ExecuteCount::Count(n),
};
self.execute(
portal_name,
max_rows,
portal_exec_message,
None,
ExecuteTimeout::None,
)
.await?
}
Some(FrontendMessage::DescribeStatement { name }) => {
self.describe_statement(&name).await?
}
Some(FrontendMessage::DescribePortal { name }) => self.describe_portal(&name).await?,
Some(FrontendMessage::CloseStatement { name }) => self.close_statement(name).await?,
Some(FrontendMessage::ClosePortal { name }) => self.close_portal(name).await?,
Some(FrontendMessage::Flush) => self.flush().await?,
Some(FrontendMessage::Sync) => self.sync().await?,
Some(FrontendMessage::Terminate) => State::Done,
Some(FrontendMessage::CopyData(_))
| Some(FrontendMessage::CopyDone)
| Some(FrontendMessage::CopyFail(_)) => State::Drain,
None => State::Done,
};
let status = match next_state {
State::Ready | State::Done => "success",
State::Drain => "error",
};
self.metrics
.command_durations
.with_label_values(&[name, status])
.observe(timer.elapsed().as_secs_f64());
Ok(next_state)
}
async fn advance_drain(&mut self) -> Result<State, io::Error> {
match self.conn.recv().await? {
Some(FrontendMessage::Sync) => self.sync().await,
None => Ok(State::Done),
_ => Ok(State::Drain),
}
}
async fn one_query(&mut self, stmt: Statement<Raw>) -> Result<State, io::Error> {
// Bind the portal. Note that this does not set the empty string prepared
// statement.
let param_types = vec![];
const EMPTY_PORTAL: &str = "";
if let Err(e) = self
.coord_client
.declare(EMPTY_PORTAL.to_string(), stmt, param_types)
.await
{
return self
.error(ErrorResponse::from_coord(Severity::Error, e))
.await;
}
let stmt_desc = self
.coord_client
.session()
.get_portal(EMPTY_PORTAL)
.map(|portal| portal.desc.clone())
.expect("unnamed portal should be present");
if !stmt_desc.param_types.is_empty() {
return self
.error(ErrorResponse::error(
SqlState::UNDEFINED_PARAMETER,
"there is no parameter $1",
))
.await;
}
// Maybe send row description.
if let Some(relation_desc) = &stmt_desc.relation_desc {
if !stmt_desc.is_copy {
let formats = vec![pgrepr::Format::Text; stmt_desc.arity()];
self.conn
.send(BackendMessage::RowDescription(
message::encode_row_description(relation_desc, &formats),
))
.await?;
}
}
self.metrics.query_count.inc();
let result = match self.coord_client.execute(EMPTY_PORTAL.to_string()).await {
Ok(response) => {
self.send_execute_response(
response,
stmt_desc.relation_desc,
EMPTY_PORTAL.to_string(),
ExecuteCount::All,
portal_exec_message,
None,
ExecuteTimeout::None,
)
.await
}
Err(e) => {
self.error(ErrorResponse::from_coord(Severity::Error, e))
.await
}
};
// Destroy the portal.
self.coord_client.session().remove_portal(EMPTY_PORTAL);
result
}
async fn start_transaction(&mut self, stmts: Option<usize>) {
        // start_transaction can't error (but assert that just in case it changes in
        // the future).
let res = self.coord_client.start_transaction(stmts).await;
assert!(res.is_ok());
}
// See "Multiple Statements in a Simple Query" which documents how implicit
// transactions are handled.
// From https://www.postgresql.org/docs/current/protocol-flow.html
async fn query(&mut self, sql: String) -> Result<State, io::Error> {
// Parse first before doing any transaction checking.
let stmts = match parse_sql(&sql) {
Ok(stmts) => stmts,
Err(err) => {
self.error(err).await?;
return self.ready().await;
}
};
let num_stmts = stmts.len();
// Compare with postgres' backend/tcop/postgres.c exec_simple_query.
for stmt in stmts {
// In an aborted transaction, reject all commands except COMMIT/ROLLBACK.
if self.is_aborted_txn() && !is_txn_exit_stmt(Some(&stmt)) {
self.aborted_txn_error().await?;
break;
}
// Start an implicit transaction if we aren't in any transaction and there's
// more than one statement. This mirrors the `use_implicit_block` variable in
// postgres.
//
// This needs to be done in the loop instead of once at the top because
// a COMMIT/ROLLBACK statement needs to start a new transaction on next
// statement.
self.start_transaction(Some(num_stmts)).await;
match self.one_query(stmt).await? {
State::Ready => (),
State::Drain => break,
State::Done => return Ok(State::Done),
}
}
// Implicit transactions are closed at the end of a Query message.
{
if self.coord_client.session().transaction().is_implicit() {
self.commit_transaction().await?;
}
}
if num_stmts == 0 {
self.conn.send(BackendMessage::EmptyQueryResponse).await?;
}
self.ready().await
}
async fn parse(
&mut self,
name: String,
sql: String,
param_oids: Vec<u32>,
) -> Result<State, io::Error> {
// Start a transaction if we aren't in one.
self.start_transaction(Some(1)).await;
let mut param_types = vec![];
for oid in param_oids {
match pgrepr::Type::from_oid(oid) {
Some(ty) => param_types.push(Some(ty)),
None if oid == 0 => param_types.push(None),
None => {
return self
.error(ErrorResponse::error(
SqlState::PROTOCOL_VIOLATION,
format!("unable to decode parameter whose type OID is {}", oid),
))
.await;
}
}
}
let stmts = match parse_sql(&sql) {
Ok(stmts) => stmts,
Err(err) => {
return self.error(err).await;
}
};
if stmts.len() > 1 {
return self
.error(ErrorResponse::error(
SqlState::INTERNAL_ERROR,
"cannot insert multiple commands into a prepared statement",
))
.await;
}
let maybe_stmt = stmts.into_iter().next();
if self.is_aborted_txn() && !is_txn_exit_stmt(maybe_stmt.as_ref()) {
return self.aborted_txn_error().await;
}
match self
.coord_client
.describe(name, maybe_stmt, param_types)
.await
{
Ok(()) => {
self.conn.send(BackendMessage::ParseComplete).await?;
Ok(State::Ready)
}
Err(e) => {
self.error(ErrorResponse::from_coord(Severity::Error, e))
.await
}
}
}
/// Commits and clears the current transaction.
async fn commit_transaction(&mut self) -> Result<(), io::Error> {
self.end_transaction(EndTransactionAction::Commit).await
}
/// Rollback and clears the current transaction.
async fn rollback_transaction(&mut self) -> Result<(), io::Error> {
self.end_transaction(EndTransactionAction::Rollback).await
}
/// End a transaction and report to the user if an error occurred.
async fn end_transaction(&mut self, action: EndTransactionAction) -> Result<(), io::Error> {
let resp = self.coord_client.end_transaction(action).await;
if let Err(err) = resp {
self.conn
.send(BackendMessage::ErrorResponse(ErrorResponse::from_coord(
Severity::Error,
err,
)))
.await?;
}
Ok(())
}
async fn bind(
&mut self,
portal_name: String,
statement_name: String,
param_formats: Vec<pgrepr::Format>,
raw_params: Vec<Option<Vec<u8>>>,
result_formats: Vec<pgrepr::Format>,
) -> Result<State, io::Error> {
// Start a transaction if we aren't in one.
self.start_transaction(Some(1)).await;
let aborted_txn = self.is_aborted_txn();
let stmt = match self
.coord_client
.get_prepared_statement(&statement_name)
.await
{
Ok(stmt) => stmt,
Err(err) => {
return self
.error(ErrorResponse::from_coord(Severity::Error, err))
.await
}
};
let param_types = &stmt.desc().param_types;
if param_types.len() != raw_params.len() {
let message = format!(
"bind message supplies {actual} parameters, \
but prepared statement \"{name}\" requires {expected}",
name = statement_name,
actual = raw_params.len(),
expected = param_types.len()
);
return self
.error(ErrorResponse::error(SqlState::PROTOCOL_VIOLATION, message))
.await;
}
let param_formats = match pad_formats(param_formats, raw_params.len()) {
Ok(param_formats) => param_formats,
Err(msg) => {
return self
.error(ErrorResponse::error(SqlState::PROTOCOL_VIOLATION, msg))
.await
}
};
if aborted_txn && !is_txn_exit_stmt(stmt.sql()) {
return self.aborted_txn_error().await;
}
let buf = RowArena::new();
let mut params: Vec<(Datum, repr::ScalarType)> = Vec::new();
for (raw_param, typ, format) in izip!(raw_params, param_types, param_formats) {
match raw_param {
None => params.push(pgrepr::null_datum(typ)),
Some(bytes) => match pgrepr::Value::decode(format, typ, &bytes) {
Ok(param) => params.push(param.into_datum(&buf, typ)),
Err(err) => {
let msg = format!("unable to decode parameter: {}", err);
return self
.error(ErrorResponse::error(SqlState::INVALID_PARAMETER_VALUE, msg))
.await;
}
},
}
}
let result_formats = match pad_formats(
result_formats,
stmt.desc()
.relation_desc
.clone()
.map(|desc| desc.typ().column_types.len())
.unwrap_or(0),
) {
Ok(result_formats) => result_formats,
Err(msg) => {
return self
.error(ErrorResponse::error(SqlState::PROTOCOL_VIOLATION, msg))
.await
}
};
if let Some(desc) = stmt.desc().relation_desc.clone() {
for (format, ty) in result_formats.iter().zip(desc.iter_types()) {
match (format, &ty.scalar_type) {
(pgrepr::Format::Binary, repr::ScalarType::List { .. }) => {
return self
.error(ErrorResponse::error(
SqlState::PROTOCOL_VIOLATION,
"binary encoding of list types is not implemented",
))
.await;
}
(pgrepr::Format::Binary, repr::ScalarType::Map { .. }) => {
return self
.error(ErrorResponse::error(
SqlState::PROTOCOL_VIOLATION,
"binary encoding of map types is not implemented",
))
.await;
}
_ => (),
}
}
}
let desc = stmt.desc().clone();
let stmt = stmt.sql().cloned();
if let Err(err) =
self.coord_client
.session()
.set_portal(portal_name, desc, stmt, params, result_formats)
{
return self
.error(ErrorResponse::from_coord(Severity::Error, err))
.await;
}
self.conn.send(BackendMessage::BindComplete).await?;
Ok(State::Ready)
}
fn execute(
&mut self,
portal_name: String,
max_rows: ExecuteCount,
get_response: GetResponse,
fetch_portal_name: Option<String>,
timeout: ExecuteTimeout,
) -> BoxFuture<'_, Result<State, io::Error>> {
async move {
let aborted_txn = self.is_aborted_txn();
// Check if the portal has been started and can be continued.
let portal = match self.coord_client.session().get_portal_mut(&portal_name) {
// let portal = match session.get_portal_mut(&portal_name) {
Some(portal) => portal,
None => {
return self
.error(ErrorResponse::error(
SqlState::INVALID_CURSOR_NAME,
format!("portal {} does not exist", portal_name.quoted()),
))
.await;
}
};
// In an aborted transaction, reject all commands except COMMIT/ROLLBACK.
let txn_exit_stmt = is_txn_exit_stmt(portal.stmt.as_ref());
if aborted_txn && !txn_exit_stmt {
return self.aborted_txn_error().await;
}
let row_desc = portal.desc.relation_desc.clone();
match &mut portal.state {
PortalState::NotStarted => {
// Start a transaction if we aren't in one. Postgres does this both here and
// in bind. We don't do it in bind because I'm not sure what purpose it would
// serve us (i.e., I'm not aware of a pgtest that would differ between us and
// Postgres).
self.start_transaction(Some(1)).await;
match self.coord_client.execute(portal_name.clone()).await {
Ok(response) => {
self.send_execute_response(
response,
row_desc,
portal_name,
max_rows,
get_response,
fetch_portal_name,
timeout,
)
.await
}
Err(e) => {
self.error(ErrorResponse::from_coord(Severity::Error, e))
.await
}
}
}
PortalState::InProgress(rows) => {
let rows = rows.take().expect("InProgress rows must be populated");
self.send_rows(
row_desc.expect("portal missing row desc on resumption"),
portal_name,
rows,
max_rows,
get_response,
fetch_portal_name,
timeout,
)
.await
}
// FETCH is an awkward command for our current architecture. In Postgres it
// will extract <count> rows from the target portal, cache them, and return
// them to the user as requested. Its command tag is always FETCH <num rows
// extracted>. In Materialize, since we have chosen to not fully support FETCH,
// we must remember the number of rows that were returned. Use this tag to
// remember that information and return it.
PortalState::Completed(Some(tag)) => {
self.conn
.send(BackendMessage::CommandComplete {
tag: tag.to_string(),
})
.await?;
Ok(State::Ready)
}
PortalState::Completed(None) => {
self.error(ErrorResponse::error(
SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE,
format!(
"portal {} cannot be run",
Ident::new(portal_name).to_ast_string_stable()
),
))
.await
}
}
}
.boxed()
}
async fn describe_statement(&mut self, name: &str) -> Result<State, io::Error> {
// Start a transaction if we aren't in one.
self.start_transaction(Some(1)).await;
let stmt = match self.coord_client.get_prepared_statement(&name).await {
Ok(stmt) => stmt,
Err(err) => {
return self
.error(ErrorResponse::from_coord(Severity::Error, err))
.await
}
};
self.conn
.send(BackendMessage::ParameterDescription(
stmt.desc().param_types.clone(),
))
.await?;
// Claim that all results will be output in text format, even
// though the true result formats are not yet known. A bit
// weird, but this is the behavior that PostgreSQL specifies.
let formats = vec![pgrepr::Format::Text; stmt.desc().arity()];
self.conn.send(describe_rows(stmt.desc(), &formats)).await?;
Ok(State::Ready)
}
async fn describe_portal(&mut self, name: &str) -> Result<State, io::Error> {
// Start a transaction if we aren't in one.
self.start_transaction(Some(1)).await;
let session = self.coord_client.session();
let row_desc = session
.get_portal(name)
.map(|portal| describe_rows(&portal.desc, &portal.result_formats));
match row_desc {
Some(row_desc) => {
self.conn.send(row_desc).await?;
Ok(State::Ready)
}
None => {
self.error(ErrorResponse::error(
SqlState::INVALID_CURSOR_NAME,
format!("portal {} does not exist", name.quoted()),
))
.await
}
}
}
async fn close_statement(&mut self, name: String) -> Result<State, io::Error> {
self.coord_client.session().remove_prepared_statement(&name);
self.conn.send(BackendMessage::CloseComplete).await?;
Ok(State::Ready)
}
async fn close_portal(&mut self, name: String) -> Result<State, io::Error> {
self.coord_client.session().remove_portal(&name);
self.conn.send(BackendMessage::CloseComplete).await?;
Ok(State::Ready)
}
fn complete_portal(&mut self, name: &str) {
let portal = self
.coord_client
.session()
.get_portal_mut(name)
.expect("portal should exist");
portal.state = PortalState::Completed(None);
}
async fn fetch(
&mut self,
name: String,
count: Option<FetchDirection>,
max_rows: ExecuteCount,
fetch_portal_name: Option<String>,
timeout: ExecuteTimeout,
) -> Result<State, io::Error> {
// Unlike Execute, no count specified in FETCH returns 1 row, and 0 means 0
// instead of All.
let count = count.unwrap_or(FetchDirection::ForwardCount(1));
// Figure out how many rows we should send back by looking at the various
// combinations of the execute and fetch.
//
// In Postgres, Fetch will cache <count> rows from the target portal and
// return those as requested (if, say, an Execute message was sent with a
// max_rows < the Fetch's count). We expect that case to be incredibly rare and
// so have chosen to not support it until users request it. This eases
// implementation difficulty since we don't have to be able to "send" rows to
// a buffer.
//
// TODO(mjibson): Test this somehow? Need to divide up the pgtest files in
// order to have some that are not Postgres compatible.
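        // As a concrete illustration (informal commentary added here, not part
        // of the original source): (All, ForwardCount(10)) simply becomes
        // Count(10), (All, ForwardAll) stays All, and (Count(5),
        // ForwardCount(10)) is rejected below because the Execute's max_rows
        // is smaller than the FETCH count.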
let count = match (max_rows, count) {
(ExecuteCount::Count(max_rows), FetchDirection::ForwardCount(count)) => {
let count = usize::cast_from(count);
if max_rows < count {
return self
.error(ErrorResponse::error(
SqlState::FEATURE_NOT_SUPPORTED,
"Execute with max_rows < a FETCH's count is not supported",
))
.await;
}
ExecuteCount::Count(count)
}
(ExecuteCount::Count(_), FetchDirection::ForwardAll) => {
return self
.error(ErrorResponse::error(
SqlState::FEATURE_NOT_SUPPORTED,
"Execute with max_rows of a FETCH ALL is not supported",
))
.await;
}
(ExecuteCount::All, FetchDirection::ForwardAll) => ExecuteCount::All,
(ExecuteCount::All, FetchDirection::ForwardCount(count)) => {
ExecuteCount::Count(usize::cast_from(count))
}
};
let cursor_name = name.to_string();
self.execute(
cursor_name,
count,
fetch_message,
fetch_portal_name,
timeout,
)
.await
}
async fn flush(&mut self) -> Result<State, io::Error> {
self.conn.flush().await?;
Ok(State::Ready)
}
async fn sync(&mut self) -> Result<State, io::Error> {
// Close the current transaction if we are in an implicit transaction.
if self.coord_client.session().transaction().is_implicit() {
self.commit_transaction().await?;
}
return self.ready().await;
}
async fn ready(&mut self) -> Result<State, io::Error> {
let txn_state = self.coord_client.session().transaction().into();
self.conn
.send(BackendMessage::ReadyForQuery(txn_state))
.await?;
self.flush().await
}
#[allow(clippy::too_many_arguments)]
async fn send_execute_response(
&mut self,
response: ExecuteResponse,
row_desc: Option<RelationDesc>,
portal_name: String,
max_rows: ExecuteCount,
get_response: GetResponse,
fetch_portal_name: Option<String>,
timeout: ExecuteTimeout,
) -> Result<State, io::Error> {
macro_rules! command_complete {
($($arg:tt)*) => {{
// N.B.: the output of format! must be stored into a
// variable, or rustc barfs out a completely inscrutable
// error: https://github.com/rust-lang/rust/issues/64960.
let tag = format!($($arg)*);
self.conn.send(BackendMessage::CommandComplete { tag }).await?;
Ok(State::Ready)
}};
}
macro_rules! created {
($existed:expr, $code:expr, $type:expr) => {{
if $existed {
let msg =
ErrorResponse::notice($code, concat!($type, " already exists, skipping"));
self.conn.send(msg).await?;
}
command_complete!("CREATE {}", $type.to_uppercase())
}};
}
match response {
ExecuteResponse::Canceled => {
return self
.error(ErrorResponse::error(
SqlState::QUERY_CANCELED,
"canceling statement due to user request",
))
.await;
}
ExecuteResponse::ClosedCursor => {
self.complete_portal(&portal_name);
command_complete!("CLOSE CURSOR")
}
ExecuteResponse::CreatedDatabase { existed } => {
created!(existed, SqlState::DUPLICATE_DATABASE, "database")
}
ExecuteResponse::CreatedSchema { existed } => {
created!(existed, SqlState::DUPLICATE_SCHEMA, "schema")
}
ExecuteResponse::CreatedRole => {
let existed = false;
created!(existed, SqlState::DUPLICATE_OBJECT, "role")
}
ExecuteResponse::CreatedTable { existed } => {
created!(existed, SqlState::DUPLICATE_TABLE, "table")
}
ExecuteResponse::CreatedIndex { existed } => {
created!(existed, SqlState::DUPLICATE_OBJECT, "index")
}
ExecuteResponse::CreatedSource { existed } => {
created!(existed, SqlState::DUPLICATE_OBJECT, "source")
}
ExecuteResponse::CreatedSources => command_complete!("CREATE SOURCES"),
ExecuteResponse::CreatedSink { existed } => {
created!(existed, SqlState::DUPLICATE_OBJECT, "sink")
}
ExecuteResponse::CreatedView { existed } => {
created!(existed, SqlState::DUPLICATE_OBJECT, "view")
}
ExecuteResponse::CreatedType => command_complete!("CREATE TYPE"),
ExecuteResponse::DeclaredCursor => {
self.complete_portal(&portal_name);
command_complete!("DECLARE CURSOR")
}
ExecuteResponse::Deleted(n) => command_complete!("DELETE {}", n),
ExecuteResponse::DiscardedTemp => command_complete!("DISCARD TEMP"),
ExecuteResponse::DiscardedAll => command_complete!("DISCARD ALL"),
ExecuteResponse::DroppedDatabase => command_complete!("DROP DATABASE"),
ExecuteResponse::DroppedSchema => command_complete!("DROP SCHEMA"),
ExecuteResponse::DroppedRole => command_complete!("DROP ROLE"),
ExecuteResponse::DroppedSource => command_complete!("DROP SOURCE"),
ExecuteResponse::DroppedIndex => command_complete!("DROP INDEX"),
ExecuteResponse::DroppedSink => command_complete!("DROP SINK"),
ExecuteResponse::DroppedTable => command_complete!("DROP TABLE"),
ExecuteResponse::DroppedView => command_complete!("DROP VIEW"),
ExecuteResponse::DroppedType => command_complete!("DROP TYPE"),
ExecuteResponse::EmptyQuery => {
self.conn.send(BackendMessage::EmptyQueryResponse).await?;
Ok(State::Ready)
}
ExecuteResponse::Fetch {
name,
count,
timeout,
} => {
self.fetch(
name,
count,
max_rows,
Some(portal_name.to_string()),
timeout,
)
.await
}
ExecuteResponse::Inserted(n) => {
// "On successful completion, an INSERT command returns a
// command tag of the form `INSERT <oid> <count>`."
// -- https://www.postgresql.org/docs/11/sql-insert.html
//
// OIDs are a PostgreSQL-specific historical quirk, but we
// can return a 0 OID to indicate that the table does not
// have OIDs.
command_complete!("INSERT 0 {}", n)
}
ExecuteResponse::SendingRows(rx) => {
let row_desc =
row_desc.expect("missing row description for ExecuteResponse::SendingRows");
match rx.await {
PeekResponse::Canceled => {
self.error(ErrorResponse::error(
SqlState::QUERY_CANCELED,
"canceling statement due to user request",
))
.await
}
PeekResponse::Error(text) => {
self.error(ErrorResponse::error(SqlState::INTERNAL_ERROR, text))
.await
}
PeekResponse::Rows(rows) => {
self.send_rows(
row_desc,
portal_name,
InProgressRows::single_batch(rows),
max_rows,
get_response,
fetch_portal_name,
timeout,
)
.await
}
}
}
ExecuteResponse::SetVariable { name } => {
// This code is somewhat awkwardly structured because we
// can't hold `var` across an await point.
let qn = name.to_string();
let msg = if let Some(var) = self
.coord_client
.session()
.vars_mut()
.notify_set()
.find(|v| v.name() == qn)
{
Some(BackendMessage::ParameterStatus(var.name(), var.value()))
} else {
None
};
if let Some(msg) = msg {
self.conn.send(msg).await?;
}
command_complete!("SET")
}
ExecuteResponse::StartedTransaction { duplicated } => {
if duplicated {
let msg = ErrorResponse::warning(
SqlState::ACTIVE_SQL_TRANSACTION,
"there is already a transaction in progress",
);
self.conn.send(msg).await?;
}
command_complete!("BEGIN")
}
ExecuteResponse::TransactionExited { tag, was_implicit } => {
// In Postgres, if a user sends a COMMIT or ROLLBACK in an implicit
// transaction, a notice is sent warning them. (The transaction is still closed
// and a new implicit transaction started, though.)
if was_implicit {
let msg = ErrorResponse::notice(
SqlState::NO_ACTIVE_SQL_TRANSACTION,
"there is no transaction in progress",
);
self.conn.send(msg).await?;
}
command_complete!("{}", tag)
}
ExecuteResponse::Tailing { rx } => {
if fetch_portal_name.is_none() {
let mut msg = ErrorResponse::notice(
SqlState::WARNING,
"streaming TAIL rows directly requires a client that does not buffer output",
);
if self.coord_client.session().vars().application_name() == "psql" {
msg.hint =
Some("Wrap your TAIL statement in `COPY (TAIL ...) TO STDOUT`.".into())
}
self.conn.send(msg).await?;
self.conn.flush().await?;
}
let row_desc =
row_desc.expect("missing row description for ExecuteResponse::Tailing");
self.send_rows(
row_desc,
portal_name,
InProgressRows::new(rx),
max_rows,
get_response,
fetch_portal_name,
timeout,
)
.await
}
ExecuteResponse::CopyTo { format, resp } => {
let row_desc =
row_desc.expect("missing row description for ExecuteResponse::CopyTo");
let rows: RowBatchStream = match *resp {
ExecuteResponse::Tailing { rx } => rx,
ExecuteResponse::SendingRows(rx) => match rx.await {
// TODO(mjibson): This logic is duplicated from SendingRows. Dedup?
PeekResponse::Canceled => {
return self
.error(ErrorResponse::error(
SqlState::QUERY_CANCELED,
"canceling statement due to user request",
))
.await;
}
PeekResponse::Error(text) => {
return self
.error(ErrorResponse::error(SqlState::INTERNAL_ERROR, text))
.await;
}
PeekResponse::Rows(rows) => {
let (tx, rx) = unbounded_channel();
tx.send(rows).expect("send must succeed");
rx
}
},
_ => {
return self
.error(ErrorResponse::error(
SqlState::INTERNAL_ERROR,
"unsupported COPY response type".to_string(),
))
.await;
}
};
self.copy_rows(format, row_desc, rows).await
}
ExecuteResponse::CopyFrom {
id,
columns,
params,
} => {
let row_desc =
row_desc.expect("missing row description for ExecuteResponse::CopyFrom");
self.copy_from(id, columns, params, row_desc).await
}
ExecuteResponse::Updated(n) => command_complete!("UPDATE {}", n),
ExecuteResponse::AlteredObject(o) => command_complete!("ALTER {}", o),
ExecuteResponse::AlteredIndexLogicalCompaction => command_complete!("ALTER INDEX"),
ExecuteResponse::Prepare => command_complete!("PREPARE"),
ExecuteResponse::Deallocate { all } => {
command_complete!("DEALLOCATE{}", if all { " ALL" } else { "" })
}
}
}
#[allow(clippy::too_many_arguments)]
async fn send_rows(
&mut self,
row_desc: RelationDesc,
portal_name: String,
mut rows: InProgressRows,
max_rows: ExecuteCount,
get_response: GetResponse,
fetch_portal_name: Option<String>,
timeout: ExecuteTimeout,
) -> Result<State, io::Error> {
// If this portal is being executed from a FETCH then we need to use the result
// format type of the outer portal.
let result_format_portal_name: &str = if let Some(ref name) = fetch_portal_name {
name
} else {
&portal_name
};
let result_formats = self
.coord_client
.session()
.get_portal(result_format_portal_name)
.expect("valid fetch portal name for send rows")
.result_formats
.clone();
let (mut wait_once, mut deadline) = match timeout {
ExecuteTimeout::None => (false, None),
ExecuteTimeout::Seconds(t) => {
(false, Some(Instant::now() + Duration::from_secs_f64(t)))
}
ExecuteTimeout::WaitOnce => (true, None),
};
self.conn.set_encode_state(
row_desc
.typ()
.column_types
.iter()
.map(|ty| pgrepr::Type::from(&ty.scalar_type))
.zip(result_formats)
.collect(),
);
let mut total_sent_rows = 0;
// want_rows is the maximum number of rows the client wants.
let mut want_rows = match max_rows {
ExecuteCount::All => usize::MAX,
ExecuteCount::Count(count) => count,
};
// Send rows while the client still wants them and there are still rows to send.
loop {
// Fetch next batch of rows, waiting for a possible requested timeout or
// cancellation.
let batch = if self.coord_client.canceled().now_or_never().is_some() {
FetchResult::Canceled
} else if rows.current.is_some() {
FetchResult::Rows(rows.current.take())
} else {
tokio::select! {
_ = time::sleep_until(deadline.unwrap_or_else(time::Instant::now)), if deadline.is_some() => FetchResult::Rows(None),
_ = self.coord_client.canceled() => FetchResult::Canceled,
batch = rows.remaining.recv() => FetchResult::Rows(batch),
}
};
match batch {
FetchResult::Rows(None) => break,
FetchResult::Rows(Some(mut batch_rows)) => {
// Verify the first row is of the expected type. This is often good enough to
// find problems. Notably it failed to find #6304 when "FETCH 2" was used in a
// test, instead we had to use "FETCH 1" twice.
if let [row, ..] = batch_rows.as_slice() {
let datums = row.unpack();
let col_types = &row_desc.typ().column_types;
if datums.len() != col_types.len() {
return self
.error(ErrorResponse::error(
SqlState::INTERNAL_ERROR,
format!(
"internal error: row descriptor has {} columns but row has {} columns",
col_types.len(),
datums.len(),
),
))
.await;
}
for (i, (d, t)) in datums.iter().zip(col_types).enumerate() {
if !d.is_instance_of(&t) {
return self
.error(ErrorResponse::error(
SqlState::INTERNAL_ERROR,
format!(
"internal error: column {} is not of expected type {:?}: {:?}",
i, t, d
),
))
.await;
}
}
}
// If wait_once is true: the first time this fn is called it blocks (same as
// deadline == None). The second time this fn is called it should behave the
                    // same as a 0s timeout.
if wait_once && !batch_rows.is_empty() {
deadline = Some(Instant::now());
wait_once = false;
}
// Drain panics if it's > len, so cap it.
let drain_rows = cmp::min(want_rows, batch_rows.len());
self.conn
.send_all(batch_rows.drain(..drain_rows).map(|row| {
BackendMessage::DataRow(pgrepr::values_from_row(row, row_desc.typ()))
}))
.await?;
total_sent_rows += drain_rows;
want_rows -= drain_rows;
// If we have sent the number of requested rows, put the remainder of the batch
// (if any) back and stop sending.
if want_rows == 0 {
if !batch_rows.is_empty() {
rows.current = Some(batch_rows);
}
break;
}
self.conn.flush().await?;
}
FetchResult::Canceled => {
return self
.error(ErrorResponse::error(
SqlState::QUERY_CANCELED,
"canceling statement due to user request",
))
.await;
}
}
}
self.metrics
.rows_returned
.inc_by(u64::cast_from(total_sent_rows));
let portal = self
.coord_client
.session()
.get_portal_mut(&portal_name)
.expect("valid portal name for send rows");
// Always return rows back, even if it's empty. This prevents an unclosed
// portal from re-executing after it has been emptied.
portal.state = PortalState::InProgress(Some(rows));
let fetch_portal = fetch_portal_name.map(|name| {
self.coord_client
.session()
.get_portal_mut(&name)
.expect("valid fetch portal")
});
let response_message = get_response(max_rows, total_sent_rows, fetch_portal);
self.conn.send(response_message).await?;
Ok(State::Ready)
}
async fn copy_rows(
&mut self,
format: CopyFormat,
row_desc: RelationDesc,
mut stream: RowBatchStream,
) -> Result<State, io::Error> {
let (encode_fn, encode_format): (
fn(Row, &RelationType, &mut Vec<u8>) -> Result<(), std::io::Error>,
pgrepr::Format,
) = match format {
CopyFormat::Text => (pgcopy::encode_copy_row_text, pgrepr::Format::Text),
CopyFormat::Binary => (pgcopy::encode_copy_row_binary, pgrepr::Format::Binary),
_ => {
return self
.error(ErrorResponse::error(
SqlState::FEATURE_NOT_SUPPORTED,
format!("COPY TO format {:?} not supported", format),
))
.await
}
};
let typ = row_desc.typ();
let column_formats = iter::repeat(encode_format)
.take(typ.column_types.len())
.collect();
self.conn
.send(BackendMessage::CopyOutResponse {
overall_format: encode_format,
column_formats,
})
.await?;
// In Postgres, binary copy has a header that is followed (in the same
// CopyData) by the first row. In order to replicate their behavior, use a
// common vec that we can extend one time now and then fill up with the encode
// functions.
let mut out = Vec::new();
if let CopyFormat::Binary = format {
// 11-byte signature.
out.extend(b"PGCOPY\n\xFF\r\n\0");
// 32-bit flags field.
out.extend(&[0, 0, 0, 0]);
// 32-bit header extension length field.
out.extend(&[0, 0, 0, 0]);
}
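        // For reference (an informal sketch added here, not normative): the
        // three extends above mean every binary COPY starts with this fixed
        // 19-byte prelude
        //
        //     50 47 43 4f 50 59 0a ff 0d 0a 00   "PGCOPY\n\xFF\r\n\0"
        //     00 00 00 00                        flags
        //     00 00 00 00                        header extension length
        //
        // and is terminated by the 16-bit trailer 0xffff (-1) sent below.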
let mut count = 0;
loop {
tokio::select! {
_ = time::sleep_until(Instant::now() + Duration::from_secs(1)) => {
// It's been a while since we've had any data to send, and
// the client may have disconnected. Check whether the
// socket is no longer readable and error if so. Otherwise
// we might block forever waiting for rows, leaking memory
// and a socket.
//
// In theory we should check for writability rather than
// readability—after all, we're writing data to the socket,
// not reading from it—but read-closed events are much more
// reliable on TCP streams than write-closed events.
// See: https://github.com/tokio-rs/mio/pull/1110
let ready = self.conn.ready(Interest::READABLE).await?;
if ready.is_read_closed() {
return self
.error(ErrorResponse::fatal(
SqlState::CONNECTION_FAILURE,
"connection closed",
))
.await;
}
},
_ = self.coord_client.canceled() => {
return self
.error(ErrorResponse::error(
SqlState::QUERY_CANCELED,
"canceling statement due to user request",
))
.await;
},
batch = stream.recv() => match batch {
None => break,
Some(rows) => {
count += rows.len();
for row in rows {
encode_fn(row, typ, &mut out)?;
self.conn
.send(BackendMessage::CopyData(mem::take(&mut out)))
.await?;
}
}
},
}
self.conn.flush().await?;
}
// Send required trailers.
if let CopyFormat::Binary = format {
let trailer: i16 = -1;
out.extend(&trailer.to_be_bytes());
self.conn
.send(BackendMessage::CopyData(mem::take(&mut out)))
.await?;
}
let tag = format!("COPY {}", count);
self.conn.send(BackendMessage::CopyDone).await?;
self.conn
.send(BackendMessage::CommandComplete { tag })
.await?;
Ok(State::Ready)
}
    /// Handles the copy-in mode of the postgres protocol for transferring
    /// data from the client to the server.
async fn copy_from(
&mut self,
id: GlobalId,
columns: Vec<usize>,
params: CopyParams,
row_desc: RelationDesc,
) -> Result<State, io::Error> {
if !matches!(params.format, CopyFormat::Text | CopyFormat::Csv) {
return self
.error(ErrorResponse::error(
SqlState::FEATURE_NOT_SUPPORTED,
format!("COPY FROM format {:?} not supported", params.format),
))
.await;
}
// Ensure params are valid here so as to error before waiting to receive
// any data from the client.
let params: CopyFormatParams = match params.try_into() {
Ok(params) => params,
Err(e) => {
return self.error(e.into()).await;
}
};
let typ = row_desc.typ();
let column_formats = vec![pgrepr::Format::Text; typ.column_types.len()];
self.conn
.send(BackendMessage::CopyInResponse {
overall_format: pgrepr::Format::Text,
column_formats,
})
.await?;
self.conn.flush().await?;
let mut data = Vec::new();
let mut next_state = State::Ready;
loop {
let message = self.conn.recv().await?;
match message {
Some(FrontendMessage::CopyData(buf)) => data.extend(buf),
Some(FrontendMessage::CopyDone) => break,
Some(FrontendMessage::CopyFail(err)) => {
return self
.error(ErrorResponse::error(
SqlState::QUERY_CANCELED,
format!("COPY from stdin failed: {}", err),
))
.await
}
Some(FrontendMessage::Flush) | Some(FrontendMessage::Sync) => {}
Some(_) => {
return self
.error(ErrorResponse::error(
SqlState::PROTOCOL_VIOLATION,
"unexpected message type during COPY from stdin",
))
.await
}
_ => {
next_state = State::Done;
break;
}
}
}
let column_types = typ
.column_types
.iter()
.map(|x| &x.scalar_type)
.map(pgrepr::Type::from)
.collect::<Vec<pgrepr::Type>>();
if let State::Ready = next_state {
let rows = match pgcopy::decode_copy_format(&data, &column_types, params) {
Ok(rows) => rows,
Err(e) => {
return self
.error(ErrorResponse::error(
SqlState::BAD_COPY_FILE_FORMAT,
format!("{}", e),
))
.await
}
};
let count = rows.len();
if let Err(e) = self.coord_client.insert_rows(id, columns, rows).await {
return self
.error(ErrorResponse::from_coord(Severity::Error, e))
.await;
}
let tag = format!("COPY {}", count);
self.conn
.send(BackendMessage::CommandComplete { tag })
.await?;
}
Ok(next_state)
}
async fn error(&mut self, err: ErrorResponse) -> Result<State, io::Error> {
assert!(err.severity.is_error());
debug!(
"cid={} error code={} message={}",
self.coord_client.session().conn_id(),
err.code.code(),
err.message
);
let is_fatal = err.severity.is_fatal();
self.conn.send(BackendMessage::ErrorResponse(err)).await?;
let txn = self.coord_client.session().transaction();
match txn {
// Error can be called from describe and parse and so might not be in an active
// transaction.
TransactionStatus::Default | TransactionStatus::Failed(_) => {}
// In Started (i.e., a single statement), cleanup ourselves.
TransactionStatus::Started(_) => {
self.rollback_transaction().await?;
}
// Implicit transactions also clear themselves.
TransactionStatus::InTransactionImplicit(_) => {
self.rollback_transaction().await?;
}
// Explicit transactions move to failed.
TransactionStatus::InTransaction(_) => {
self.coord_client.fail_transaction();
}
};
if is_fatal {
Ok(State::Done)
} else {
Ok(State::Drain)
}
}
async fn aborted_txn_error(&mut self) -> Result<State, io::Error> {
self.conn
.send(BackendMessage::ErrorResponse(ErrorResponse::error(
SqlState::IN_FAILED_SQL_TRANSACTION,
"current transaction is aborted, commands ignored until end of transaction block",
)))
.await?;
Ok(State::Drain)
}
fn is_aborted_txn(&mut self) -> bool {
matches!(
self.coord_client.session().transaction(),
TransactionStatus::Failed(_)
)
}
}
fn pad_formats(formats: Vec<pgrepr::Format>, n: usize) -> Result<Vec<pgrepr::Format>, String> {
match (formats.len(), n) {
(0, e) => Ok(vec![pgrepr::Format::Text; e]),
(1, e) => Ok(iter::repeat(formats[0]).take(e).collect()),
(a, e) if a == e => Ok(formats),
(a, e) => Err(format!(
"expected {} field format specifiers, but got {}",
e, a
)),
}
}
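// An added illustrative sketch (not part of the original source; module and
// test names are invented): `pad_formats` implements the pgwire rule that zero
// format codes means "everything is text", a single format code applies to
// every column, and any other count must match the column count exactly. The
// only assumption about the external `pgrepr::Format` type is that it is
// `Copy`, as already required by the code above.
#[cfg(test)]
mod pad_formats_sketch {
    use super::*;
    #[test]
    fn zero_one_or_exact() {
        // No formats provided: every column defaults to text.
        assert_eq!(pad_formats(vec![], 3).map(|f| f.len()), Ok(3));
        // A single format is repeated for every column.
        assert_eq!(
            pad_formats(vec![pgrepr::Format::Binary], 4).map(|f| f.len()),
            Ok(4)
        );
        // Any other length must match the column count exactly.
        assert!(pad_formats(vec![pgrepr::Format::Text; 2], 3).is_err());
    }
}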
fn describe_rows(stmt_desc: &StatementDesc, formats: &[pgrepr::Format]) -> BackendMessage {
match &stmt_desc.relation_desc {
Some(desc) if !stmt_desc.is_copy => {
BackendMessage::RowDescription(message::encode_row_description(desc, formats))
}
_ => BackendMessage::NoData,
}
}
fn parse_sql(sql: &str) -> Result<Vec<Statement<Raw>>, ErrorResponse> {
sql::parse::parse(sql).map_err(|e| {
// Convert our 0-based byte position to pgwire's 1-based character
// position.
let pos = sql[..e.pos].chars().count() + 1;
ErrorResponse::error(SqlState::SYNTAX_ERROR, e.message).with_position(pos)
})
}
type GetResponse = fn(
max_rows: ExecuteCount,
total_sent_rows: usize,
fetch_portal: Option<&mut Portal>,
) -> BackendMessage;
// A GetResponse used by send_rows during execute messages on portals or for
// simple query messages.
fn portal_exec_message(
max_rows: ExecuteCount,
total_sent_rows: usize,
_fetch_portal: Option<&mut Portal>,
) -> BackendMessage {
// If max_rows is not specified, we will always send back a CommandComplete. If
// max_rows is specified, we only send CommandComplete if there were more rows
// requested than were remaining. That is, if max_rows == number of rows that
// were remaining before sending (not that are remaining after sending), then
// we still send a PortalSuspended. The number of remaining rows after the rows
// have been sent doesn't matter. This matches postgres.
match max_rows {
ExecuteCount::Count(max_rows) if max_rows <= total_sent_rows => {
BackendMessage::PortalSuspended
}
_ => BackendMessage::CommandComplete {
tag: format!("SELECT {}", total_sent_rows),
},
}
}
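// An added illustrative sketch (not part of the original source; module and
// test names are invented): `portal_exec_message` reports PortalSuspended
// exactly when a max_rows limit was given and at least that many rows were
// sent, and a normal SELECT tag otherwise.
#[cfg(test)]
mod portal_exec_message_sketch {
    use super::*;
    #[test]
    fn suspends_only_when_limit_reached() {
        // Fewer rows sent than requested: the portal is drained, so complete.
        assert!(matches!(
            portal_exec_message(ExecuteCount::Count(5), 3, None),
            BackendMessage::CommandComplete { .. }
        ));
        // Exactly max_rows rows sent: report a suspended portal, like Postgres.
        assert!(matches!(
            portal_exec_message(ExecuteCount::Count(3), 3, None),
            BackendMessage::PortalSuspended
        ));
    }
}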
// A GetResponse used by send_rows during FETCH queries.
fn fetch_message(
_max_rows: ExecuteCount,
total_sent_rows: usize,
fetch_portal: Option<&mut Portal>,
) -> BackendMessage {
let tag = format!("FETCH {}", total_sent_rows);
if let Some(portal) = fetch_portal {
portal.state = PortalState::Completed(Some(tag.clone()));
}
BackendMessage::CommandComplete { tag }
}
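// Another added sketch (not part of the original source; names invented):
// without an outer FETCH portal there is nothing to mark completed, but the
// command tag still records how many rows were sent.
#[cfg(test)]
mod fetch_message_sketch {
    use super::*;
    #[test]
    fn reports_rows_fetched() {
        match fetch_message(ExecuteCount::All, 7, None) {
            BackendMessage::CommandComplete { tag } => assert_eq!(tag, "FETCH 7"),
            _ => panic!("expected CommandComplete"),
        }
    }
}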
#[derive(Debug, Copy, Clone)]
enum ExecuteCount {
All,
Count(usize),
}
// See postgres' backend/tcop/postgres.c IsTransactionExitStmt.
fn is_txn_exit_stmt(stmt: Option<&Statement<Raw>>) -> bool {
match stmt {
// Add PREPARE to this if we ever support it.
Some(stmt) => matches!(stmt, Statement::Commit(_) | Statement::Rollback(_)),
None => false,
}
}
#[derive(Debug)]
enum FetchResult {
Rows(Option<Vec<Row>>),
Canceled,
}
| 38.450466 | 137 | 0.498886 |
6ac5905c4b146342ee5ca7b0ae2a7c05956f2f30 | 8,453 | // Copyright 2017 Dropbox, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core;
use std::io;
use super::{SliceWrapperMut,SliceWrapper};
use super::alloc;
pub struct DynBuffer(Box<[u8]>);
impl core::default::Default for DynBuffer {
fn default() -> Self {
let v: Vec<u8> = Vec::new();
let b = v.into_boxed_slice();
DynBuffer(b)
}
}
impl DynBuffer {
#[allow(unused)]
pub fn new(size:usize) -> DynBuffer {
DynBuffer(vec![0u8;size].into_boxed_slice())
}
}
impl SliceWrapper<u8> for DynBuffer {
fn slice(&self) -> &[u8] {
&*self.0
}
}
impl SliceWrapperMut<u8> for DynBuffer {
fn slice_mut(&mut self) -> &mut [u8] {
&mut *self.0
}
}
#[cfg(feature="inplace-new")]
macro_rules! define_static_heap_buffer {
($name : ident, $size: expr) => {
pub struct $name(Box<[u8;$size]>);
impl core::default::Default for $name {
fn default() -> Self {
static DEFAULT_VALUE: [u8;$size] = [0u8;$size];
$name(Box::<[u8;$size]>::new(DEFAULT_VALUE))
}
}
impl SliceWrapper<u8> for $name {
fn slice(&self) -> &[u8] {
&*self.0
}
}
impl SliceWrapperMut<u8> for $name {
fn slice_mut(&mut self) -> &mut [u8] {
&mut *self.0
}
}
}
}
#[cfg(not(feature="inplace-new"))]
macro_rules! define_static_heap_buffer {
($name : ident, $size: expr) => {
pub struct $name(DynBuffer);
impl core::default::Default for $name {
fn default() -> Self {
$name(DynBuffer((vec![0u8;$size]).into_boxed_slice()))
}
}
impl SliceWrapper<u8> for $name {
fn slice(&self) -> &[u8] {
(&*(self.0).0).split_at($size).0
}
}
impl SliceWrapperMut<u8> for $name {
fn slice_mut(&mut self) -> &mut [u8] {
(&mut *(self.0).0).split_at_mut($size).0
}
}
}
}
define_static_heap_buffer!(StaticHeapBuffer10, 1<<10);
define_static_heap_buffer!(StaticHeapBuffer11, 1<<11);
define_static_heap_buffer!(StaticHeapBuffer12, 1<<12);
define_static_heap_buffer!(StaticHeapBuffer13, 1<<13);
define_static_heap_buffer!(StaticHeapBuffer14, 1<<14);
define_static_heap_buffer!(StaticHeapBuffer15, 1<<15);
define_static_heap_buffer!(StaticHeapBuffer16, 1<<16);
define_static_heap_buffer!(StaticHeapBuffer17, 1<<17);
define_static_heap_buffer!(StaticHeapBuffer18, 1<<18);
define_static_heap_buffer!(StaticHeapBuffer19, 1<<19);
define_static_heap_buffer!(StaticHeapBuffer20, 1<<20);
define_static_heap_buffer!(StaticHeapBuffer21, 1<<21);
define_static_heap_buffer!(StaticHeapBuffer22, 1<<22);
define_static_heap_buffer!(StaticHeapBuffer23, 1<<23);
define_static_heap_buffer!(StaticHeapBuffer24, 1<<24);
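// An added usage sketch (not part of the original source; module and test
// names are invented): whichever macro variant the `inplace-new` feature
// selects, `StaticHeapBufferN::default()` yields a zero-filled heap buffer of
// exactly 1 << N bytes.
#[cfg(test)]
mod static_heap_buffer_sketch {
    use super::{SliceWrapper, StaticHeapBuffer10};
    #[test]
    fn default_is_zeroed_1kib() {
        let buf = StaticHeapBuffer10::default();
        assert_eq!(buf.slice().len(), 1 << 10);
        assert!(buf.slice().iter().all(|&b| b == 0));
    }
}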
pub struct Rebox<T> {
b: Box<[T]>,
}
impl<T> core::default::Default for Rebox<T> {
fn default() -> Self {
let v: Vec<T> = Vec::new();
let b = v.into_boxed_slice();
Rebox::<T> { b: b }
}
}
impl<T> core::ops::Index<usize> for Rebox<T> {
type Output = T;
fn index(&self, index: usize) -> &T {
&(*self.b)[index]
}
}
impl<T> core::ops::IndexMut<usize> for Rebox<T> {
fn index_mut(&mut self, index: usize) -> &mut T {
&mut (*self.b)[index]
}
}
impl<T> alloc::SliceWrapper<T> for Rebox<T> {
fn slice(&self) -> &[T] {
&*self.b
}
}
impl<T> alloc::SliceWrapperMut<T> for Rebox<T> {
fn slice_mut(&mut self) -> &mut [T] {
&mut *self.b
}
}
pub struct HeapAllocator<T: core::clone::Clone> {
pub default_value: T,
}
impl<T: core::clone::Clone> alloc::Allocator<T> for HeapAllocator<T> {
type AllocatedMemory = Rebox<T>;
fn alloc_cell(self: &mut HeapAllocator<T>, len: usize) -> Rebox<T> {
let v: Vec<T> = vec![self.default_value.clone();len];
let b = v.into_boxed_slice();
Rebox::<T> { b: b }
}
fn free_cell(self: &mut HeapAllocator<T>, _data: Rebox<T>) {}
}
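// An added usage sketch (not part of the original source; module and test
// names are invented, and the `super::alloc::{Allocator, SliceWrapper}` paths
// are assumed from the impls above): a HeapAllocator hands out boxed slices
// filled with its default value, and freeing is simply dropping.
#[cfg(test)]
mod heap_allocator_sketch {
    use super::HeapAllocator;
    use super::alloc::{Allocator, SliceWrapper};
    #[test]
    fn allocates_default_filled_cells() {
        let mut allocator = HeapAllocator::<u8> { default_value: 7 };
        let cell = allocator.alloc_cell(4);
        assert_eq!(cell.slice(), &[7u8, 7, 7, 7]);
        allocator.free_cell(cell);
    }
}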
fn hex_to_nibble(byte: u8) -> Result<u8, ()> {
if byte >= b'A' && byte <= b'F' {
Ok(byte - b'A' + 10)
} else if byte >= b'a' && byte <= b'f' {
Ok(byte - b'a' + 10)
} else if byte >= b'0' && byte <= b'9' {
Ok(byte - b'0')
} else {
Err(())
}
}
fn quoted_slice_to_vec(s: &[u8]) -> Result<Vec<u8>, io::Error> {
if s.len() < 2 {
return Err(io::Error::new(io::ErrorKind::InvalidInput, core::str::from_utf8(s).unwrap()));
}
let mut output = Vec::<u8>::with_capacity(s.len() - 2);
let mut must_end = false;
let mut escaped = false;
let mut hexed = false;
let mut upper: Option<u8> = None;
for byte_ref in s.iter().skip(1) {
let byte = *byte_ref;
if must_end {
return Err(io::Error::new(io::ErrorKind::InvalidInput, core::str::from_utf8(s).unwrap()));
}
if byte == b'\"' && !escaped {
must_end = true;
continue;
}
if byte == b'\\' && !escaped {
escaped = true;
continue;
}
if escaped {
if hexed {
if let Ok(nib) = hex_to_nibble(byte) {
if let Some(unib) = upper {
output.push((unib << 4) | nib);
hexed = false;
escaped = false;
upper = None;
} else {
upper = Some(nib);
}
} else {
return Err(io::Error::new(io::ErrorKind::InvalidInput, core::str::from_utf8(s).unwrap()));
}
} else if byte == b'x' {
hexed = true;
} else if byte == b'n' {
output.push(b'\n');
escaped = false;
} else if byte == b'r' {
output.push(b'\r');
escaped = false;
} else if byte == b't' {
output.push(b'\t');
escaped = false;
} else if byte == b'\\' {
output.push(b'\\');
escaped = false;
} else if byte == b'\'' {
output.push(b'\'');
escaped = false;
} else if byte == b'\"' {
output.push(b'\"');
escaped = false;
} else if byte == b'?' {
output.push(b'?');
escaped = false;
} else {
return Err(io::Error::new(io::ErrorKind::InvalidInput, core::str::from_utf8(s).unwrap()));
}
} else {
output.push(byte);
}
}
if hexed || escaped || !must_end {
return Err(io::Error::new(io::ErrorKind::InvalidInput, core::str::from_utf8(s).unwrap()));
}
return Ok(output);
}
pub fn literal_slice_to_vec(s: &[u8]) -> Result<Vec<u8>, io::Error> {
if s.len() == 0 {
return Ok(Vec::<u8>::new());
}
if *s.iter().next().unwrap() == b'\"' {
quoted_slice_to_vec(s)
} else {
hex_slice_to_vec(s)
}
}
pub fn hex_slice_to_vec(s: &[u8]) -> Result<Vec<u8>, io::Error> {
let mut output = Vec::with_capacity(s.len() >> 1);
let mut rem = 0;
let mut buf : u8 = 0;
for byte_ref in s.iter() {
let byte = *byte_ref;
if let Ok(b) = hex_to_nibble(byte) {
buf <<= 4;
buf |= b;
} else if byte == b'\n'|| byte == b'\t'|| byte == b'\r' {
continue;
} else {
return Err(io::Error::new(io::ErrorKind::InvalidInput, core::str::from_utf8(s).unwrap()));
}
rem += 1;
if rem == 2 {
rem = 0;
output.push(buf);
}
}
if rem != 0 {
return Err(io::Error::new(io::ErrorKind::InvalidInput,
"String must have an even number of digits"));
}
Ok(output)
}
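// An added illustrative sketch (not part of the original source; module and
// test names are invented): literal values are accepted either as a C-style
// quoted string or as bare hex digits, and newlines/tabs between hex digit
// pairs are ignored.
#[cfg(test)]
mod literal_parsing_sketch {
    use super::{hex_slice_to_vec, literal_slice_to_vec};
    #[test]
    fn decodes_hex_and_quoted_forms() {
        assert_eq!(hex_slice_to_vec(b"0aFF").unwrap(), vec![0x0a, 0xff]);
        assert_eq!(hex_slice_to_vec(b"0a\nff").unwrap(), vec![0x0a, 0xff]);
        assert!(hex_slice_to_vec(b"abc").is_err()); // odd number of digits
        assert_eq!(literal_slice_to_vec(br#""hi\n""#).unwrap(), b"hi\n".to_vec());
        assert_eq!(literal_slice_to_vec(b"414243").unwrap(), b"ABC".to_vec());
    }
}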
| 29.452962 | 111 | 0.526914 |
1c5f5ab39f671d11008d88de16c9dae11d50312d | 14,815 | use std::collections::HashMap;
use std::env;
use std::fs::File;
use std::io::{self, Error as IoError, Read, Write};
use std::mem;
use std::path::{Path, PathBuf};
use pulldown_cmark::{CodeBlockKind, Event, HeadingLevel, Parser, Tag};
pub mod rt;
#[cfg(test)]
mod tests;
/// Returns a list of markdown files under a directory.
///
/// # Usage
///
/// List the markdown files of an `mdbook` project, which usually live under `<project dir>/book`:
///
/// ```rust
/// extern crate skeptic;
///
/// use skeptic::markdown_files_of_directory;
///
/// fn main() {
/// let _ = markdown_files_of_directory("book/");
/// }
/// ```
pub fn markdown_files_of_directory(dir: &str) -> Vec<PathBuf> {
use glob::{glob_with, MatchOptions};
let opts = MatchOptions {
case_sensitive: false,
require_literal_separator: false,
require_literal_leading_dot: false,
};
let mut out = Vec::new();
for path in glob_with(&format!("{}/**/*.md", dir), opts)
.expect("Failed to read glob pattern")
.filter_map(Result::ok)
{
out.push(path.to_str().unwrap().into());
}
out
}
/// Generates tests for specified markdown files.
///
/// # Usage
///
/// Generates doc tests for the specified files.
///
/// ```rust,no_run
/// extern crate skeptic;
///
/// use skeptic::generate_doc_tests;
///
/// fn main() {
/// generate_doc_tests(&["README.md"]);
/// }
/// ```
///
/// Or in case you want to add `mdbook` files:
///
/// ```rust,no_run
/// extern crate skeptic;
///
/// use skeptic::*;
///
/// fn main() {
/// let mut mdbook_files = markdown_files_of_directory("book/");
/// mdbook_files.push("README.md".into());
/// generate_doc_tests(&mdbook_files);
/// }
/// ```
pub fn generate_doc_tests<T: Clone>(docs: &[T])
where
T: AsRef<Path>,
{
    // This shortcut exists specifically so that the examples in skeptic's own
    // README can call this function in non-build.rs contexts without
    // panicking below.
if docs.is_empty() {
return;
}
let docs = docs
.iter()
.cloned()
.map(|path| path.as_ref().to_str().unwrap().to_owned())
.filter(|d| !d.ends_with(".skt.md"))
.collect::<Vec<_>>();
    // Inform cargo that it needs to rerun the build script if any of the skeptic
    // files is modified.
for doc in &docs {
println!("cargo:rerun-if-changed={}", doc);
let skt = format!("{}.skt.md", doc);
if Path::new(&skt).exists() {
println!("cargo:rerun-if-changed={}", skt);
}
}
let out_dir = env::var("OUT_DIR").unwrap();
let cargo_manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
let mut out_file = PathBuf::from(out_dir.clone());
out_file.push("skeptic-tests.rs");
let config = Config {
out_dir: PathBuf::from(out_dir),
root_dir: PathBuf::from(cargo_manifest_dir),
out_file,
target_triple: env::var("TARGET").expect("could not get target triple"),
docs,
};
run(&config);
}
struct Config {
out_dir: PathBuf,
root_dir: PathBuf,
out_file: PathBuf,
target_triple: String,
docs: Vec<String>,
}
fn run(config: &Config) {
let tests = extract_tests(config).unwrap();
emit_tests(config, tests).unwrap();
}
struct Test {
name: String,
text: Vec<String>,
ignore: bool,
no_run: bool,
should_panic: bool,
template: Option<String>,
}
struct DocTestSuite {
doc_tests: Vec<DocTest>,
}
struct DocTest {
path: PathBuf,
old_template: Option<String>,
tests: Vec<Test>,
templates: HashMap<String, String>,
}
fn extract_tests(config: &Config) -> Result<DocTestSuite, IoError> {
let mut doc_tests = Vec::new();
for doc in &config.docs {
let path = &mut config.root_dir.clone();
path.push(doc);
let new_tests = extract_tests_from_file(path)?;
doc_tests.push(new_tests);
}
Ok(DocTestSuite { doc_tests })
}
enum Buffer {
None,
Code(Vec<String>),
Heading(String),
}
fn extract_tests_from_file(path: &Path) -> Result<DocTest, IoError> {
let mut file = File::open(path)?;
let s = &mut String::new();
file.read_to_string(s)?;
let file_stem = &sanitize_test_name(path.file_stem().unwrap().to_str().unwrap());
let tests = extract_tests_from_string(s, file_stem);
let templates = load_templates(path)?;
Ok(DocTest {
path: path.to_owned(),
old_template: tests.1,
tests: tests.0,
templates,
})
}
fn extract_tests_from_string(s: &str, file_stem: &str) -> (Vec<Test>, Option<String>) {
let mut tests = Vec::new();
let mut buffer = Buffer::None;
let parser = Parser::new(s);
let mut section = None;
let mut code_block_start = 0;
// Oh this isn't actually a test but a legacy template
let mut old_template = None;
for (event, range) in parser.into_offset_iter() {
let line_number = bytecount::count(&s.as_bytes()[0..range.start], b'\n');
match event {
Event::Start(Tag::Heading(level, ..)) if level < HeadingLevel::H3 => {
buffer = Buffer::Heading(String::new());
}
Event::End(Tag::Heading(level, ..)) if level < HeadingLevel::H3 => {
let cur_buffer = mem::replace(&mut buffer, Buffer::None);
if let Buffer::Heading(sect) = cur_buffer {
                    section = Some(sanitize_test_name(&sect));
}
}
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(ref info))) => {
let code_block_info = parse_code_block_info(info);
if code_block_info.is_rust {
buffer = Buffer::Code(Vec::new());
}
}
Event::Text(text) => {
if let Buffer::Code(ref mut buf) = buffer {
if buf.is_empty() {
code_block_start = line_number;
}
buf.extend(text.lines().map(|s| format!("{}\n", s)));
} else if let Buffer::Heading(ref mut buf) = buffer {
buf.push_str(&*text);
}
}
Event::End(Tag::CodeBlock(CodeBlockKind::Fenced(ref info))) => {
let code_block_info = parse_code_block_info(info);
if let Buffer::Code(buf) = mem::replace(&mut buffer, Buffer::None) {
if code_block_info.is_old_template {
old_template = Some(buf.into_iter().collect())
} else {
let name = if let Some(ref section) = section {
format!("{}_sect_{}_line_{}", file_stem, section, code_block_start)
} else {
format!("{}_line_{}", file_stem, code_block_start)
};
tests.push(Test {
name,
text: buf,
ignore: code_block_info.ignore,
no_run: code_block_info.no_run,
should_panic: code_block_info.should_panic,
template: code_block_info.template,
});
}
}
}
_ => (),
}
}
(tests, old_template)
}
fn load_templates(path: &Path) -> Result<HashMap<String, String>, IoError> {
let file_name = format!(
"{}.skt.md",
path.file_name().expect("no file name").to_string_lossy()
);
let path = path.with_file_name(&file_name);
if !path.exists() {
return Ok(HashMap::new());
}
let mut map = HashMap::new();
let mut file = File::open(path)?;
let s = &mut String::new();
file.read_to_string(s)?;
let parser = Parser::new(s);
let mut code_buffer = None;
for event in parser {
match event {
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(ref info))) => {
let code_block_info = parse_code_block_info(info);
if code_block_info.is_rust {
code_buffer = Some(Vec::new());
}
}
Event::Text(text) => {
if let Some(ref mut buf) = code_buffer {
buf.push(text.to_string());
}
}
Event::End(Tag::CodeBlock(CodeBlockKind::Fenced(ref info))) => {
let code_block_info = parse_code_block_info(info);
if let Some(buf) = code_buffer.take() {
if let Some(t) = code_block_info.template {
map.insert(t, buf.into_iter().collect());
}
}
}
_ => (),
}
}
Ok(map)
}
fn sanitize_test_name(s: &str) -> String {
s.to_ascii_lowercase()
.chars()
.map(|ch| {
if ch.is_ascii() && ch.is_alphanumeric() {
ch
} else {
'_'
}
})
.collect::<String>()
.split('_')
.filter(|s| !s.is_empty())
.collect::<Vec<_>>()
.join("_")
}
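// An added illustrative sketch (not part of the original source; module and
// test names are invented): test names are lowercased and every run of
// non-alphanumeric characters collapses into a single underscore.
#[cfg(test)]
mod sanitize_test_name_sketch {
    use super::sanitize_test_name;
    #[test]
    fn collapses_separators() {
        assert_eq!(sanitize_test_name("Getting Started"), "getting_started");
        assert_eq!(sanitize_test_name("foo--bar.md"), "foo_bar_md");
        assert_eq!(sanitize_test_name("README"), "readme");
    }
}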
fn parse_code_block_info(info: &str) -> CodeBlockInfo {
// Same as rustdoc
let tokens = info.split(|c: char| !(c == '_' || c == '-' || c.is_alphanumeric()));
let mut seen_rust_tags = false;
let mut seen_other_tags = false;
let mut info = CodeBlockInfo {
is_rust: false,
should_panic: false,
ignore: false,
no_run: false,
is_old_template: false,
template: None,
};
for token in tokens {
match token {
"" => {}
"rust" => {
info.is_rust = true;
seen_rust_tags = true
}
"should_panic" => {
info.should_panic = true;
seen_rust_tags = true
}
"ignore" => {
info.ignore = true;
seen_rust_tags = true
}
"no_run" => {
info.no_run = true;
seen_rust_tags = true;
}
"skeptic-template" => {
info.is_old_template = true;
seen_rust_tags = true
}
_ if token.starts_with("skt-") => {
info.template = Some(token[4..].to_string());
seen_rust_tags = true;
}
_ => seen_other_tags = true,
}
}
info.is_rust &= !seen_other_tags || seen_rust_tags;
info
}
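// An added illustrative sketch (not part of the original source; module and
// test names are invented): the info string of a fenced block is split on
// non-identifier characters, rustdoc-style tags are recognized, and
// `skt-<name>` selects a template.
#[cfg(test)]
mod parse_code_block_info_sketch {
    use super::parse_code_block_info;
    #[test]
    fn recognizes_rust_tags_and_templates() {
        let info = parse_code_block_info("rust,no_run");
        assert!(info.is_rust && info.no_run && !info.ignore);
        let info = parse_code_block_info("rust,skt-wrapper");
        assert_eq!(info.template.as_deref(), Some("wrapper"));
        let info = parse_code_block_info("python");
        assert!(!info.is_rust);
    }
}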
struct CodeBlockInfo {
is_rust: bool,
should_panic: bool,
ignore: bool,
no_run: bool,
is_old_template: bool,
template: Option<String>,
}
fn emit_tests(config: &Config, suite: DocTestSuite) -> Result<(), IoError> {
let mut out = String::new();
// Test cases use the api from skeptic::rt
out.push_str("extern crate skeptic;\n");
for doc_test in suite.doc_tests {
for test in &doc_test.tests {
let test_string = {
if let Some(ref t) = test.template {
let template = doc_test.templates.get(t).unwrap_or_else(|| {
panic!("template {} not found for {}", t, doc_test.path.display())
});
create_test_runner(config, &Some(template.to_string()), test)?
} else {
create_test_runner(config, &doc_test.old_template, test)?
}
};
out.push_str(&test_string);
}
}
write_if_contents_changed(&config.out_file, &out)
}
/// Just like Rustdoc, ignore a "#" sign at the beginning of a line of code.
/// These are commonly an indication to omit the line from user-facing
/// documentation but include it for the purpose of playground links or skeptic
/// testing.
#[allow(clippy::manual_strip)] // Relies on str::strip_prefix(), MSRV 1.45
fn clean_omitted_line(line: &str) -> &str {
    // XXX To silence the deprecation warning for trim_left and not bump the rustc
    // requirement up to 1.30 (for trim_start) we roll our own trim_left :(
let trimmed = if let Some(pos) = line.find(|c: char| !c.is_whitespace()) {
&line[pos..]
} else {
line
};
if trimmed.starts_with("# ") {
&trimmed[2..]
} else if line.trim() == "#" {
// line consists of single "#" which might not be followed by newline on windows
&trimmed[1..]
} else {
line
}
}
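// An added illustrative sketch (not part of the original source; module and
// test names are invented): lines hidden from rendered documentation with a
// leading `#` are un-hidden for the generated test, while ordinary lines pass
// through untouched.
#[cfg(test)]
mod clean_omitted_line_sketch {
    use super::clean_omitted_line;
    #[test]
    fn strips_hidden_line_markers() {
        assert_eq!(clean_omitted_line("# extern crate foo;\n"), "extern crate foo;\n");
        assert_eq!(clean_omitted_line("#\n"), "\n");
        assert_eq!(clean_omitted_line("let x = 1;\n"), "let x = 1;\n");
    }
}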
/// Creates the Rust code that this test will be operating on.
fn create_test_input(lines: &[String]) -> String {
lines
.iter()
.map(|s| clean_omitted_line(s).to_owned())
.collect()
}
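// An added companion sketch (not part of the original source; module and test
// names are invented): the cleaned lines are concatenated verbatim into the
// body handed to the test template.
#[cfg(test)]
mod create_test_input_sketch {
    use super::create_test_input;
    #[test]
    fn joins_cleaned_lines() {
        let lines = vec!["# extern crate foo;\n".to_string(), "fn main() {}\n".to_string()];
        assert_eq!(create_test_input(&lines), "extern crate foo;\nfn main() {}\n");
    }
}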
fn create_test_runner(
config: &Config,
template: &Option<String>,
test: &Test,
) -> Result<String, IoError> {
let template = template.clone().unwrap_or_else(|| String::from("{}"));
let test_text = create_test_input(&test.text);
let mut s: Vec<u8> = Vec::new();
if test.ignore {
writeln!(s, "#[ignore]")?;
}
if test.should_panic {
writeln!(s, "#[should_panic]")?;
}
writeln!(s, "#[test] fn {}() {{", test.name)?;
writeln!(
s,
" let s = &format!(r####\"\n{}\"####, r####\"{}\"####);",
template, test_text
)?;
// if we are not running, just compile the test without running it
if test.no_run {
writeln!(
s,
" skeptic::rt::compile_test(r#\"{}\"#, r#\"{}\"#, r#\"{}\"#, s);",
config.root_dir.to_str().unwrap(),
config.out_dir.to_str().unwrap(),
config.target_triple
)?;
} else {
writeln!(
s,
" skeptic::rt::run_test(r#\"{}\"#, r#\"{}\"#, r#\"{}\"#, s);",
config.root_dir.to_str().unwrap(),
config.out_dir.to_str().unwrap(),
config.target_triple
)?;
}
writeln!(s, "}}")?;
writeln!(s)?;
Ok(String::from_utf8(s).unwrap())
}
fn write_if_contents_changed(name: &Path, contents: &str) -> Result<(), IoError> {
// Can't open in write mode now as that would modify the last changed timestamp of the file
match File::open(name) {
Ok(mut file) => {
let mut current_contents = String::new();
file.read_to_string(&mut current_contents)?;
if current_contents == contents {
// No change avoid writing to avoid updating the timestamp of the file
return Ok(());
}
}
Err(ref err) if err.kind() == io::ErrorKind::NotFound => (),
Err(err) => return Err(err),
}
let mut file = File::create(name)?;
file.write_all(contents.as_bytes())?;
Ok(())
}
| 29.394841 | 95 | 0.531961 |
fb4c9f3bad7154686a13dfe76f72bdaa185f2cfd | 155,900 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! See [rustc guide] for more info on how this works.
//!
//! [rustc guide]: https://rust-lang.github.io/rustc-guide/traits/resolution.html#selection
use self::EvaluationResult::*;
use self::SelectionCandidate::*;
use super::coherence::{self, Conflict};
use super::project;
use super::project::{normalize_with_depth, Normalized, ProjectionCacheKey};
use super::util;
use super::DerivedObligationCause;
use super::Selection;
use super::SelectionResult;
use super::TraitNotObjectSafe;
use super::{BuiltinDerivedObligation, ImplDerivedObligation, ObligationCauseCode};
use super::{IntercrateMode, TraitQueryMode};
use super::{ObjectCastObligation, Obligation};
use super::{ObligationCause, PredicateObligation, TraitObligation};
use super::{OutputTypeParameterMismatch, Overflow, SelectionError, Unimplemented};
use super::{
VtableAutoImpl, VtableBuiltin, VtableClosure, VtableFnPointer, VtableGenerator, VtableImpl,
VtableObject, VtableParam, VtableTraitAlias,
};
use super::{
VtableAutoImplData, VtableBuiltinData, VtableClosureData, VtableFnPointerData,
VtableGeneratorData, VtableImplData, VtableObjectData, VtableTraitAliasData,
};
use dep_graph::{DepKind, DepNodeIndex};
use hir::def_id::DefId;
use infer;
use infer::{InferCtxt, InferOk, TypeFreshener};
use middle::lang_items;
use mir::interpret::GlobalId;
use ty::fast_reject;
use ty::relate::TypeRelation;
use ty::subst::{Subst, Substs};
use ty::{self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable};
use hir;
use rustc_data_structures::bit_set::GrowableBitSet;
use rustc_data_structures::sync::Lock;
use rustc_target::spec::abi::Abi;
use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::rc::Rc;
use util::nodemap::{FxHashMap, FxHashSet};
pub struct SelectionContext<'cx, 'gcx: 'cx + 'tcx, 'tcx: 'cx> {
infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
/// Freshener used specifically for entries on the obligation
/// stack. This ensures that all entries on the stack at one time
/// will have the same set of placeholder entries, which is
/// important for checking for trait bounds that recursively
/// require themselves.
freshener: TypeFreshener<'cx, 'gcx, 'tcx>,
/// If true, indicates that the evaluation should be conservative
/// and consider the possibility of types outside this crate.
/// This comes up primarily when resolving ambiguity. Imagine
/// there is some trait reference `$0 : Bar` where `$0` is an
/// inference variable. If `intercrate` is true, then we can never
/// say for sure that this reference is not implemented, even if
/// there are *no impls at all for `Bar`*, because `$0` could be
/// bound to some type that in a downstream crate that implements
/// `Bar`. This is the suitable mode for coherence. Elsewhere,
/// though, we set this to false, because we are only interested
/// in types that the user could actually have written --- in
/// other words, we consider `$0 : Bar` to be unimplemented if
/// there is no type that the user could *actually name* that
/// would satisfy it. This avoids crippling inference, basically.
intercrate: Option<IntercrateMode>,
intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,
/// Controls whether or not to filter out negative impls when selecting.
/// This is used in librustdoc to distinguish between the lack of an impl
/// and a negative impl
allow_negative_impls: bool,
/// The mode that trait queries run in, which informs our error handling
/// policy. In essence, canonicalized queries need their errors propagated
/// rather than immediately reported because we do not have accurate spans.
query_mode: TraitQueryMode,
}
#[derive(Clone, Debug)]
pub enum IntercrateAmbiguityCause {
DownstreamCrate {
trait_desc: String,
self_desc: Option<String>,
},
UpstreamCrateUpdate {
trait_desc: String,
self_desc: Option<String>,
},
}
impl IntercrateAmbiguityCause {
/// Emits notes when the overlap is caused by complex intercrate ambiguities.
/// See #23980 for details.
pub fn add_intercrate_ambiguity_hint<'a, 'tcx>(
&self,
err: &mut ::errors::DiagnosticBuilder<'_>,
) {
err.note(&self.intercrate_ambiguity_hint());
}
pub fn intercrate_ambiguity_hint(&self) -> String {
match self {
&IntercrateAmbiguityCause::DownstreamCrate {
ref trait_desc,
ref self_desc,
} => {
let self_desc = if let &Some(ref ty) = self_desc {
format!(" for type `{}`", ty)
} else {
String::new()
};
format!(
"downstream crates may implement trait `{}`{}",
trait_desc, self_desc
)
}
&IntercrateAmbiguityCause::UpstreamCrateUpdate {
ref trait_desc,
ref self_desc,
} => {
let self_desc = if let &Some(ref ty) = self_desc {
format!(" for type `{}`", ty)
} else {
String::new()
};
format!(
"upstream crates may add new impl of trait `{}`{} \
in future versions",
trait_desc, self_desc
)
}
}
}
}
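// An added illustrative sketch (not part of the original source; module and
// test names are invented): the hint text simply interpolates the trait and
// optional self-type descriptions recorded on the cause.
#[cfg(test)]
mod intercrate_ambiguity_hint_sketch {
    use super::IntercrateAmbiguityCause;
    #[test]
    fn formats_downstream_hint() {
        let cause = IntercrateAmbiguityCause::DownstreamCrate {
            trait_desc: "Foo".to_string(),
            self_desc: Some("Bar".to_string()),
        };
        assert_eq!(
            cause.intercrate_ambiguity_hint(),
            "downstream crates may implement trait `Foo` for type `Bar`"
        );
    }
}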
// A stack that walks back up the stack frame.
struct TraitObligationStack<'prev, 'tcx: 'prev> {
obligation: &'prev TraitObligation<'tcx>,
/// Trait ref from `obligation` but "freshened" with the
/// selection-context's freshener. Used to check for recursion.
fresh_trait_ref: ty::PolyTraitRef<'tcx>,
previous: TraitObligationStackList<'prev, 'tcx>,
}
#[derive(Clone, Default)]
pub struct SelectionCache<'tcx> {
hashmap: Lock<
FxHashMap<ty::TraitRef<'tcx>, WithDepNode<SelectionResult<'tcx, SelectionCandidate<'tcx>>>>,
>,
}
/// The selection process begins by considering all impls, where
/// clauses, and so forth that might resolve an obligation. Sometimes
/// we'll be able to say definitively that (e.g.) an impl does not
/// apply to the obligation: perhaps it is defined for `usize` but the
/// obligation is for `int`. In that case, we drop the impl out of the
/// list. But the other cases are considered *candidates*.
///
/// For selection to succeed, there must be exactly one matching
/// candidate. If the obligation is fully known, this is guaranteed
/// by coherence. However, if the obligation contains type parameters
/// or variables, there may be multiple such impls.
///
/// It is not a real problem if multiple matching impls exist because
/// of type variables - it just means the obligation isn't sufficiently
/// elaborated. In that case we report an ambiguity, and the caller can
/// try again after more type information has been gathered or report a
/// "type annotations required" error.
///
/// However, with type parameters, this can be a real problem - type
/// parameters don't unify with regular types, but they *can* unify
/// with variables from blanket impls, and (unless we know its bounds
/// will always be satisfied) picking the blanket impl will be wrong
/// for at least *some* substitutions. To make this concrete, if we have
///
/// trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; }
/// impl<T: fmt::Debug> AsDebug for T {
/// type Out = T;
/// fn debug(self) -> fmt::Debug { self }
/// }
/// fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
///
/// we can't just use the impl to resolve the <T as AsDebug> obligation
/// - a type from another crate (that doesn't implement fmt::Debug) could
/// implement AsDebug.
///
/// Because where-clauses match the type exactly, multiple clauses can
/// only match if there are unresolved variables, and we can mostly just
/// report this ambiguity in that case. This is still a problem - we can't
/// *do anything* with ambiguities that involve only regions. This is issue
/// #21974.
///
/// If a single where-clause matches and there are no inference
/// variables left, then it definitely matches and we can just select
/// it.
///
/// In fact, we even select the where-clause when the obligation contains
/// inference variables. This can lead to inference making "leaps of logic",
/// for example in this situation:
///
/// pub trait Foo<T> { fn foo(&self) -> T; }
/// impl<T> Foo<()> for T { fn foo(&self) { } }
/// impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
///
/// pub fn foo<T>(t: T) where T: Foo<bool> {
/// println!("{:?}", <T as Foo<_>>::foo(&t));
/// }
/// fn main() { foo(false); }
///
/// Here the obligation <T as Foo<$0>> can be matched by both the blanket
/// impl and the where-clause. We select the where-clause and unify $0=bool,
/// so the program prints "false". However, if the where-clause is omitted,
/// the blanket impl is selected, we unify $0=(), and the program prints
/// "()".
///
/// Exactly the same issues apply to projection and object candidates, except
/// that we can have both a projection candidate and a where-clause candidate
/// for the same obligation. In that case either would do (except that
/// different "leaps of logic" would occur if inference variables are
/// present), and we just pick the where-clause. This is, for example,
/// required for associated types to work in default impls, as the bounds
/// are visible both as projection bounds and as where-clauses from the
/// parameter environment.
#[derive(PartialEq, Eq, Debug, Clone)]
enum SelectionCandidate<'tcx> {
/// If has_nested is false, there are no *further* obligations
BuiltinCandidate {
has_nested: bool,
},
ParamCandidate(ty::PolyTraitRef<'tcx>),
ImplCandidate(DefId),
AutoImplCandidate(DefId),
/// This is a trait matching with a projected type as `Self`, and
/// we found an applicable bound in the trait definition.
ProjectionCandidate,
/// Implementation of a `Fn`-family trait by one of the anonymous types
/// generated for a `||` expression.
ClosureCandidate,
/// Implementation of a `Generator` trait by one of the anonymous types
/// generated for a generator.
GeneratorCandidate,
/// Implementation of a `Fn`-family trait by one of the anonymous
/// types generated for a fn pointer type (e.g., `fn(int)->int`)
FnPointerCandidate,
TraitAliasCandidate(DefId),
ObjectCandidate,
BuiltinObjectCandidate,
BuiltinUnsizeCandidate,
}
impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> {
type Lifted = SelectionCandidate<'tcx>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
Some(match *self {
BuiltinCandidate { has_nested } => BuiltinCandidate { has_nested },
ImplCandidate(def_id) => ImplCandidate(def_id),
AutoImplCandidate(def_id) => AutoImplCandidate(def_id),
ProjectionCandidate => ProjectionCandidate,
ClosureCandidate => ClosureCandidate,
GeneratorCandidate => GeneratorCandidate,
FnPointerCandidate => FnPointerCandidate,
TraitAliasCandidate(def_id) => TraitAliasCandidate(def_id),
ObjectCandidate => ObjectCandidate,
BuiltinObjectCandidate => BuiltinObjectCandidate,
BuiltinUnsizeCandidate => BuiltinUnsizeCandidate,
ParamCandidate(ref trait_ref) => {
return tcx.lift(trait_ref).map(ParamCandidate);
}
})
}
}
struct SelectionCandidateSet<'tcx> {
// a list of candidates that definitely apply to the current
// obligation (meaning: types unify).
vec: Vec<SelectionCandidate<'tcx>>,
// if this is true, then there were candidates that might or might
// not have applied, but we couldn't tell. This occurs when some
// of the input types are type variables, in which case there are
// various "builtin" rules that might or might not trigger.
ambiguous: bool,
}
#[derive(PartialEq, Eq, Debug, Clone)]
struct EvaluatedCandidate<'tcx> {
candidate: SelectionCandidate<'tcx>,
evaluation: EvaluationResult,
}
/// When does the builtin impl for `T: Trait` apply?
enum BuiltinImplConditions<'tcx> {
/// The impl is conditional on T1,T2,.. : Trait
Where(ty::Binder<Vec<Ty<'tcx>>>),
/// There is no built-in impl. There may be some other
/// candidate (a where-clause or user-defined impl).
None,
/// It is unknown whether there is an impl.
Ambiguous,
}
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
/// The result of trait evaluation. The order is important
/// here as the evaluation of a list is the maximum of the
/// evaluations.
///
/// The evaluation results are ordered:
/// - `EvaluatedToOk` implies `EvaluatedToAmbig` implies `EvaluatedToUnknown`
/// - `EvaluatedToErr` implies `EvaluatedToRecur`
/// - the "union" of evaluation results is equal to their maximum -
/// all the "potential success" candidates can potentially succeed,
/// so they are no-ops when unioned with a definite error, and within
/// the categories it's easy to see that the unions are correct.
pub enum EvaluationResult {
/// Evaluation successful
EvaluatedToOk,
/// Evaluation is known to be ambiguous - it *might* hold for some
/// assignment of inference variables, but it might not.
///
/// While this has the same meaning as `EvaluatedToUnknown` - we can't
/// know whether this obligation holds or not - it is the result we
/// would get with an empty stack, and therefore is cacheable.
EvaluatedToAmbig,
/// Evaluation failed because of recursion involving inference
/// variables. We are somewhat imprecise there, so we don't actually
/// know the real result.
///
/// This can't be trivially cached for the same reason as `EvaluatedToRecur`.
EvaluatedToUnknown,
/// Evaluation failed because we encountered an obligation we are already
/// trying to prove on this branch.
///
/// We know this branch can't be a part of a minimal proof-tree for
/// the "root" of our cycle, because then we could cut out the recursion
/// and maintain a valid proof tree. However, this does not mean
/// that all the obligations on this branch do not hold - it's possible
/// that we entered this branch "speculatively", and that there
/// might be some other way to prove this obligation that does not
/// go through this cycle - so we can't cache this as a failure.
///
/// For example, suppose we have this:
///
/// ```rust,ignore (pseudo-Rust)
/// pub trait Trait { fn xyz(); }
/// // This impl is "useless", but we can still have
/// // an `impl Trait for SomeUnsizedType` somewhere.
/// impl<T: Trait + Sized> Trait for T { fn xyz() {} }
///
/// pub fn foo<T: Trait + ?Sized>() {
/// <T as Trait>::xyz();
/// }
/// ```
///
/// When checking `foo`, we have to prove `T: Trait`. This basically
/// translates into this:
///
/// ```plain,ignore
/// (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait
/// ```
///
/// When we try to prove it, we first try the first option, which
/// recurses. This shows us that the impl is "useless" - it won't
/// tell us that `T: Trait` unless `T` already implements `Trait`
/// by some other means. However, that does not mean that `T: Trait`
/// cannot hold, because the bound can indeed be satisfied
/// by `SomeUnsizedType` from another crate.
///
/// FIXME: when an `EvaluatedToRecur` goes past its parent root, we
/// ought to convert it to an `EvaluatedToErr`, because we know
/// there definitely isn't a proof tree for that obligation. Not
/// doing so is still sound - there isn't any proof tree, so the
/// branch still can't be a part of a minimal one - but does not
/// re-enable caching.
EvaluatedToRecur,
/// Evaluation failed
EvaluatedToErr,
}
impl EvaluationResult {
pub fn may_apply(self) -> bool {
match self {
EvaluatedToOk | EvaluatedToAmbig | EvaluatedToUnknown => true,
EvaluatedToErr | EvaluatedToRecur => false,
}
}
fn is_stack_dependent(self) -> bool {
match self {
EvaluatedToUnknown | EvaluatedToRecur => true,
EvaluatedToOk | EvaluatedToAmbig | EvaluatedToErr => false,
}
}
}
impl_stable_hash_for!(enum self::EvaluationResult {
EvaluatedToOk,
EvaluatedToAmbig,
EvaluatedToUnknown,
EvaluatedToRecur,
EvaluatedToErr
});
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// Indicates that trait evaluation caused overflow.
pub struct OverflowError;
impl_stable_hash_for!(struct OverflowError {});
impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
fn from(OverflowError: OverflowError) -> SelectionError<'tcx> {
SelectionError::Overflow
}
}
#[derive(Clone, Default)]
pub struct EvaluationCache<'tcx> {
hashmap: Lock<FxHashMap<ty::PolyTraitRef<'tcx>, WithDepNode<EvaluationResult>>>,
}
impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> {
pub fn new(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>) -> SelectionContext<'cx, 'gcx, 'tcx> {
SelectionContext {
infcx,
freshener: infcx.freshener(),
intercrate: None,
intercrate_ambiguity_causes: None,
allow_negative_impls: false,
query_mode: TraitQueryMode::Standard,
}
}
pub fn intercrate(
infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
mode: IntercrateMode,
) -> SelectionContext<'cx, 'gcx, 'tcx> {
debug!("intercrate({:?})", mode);
SelectionContext {
infcx,
freshener: infcx.freshener(),
intercrate: Some(mode),
intercrate_ambiguity_causes: None,
allow_negative_impls: false,
query_mode: TraitQueryMode::Standard,
}
}
pub fn with_negative(
infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
allow_negative_impls: bool,
) -> SelectionContext<'cx, 'gcx, 'tcx> {
debug!("with_negative({:?})", allow_negative_impls);
SelectionContext {
infcx,
freshener: infcx.freshener(),
intercrate: None,
intercrate_ambiguity_causes: None,
allow_negative_impls,
query_mode: TraitQueryMode::Standard,
}
}
pub fn with_query_mode(
infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
query_mode: TraitQueryMode,
) -> SelectionContext<'cx, 'gcx, 'tcx> {
debug!("with_query_mode({:?})", query_mode);
SelectionContext {
infcx,
freshener: infcx.freshener(),
intercrate: None,
intercrate_ambiguity_causes: None,
allow_negative_impls: false,
query_mode,
}
}
/// Enables tracking of intercrate ambiguity causes. These are
/// used in coherence to give improved diagnostics. We don't do
/// this until we detect a coherence error because it can lead to
/// false overflow results (#47139) and because it costs
/// computation time.
pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
assert!(self.intercrate.is_some());
assert!(self.intercrate_ambiguity_causes.is_none());
self.intercrate_ambiguity_causes = Some(vec![]);
debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
}
/// Gets the intercrate ambiguity causes collected since tracking
/// was enabled and disables tracking at the same time. If
/// tracking is not enabled, just returns an empty vector.
pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
assert!(self.intercrate.is_some());
self.intercrate_ambiguity_causes.take().unwrap_or(vec![])
}
pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> {
self.infcx
}
pub fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> {
self.infcx.tcx
}
pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> {
self.infcx
}
/// Wraps the inference context's `in_snapshot`, so that snapshot handling is routed through the
/// selection context (`self`).
fn in_snapshot<R, F>(&mut self, f: F) -> R
where
F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R,
{
self.infcx.in_snapshot(|snapshot| f(self, snapshot))
}
/// Wraps a probe s.t. obligations collected during it are ignored and old obligations are
/// retained.
fn probe<R, F>(&mut self, f: F) -> R
where
F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R,
{
self.infcx.probe(|snapshot| f(self, snapshot))
}
/// Wraps a commit_if_ok s.t. obligations collected during it are not returned in selection if
/// the transaction fails and s.t. old obligations are retained.
fn commit_if_ok<T, E, F>(&mut self, f: F) -> Result<T, E>
where
F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> Result<T, E>,
{
self.infcx.commit_if_ok(|snapshot| f(self, snapshot))
}
///////////////////////////////////////////////////////////////////////////
// Selection
//
// The selection phase tries to identify *how* an obligation will
// be resolved. For example, it will identify which impl or
// parameter bound is to be used. The process can be inconclusive
// if the self type in the obligation is not fully inferred. Selection
// can result in an error in one of two ways:
//
// 1. If no applicable impl or parameter bound can be found.
// 2. If the output type parameters in the obligation do not match
// those specified by the impl/bound. For example, if the obligation
// is `Vec<Foo>:Iterable<Bar>`, but the impl specifies
// `impl<T> Iterable<T> for Vec<T>`, then an error would result.
/// Attempts to satisfy the obligation. If successful, this will affect the surrounding
/// type environment by performing unification.
pub fn select(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> SelectionResult<'tcx, Selection<'tcx>> {
debug!("select({:?})", obligation);
debug_assert!(!obligation.predicate.has_escaping_bound_vars());
let stack = self.push_stack(TraitObligationStackList::empty(), obligation);
let candidate = match self.candidate_from_obligation(&stack) {
Err(SelectionError::Overflow) => {
// In standard mode, overflow must have been caught and reported
// earlier.
assert!(self.query_mode == TraitQueryMode::Canonical);
return Err(SelectionError::Overflow);
}
Err(e) => {
return Err(e);
}
Ok(None) => {
return Ok(None);
}
Ok(Some(candidate)) => candidate,
};
match self.confirm_candidate(obligation, candidate) {
Err(SelectionError::Overflow) => {
assert!(self.query_mode == TraitQueryMode::Canonical);
Err(SelectionError::Overflow)
}
Err(e) => Err(e),
Ok(candidate) => Ok(Some(candidate)),
}
}
///////////////////////////////////////////////////////////////////////////
// EVALUATION
//
// Tests whether an obligation can be selected or whether an impl
// can be applied to particular types. It skips the "confirmation"
// step and hence completely ignores output type parameters.
//
// The result is "true" if the obligation *may* hold and "false" if
// we can be sure it does not.
/// Evaluates whether the obligation `obligation` can be satisfied (by any means).
pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool {
debug!("predicate_may_hold_fatal({:?})", obligation);
// This fatal query is a stopgap that should only be used in standard mode,
// where we do not expect overflow to be propagated.
assert!(self.query_mode == TraitQueryMode::Standard);
self.evaluate_obligation_recursively(obligation)
.expect("Overflow should be caught earlier in standard query mode")
.may_apply()
}
/// Evaluates whether the obligation `obligation` can be satisfied and returns
/// an `EvaluationResult`.
pub fn evaluate_obligation_recursively(
&mut self,
obligation: &PredicateObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
self.probe(|this, _| {
this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation)
})
}
/// Evaluates the predicates in `predicates` recursively. Note that
/// this applies projections in the predicates, and therefore
/// is run within an inference probe.
fn evaluate_predicates_recursively<'a, 'o, I>(
&mut self,
stack: TraitObligationStackList<'o, 'tcx>,
predicates: I,
) -> Result<EvaluationResult, OverflowError>
where
I: IntoIterator<Item = &'a PredicateObligation<'tcx>>,
'tcx: 'a,
{
let mut result = EvaluatedToOk;
for obligation in predicates {
let eval = self.evaluate_predicate_recursively(stack, obligation)?;
debug!(
"evaluate_predicate_recursively({:?}) = {:?}",
obligation, eval
);
if let EvaluatedToErr = eval {
// fast-path - EvaluatedToErr is the top of the lattice,
// so we don't need to look on the other predicates.
return Ok(EvaluatedToErr);
} else {
result = cmp::max(result, eval);
}
}
Ok(result)
}
fn evaluate_predicate_recursively<'o>(
&mut self,
previous_stack: TraitObligationStackList<'o, 'tcx>,
obligation: &PredicateObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
debug!("evaluate_predicate_recursively({:?})", obligation);
match obligation.predicate {
ty::Predicate::Trait(ref t) => {
debug_assert!(!t.has_escaping_bound_vars());
let obligation = obligation.with(t.clone());
self.evaluate_trait_predicate_recursively(previous_stack, obligation)
}
ty::Predicate::Subtype(ref p) => {
// does this code ever run?
match self.infcx
.subtype_predicate(&obligation.cause, obligation.param_env, p)
{
Some(Ok(InferOk { obligations, .. })) => {
self.evaluate_predicates_recursively(previous_stack, &obligations)
}
Some(Err(_)) => Ok(EvaluatedToErr),
None => Ok(EvaluatedToAmbig),
}
}
ty::Predicate::WellFormed(ty) => match ty::wf::obligations(
self.infcx,
obligation.param_env,
obligation.cause.body_id,
ty,
obligation.cause.span,
) {
Some(obligations) => {
self.evaluate_predicates_recursively(previous_stack, obligations.iter())
}
None => Ok(EvaluatedToAmbig),
},
ty::Predicate::TypeOutlives(ref binder) => {
assert!(!binder.has_escaping_bound_vars());
// Check if the type has higher-ranked vars.
if binder.skip_binder().0.has_escaping_bound_vars() {
// If so, this obligation is an error (for now). Eventually we should be
// able to support additional cases here, like `for<'a> &'a str: 'a`.
// NOTE: this hack is implemented in both trait fulfillment and
// evaluation. If you fix it in one place, make sure you fix it
// in the other.
// We don't want to allow this sort of reasoning in intercrate
// mode, for backwards-compatibility reasons.
if self.intercrate.is_some() {
Ok(EvaluatedToAmbig)
} else {
Ok(EvaluatedToErr)
}
} else {
// If the type has no late bound vars, then if we assign all
// the inference variables in it to be 'static, then the type
// will be 'static itself.
//
// Therefore, `staticize(T): 'a` holds for any `'a`, so this
// obligation is fulfilled. Because evaluation works with
// staticized types (yes I know this is involved with #21974),
// we are 100% OK here.
Ok(EvaluatedToOk)
}
}
ty::Predicate::RegionOutlives(ref binder) => {
let ty::OutlivesPredicate(r_a, r_b) = binder.skip_binder();
if r_a == r_b {
// for<'a> 'a: 'a. OK
Ok(EvaluatedToOk)
} else if **r_a == ty::ReStatic {
// 'static: 'x always holds.
//
// This special case is handled somewhat inconsistently - if we
// have an inference variable that is supposed to be equal to
// `'static`, then we don't allow it to be equated to an LBR,
// but if we have a literal `'static`, then we *do*.
//
// This is actually consistent with how our region inference works.
//
// It would appear that this sort of inconsistency would
// cause "instability" problems with evaluation caching. However,
// evaluation caching is only for trait predicates, and when
// trait predicates create nested obligations, they contain
// inference variables for all the regions in the trait - the
// only way this codepath can be reached from trait predicate
// evaluation is when the user typed an explicit `where 'static: 'a`
// lifetime bound (in which case we want to return EvaluatedToOk).
//
// If we ever want to handle inference variables that might be
// equatable with ReStatic, we need to make sure we are not confused by
// technically-allowed-by-RFC-447-but-probably-should-not-be
// impls such as
// ```Rust
// impl<'a, 's, T> X<'s> for T where T: Debug + 'a, 'a: 's
// ```
Ok(EvaluatedToOk)
} else if r_a.is_late_bound() || r_b.is_late_bound() {
// There is no current way to prove `for<'a> 'a: 'x`
// unless `'a = 'x`, because there are no bounds involving
// lifetimes.
// It might be possible to prove `for<'a> 'x: 'a` by forcing `'x`
// to be `'static`. However, this is not currently done by type
// inference unless `'x` is literally ReStatic. See the comment
// above.
// We don't want to allow this sort of reasoning in intercrate
// mode, for backwards-compatibility reasons.
if self.intercrate.is_some() {
Ok(EvaluatedToAmbig)
} else {
Ok(EvaluatedToErr)
}
} else {
// Relating 2 inference variable regions. These will
// always hold if our query is "staticized".
Ok(EvaluatedToOk)
}
}
ty::Predicate::ObjectSafe(trait_def_id) => {
if self.tcx().is_object_safe(trait_def_id) {
Ok(EvaluatedToOk)
} else {
Ok(EvaluatedToErr)
}
}
ty::Predicate::Projection(ref data) => {
let project_obligation = obligation.with(data.clone());
match project::poly_project_and_unify_type(self, &project_obligation) {
Ok(Some(subobligations)) => {
let result = self.evaluate_predicates_recursively(
previous_stack,
subobligations.iter(),
);
if let Some(key) =
ProjectionCacheKey::from_poly_projection_predicate(self, data)
{
self.infcx.projection_cache.borrow_mut().complete(key);
}
result
}
Ok(None) => Ok(EvaluatedToAmbig),
Err(_) => Ok(EvaluatedToErr),
}
}
ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => {
match self.infcx.closure_kind(closure_def_id, closure_substs) {
Some(closure_kind) => {
if closure_kind.extends(kind) {
Ok(EvaluatedToOk)
} else {
Ok(EvaluatedToErr)
}
}
None => Ok(EvaluatedToAmbig),
}
}
ty::Predicate::ConstEvaluatable(def_id, substs) => {
let tcx = self.tcx();
match tcx.lift_to_global(&(obligation.param_env, substs)) {
Some((param_env, substs)) => {
let instance =
ty::Instance::resolve(tcx.global_tcx(), param_env, def_id, substs);
if let Some(instance) = instance {
let cid = GlobalId {
instance,
promoted: None,
};
match self.tcx().const_eval(param_env.and(cid)) {
Ok(_) => Ok(EvaluatedToOk),
Err(_) => Ok(EvaluatedToErr),
}
} else {
Ok(EvaluatedToErr)
}
}
None => {
// Inference variables still left in param_env or substs.
Ok(EvaluatedToAmbig)
}
}
}
}
}
fn evaluate_trait_predicate_recursively<'o>(
&mut self,
previous_stack: TraitObligationStackList<'o, 'tcx>,
mut obligation: TraitObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
debug!("evaluate_trait_predicate_recursively({:?})", obligation);
if self.intercrate.is_none() && obligation.is_global()
&& obligation
.param_env
.caller_bounds
.iter()
.all(|bound| bound.needs_subst())
{
// If a param env has no global bounds, global obligations do not
// depend on its particular value in order to work, so we can clear
// out the param env and get better caching.
debug!(
"evaluate_trait_predicate_recursively({:?}) - in global",
obligation
);
obligation.param_env = obligation.param_env.without_caller_bounds();
}
let stack = self.push_stack(previous_stack, &obligation);
let fresh_trait_ref = stack.fresh_trait_ref;
if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) {
debug!("CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result);
return Ok(result);
}
let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack));
let result = result?;
debug!("CACHE MISS: EVAL({:?})={:?}", fresh_trait_ref, result);
self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result);
Ok(result)
}
fn evaluate_stack<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> Result<EvaluationResult, OverflowError> {
// In intercrate mode, whenever any of the types are unbound,
// there can always be an impl. Even if there are no impls in
// this crate, perhaps the type would be unified with
// something from another crate that does provide an impl.
//
// In intra mode, we must still be conservative. The reason is
// that we want to avoid cycles. Imagine an impl like:
//
// impl<T:Eq> Eq for Vec<T>
//
// and a trait reference like `$0 : Eq` where `$0` is an
// unbound variable. When we evaluate this trait-reference, we
// will unify `$0` with `Vec<$1>` (for some fresh variable
// `$1`), on the condition that `$1 : Eq`. We will then wind
// up with many candidates (since there are other `Eq` impls
// that apply) and try to winnow things down. This results in
// a recursive evaluation of `$1 : Eq` -- as you can
// imagine, this is just where we started. To avoid that, we
// check for unbound variables and return an ambiguous (hence possible)
// match if we've seen this trait before.
//
// This suffices to allow chains like `FnMut` implemented in
// terms of `Fn` etc, but we could probably make this more
// precise still.
let unbound_input_types = stack
.fresh_trait_ref
.skip_binder()
.input_types()
.any(|ty| ty.is_fresh());
// this check was an imperfect workaround for a bug in the old
// intercrate mode; it should be removed when that goes away.
if unbound_input_types && self.intercrate == Some(IntercrateMode::Issue43355) {
debug!(
"evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous",
stack.fresh_trait_ref
);
// Heuristics: show the diagnostics when there are no candidates in crate.
if self.intercrate_ambiguity_causes.is_some() {
debug!("evaluate_stack: intercrate_ambiguity_causes is some");
if let Ok(candidate_set) = self.assemble_candidates(stack) {
if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
let self_ty = trait_ref.self_ty();
let cause = IntercrateAmbiguityCause::DownstreamCrate {
trait_desc: trait_ref.to_string(),
self_desc: if self_ty.has_concrete_skeleton() {
Some(self_ty.to_string())
} else {
None
},
};
debug!("evaluate_stack: pushing cause = {:?}", cause);
self.intercrate_ambiguity_causes
.as_mut()
.unwrap()
.push(cause);
}
}
}
return Ok(EvaluatedToAmbig);
}
if unbound_input_types && stack.iter().skip(1).any(|prev| {
stack.obligation.param_env == prev.obligation.param_env
&& self.match_fresh_trait_refs(&stack.fresh_trait_ref, &prev.fresh_trait_ref)
}) {
debug!(
"evaluate_stack({:?}) --> unbound argument, recursive --> giving up",
stack.fresh_trait_ref
);
return Ok(EvaluatedToUnknown);
}
// If there is any previous entry on the stack that precisely
// matches this obligation, then we can assume that the
// obligation is satisfied for now (still all other conditions
// must be met of course). One obvious case this comes up is
// marker traits like `Send`. Think of a linked list:
//
// struct List<T> { data: T, next: Option<Box<List<T>>> }
//
// `Box<List<T>>` will be `Send` if `T` is `Send` and
// `Option<Box<List<T>>>` is `Send`, and in turn
// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
// `Send`.
//
// Note that we do this comparison using the `fresh_trait_ref`
// fields. Because these have all been freshened using
// `self.freshener`, we can be sure that (a) this will not
// affect the inferencer state and (b) that if we see two
// fresh regions with the same index, they refer to the same
// unbound type variable.
if let Some(rec_index) = stack.iter()
.skip(1) // skip top-most frame
.position(|prev| stack.obligation.param_env == prev.obligation.param_env &&
stack.fresh_trait_ref == prev.fresh_trait_ref)
{
debug!("evaluate_stack({:?}) --> recursive", stack.fresh_trait_ref);
let cycle = stack.iter().skip(1).take(rec_index + 1);
let cycle = cycle.map(|stack| ty::Predicate::Trait(stack.obligation.predicate));
if self.coinductive_match(cycle) {
debug!(
"evaluate_stack({:?}) --> recursive, coinductive",
stack.fresh_trait_ref
);
return Ok(EvaluatedToOk);
} else {
debug!(
"evaluate_stack({:?}) --> recursive, inductive",
stack.fresh_trait_ref
);
return Ok(EvaluatedToRecur);
}
}
match self.candidate_from_obligation(stack) {
Ok(Some(c)) => self.evaluate_candidate(stack, &c),
Ok(None) => Ok(EvaluatedToAmbig),
Err(Overflow) => Err(OverflowError),
Err(..) => Ok(EvaluatedToErr),
}
}
/// For defaulted traits, we use a co-inductive strategy to solve, so
/// that recursion is ok. This routine returns true if the top of the
/// stack (`cycle[0]`):
///
/// - is a defaulted trait, and
/// - it also appears in the backtrace at some position `X`; and,
/// - all the predicates at positions `X..` between `X` and the top are
/// also defaulted traits.
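///
/// For example (see the `List<T>` example in `evaluate_stack` above), proving
/// `Box<List<T>>: Send` eventually cycles back to `Box<List<T>>: Send`; since
/// every predicate in that cycle is an auto ("defaulted") trait obligation,
/// the cycle is accepted and evaluates to OK.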
pub fn coinductive_match<I>(&mut self, cycle: I) -> bool
where
I: Iterator<Item = ty::Predicate<'tcx>>,
{
let mut cycle = cycle;
cycle.all(|predicate| self.coinductive_predicate(predicate))
}
fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool {
let result = match predicate {
ty::Predicate::Trait(ref data) => self.tcx().trait_is_auto(data.def_id()),
_ => false,
};
debug!("coinductive_predicate({:?}) = {:?}", predicate, result);
result
}
/// Further evaluate `candidate` to decide whether all type parameters match and whether nested
/// obligations are met. Returns true if `candidate` remains viable after this further
/// scrutiny.
fn evaluate_candidate<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
candidate: &SelectionCandidate<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
debug!(
"evaluate_candidate: depth={} candidate={:?}",
stack.obligation.recursion_depth, candidate
);
let result = self.probe(|this, _| {
let candidate = (*candidate).clone();
match this.confirm_candidate(stack.obligation, candidate) {
Ok(selection) => this.evaluate_predicates_recursively(
stack.list(),
selection.nested_obligations().iter(),
),
Err(..) => Ok(EvaluatedToErr),
}
})?;
debug!(
"evaluate_candidate: depth={} result={:?}",
stack.obligation.recursion_depth, result
);
Ok(result)
}
fn check_evaluation_cache(
&self,
param_env: ty::ParamEnv<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> Option<EvaluationResult> {
let tcx = self.tcx();
if self.can_use_global_caches(param_env) {
let cache = tcx.evaluation_cache.hashmap.borrow();
if let Some(cached) = cache.get(&trait_ref) {
return Some(cached.get(tcx));
}
}
self.infcx
.evaluation_cache
.hashmap
.borrow()
.get(&trait_ref)
.map(|v| v.get(tcx))
}
fn insert_evaluation_cache(
&mut self,
param_env: ty::ParamEnv<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
dep_node: DepNodeIndex,
result: EvaluationResult,
) {
// Avoid caching results that depend on more than just the trait-ref
// - the stack can create recursion.
if result.is_stack_dependent() {
return;
}
if self.can_use_global_caches(param_env) {
if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) {
debug!(
"insert_evaluation_cache(trait_ref={:?}, candidate={:?}) global",
trait_ref, result,
);
// This may overwrite the cache with the same value.
// FIXME: Due to #50507 this can also overwrite a different value.
// This should be changed to use HashMapExt::insert_same
// when that is fixed.
self.tcx()
.evaluation_cache
.hashmap
.borrow_mut()
.insert(trait_ref, WithDepNode::new(dep_node, result));
return;
}
}
debug!(
"insert_evaluation_cache(trait_ref={:?}, candidate={:?})",
trait_ref, result,
);
self.infcx
.evaluation_cache
.hashmap
.borrow_mut()
.insert(trait_ref, WithDepNode::new(dep_node, result));
}
///////////////////////////////////////////////////////////////////////////
// CANDIDATE ASSEMBLY
//
// The selection process begins by examining all in-scope impls,
// caller obligations, and so forth and assembling a list of
// candidates. See [rustc guide] for more details.
//
// [rustc guide]:
// https://rust-lang.github.io/rustc-guide/traits/resolution.html#candidate-assembly
fn candidate_from_obligation<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
// Watch out for overflow. This intentionally bypasses (and does
// not update) the cache.
let recursion_limit = *self.infcx.tcx.sess.recursion_limit.get();
if stack.obligation.recursion_depth >= recursion_limit {
match self.query_mode {
TraitQueryMode::Standard => {
self.infcx().report_overflow_error(&stack.obligation, true);
}
TraitQueryMode::Canonical => {
return Err(Overflow);
}
}
}
// Check the cache. Note that we freshen the trait-ref
// separately rather than using `stack.fresh_trait_ref` --
// this is because we want the unbound variables to be
// replaced with fresh types starting from index 0.
let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate.clone());
debug!(
"candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})",
cache_fresh_trait_pred, stack
);
debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars());
if let Some(c) =
self.check_candidate_cache(stack.obligation.param_env, &cache_fresh_trait_pred)
{
debug!("CACHE HIT: SELECT({:?})={:?}", cache_fresh_trait_pred, c);
return c;
}
// If no match, compute result and insert into cache.
let (candidate, dep_node) =
self.in_task(|this| this.candidate_from_obligation_no_cache(stack));
debug!(
"CACHE MISS: SELECT({:?})={:?}",
cache_fresh_trait_pred, candidate
);
self.insert_candidate_cache(
stack.obligation.param_env,
cache_fresh_trait_pred,
dep_node,
candidate.clone(),
);
candidate
}
fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
where
OP: FnOnce(&mut Self) -> R,
{
let (result, dep_node) = self.tcx()
.dep_graph
.with_anon_task(DepKind::TraitSelect, || op(self));
self.tcx().dep_graph.read_index(dep_node);
(result, dep_node)
}
// Treat negative impls as unimplemented
fn filter_negative_impls(
&self,
candidate: SelectionCandidate<'tcx>,
) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
if let ImplCandidate(def_id) = candidate {
if !self.allow_negative_impls
&& self.tcx().impl_polarity(def_id) == hir::ImplPolarity::Negative
{
return Err(Unimplemented);
}
}
Ok(Some(candidate))
}
fn candidate_from_obligation_no_cache<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
if stack.obligation.predicate.references_error() {
// If we encounter an `Error`, we generally prefer the
// most "optimistic" result in response -- that is, the
// one least likely to report downstream errors. But
// because this routine is shared by coherence and by
// trait selection, there isn't an obvious "right" choice
// here in that respect, so we opt to just return
// ambiguity and let the upstream clients sort it out.
return Ok(None);
}
if let Some(conflict) = self.is_knowable(stack) {
debug!("coherence stage: not knowable");
if self.intercrate_ambiguity_causes.is_some() {
debug!("evaluate_stack: intercrate_ambiguity_causes is some");
// Heuristics: show the diagnostics when there are no candidates in crate.
if let Ok(candidate_set) = self.assemble_candidates(stack) {
let mut no_candidates_apply = true;
{
let evaluated_candidates = candidate_set
.vec
.iter()
.map(|c| self.evaluate_candidate(stack, &c));
for ec in evaluated_candidates {
match ec {
Ok(c) => {
if c.may_apply() {
no_candidates_apply = false;
break;
}
}
Err(e) => return Err(e.into()),
}
}
}
if !candidate_set.ambiguous && no_candidates_apply {
let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
let self_ty = trait_ref.self_ty();
let trait_desc = trait_ref.to_string();
let self_desc = if self_ty.has_concrete_skeleton() {
Some(self_ty.to_string())
} else {
None
};
let cause = if let Conflict::Upstream = conflict {
IntercrateAmbiguityCause::UpstreamCrateUpdate {
trait_desc,
self_desc,
}
} else {
IntercrateAmbiguityCause::DownstreamCrate {
trait_desc,
self_desc,
}
};
debug!("evaluate_stack: pushing cause = {:?}", cause);
self.intercrate_ambiguity_causes
.as_mut()
.unwrap()
.push(cause);
}
}
}
return Ok(None);
}
let candidate_set = self.assemble_candidates(stack)?;
if candidate_set.ambiguous {
debug!("candidate set contains ambig");
return Ok(None);
}
let mut candidates = candidate_set.vec;
debug!(
"assembled {} candidates for {:?}: {:?}",
candidates.len(),
stack,
candidates
);
// At this point, we know that each of the entries in the
// candidate set is *individually* applicable. Now we have to
// figure out if they contain mutual incompatibilities. This
// frequently arises if we have an unconstrained input type --
// for example, we are looking for $0:Eq where $0 is some
// unconstrained type variable. In that case, we'll get a
// candidate which assumes $0 == int, one that assumes $0 ==
// usize, etc. This spells an ambiguity.
// If there is more than one candidate, first winnow them down
// by considering extra conditions (nested obligations and so
// forth). We don't winnow if there is exactly one
// candidate. This is a relatively minor distinction but it
// can lead to better inference and error-reporting. An
// example would be if there was an impl:
//
// impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
//
// and we were to see some code `foo.push_clone()` where `foo`
// is a `Vec<Bar>` and `Bar` does not implement `Clone`. If
// we were to winnow, we'd wind up with zero candidates.
// Instead, we select the right impl now but report `Bar does
// not implement Clone`.
if candidates.len() == 1 {
return self.filter_negative_impls(candidates.pop().unwrap());
}
// Winnow, but record the exact outcome of evaluation, which
// is needed for specialization. Propagate overflow if it occurs.
let mut candidates = candidates
.into_iter()
.map(|c| match self.evaluate_candidate(stack, &c) {
Ok(eval) if eval.may_apply() => Ok(Some(EvaluatedCandidate {
candidate: c,
evaluation: eval,
})),
Ok(_) => Ok(None),
Err(OverflowError) => Err(Overflow),
})
.flat_map(Result::transpose)
.collect::<Result<Vec<_>, _>>()?;
debug!(
"winnowed to {} candidates for {:?}: {:?}",
candidates.len(),
stack,
candidates
);
// If there are STILL multiple candidates, we can further
// reduce the list by dropping duplicates -- including
// resolving specializations.
if candidates.len() > 1 {
let mut i = 0;
while i < candidates.len() {
let is_dup = (0..candidates.len()).filter(|&j| i != j).any(|j| {
self.candidate_should_be_dropped_in_favor_of(&candidates[i], &candidates[j])
});
if is_dup {
debug!(
"Dropping candidate #{}/{}: {:?}",
i,
candidates.len(),
candidates[i]
);
candidates.swap_remove(i);
} else {
debug!(
"Retaining candidate #{}/{}: {:?}",
i,
candidates.len(),
candidates[i]
);
i += 1;
// If there are *STILL* multiple candidates, give up
// and report ambiguity.
if i > 1 {
debug!("multiple matches, ambig");
return Ok(None);
}
}
}
}
// If there are *NO* candidates, then there are no impls --
// that we know of, anyway. Note that in the case where there
// are unbound type variables within the obligation, it might
// be the case that you could still satisfy the obligation
// from another crate by instantiating the type variables with
// a type from another crate that does have an impl. This case
// is checked for in `evaluate_stack` (and hence users
// who might care about this case, like coherence, should use
// that function).
if candidates.is_empty() {
return Err(Unimplemented);
}
// Just one candidate left.
self.filter_negative_impls(candidates.pop().unwrap().candidate)
}
fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> {
debug!("is_knowable(intercrate={:?})", self.intercrate);
if self.intercrate.is_none() {
return None;
}
let obligation = &stack.obligation;
let predicate = self.infcx()
.resolve_type_vars_if_possible(&obligation.predicate);
// OK to skip binder because of the nature of the
// trait-ref-is-knowable check, which does not care about
// bound regions
let trait_ref = predicate.skip_binder().trait_ref;
let result = coherence::trait_ref_is_knowable(self.tcx(), trait_ref);
if let (
Some(Conflict::Downstream {
used_to_be_broken: true,
}),
Some(IntercrateMode::Issue43355),
) = (result, self.intercrate)
{
debug!("is_knowable: IGNORING conflict to be bug-compatible with #43355");
None
} else {
result
}
}
/// Returns true if the global caches can be used.
/// Do note that if the type itself is not in the
/// global tcx, the local caches will be used.
fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
// If there are any where-clauses in scope, then we always use
// a cache local to this particular scope. Otherwise, we
// switch to a global cache. We used to try and draw
// finer-grained distinctions, but that led to a series of
// annoying and weird bugs like #22019 and #18290. This simple
// rule seems to be pretty clearly safe and also still retains
// a very high hit rate (~95% when compiling rustc).
if !param_env.caller_bounds.is_empty() {
return false;
}
// Avoid using the master cache during coherence and just rely
// on the local cache. This effectively disables caching
// during coherence. It is really just a simplification to
// avoid us having to fear that coherence results "pollute"
// the master cache. Since coherence executes pretty quickly,
// it's not worth going to more trouble to increase the hit rate.
if self.intercrate.is_some() {
return false;
}
// Otherwise, we can use the global cache.
true
}
fn check_candidate_cache(
&mut self,
param_env: ty::ParamEnv<'tcx>,
cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>,
) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
let tcx = self.tcx();
let trait_ref = &cache_fresh_trait_pred.skip_binder().trait_ref;
if self.can_use_global_caches(param_env) {
let cache = tcx.selection_cache.hashmap.borrow();
if let Some(cached) = cache.get(&trait_ref) {
return Some(cached.get(tcx));
}
}
self.infcx
.selection_cache
.hashmap
.borrow()
.get(trait_ref)
.map(|v| v.get(tcx))
}
/// Determines whether can we safely cache the result
/// of selecting an obligation. This is almost always 'true',
/// except when dealing with certain ParamCandidates.
///
/// Ordinarily, a ParamCandidate will contain no inference variables,
/// since it was usually produced directly from a DefId. However,
/// in certain cases (currently only librustdoc's blanket impl finder)
/// a ParamEnv may be explicitly constructed with inference types.
/// When this is the case, we do *not* want to cache the resulting selection
/// candidate. This is due to the fact that it might not always be possible
/// to equate the obligation's trait ref and the candidate's trait ref,
/// if more constraints end up getting added to an inference variable.
///
/// Because of this, we always want to re-run the full selection
/// process for our obligation the next time we see it, since
/// we might end up picking a different SelectionCandidate (or none at all)
fn can_cache_candidate(&self,
result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>
) -> bool {
match result {
Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => {
!trait_ref.skip_binder().input_types().any(|t| t.walk().any(|t_| t_.is_ty_infer()))
},
_ => true
}
}
fn insert_candidate_cache(
&mut self,
param_env: ty::ParamEnv<'tcx>,
cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
dep_node: DepNodeIndex,
candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
) {
let tcx = self.tcx();
let trait_ref = cache_fresh_trait_pred.skip_binder().trait_ref;
if !self.can_cache_candidate(&candidate) {
debug!("insert_candidate_cache(trait_ref={:?}, candidate={:?} -\
candidate is not cacheable", trait_ref, candidate);
return;
}
if self.can_use_global_caches(param_env) {
if let Err(Overflow) = candidate {
// Don't cache overflow globally; we only produce this
// in certain modes.
} else if let Some(trait_ref) = tcx.lift_to_global(&trait_ref) {
if let Some(candidate) = tcx.lift_to_global(&candidate) {
debug!(
"insert_candidate_cache(trait_ref={:?}, candidate={:?}) global",
trait_ref, candidate,
);
// This may overwrite the cache with the same value
tcx.selection_cache
.hashmap
.borrow_mut()
.insert(trait_ref, WithDepNode::new(dep_node, candidate));
return;
}
}
}
debug!(
"insert_candidate_cache(trait_ref={:?}, candidate={:?}) local",
trait_ref, candidate,
);
self.infcx
.selection_cache
.hashmap
.borrow_mut()
.insert(trait_ref, WithDepNode::new(dep_node, candidate));
}
fn assemble_candidates<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>> {
let TraitObligationStack { obligation, .. } = *stack;
let ref obligation = Obligation {
param_env: obligation.param_env,
cause: obligation.cause.clone(),
recursion_depth: obligation.recursion_depth,
predicate: self.infcx()
.resolve_type_vars_if_possible(&obligation.predicate),
};
if obligation.predicate.skip_binder().self_ty().is_ty_var() {
// Self is a type variable (e.g. `_: AsRef<str>`).
//
// This is somewhat problematic, as the current scheme can't really
// handle it turning out to be a projection. This does end up as truly
// ambiguous in most cases anyway.
//
// Take the fast path out - this also improves
// performance by preventing assemble_candidates_from_impls from
// matching every impl for this trait.
return Ok(SelectionCandidateSet {
vec: vec![],
ambiguous: true,
});
}
let mut candidates = SelectionCandidateSet {
vec: Vec::new(),
ambiguous: false,
};
self.assemble_candidates_for_trait_alias(obligation, &mut candidates)?;
// Other bounds. Consider both in-scope bounds from fn decl
// and applicable impls. There is a certain set of precedence rules here.
let def_id = obligation.predicate.def_id();
let lang_items = self.tcx().lang_items();
if lang_items.copy_trait() == Some(def_id) {
debug!(
"obligation self ty is {:?}",
obligation.predicate.skip_binder().self_ty()
);
// User-defined copy impls are permitted, but only for
// structs and enums.
self.assemble_candidates_from_impls(obligation, &mut candidates)?;
// For other types, we'll use the builtin rules.
let copy_conditions = self.copy_clone_conditions(obligation);
self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates)?;
} else if lang_items.sized_trait() == Some(def_id) {
// Sized is never implementable by end-users, it is
// always automatically computed.
let sized_conditions = self.sized_conditions(obligation);
self.assemble_builtin_bound_candidates(sized_conditions, &mut candidates)?;
} else if lang_items.unsize_trait() == Some(def_id) {
self.assemble_candidates_for_unsizing(obligation, &mut candidates);
} else {
if lang_items.clone_trait() == Some(def_id) {
// Same builtin conditions as `Copy`, i.e. every type which has builtin support
// for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone`
// types have builtin support for `Clone`.
let clone_conditions = self.copy_clone_conditions(obligation);
self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?;
}
self.assemble_generator_candidates(obligation, &mut candidates)?;
self.assemble_closure_candidates(obligation, &mut candidates)?;
self.assemble_fn_pointer_candidates(obligation, &mut candidates)?;
self.assemble_candidates_from_impls(obligation, &mut candidates)?;
self.assemble_candidates_from_object_ty(obligation, &mut candidates);
}
self.assemble_candidates_from_projected_tys(obligation, &mut candidates);
self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?;
// Auto implementations have lower priority, so we only
// consider triggering a default if there is no other impl that can apply.
if candidates.vec.is_empty() {
self.assemble_candidates_from_auto_impls(obligation, &mut candidates)?;
}
debug!("candidate list size: {}", candidates.vec.len());
Ok(candidates)
}
fn assemble_candidates_from_projected_tys(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
debug!("assemble_candidates_for_projected_tys({:?})", obligation);
// before we go into the whole placeholder thing, just
// quickly check if the self-type is a projection at all.
match obligation.predicate.skip_binder().trait_ref.self_ty().sty {
ty::Projection(_) | ty::Opaque(..) => {}
ty::Infer(ty::TyVar(_)) => {
span_bug!(
obligation.cause.span,
"Self=_ should have been handled by assemble_candidates"
);
}
_ => return,
}
let result = self.probe(|this, snapshot| {
this.match_projection_obligation_against_definition_bounds(obligation, snapshot)
});
if result {
candidates.vec.push(ProjectionCandidate);
}
}
fn match_projection_obligation_against_definition_bounds(
&mut self,
obligation: &TraitObligation<'tcx>,
snapshot: &infer::CombinedSnapshot<'cx, 'tcx>,
) -> bool {
let poly_trait_predicate = self.infcx()
.resolve_type_vars_if_possible(&obligation.predicate);
let (skol_trait_predicate, placeholder_map) = self.infcx()
.replace_bound_vars_with_placeholders(&poly_trait_predicate);
debug!(
"match_projection_obligation_against_definition_bounds: \
skol_trait_predicate={:?} placeholder_map={:?}",
skol_trait_predicate, placeholder_map
);
let (def_id, substs) = match skol_trait_predicate.trait_ref.self_ty().sty {
ty::Projection(ref data) => (data.trait_ref(self.tcx()).def_id, data.substs),
ty::Opaque(def_id, substs) => (def_id, substs),
_ => {
span_bug!(
obligation.cause.span,
"match_projection_obligation_against_definition_bounds() called \
but self-ty is not a projection: {:?}",
skol_trait_predicate.trait_ref.self_ty()
);
}
};
debug!(
"match_projection_obligation_against_definition_bounds: \
def_id={:?}, substs={:?}",
def_id, substs
);
let predicates_of = self.tcx().predicates_of(def_id);
let bounds = predicates_of.instantiate(self.tcx(), substs);
debug!(
"match_projection_obligation_against_definition_bounds: \
bounds={:?}",
bounds
);
let matching_bound = util::elaborate_predicates(self.tcx(), bounds.predicates)
.filter_to_traits()
.find(|bound| {
self.probe(|this, _| {
this.match_projection(
obligation,
bound.clone(),
skol_trait_predicate.trait_ref.clone(),
&placeholder_map,
snapshot,
)
})
});
debug!(
"match_projection_obligation_against_definition_bounds: \
matching_bound={:?}",
matching_bound
);
match matching_bound {
None => false,
Some(bound) => {
// Repeat the successful match, if any, this time outside of a probe.
let result = self.match_projection(
obligation,
bound,
skol_trait_predicate.trait_ref.clone(),
&placeholder_map,
snapshot,
);
self.infcx.pop_placeholders(placeholder_map, snapshot);
assert!(result);
true
}
}
}
fn match_projection(
&mut self,
obligation: &TraitObligation<'tcx>,
trait_bound: ty::PolyTraitRef<'tcx>,
skol_trait_ref: ty::TraitRef<'tcx>,
placeholder_map: &infer::PlaceholderMap<'tcx>,
snapshot: &infer::CombinedSnapshot<'cx, 'tcx>,
) -> bool {
debug_assert!(!skol_trait_ref.has_escaping_bound_vars());
if self.infcx
.at(&obligation.cause, obligation.param_env)
.sup(ty::Binder::dummy(skol_trait_ref), trait_bound)
.is_err()
{
return false;
}
self.infcx
.leak_check(false, obligation.cause.span, placeholder_map, snapshot)
.is_ok()
}
/// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
/// supplied to find out whether it is listed among them.
///
/// Never affects inference environment.
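///
/// For example (an illustrative sketch), inside `fn foo<T: Iterator>()` the
/// caller bound `T: Iterator` in the param-env matches an obligation
/// `T: Iterator` and yields a `ParamCandidate`.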
fn assemble_candidates_from_caller_bounds<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
debug!(
"assemble_candidates_from_caller_bounds({:?})",
stack.obligation
);
let all_bounds = stack
.obligation
.param_env
.caller_bounds
.iter()
.filter_map(|o| o.to_opt_poly_trait_ref());
// micro-optimization: filter out predicates relating to different
// traits.
let matching_bounds =
all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id());
// keep only those bounds which may apply, and propagate overflow if it occurs
let mut param_candidates = vec![];
for bound in matching_bounds {
let wc = self.evaluate_where_clause(stack, bound.clone())?;
if wc.may_apply() {
param_candidates.push(ParamCandidate(bound));
}
}
candidates.vec.extend(param_candidates);
Ok(())
}
fn evaluate_where_clause<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
self.probe(move |this, _| {
match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
Ok(obligations) => {
this.evaluate_predicates_recursively(stack.list(), obligations.iter())
}
Err(()) => Ok(EvaluatedToErr),
}
})
}
fn assemble_generator_candidates(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
if self.tcx().lang_items().gen_trait() != Some(obligation.predicate.def_id()) {
return Ok(());
}
// OK to skip binder because the substs on generator types never
// touch bound regions, they just capture the in-scope
// type/region parameters
let self_ty = *obligation.self_ty().skip_binder();
match self_ty.sty {
ty::Generator(..) => {
debug!(
"assemble_generator_candidates: self_ty={:?} obligation={:?}",
self_ty, obligation
);
candidates.vec.push(GeneratorCandidate);
}
ty::Infer(ty::TyVar(_)) => {
debug!("assemble_generator_candidates: ambiguous self-type");
candidates.ambiguous = true;
}
_ => {}
}
Ok(())
}
/// Check for the artificial impl that the compiler will create for an obligation like `X :
/// FnMut<..>` where `X` is a closure type.
///
/// Note: the type parameters on a closure candidate are modeled as *output* type
/// parameters and hence do not affect whether this trait is a match or not. They will be
/// unified during the confirmation step.
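///
/// For example (illustration), a closure `|x: u32| x + 1` whose inferred
/// closure kind is `Fn` yields a `ClosureCandidate` for an `FnMut(u32) -> u32`
/// obligation, because `Fn` extends `FnMut`; if the kind is not yet inferred,
/// a candidate is pushed optimistically (see the `None` arm below).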
fn assemble_closure_candidates(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
let kind = match self.tcx()
.lang_items()
.fn_trait_kind(obligation.predicate.def_id())
{
Some(k) => k,
None => {
return Ok(());
}
};
// OK to skip binder because the substs on closure types never
// touch bound regions, they just capture the in-scope
// type/region parameters
match obligation.self_ty().skip_binder().sty {
ty::Closure(closure_def_id, closure_substs) => {
debug!(
"assemble_unboxed_candidates: kind={:?} obligation={:?}",
kind, obligation
);
match self.infcx.closure_kind(closure_def_id, closure_substs) {
Some(closure_kind) => {
debug!(
"assemble_unboxed_candidates: closure_kind = {:?}",
closure_kind
);
if closure_kind.extends(kind) {
candidates.vec.push(ClosureCandidate);
}
}
None => {
debug!("assemble_unboxed_candidates: closure_kind not yet known");
candidates.vec.push(ClosureCandidate);
}
}
}
ty::Infer(ty::TyVar(_)) => {
debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
candidates.ambiguous = true;
}
_ => {}
}
Ok(())
}
/// Implement one of the `Fn()` family for a fn pointer.
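///
/// For example, an obligation `fn(u32) -> bool: FnMut(u32) -> bool` gets a
/// `FnPointerCandidate`, because the self type is a safe, non-variadic
/// `extern "Rust"` function pointer.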
fn assemble_fn_pointer_candidates(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
// We provide impls of all the `Fn` traits for fn pointers.
if self.tcx()
.lang_items()
.fn_trait_kind(obligation.predicate.def_id())
.is_none()
{
return Ok(());
}
// OK to skip binder because what we are inspecting doesn't involve bound regions
let self_ty = *obligation.self_ty().skip_binder();
match self_ty.sty {
ty::Infer(ty::TyVar(_)) => {
debug!("assemble_fn_pointer_candidates: ambiguous self-type");
candidates.ambiguous = true; // could wind up being a fn() type
}
// provide an impl, but only for suitable `fn` pointers
ty::FnDef(..) | ty::FnPtr(_) => {
if let ty::FnSig {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
variadic: false,
..
} = self_ty.fn_sig(self.tcx()).skip_binder()
{
candidates.vec.push(FnPointerCandidate);
}
}
_ => {}
}
Ok(())
}
/// Search for impls that might apply to `obligation`.
fn assemble_candidates_from_impls(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
debug!(
"assemble_candidates_from_impls(obligation={:?})",
obligation
);
self.tcx().for_each_relevant_impl(
obligation.predicate.def_id(),
obligation.predicate.skip_binder().trait_ref.self_ty(),
|impl_def_id| {
self.probe(|this, snapshot| {
if let Ok(placeholder_map) = this.match_impl(impl_def_id, obligation, snapshot)
{
candidates.vec.push(ImplCandidate(impl_def_id));
// NB: we can safely drop the placeholder map
// since we are in a probe.
mem::drop(placeholder_map);
}
});
},
);
Ok(())
}
fn assemble_candidates_from_auto_impls(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
// OK to skip binder here because the tests we do below do not involve bound regions
let self_ty = *obligation.self_ty().skip_binder();
debug!("assemble_candidates_from_auto_impls(self_ty={:?})", self_ty);
let def_id = obligation.predicate.def_id();
if self.tcx().trait_is_auto(def_id) {
match self_ty.sty {
ty::Dynamic(..) => {
// For object types, we don't know what the closed
// over types are. This means we conservatively
// say nothing; a candidate may be added by
// `assemble_candidates_from_object_ty`.
}
ty::Foreign(..) => {
// Since the contents of foreign types are unknown,
// we don't add any `..` impl. Default traits could
// still be provided by a manual implementation for
// this trait and type.
}
ty::Param(..) | ty::Projection(..) => {
// In these cases, we don't know what the actual
// type is. Therefore, we cannot break it down
// into its constituent types. So we don't
// consider the `..` impl but instead just add no
// candidates: this means that typeck will only
// succeed if there is another reason to believe
// that this obligation holds. That could be a
// where-clause or, in the case of an object type,
// it could be that the object type lists the
// trait (e.g. `Foo+Send : Send`). See
// `compile-fail/typeck-default-trait-impl-send-param.rs`
// for an example of a test case that exercises
// this path.
}
ty::Infer(ty::TyVar(_)) => {
// the auto impl might apply, we don't know
candidates.ambiguous = true;
}
_ => candidates.vec.push(AutoImplCandidate(def_id.clone())),
}
}
Ok(())
}
/// Search for candidates that might apply to `obligation` when the self type
/// is a trait object.
fn assemble_candidates_from_object_ty(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
debug!(
"assemble_candidates_from_object_ty(self_ty={:?})",
obligation.self_ty().skip_binder()
);
self.probe(|this, _snapshot| {
// the code below doesn't care about regions, and the
// self-ty here doesn't escape this probe, so just erase
// any LBR.
let self_ty = this.tcx().erase_late_bound_regions(&obligation.self_ty());
let poly_trait_ref = match self_ty.sty {
ty::Dynamic(ref data, ..) => {
if data.auto_traits()
.any(|did| did == obligation.predicate.def_id())
{
debug!(
"assemble_candidates_from_object_ty: matched builtin bound, \
pushing candidate"
);
candidates.vec.push(BuiltinObjectCandidate);
return;
}
data.principal().with_self_ty(this.tcx(), self_ty)
}
ty::Infer(ty::TyVar(_)) => {
debug!("assemble_candidates_from_object_ty: ambiguous");
candidates.ambiguous = true; // could wind up being an object type
return;
}
_ => return,
};
debug!(
"assemble_candidates_from_object_ty: poly_trait_ref={:?}",
poly_trait_ref
);
// Count only those upcast versions that match the trait-ref
// we are looking for. Specifically, do not only check for the
// correct trait, but also the correct type parameters.
// For example, we may be trying to upcast `Foo` to `Bar<i32>`,
// but `Foo` is declared as `trait Foo : Bar<u32>`.
let upcast_trait_refs = util::supertraits(this.tcx(), poly_trait_ref)
.filter(|upcast_trait_ref| {
this.probe(|this, _| {
let upcast_trait_ref = upcast_trait_ref.clone();
this.match_poly_trait_ref(obligation, upcast_trait_ref)
.is_ok()
})
})
.count();
if upcast_trait_refs > 1 {
// can be upcast in many ways; need more type information
candidates.ambiguous = true;
} else if upcast_trait_refs == 1 {
candidates.vec.push(ObjectCandidate);
}
})
}
/// Search for unsizing that might apply to `obligation`.
fn assemble_candidates_for_unsizing(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
// We currently never consider higher-ranked obligations e.g.
// `for<'a> &'a T: Unsize<Trait+'a>` to be implemented. This is not
// because they are a priori invalid (we could potentially add support
// for them later); it's just that there isn't really a strong need for it.
// A `T: Unsize<U>` obligation is always used as part of a `T: CoerceUnsize<U>`
// impl, and those are generally applied to concrete types.
//
// That said, one might try to write a fn with a where clause like
// for<'a> Foo<'a, T>: Unsize<Foo<'a, Trait>>
// where the `'a` is kind of orthogonal to the relevant part of the `Unsize`.
// Still, you'd be more likely to write that where clause as
// T: Trait
// so it seems ok if we (conservatively) fail to accept that `Unsize`
// obligation above. Should be possible to extend this in the future.
let source = match obligation.self_ty().no_bound_vars() {
Some(t) => t,
None => {
// Don't add any candidates if there are bound regions.
return;
}
};
let target = obligation
.predicate
.skip_binder()
.trait_ref
.substs
.type_at(1);
debug!(
"assemble_candidates_for_unsizing(source={:?}, target={:?})",
source, target
);
let may_apply = match (&source.sty, &target.sty) {
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
(&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
// Upcasts permit two things:
//
// 1. Dropping builtin bounds, e.g. `Foo+Send` to `Foo`
// 2. Tightening the region bound, e.g. `Foo+'a` to `Foo+'b` if `'a : 'b`
//
// Note that neither of these changes requires any
// change at runtime. Eventually this will be
// generalized.
//
// We always upcast when we can because of reason
// #2 (region bounds).
data_a.principal().def_id() == data_b.principal().def_id()
&& data_b.auto_traits()
// All of a's auto traits need to be in b's auto traits.
.all(|b| data_a.auto_traits().any(|a| a == b))
}
// T -> Trait.
(_, &ty::Dynamic(..)) => true,
// Ambiguous handling is below T -> Trait, because inference
// variables can still implement Unsize<Trait> and nested
// obligations will have the final say (likely deferred).
(&ty::Infer(ty::TyVar(_)), _) | (_, &ty::Infer(ty::TyVar(_))) => {
debug!("assemble_candidates_for_unsizing: ambiguous");
candidates.ambiguous = true;
false
}
// [T; n] -> [T].
(&ty::Array(..), &ty::Slice(_)) => true,
// Struct<T> -> Struct<U>.
(&ty::Adt(def_id_a, _), &ty::Adt(def_id_b, _)) if def_id_a.is_struct() => {
def_id_a == def_id_b
}
// (.., T) -> (.., U).
(&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => tys_a.len() == tys_b.len(),
_ => false,
};
if may_apply {
candidates.vec.push(BuiltinUnsizeCandidate);
}
}
fn assemble_candidates_for_trait_alias(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
// OK to skip binder here because the tests we do below do not involve bound regions
let self_ty = *obligation.self_ty().skip_binder();
debug!("assemble_candidates_for_trait_alias(self_ty={:?})", self_ty);
let def_id = obligation.predicate.def_id();
if ty::is_trait_alias(self.tcx(), def_id) {
candidates.vec.push(TraitAliasCandidate(def_id.clone()));
}
Ok(())
}
///////////////////////////////////////////////////////////////////////////
// WINNOW
//
// Winnowing is the process of attempting to resolve ambiguity by
// probing further. During the winnowing process, we unify all
// type variables and then we also attempt to evaluate recursive
// bounds to see if they are satisfied.
/// Returns true if `victim` should be dropped in favor of
/// `other`. Generally speaking we will drop duplicate
/// candidates and prefer where-clause candidates.
///
/// See the comment for "SelectionCandidate" for more details.
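///
/// For example (an illustrative sketch), if both a caller-supplied where
/// clause `T: Clone` (a non-global `ParamCandidate`) and some blanket impl of
/// `Clone` (an `ImplCandidate`) could apply, the impl candidate is dropped in
/// favor of the where-clause candidate.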
fn candidate_should_be_dropped_in_favor_of<'o>(
&mut self,
victim: &EvaluatedCandidate<'tcx>,
other: &EvaluatedCandidate<'tcx>,
) -> bool {
if victim.candidate == other.candidate {
return true;
}
// Check if a bound would previously have been removed when normalizing
// the param_env so that it can be given the lowest priority. See
// #50825 for the motivation for this.
let is_global =
|cand: &ty::PolyTraitRef<'_>| cand.is_global() && !cand.has_late_bound_regions();
match other.candidate {
// Prefer BuiltinCandidate { has_nested: false } to anything else.
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
BuiltinCandidate { has_nested: false } => true,
ParamCandidate(ref cand) => match victim.candidate {
AutoImplCandidate(..) => {
bug!(
"default implementations shouldn't be recorded \
when there are other valid candidates"
);
}
// Prefer BuiltinCandidate { has_nested: false } to anything else.
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
BuiltinCandidate { has_nested: false } => false,
ImplCandidate(..)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| BuiltinCandidate { .. }
| TraitAliasCandidate(..) => {
// Global bounds from the where clause should be ignored
// here (see issue #50825). Otherwise, we have a where
// clause so don't go around looking for impls.
!is_global(cand)
}
ObjectCandidate | ProjectionCandidate => {
// Arbitrarily give param candidates priority
// over projection and object candidates.
!is_global(cand)
}
ParamCandidate(..) => false,
},
ObjectCandidate | ProjectionCandidate => match victim.candidate {
AutoImplCandidate(..) => {
bug!(
"default implementations shouldn't be recorded \
when there are other valid candidates"
);
}
// Prefer BuiltinCandidate { has_nested: false } to anything else.
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
BuiltinCandidate { has_nested: false } => false,
ImplCandidate(..)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| BuiltinCandidate { .. }
| TraitAliasCandidate(..) => true,
ObjectCandidate | ProjectionCandidate => {
// Arbitrarily give param candidates priority
// over projection and object candidates.
true
}
ParamCandidate(ref cand) => is_global(cand),
},
ImplCandidate(other_def) => {
// See if we can toss out `victim` based on specialization.
// This requires us to know *for sure* that the `other` impl applies
// i.e. EvaluatedToOk:
if other.evaluation == EvaluatedToOk {
match victim.candidate {
ImplCandidate(victim_def) => {
let tcx = self.tcx().global_tcx();
return tcx.specializes((other_def, victim_def))
|| tcx.impls_are_allowed_to_overlap(other_def, victim_def);
}
ParamCandidate(ref cand) => {
// Prefer the impl to a global where clause candidate.
return is_global(cand);
}
_ => (),
}
}
false
}
ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| BuiltinCandidate { has_nested: true } => {
match victim.candidate {
ParamCandidate(ref cand) => {
// Prefer these to a global where-clause bound
// (see issue #50825)
is_global(cand) && other.evaluation == EvaluatedToOk
}
_ => false,
}
}
_ => false,
}
}
///////////////////////////////////////////////////////////////////////////
// BUILTIN BOUNDS
//
// These cover the traits that are built-in to the language
// itself: `Copy`, `Clone` and `Sized`.
fn assemble_builtin_bound_candidates<'o>(
&mut self,
conditions: BuiltinImplConditions<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
match conditions {
BuiltinImplConditions::Where(nested) => {
debug!("builtin_bound: nested={:?}", nested);
candidates.vec.push(BuiltinCandidate {
has_nested: nested.skip_binder().len() > 0,
});
}
BuiltinImplConditions::None => {}
BuiltinImplConditions::Ambiguous => {
debug!("assemble_builtin_bound_candidates: ambiguous builtin");
candidates.ambiguous = true;
}
}
Ok(())
}
fn sized_conditions(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
use self::BuiltinImplConditions::{Ambiguous, None, Where};
// NOTE: binder moved to (*)
let self_ty = self.infcx
.shallow_resolve(obligation.predicate.skip_binder().self_ty());
match self_ty.sty {
ty::Infer(ty::IntVar(_))
| ty::Infer(ty::FloatVar(_))
| ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::RawPtr(..)
| ty::Char
| ty::Ref(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
| ty::Error => {
// safe for everything
Where(ty::Binder::dummy(Vec::new()))
}
ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
ty::Tuple(tys) => Where(ty::Binder::bind(tys.last().into_iter().cloned().collect())),
ty::Adt(def, substs) => {
let sized_crit = def.sized_constraint(self.tcx());
// (*) binder moved here
Where(ty::Binder::bind(
sized_crit
.iter()
.map(|ty| ty.subst(self.tcx(), substs))
.collect(),
))
}
ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
ty::Infer(ty::TyVar(_)) => Ambiguous,
ty::UnnormalizedProjection(..)
| ty::Placeholder(..)
| ty::Bound(..)
| ty::Infer(ty::FreshTy(_))
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_)) => {
bug!(
"asked to assemble builtin bounds of unexpected type: {:?}",
self_ty
);
}
}
}
fn copy_clone_conditions(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
// NOTE: binder moved to (*)
let self_ty = self.infcx
.shallow_resolve(obligation.predicate.skip_binder().self_ty());
use self::BuiltinImplConditions::{Ambiguous, None, Where};
match self_ty.sty {
ty::Infer(ty::IntVar(_))
| ty::Infer(ty::FloatVar(_))
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::Error => Where(ty::Binder::dummy(Vec::new())),
ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::Char
| ty::RawPtr(..)
| ty::Never
| ty::Ref(_, _, hir::MutImmutable) => {
// Implementations provided in libcore
None
}
ty::Dynamic(..)
| ty::Str
| ty::Slice(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::Foreign(..)
| ty::Ref(_, _, hir::MutMutable) => None,
ty::Array(element_ty, _) => {
// (*) binder moved here
Where(ty::Binder::bind(vec![element_ty]))
}
ty::Tuple(tys) => {
// (*) binder moved here
Where(ty::Binder::bind(tys.to_vec()))
}
ty::Closure(def_id, substs) => {
let trait_id = obligation.predicate.def_id();
let is_copy_trait = Some(trait_id) == self.tcx().lang_items().copy_trait();
let is_clone_trait = Some(trait_id) == self.tcx().lang_items().clone_trait();
if is_copy_trait || is_clone_trait {
Where(ty::Binder::bind(
substs.upvar_tys(def_id, self.tcx()).collect(),
))
} else {
None
}
}
ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
// Fallback to whatever user-defined impls exist in this case.
None
}
ty::Infer(ty::TyVar(_)) => {
// Unbound type variable. Might or might not have
// applicable impls and so forth, depending on what
// those type variables wind up being bound to.
Ambiguous
}
ty::UnnormalizedProjection(..)
| ty::Placeholder(..)
| ty::Bound(..)
| ty::Infer(ty::FreshTy(_))
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_)) => {
bug!(
"asked to assemble builtin bounds of unexpected type: {:?}",
self_ty
);
}
}
}
/// For default impls, we need to break apart a type into its
/// "constituent types" -- meaning, the types that it contains.
///
/// Here are some (simple) examples:
///
/// ```
/// (i32, u32) -> [i32, u32]
/// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
/// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
/// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32]
/// ```
fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
match t.sty {
ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::Str
| ty::Error
| ty::Infer(ty::IntVar(_))
| ty::Infer(ty::FloatVar(_))
| ty::Never
| ty::Char => Vec::new(),
ty::UnnormalizedProjection(..)
| ty::Placeholder(..)
| ty::Dynamic(..)
| ty::Param(..)
| ty::Foreign(..)
| ty::Projection(..)
| ty::Bound(..)
| ty::Infer(ty::TyVar(_))
| ty::Infer(ty::FreshTy(_))
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_)) => {
bug!(
"asked to assemble constituent types of unexpected type: {:?}",
t
);
}
ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => {
vec![element_ty]
}
ty::Array(element_ty, _) | ty::Slice(element_ty) => vec![element_ty],
ty::Tuple(ref tys) => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
tys.to_vec()
}
ty::Closure(def_id, ref substs) => substs.upvar_tys(def_id, self.tcx()).collect(),
ty::Generator(def_id, ref substs, _) => {
let witness = substs.witness(def_id, self.tcx());
substs
.upvar_tys(def_id, self.tcx())
.chain(iter::once(witness))
.collect()
}
ty::GeneratorWitness(types) => {
// This is sound because no regions in the witness can refer to
                // the binder outside the witness. So we'll effectively reuse
// the implicit binder around the witness.
types.skip_binder().to_vec()
}
// for `PhantomData<T>`, we pass `T`
ty::Adt(def, substs) if def.is_phantom_data() => substs.types().collect(),
ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect(),
ty::Opaque(def_id, substs) => {
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)]
}
}
}
fn collect_predicates_for_types(
&mut self,
param_env: ty::ParamEnv<'tcx>,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
trait_def_id: DefId,
types: ty::Binder<Vec<Ty<'tcx>>>,
) -> Vec<PredicateObligation<'tcx>> {
// Because the types were potentially derived from
// higher-ranked obligations they may reference late-bound
// regions. For example, `for<'a> Foo<&'a int> : Copy` would
// yield a type like `for<'a> &'a int`. In general, we
// maintain the invariant that we never manipulate bound
// regions, so we have to process these bound regions somehow.
//
// The strategy is to:
//
// 1. Instantiate those regions to placeholder regions (e.g.,
        //    `for<'a> &'a int` becomes `&'0 int`).
// 2. Produce something like `&'0 int : Copy`
// 3. Re-bind the regions back to `for<'a> &'a int : Copy`
types
.skip_binder()
.into_iter()
.flat_map(|ty| {
// binder moved -\
let ty: ty::Binder<Ty<'tcx>> = ty::Binder::bind(ty); // <----/
self.in_snapshot(|this, snapshot| {
let (skol_ty, placeholder_map) = this.infcx()
.replace_bound_vars_with_placeholders(&ty);
let Normalized {
value: normalized_ty,
mut obligations,
} = project::normalize_with_depth(
this,
param_env,
cause.clone(),
recursion_depth,
&skol_ty,
);
let skol_obligation = this.tcx().predicate_for_trait_def(
param_env,
cause.clone(),
trait_def_id,
recursion_depth,
normalized_ty,
&[],
);
obligations.push(skol_obligation);
this.infcx()
.plug_leaks(placeholder_map, snapshot, obligations)
})
})
.collect()
}
///////////////////////////////////////////////////////////////////////////
// CONFIRMATION
//
// Confirmation unifies the output type parameters of the trait
// with the values found in the obligation, possibly yielding a
// type error. See [rustc guide] for more details.
//
// [rustc guide]:
// https://rust-lang.github.io/rustc-guide/traits/resolution.html#confirmation
fn confirm_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
candidate: SelectionCandidate<'tcx>,
) -> Result<Selection<'tcx>, SelectionError<'tcx>> {
debug!("confirm_candidate({:?}, {:?})", obligation, candidate);
match candidate {
BuiltinCandidate { has_nested } => {
let data = self.confirm_builtin_candidate(obligation, has_nested);
Ok(VtableBuiltin(data))
}
ParamCandidate(param) => {
let obligations = self.confirm_param_candidate(obligation, param);
Ok(VtableParam(obligations))
}
ImplCandidate(impl_def_id) => Ok(VtableImpl(self.confirm_impl_candidate(
obligation,
impl_def_id,
))),
AutoImplCandidate(trait_def_id) => {
let data = self.confirm_auto_impl_candidate(obligation, trait_def_id);
Ok(VtableAutoImpl(data))
}
ProjectionCandidate => {
self.confirm_projection_candidate(obligation);
Ok(VtableParam(Vec::new()))
}
ClosureCandidate => {
let vtable_closure = self.confirm_closure_candidate(obligation)?;
Ok(VtableClosure(vtable_closure))
}
GeneratorCandidate => {
let vtable_generator = self.confirm_generator_candidate(obligation)?;
Ok(VtableGenerator(vtable_generator))
}
FnPointerCandidate => {
let data = self.confirm_fn_pointer_candidate(obligation)?;
Ok(VtableFnPointer(data))
}
TraitAliasCandidate(alias_def_id) => {
let data = self.confirm_trait_alias_candidate(obligation, alias_def_id);
Ok(VtableTraitAlias(data))
}
ObjectCandidate => {
let data = self.confirm_object_candidate(obligation);
Ok(VtableObject(data))
}
BuiltinObjectCandidate => {
// This indicates something like `(Trait+Send) :
// Send`. In this case, we know that this holds
// because that's what the object type is telling us,
                // and there are really no additional obligations to
// prove and no types in particular to unify etc.
Ok(VtableParam(Vec::new()))
}
BuiltinUnsizeCandidate => {
let data = self.confirm_builtin_unsize_candidate(obligation)?;
Ok(VtableBuiltin(data))
}
}
}
fn confirm_projection_candidate(&mut self, obligation: &TraitObligation<'tcx>) {
self.in_snapshot(|this, snapshot| {
let result =
this.match_projection_obligation_against_definition_bounds(obligation, snapshot);
assert!(result);
})
}
fn confirm_param_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
param: ty::PolyTraitRef<'tcx>,
) -> Vec<PredicateObligation<'tcx>> {
debug!("confirm_param_candidate({:?},{:?})", obligation, param);
// During evaluation, we already checked that this
// where-clause trait-ref could be unified with the obligation
// trait-ref. Repeat that unification now without any
// transactional boundary; it should not fail.
match self.match_where_clause_trait_ref(obligation, param.clone()) {
Ok(obligations) => obligations,
Err(()) => {
bug!(
"Where clause `{:?}` was applicable to `{:?}` but now is not",
param,
obligation
);
}
}
}
fn confirm_builtin_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
has_nested: bool,
) -> VtableBuiltinData<PredicateObligation<'tcx>> {
debug!(
"confirm_builtin_candidate({:?}, {:?})",
obligation, has_nested
);
let lang_items = self.tcx().lang_items();
let obligations = if has_nested {
let trait_def = obligation.predicate.def_id();
let conditions = if Some(trait_def) == lang_items.sized_trait() {
self.sized_conditions(obligation)
} else if Some(trait_def) == lang_items.copy_trait() {
self.copy_clone_conditions(obligation)
} else if Some(trait_def) == lang_items.clone_trait() {
self.copy_clone_conditions(obligation)
} else {
bug!("unexpected builtin trait {:?}", trait_def)
};
let nested = match conditions {
BuiltinImplConditions::Where(nested) => nested,
_ => bug!(
"obligation {:?} had matched a builtin impl but now doesn't",
obligation
),
};
let cause = obligation.derived_cause(BuiltinDerivedObligation);
self.collect_predicates_for_types(
obligation.param_env,
cause,
obligation.recursion_depth + 1,
trait_def,
nested,
)
} else {
vec![]
};
debug!("confirm_builtin_candidate: obligations={:?}", obligations);
VtableBuiltinData {
nested: obligations,
}
}
    /// This handles the case where an `auto trait Foo` impl is being used.
/// The idea is that the impl applies to `X : Foo` if the following conditions are met:
///
/// 1. For each constituent type `Y` in `X`, `Y : Foo` holds
/// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds.
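    ///
    /// For instance (an illustrative sketch, not a special case in the code):
    /// proving `(u32, String) : Foo` for an `auto trait Foo` reduces to proving
    /// `u32 : Foo` and `String : Foo`, plus any where-clauses declared on `Foo`
    /// with `Self` mapped to `(u32, String)`.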
fn confirm_auto_impl_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
trait_def_id: DefId,
) -> VtableAutoImplData<PredicateObligation<'tcx>> {
debug!(
"confirm_auto_impl_candidate({:?}, {:?})",
obligation, trait_def_id
);
let types = obligation.predicate.map_bound(|inner| {
let self_ty = self.infcx.shallow_resolve(inner.self_ty());
self.constituent_types_for_ty(self_ty)
});
self.vtable_auto_impl(obligation, trait_def_id, types)
}
/// See `confirm_auto_impl_candidate`.
fn vtable_auto_impl(
&mut self,
obligation: &TraitObligation<'tcx>,
trait_def_id: DefId,
nested: ty::Binder<Vec<Ty<'tcx>>>,
) -> VtableAutoImplData<PredicateObligation<'tcx>> {
debug!("vtable_auto_impl: nested={:?}", nested);
let cause = obligation.derived_cause(BuiltinDerivedObligation);
let mut obligations = self.collect_predicates_for_types(
obligation.param_env,
cause,
obligation.recursion_depth + 1,
trait_def_id,
nested,
);
let trait_obligations: Vec<PredicateObligation<'_>> = self.in_snapshot(|this, snapshot| {
let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
let (trait_ref, placeholder_map) = this.infcx()
.replace_bound_vars_with_placeholders(&poly_trait_ref);
let cause = obligation.derived_cause(ImplDerivedObligation);
this.impl_or_trait_obligations(
cause,
obligation.recursion_depth + 1,
obligation.param_env,
trait_def_id,
&trait_ref.substs,
placeholder_map,
snapshot,
)
});
// Adds the predicates from the trait. Note that this contains a `Self: Trait`
// predicate as usual. It won't have any effect since auto traits are coinductive.
obligations.extend(trait_obligations);
debug!("vtable_auto_impl: obligations={:?}", obligations);
VtableAutoImplData {
trait_def_id,
nested: obligations,
}
}
fn confirm_impl_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
impl_def_id: DefId,
) -> VtableImplData<'tcx, PredicateObligation<'tcx>> {
debug!("confirm_impl_candidate({:?},{:?})", obligation, impl_def_id);
// First, create the substitutions by matching the impl again,
// this time not in a probe.
self.in_snapshot(|this, snapshot| {
let (substs, placeholder_map) = this.rematch_impl(impl_def_id, obligation, snapshot);
debug!("confirm_impl_candidate: substs={:?}", substs);
let cause = obligation.derived_cause(ImplDerivedObligation);
this.vtable_impl(
impl_def_id,
substs,
cause,
obligation.recursion_depth + 1,
obligation.param_env,
placeholder_map,
snapshot,
)
})
}
fn vtable_impl(
&mut self,
impl_def_id: DefId,
mut substs: Normalized<'tcx, &'tcx Substs<'tcx>>,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
placeholder_map: infer::PlaceholderMap<'tcx>,
snapshot: &infer::CombinedSnapshot<'cx, 'tcx>,
) -> VtableImplData<'tcx, PredicateObligation<'tcx>> {
debug!(
"vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={}, placeholder_map={:?})",
impl_def_id, substs, recursion_depth, placeholder_map
);
let mut impl_obligations = self.impl_or_trait_obligations(
cause,
recursion_depth,
param_env,
impl_def_id,
&substs.value,
placeholder_map,
snapshot,
);
debug!(
"vtable_impl: impl_def_id={:?} impl_obligations={:?}",
impl_def_id, impl_obligations
);
// Because of RFC447, the impl-trait-ref and obligations
// are sufficient to determine the impl substs, without
// relying on projections in the impl-trait-ref.
//
// e.g. `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
impl_obligations.append(&mut substs.obligations);
VtableImplData {
impl_def_id,
substs: substs.value,
nested: impl_obligations,
}
}
fn confirm_object_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> VtableObjectData<'tcx, PredicateObligation<'tcx>> {
debug!("confirm_object_candidate({:?})", obligation);
// FIXME(nmatsakis) skipping binder here seems wrong -- we should
// probably flatten the binder from the obligation and the binder
// from the object. Have to try to make a broken test case that
// results.
let self_ty = self.infcx
.shallow_resolve(*obligation.self_ty().skip_binder());
let poly_trait_ref = match self_ty.sty {
ty::Dynamic(ref data, ..) => data.principal().with_self_ty(self.tcx(), self_ty),
_ => span_bug!(obligation.cause.span, "object candidate with non-object"),
};
let mut upcast_trait_ref = None;
let mut nested = vec![];
let vtable_base;
{
let tcx = self.tcx();
// We want to find the first supertrait in the list of
// supertraits that we can unify with, and do that
// unification. We know that there is exactly one in the list
// where we can unify because otherwise select would have
// reported an ambiguity. (When we do find a match, also
// record it for later.)
let nonmatching = util::supertraits(tcx, poly_trait_ref).take_while(
|&t| match self.commit_if_ok(|this, _| this.match_poly_trait_ref(obligation, t)) {
Ok(obligations) => {
upcast_trait_ref = Some(t);
nested.extend(obligations);
false
}
Err(_) => true,
},
);
// Additionally, for each of the nonmatching predicates that
            // we pass over, we sum up the number of vtable
// entries, so that we can compute the offset for the selected
// trait.
vtable_base = nonmatching.map(|t| tcx.count_own_vtable_entries(t)).sum();
}
VtableObjectData {
upcast_trait_ref: upcast_trait_ref.unwrap(),
vtable_base,
nested,
}
}
fn confirm_fn_pointer_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> Result<VtableFnPointerData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
debug!("confirm_fn_pointer_candidate({:?})", obligation);
// OK to skip binder; it is reintroduced below
let self_ty = self.infcx
.shallow_resolve(*obligation.self_ty().skip_binder());
let sig = self_ty.fn_sig(self.tcx());
let trait_ref = self.tcx()
.closure_trait_ref_and_return_type(
obligation.predicate.def_id(),
self_ty,
sig,
util::TupleArgumentsFlag::Yes,
)
.map_bound(|(trait_ref, _)| trait_ref);
let Normalized {
value: trait_ref,
obligations,
} = project::normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
&trait_ref,
);
self.confirm_poly_trait_refs(
obligation.cause.clone(),
obligation.param_env,
obligation.predicate.to_poly_trait_ref(),
trait_ref,
)?;
Ok(VtableFnPointerData {
fn_ty: self_ty,
nested: obligations,
})
}
fn confirm_trait_alias_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
alias_def_id: DefId,
) -> VtableTraitAliasData<'tcx, PredicateObligation<'tcx>> {
debug!(
"confirm_trait_alias_candidate({:?}, {:?})",
obligation, alias_def_id
);
self.in_snapshot(|this, snapshot| {
let (predicate, placeholder_map) = this.infcx()
.replace_bound_vars_with_placeholders(&obligation.predicate);
let trait_ref = predicate.trait_ref;
let trait_def_id = trait_ref.def_id;
let substs = trait_ref.substs;
let trait_obligations = this.impl_or_trait_obligations(
obligation.cause.clone(),
obligation.recursion_depth,
obligation.param_env,
trait_def_id,
&substs,
placeholder_map,
snapshot,
);
debug!(
"confirm_trait_alias_candidate: trait_def_id={:?} trait_obligations={:?}",
trait_def_id, trait_obligations
);
VtableTraitAliasData {
alias_def_id,
                substs,
nested: trait_obligations,
}
})
}
fn confirm_generator_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> Result<VtableGeneratorData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
// OK to skip binder because the substs on generator types never
// touch bound regions, they just capture the in-scope
// type/region parameters
let self_ty = self.infcx
.shallow_resolve(obligation.self_ty().skip_binder());
let (generator_def_id, substs) = match self_ty.sty {
ty::Generator(id, substs, _) => (id, substs),
_ => bug!("closure candidate for non-closure {:?}", obligation),
};
debug!(
"confirm_generator_candidate({:?},{:?},{:?})",
obligation, generator_def_id, substs
);
let trait_ref = self.generator_trait_ref_unnormalized(obligation, generator_def_id, substs);
let Normalized {
value: trait_ref,
mut obligations,
} = normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
&trait_ref,
);
debug!(
"confirm_generator_candidate(generator_def_id={:?}, \
trait_ref={:?}, obligations={:?})",
generator_def_id, trait_ref, obligations
);
obligations.extend(self.confirm_poly_trait_refs(
obligation.cause.clone(),
obligation.param_env,
obligation.predicate.to_poly_trait_ref(),
trait_ref,
)?);
Ok(VtableGeneratorData {
generator_def_id: generator_def_id,
substs: substs.clone(),
nested: obligations,
})
}
fn confirm_closure_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
debug!("confirm_closure_candidate({:?})", obligation);
let kind = self.tcx()
.lang_items()
.fn_trait_kind(obligation.predicate.def_id())
.unwrap_or_else(|| bug!("closure candidate for non-fn trait {:?}", obligation));
// OK to skip binder because the substs on closure types never
// touch bound regions, they just capture the in-scope
// type/region parameters
let self_ty = self.infcx
.shallow_resolve(obligation.self_ty().skip_binder());
let (closure_def_id, substs) = match self_ty.sty {
ty::Closure(id, substs) => (id, substs),
_ => bug!("closure candidate for non-closure {:?}", obligation),
};
let trait_ref = self.closure_trait_ref_unnormalized(obligation, closure_def_id, substs);
let Normalized {
value: trait_ref,
mut obligations,
} = normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
&trait_ref,
);
debug!(
"confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
closure_def_id, trait_ref, obligations
);
obligations.extend(self.confirm_poly_trait_refs(
obligation.cause.clone(),
obligation.param_env,
obligation.predicate.to_poly_trait_ref(),
trait_ref,
)?);
obligations.push(Obligation::new(
obligation.cause.clone(),
obligation.param_env,
ty::Predicate::ClosureKind(closure_def_id, substs, kind),
));
Ok(VtableClosureData {
closure_def_id,
substs: substs.clone(),
nested: obligations,
})
}
/// In the case of closure types and fn pointers,
/// we currently treat the input type parameters on the trait as
/// outputs. This means that when we have a match we have only
/// considered the self type, so we have to go back and make sure
/// to relate the argument types too. This is kind of wrong, but
/// since we control the full set of impls, also not that wrong,
/// and it DOES yield better error messages (since we don't report
/// errors as if there is no applicable impl, but rather report
    /// errors about mismatched argument types).
///
/// Here is an example. Imagine we have a closure expression
/// and we desugared it so that the type of the expression is
/// `Closure`, and `Closure` expects an int as argument. Then it
/// is "as if" the compiler generated this impl:
///
/// impl Fn(int) for Closure { ... }
///
/// Now imagine our obligation is `Fn(usize) for Closure`. So far
/// we have matched the self-type `Closure`. At this point we'll
/// compare the `int` to `usize` and generate an error.
///
    /// Note that this checking occurs *after* the impl has been selected,
/// because these output type parameters should not affect the
/// selection of the impl. Therefore, if there is a mismatch, we
/// report an error to the user.
fn confirm_poly_trait_refs(
&mut self,
obligation_cause: ObligationCause<'tcx>,
obligation_param_env: ty::ParamEnv<'tcx>,
obligation_trait_ref: ty::PolyTraitRef<'tcx>,
expected_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
let obligation_trait_ref = obligation_trait_ref.clone();
self.infcx
.at(&obligation_cause, obligation_param_env)
.sup(obligation_trait_ref, expected_trait_ref)
.map(|InferOk { obligations, .. }| obligations)
.map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e))
}
fn confirm_builtin_unsize_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> Result<VtableBuiltinData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
let tcx = self.tcx();
// assemble_candidates_for_unsizing should ensure there are no late bound
// regions here. See the comment there for more details.
let source = self.infcx
.shallow_resolve(obligation.self_ty().no_bound_vars().unwrap());
let target = obligation
.predicate
.skip_binder()
.trait_ref
.substs
.type_at(1);
let target = self.infcx.shallow_resolve(target);
debug!(
"confirm_builtin_unsize_candidate(source={:?}, target={:?})",
source, target
);
let mut nested = vec![];
match (&source.sty, &target.sty) {
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
(&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
// See assemble_candidates_for_unsizing for more info.
let existential_predicates = data_a.map_bound(|data_a| {
let iter = iter::once(ty::ExistentialPredicate::Trait(data_a.principal()))
.chain(
data_a
.projection_bounds()
.map(|x| ty::ExistentialPredicate::Projection(x)),
)
.chain(
data_b
.auto_traits()
.map(ty::ExistentialPredicate::AutoTrait),
);
tcx.mk_existential_predicates(iter)
});
let new_trait = tcx.mk_dynamic(existential_predicates, r_b);
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(target, new_trait)
.map_err(|_| Unimplemented)?;
nested.extend(obligations);
// Register one obligation for 'a: 'b.
let cause = ObligationCause::new(
obligation.cause.span,
obligation.cause.body_id,
ObjectCastObligation(target),
);
let outlives = ty::OutlivesPredicate(r_a, r_b);
nested.push(Obligation::with_depth(
cause,
obligation.recursion_depth + 1,
obligation.param_env,
ty::Binder::bind(outlives).to_predicate(),
));
}
// T -> Trait.
(_, &ty::Dynamic(ref data, r)) => {
let mut object_dids = data.auto_traits()
.chain(iter::once(data.principal().def_id()));
if let Some(did) = object_dids.find(|did| !tcx.is_object_safe(*did)) {
return Err(TraitNotObjectSafe(did));
}
let cause = ObligationCause::new(
obligation.cause.span,
obligation.cause.body_id,
ObjectCastObligation(target),
);
let predicate_to_obligation = |predicate| {
Obligation::with_depth(
cause.clone(),
obligation.recursion_depth + 1,
obligation.param_env,
predicate,
)
};
// Create obligations:
// - Casting T to Trait
// - For all the various builtin bounds attached to the object cast. (In other
// words, if the object type is Foo+Send, this would create an obligation for the
// Send check.)
// - Projection predicates
nested.extend(
data.iter()
.map(|d| predicate_to_obligation(d.with_self_ty(tcx, source))),
);
// We can only make objects from sized types.
let tr = ty::TraitRef {
def_id: tcx.require_lang_item(lang_items::SizedTraitLangItem),
substs: tcx.mk_substs_trait(source, &[]),
};
nested.push(predicate_to_obligation(tr.to_predicate()));
// If the type is `Foo+'a`, ensures that the type
// being cast to `Foo+'a` outlives `'a`:
let outlives = ty::OutlivesPredicate(source, r);
nested.push(predicate_to_obligation(
ty::Binder::dummy(outlives).to_predicate(),
));
}
// [T; n] -> [T].
(&ty::Array(a, _), &ty::Slice(b)) => {
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(b, a)
.map_err(|_| Unimplemented)?;
nested.extend(obligations);
}
// Struct<T> -> Struct<U>.
(&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => {
let fields = def.all_fields()
.map(|f| tcx.type_of(f.did))
.collect::<Vec<_>>();
// The last field of the structure has to exist and contain type parameters.
let field = if let Some(&field) = fields.last() {
field
} else {
return Err(Unimplemented);
};
let mut ty_params = GrowableBitSet::new_empty();
let mut found = false;
for ty in field.walk() {
if let ty::Param(p) = ty.sty {
ty_params.insert(p.idx as usize);
found = true;
}
}
if !found {
return Err(Unimplemented);
}
// Replace type parameters used in unsizing with
// Error and ensure they do not affect any other fields.
// This could be checked after type collection for any struct
// with a potentially unsized trailing field.
let params = substs_a.iter().enumerate().map(|(i, &k)| {
if ty_params.contains(i) {
tcx.types.err.into()
} else {
k
}
});
let substs = tcx.mk_substs(params);
for &ty in fields.split_last().unwrap().1 {
if ty.subst(tcx, substs).references_error() {
return Err(Unimplemented);
}
}
// Extract Field<T> and Field<U> from Struct<T> and Struct<U>.
let inner_source = field.subst(tcx, substs_a);
let inner_target = field.subst(tcx, substs_b);
// Check that the source struct with the target's
// unsized parameters is equal to the target.
let params = substs_a.iter().enumerate().map(|(i, &k)| {
if ty_params.contains(i) {
substs_b.type_at(i).into()
} else {
k
}
});
let new_struct = tcx.mk_adt(def, tcx.mk_substs(params));
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(target, new_struct)
.map_err(|_| Unimplemented)?;
nested.extend(obligations);
// Construct the nested Field<T>: Unsize<Field<U>> predicate.
nested.push(tcx.predicate_for_trait_def(
obligation.param_env,
obligation.cause.clone(),
obligation.predicate.def_id(),
obligation.recursion_depth + 1,
inner_source,
&[inner_target.into()],
));
}
// (.., T) -> (.., U).
(&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
assert_eq!(tys_a.len(), tys_b.len());
// The last field of the tuple has to exist.
let (&a_last, a_mid) = if let Some(x) = tys_a.split_last() {
x
} else {
return Err(Unimplemented);
};
let &b_last = tys_b.last().unwrap();
// Check that the source tuple with the target's
// last element is equal to the target.
let new_tuple = tcx.mk_tup(a_mid.iter().cloned().chain(iter::once(b_last)));
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(target, new_tuple)
.map_err(|_| Unimplemented)?;
nested.extend(obligations);
// Construct the nested T: Unsize<U> predicate.
nested.push(tcx.predicate_for_trait_def(
obligation.param_env,
obligation.cause.clone(),
obligation.predicate.def_id(),
obligation.recursion_depth + 1,
a_last,
&[b_last.into()],
));
}
_ => bug!(),
};
Ok(VtableBuiltinData { nested })
}
///////////////////////////////////////////////////////////////////////////
// Matching
//
// Matching is a common path used for both evaluation and
// confirmation. It basically unifies types that appear in impls
// and traits. This does affect the surrounding environment;
// therefore, when used during evaluation, match routines must be
// run inside of a `probe()` so that their side-effects are
// contained.
fn rematch_impl(
&mut self,
impl_def_id: DefId,
obligation: &TraitObligation<'tcx>,
snapshot: &infer::CombinedSnapshot<'cx, 'tcx>,
) -> (
Normalized<'tcx, &'tcx Substs<'tcx>>,
infer::PlaceholderMap<'tcx>,
) {
match self.match_impl(impl_def_id, obligation, snapshot) {
Ok((substs, placeholder_map)) => (substs, placeholder_map),
Err(()) => {
bug!(
"Impl {:?} was matchable against {:?} but now is not",
impl_def_id,
obligation
);
}
}
}
fn match_impl(
&mut self,
impl_def_id: DefId,
obligation: &TraitObligation<'tcx>,
snapshot: &infer::CombinedSnapshot<'cx, 'tcx>,
) -> Result<
(
Normalized<'tcx, &'tcx Substs<'tcx>>,
infer::PlaceholderMap<'tcx>,
),
(),
> {
let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
// Before we create the substitutions and everything, first
// consider a "quick reject". This avoids creating more types
// and so forth that we need to.
if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
return Err(());
}
let (skol_obligation, placeholder_map) = self.infcx()
.replace_bound_vars_with_placeholders(&obligation.predicate);
let skol_obligation_trait_ref = skol_obligation.trait_ref;
let impl_substs = self.infcx
.fresh_substs_for_item(obligation.cause.span, impl_def_id);
let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs);
let Normalized {
value: impl_trait_ref,
obligations: mut nested_obligations,
} = project::normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
&impl_trait_ref,
);
debug!(
"match_impl(impl_def_id={:?}, obligation={:?}, \
impl_trait_ref={:?}, skol_obligation_trait_ref={:?})",
impl_def_id, obligation, impl_trait_ref, skol_obligation_trait_ref
);
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(skol_obligation_trait_ref, impl_trait_ref)
.map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?;
nested_obligations.extend(obligations);
if let Err(e) =
self.infcx
.leak_check(false, obligation.cause.span, &placeholder_map, snapshot)
{
debug!("match_impl: failed leak check due to `{}`", e);
return Err(());
}
debug!("match_impl: success impl_substs={:?}", impl_substs);
Ok((
Normalized {
value: impl_substs,
obligations: nested_obligations,
},
placeholder_map,
))
}
fn fast_reject_trait_refs(
&mut self,
obligation: &TraitObligation<'_>,
impl_trait_ref: &ty::TraitRef<'_>,
) -> bool {
// We can avoid creating type variables and doing the full
// substitution if we find that any of the input types, when
// simplified, do not match.
obligation
.predicate
.skip_binder()
.input_types()
.zip(impl_trait_ref.input_types())
.any(|(obligation_ty, impl_ty)| {
let simplified_obligation_ty =
fast_reject::simplify_type(self.tcx(), obligation_ty, true);
let simplified_impl_ty = fast_reject::simplify_type(self.tcx(), impl_ty, false);
simplified_obligation_ty.is_some()
&& simplified_impl_ty.is_some()
&& simplified_obligation_ty != simplified_impl_ty
})
}
/// Normalize `where_clause_trait_ref` and try to match it against
/// `obligation`. If successful, return any predicates that
/// result from the normalization. Normalization is necessary
/// because where-clauses are stored in the parameter environment
/// unnormalized.
fn match_where_clause_trait_ref(
&mut self,
obligation: &TraitObligation<'tcx>,
where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
self.match_poly_trait_ref(obligation, where_clause_trait_ref)
}
/// Returns `Ok` if `poly_trait_ref` being true implies that the
/// obligation is satisfied.
fn match_poly_trait_ref(
&mut self,
obligation: &TraitObligation<'tcx>,
poly_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
debug!(
"match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
obligation, poly_trait_ref
);
self.infcx
.at(&obligation.cause, obligation.param_env)
.sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
.map(|InferOk { obligations, .. }| obligations)
.map_err(|_| ())
}
///////////////////////////////////////////////////////////////////////////
// Miscellany
fn match_fresh_trait_refs(
&self,
previous: &ty::PolyTraitRef<'tcx>,
current: &ty::PolyTraitRef<'tcx>,
) -> bool {
let mut matcher = ty::_match::Match::new(self.tcx());
matcher.relate(previous, current).is_ok()
}
fn push_stack<'o, 's: 'o>(
&mut self,
previous_stack: TraitObligationStackList<'s, 'tcx>,
obligation: &'o TraitObligation<'tcx>,
) -> TraitObligationStack<'o, 'tcx> {
let fresh_trait_ref = obligation
.predicate
.to_poly_trait_ref()
.fold_with(&mut self.freshener);
TraitObligationStack {
obligation,
fresh_trait_ref,
previous: previous_stack,
}
}
fn closure_trait_ref_unnormalized(
&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: DefId,
substs: ty::ClosureSubsts<'tcx>,
) -> ty::PolyTraitRef<'tcx> {
debug!(
"closure_trait_ref_unnormalized(obligation={:?}, closure_def_id={:?}, substs={:?})",
obligation, closure_def_id, substs,
);
let closure_type = self.infcx.closure_sig(closure_def_id, substs);
debug!(
"closure_trait_ref_unnormalized: closure_type = {:?}",
closure_type
);
// (1) Feels icky to skip the binder here, but OTOH we know
// that the self-type is an unboxed closure type and hence is
// in fact unparameterized (or at least does not reference any
// regions bound in the obligation). Still probably some
// refactoring could make this nicer.
self.tcx()
.closure_trait_ref_and_return_type(
obligation.predicate.def_id(),
obligation.predicate.skip_binder().self_ty(), // (1)
closure_type,
util::TupleArgumentsFlag::No,
)
.map_bound(|(trait_ref, _)| trait_ref)
}
fn generator_trait_ref_unnormalized(
&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: DefId,
substs: ty::GeneratorSubsts<'tcx>,
) -> ty::PolyTraitRef<'tcx> {
let gen_sig = substs.poly_sig(closure_def_id, self.tcx());
// (1) Feels icky to skip the binder here, but OTOH we know
        // that the self-type is a generator type and hence is
// in fact unparameterized (or at least does not reference any
// regions bound in the obligation). Still probably some
// refactoring could make this nicer.
self.tcx()
.generator_trait_ref_and_outputs(
obligation.predicate.def_id(),
obligation.predicate.skip_binder().self_ty(), // (1)
gen_sig,
)
.map_bound(|(trait_ref, ..)| trait_ref)
}
/// Returns the obligations that are implied by instantiating an
/// impl or trait. The obligations are substituted and fully
/// normalized. This is used when confirming an impl or default
/// impl.
fn impl_or_trait_obligations(
&mut self,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId, // of impl or trait
substs: &Substs<'tcx>, // for impl or trait
placeholder_map: infer::PlaceholderMap<'tcx>,
snapshot: &infer::CombinedSnapshot<'cx, 'tcx>,
) -> Vec<PredicateObligation<'tcx>> {
debug!("impl_or_trait_obligations(def_id={:?})", def_id);
let tcx = self.tcx();
// To allow for one-pass evaluation of the nested obligation,
// each predicate must be preceded by the obligations required
// to normalize it.
// for example, if we have:
// impl<U: Iterator, V: Iterator<Item=U>> Foo for V where U::Item: Copy
// the impl will have the following predicates:
// <V as Iterator>::Item = U,
// U: Iterator, U: Sized,
// V: Iterator, V: Sized,
// <U as Iterator>::Item: Copy
// When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
// obligation will normalize to `<$0 as Iterator>::Item = $1` and
// `$1: Copy`, so we must ensure the obligations are emitted in
// that order.
let predicates = tcx.predicates_of(def_id);
assert_eq!(predicates.parent, None);
let mut predicates: Vec<_> = predicates
.predicates
.iter()
.flat_map(|(predicate, _)| {
let predicate = normalize_with_depth(
self,
param_env,
cause.clone(),
recursion_depth,
&predicate.subst(tcx, substs),
);
predicate.obligations.into_iter().chain(Some(Obligation {
cause: cause.clone(),
recursion_depth,
param_env,
predicate: predicate.value,
}))
})
.collect();
// We are performing deduplication here to avoid exponential blowups
// (#38528) from happening, but the real cause of the duplication is
// unknown. What we know is that the deduplication avoids exponential
// amount of predicates being propagated when processing deeply nested
// types.
//
// This code is hot enough that it's worth avoiding the allocation
// required for the FxHashSet when possible. Special-casing lengths 0,
// 1 and 2 covers roughly 75--80% of the cases.
if predicates.len() <= 1 {
// No possibility of duplicates.
} else if predicates.len() == 2 {
// Only two elements. Drop the second if they are equal.
if predicates[0] == predicates[1] {
predicates.truncate(1);
}
} else {
// Three or more elements. Use a general deduplication process.
let mut seen = FxHashSet::default();
predicates.retain(|i| seen.insert(i.clone()));
}
self.infcx()
.plug_leaks(placeholder_map, snapshot, predicates)
}
}
impl<'tcx> TraitObligation<'tcx> {
#[allow(unused_comparisons)]
pub fn derived_cause(
&self,
variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
) -> ObligationCause<'tcx> {
/*!
* Creates a cause for obligations that are derived from
* `obligation` by a recursive search (e.g., for a builtin
         * bound, or eventually an `auto trait Foo`). If `obligation`
* is itself a derived obligation, this is just a clone, but
* otherwise we create a "derived obligation" cause so as to
* keep track of the original root obligation for error
* reporting.
*/
let obligation = self;
// NOTE(flaper87): As of now, it keeps track of the whole error
// chain. Ideally, we should have a way to configure this either
// by using -Z verbose or just a CLI argument.
if obligation.recursion_depth >= 0 {
let derived_cause = DerivedObligationCause {
parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
parent_code: Rc::new(obligation.cause.code.clone()),
};
let derived_code = variant(derived_cause);
ObligationCause::new(
obligation.cause.span,
obligation.cause.body_id,
derived_code,
)
} else {
obligation.cause.clone()
}
}
}
impl<'tcx> SelectionCache<'tcx> {
/// Actually frees the underlying memory in contrast to what stdlib containers do on `clear`
pub fn clear(&self) {
*self.hashmap.borrow_mut() = Default::default();
}
}
impl<'tcx> EvaluationCache<'tcx> {
/// Actually frees the underlying memory in contrast to what stdlib containers do on `clear`
pub fn clear(&self) {
*self.hashmap.borrow_mut() = Default::default();
}
}
impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList::with(self)
}
fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
self.list()
}
}
#[derive(Copy, Clone)]
struct TraitObligationStackList<'o, 'tcx: 'o> {
head: Option<&'o TraitObligationStack<'o, 'tcx>>,
}
impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
fn empty() -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList { head: None }
}
fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList { head: Some(r) }
}
}
impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
type Item = &'o TraitObligationStack<'o, 'tcx>;
fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
match self.head {
Some(o) => {
*self = o.previous;
Some(o)
}
None => None,
}
}
}
impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "TraitObligationStack({:?})", self.obligation)
}
}
#[derive(Clone, Eq, PartialEq)]
pub struct WithDepNode<T> {
dep_node: DepNodeIndex,
cached_value: T,
}
impl<T: Clone> WithDepNode<T> {
pub fn new(dep_node: DepNodeIndex, cached_value: T) -> Self {
WithDepNode {
dep_node,
cached_value,
}
}
pub fn get(&self, tcx: TyCtxt<'_, '_, '_>) -> T {
tcx.dep_graph.read_index(self.dep_node);
self.cached_value.clone()
}
}
| 39.239869 | 100 | 0.543778 |
75bf12ef8c9c3bbc6bf7ee4da2456b76dbc3cb4f | 3,426 | // Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
use std::env;
use std::sync::Once;
use opentelemetry::global;
use opentelemetry::sdk::propagation::TraceContextPropagator;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_appender::rolling::RollingFileAppender;
use tracing_appender::rolling::Rotation;
use tracing_bunyan_formatter::BunyanFormattingLayer;
use tracing_bunyan_formatter::JsonStorageLayer;
use tracing_subscriber::fmt;
use tracing_subscriber::fmt::Layer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::registry::Registry;
use tracing_subscriber::EnvFilter;
/// Write logs to stdout.
pub fn init_default_tracing() {
static START: Once = Once::new();
START.call_once(|| {
init_tracing_stdout();
});
}
/// Init logging and tracing.
///
/// To enable reporting tracing data to jaeger, set the env var `FUSE_JAEGER` to a non-empty value.
/// A local tracing collection(maybe for testing) can be done with a local jaeger server.
/// To report tracing data and view it:
/// docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 jaegertracing/all-in-one:latest
/// FUSE_JAEGER=on RUST_LOG=trace cargo test
/// open http://localhost:16686/
///
/// To adjust batch sending delay, use `OTEL_BSP_SCHEDULE_DELAY`:
/// FUSE_JAEGER=on RUST_LOG=trace OTEL_BSP_SCHEDULE_DELAY=1 cargo test
///
// TODO(xp): use FUSE_JAEGER to assign jaeger server address.
fn init_tracing_stdout() {
let fmt_layer = fmt::Layer::default()
.with_thread_ids(true)
.with_thread_names(true)
.pretty()
.with_ansi(true)
.with_span_events(fmt::format::FmtSpan::FULL);
let fuse_jaeger = env::var("FUSE_JAEGER").unwrap_or_else(|_| "".to_string());
let ot_layer = if !fuse_jaeger.is_empty() {
global::set_text_map_propagator(TraceContextPropagator::new());
let tracer = opentelemetry_jaeger::new_pipeline()
.with_service_name("fuse-store")
.install_batch(opentelemetry::runtime::Tokio)
.expect("install");
let ot_layer = tracing_opentelemetry::layer().with_tracer(tracer);
Some(ot_layer)
} else {
None
};
let subscriber = Registry::default()
.with(EnvFilter::from_default_env())
.with(fmt_layer)
.with(ot_layer);
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");
}
/// Write logs to a file, rotating hourly.
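///
/// The returned `WorkerGuard`s must be kept alive for as long as logging is
/// needed; dropping them flushes and shuts down the non-blocking writers.
///
/// Illustrative usage (a sketch; the argument values are assumptions, not
/// taken from this crate):
///
/// ```ignore
/// let _guards = init_tracing_with_file("fuse-store", "./logs", "info");
/// tracing::info!("logging initialized");
/// ```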
pub fn init_tracing_with_file(app_name: &str, dir: &str, level: &str) -> Vec<WorkerGuard> {
let mut guards = vec![];
let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
let stdout_logging_layer = Layer::new().with_writer(stdout_writer);
guards.push(stdout_guard);
let file_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
let (file_writer, file_guard) = tracing_appender::non_blocking(file_appender);
let file_logging_layer = BunyanFormattingLayer::new(app_name.to_string(), file_writer);
guards.push(file_guard);
let subscriber = Registry::default()
.with(EnvFilter::new(level))
.with(stdout_logging_layer)
.with(JsonStorageLayer)
.with(file_logging_layer);
tracing::subscriber::set_global_default(subscriber)
.expect("error setting global tracing subscriber");
guards
}
| 34.959184 | 97 | 0.706947 |
e435bc483b8b814684154b2970fb629b34e0fcd7 | 7,621 | use sysinfo::{System, SystemExt, ProcessorExt, DiskExt, UserExt, PidExt, ProcessExt, DiskUsage, RefreshKind};
//use sysinfo::{NetworkExt, NetworksExt};
use default_net::Interface as NetworkInterface;
use std::collections::HashMap;
#[derive(Clone, Debug)]
pub struct CpuInfo {
pub vendor_id: String,
pub brand: String,
pub frequency: u64,
pub cpu_usage: f32,
pub physical_core_count: usize,
pub logical_processor_count: usize,
}
#[derive(Clone, Debug)]
pub struct ProcessorStat {
pub name: String,
pub vendor_id: String,
pub brand: String,
pub frequency: u64,
pub cpu_usage: f32,
}
#[derive(Clone, Debug)]
pub struct MemoryInfo {
pub total_memory: u64,
pub free_memory: u64,
pub available_memory: u64,
pub used_memory: u64,
pub total_swap: u64,
pub free_swap: u64,
pub used_swap: u64,
}
#[derive(Clone, Debug)]
pub struct Diskinfo {
pub name: String,
pub file_system: String,
pub mount_point: String,
pub total_space: u64,
pub available_space: u64,
pub is_removable: bool,
}
#[derive(Clone, Debug)]
pub struct NetworkUsageInfo {
pub interface_name: String,
pub received: u64,
pub total_received: u64,
pub transmitted: u64,
pub total_transmitted: u64,
pub packets_received: u64,
pub total_packets_received: u64,
pub packets_transmitted: u64,
pub total_packets_transmitted: u64,
pub errors_on_received: u64,
pub total_errors_on_received: u64,
pub errors_on_transmitted: u64,
pub total_errors_on_transmitted: u64,
}
#[derive(Clone, Debug)]
pub struct NetworkInterfaceInfo {
pub index: u32,
pub name: String,
pub mac: String,
pub ipv4_addr: Vec<String>,
pub ipv6_addr: Vec<String>,
pub gateway_ip: String,
pub gateway_mac: String,
}
#[derive(Clone, Debug)]
pub struct LoadAverage {
pub one: f64,
pub five: f64,
pub fifteen: f64,
}
#[derive(Clone, Debug)]
pub struct OsInfo {
pub name: String,
pub kernel_version: String,
pub os_version: String,
pub long_os_version: String,
pub host_name: String,
pub uptime: u64,
pub boot_time: u64,
pub load_average: LoadAverage,
}
#[derive(Clone, Debug)]
pub struct UserInfo {
pub user_id: String,
pub user_name: String,
pub group_id: String,
pub groups: Vec<String>,
}
#[derive(Clone, Debug)]
pub struct SystemOverview {
pub cpu: CpuInfo,
pub memory: MemoryInfo,
pub disks: Vec<Diskinfo>,
pub network_interfaces: Vec<NetworkInterface>,
pub os: OsInfo,
pub users: Vec<UserInfo>,
}
#[derive(Clone, Debug)]
pub struct ProcessInfo {
pub pid: u32,
pub name: String,
pub cmd: String,
pub full_path: String,
pub environ: String,
pub cw_dir: String,
pub root_dir: String,
pub memory: u64,
pub virtual_memory: u64,
pub parent: u32,
pub status: String,
pub start_time: u64,
pub cpu_usage: f32,
pub disk_usage: DiskUsage,
}
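/// Collects a point-in-time snapshot of CPU, memory, disk, network, OS and
/// user information.
///
/// Illustrative usage (a sketch, not taken from the original sources):
///
/// ```ignore
/// let overview = get_system_overview();
/// println!("host: {}", overview.os.host_name);
/// println!("physical cores: {}", overview.cpu.physical_core_count);
/// ```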
pub fn get_system_overview() -> SystemOverview {
let sys_cpu = System::new_with_specifics(RefreshKind::new().with_cpu());
// CPU
let global_cpu = sys_cpu.global_processor_info();
let processors = sys_cpu.processors();
let cpu_info = CpuInfo {
vendor_id: global_cpu.vendor_id().to_string(),
brand: global_cpu.brand().to_string(),
frequency: {
            if !processors.is_empty() {
processors[0].frequency()
}else{
global_cpu.frequency()
}
},
cpu_usage: global_cpu.cpu_usage(),
physical_core_count: {
match sys_cpu.physical_core_count() {
Some(cnt) => cnt,
None => processors.len() / 2,
}
},
logical_processor_count: processors.len(),
};
let sys_ram = System::new_with_specifics(RefreshKind::new().with_memory());
// Memory
let mem_info = MemoryInfo {
total_memory: sys_ram.total_memory(),
free_memory: sys_ram.free_memory(),
available_memory: sys_ram.available_memory(),
used_memory: sys_ram.used_memory(),
total_swap: sys_ram.total_swap(),
free_swap: sys_ram.free_swap(),
used_swap: sys_ram.used_swap(),
};
let mut sys = System::new_all();
sys.refresh_all();
// Disk
let mut disks: Vec<Diskinfo> = vec![];
for disk in sys.disks() {
let disk_info = Diskinfo {
name: disk.name().to_str().unwrap_or("").to_string(),
file_system: String::from_utf8(disk.file_system().to_vec()).unwrap_or(String::new()),
mount_point: disk.mount_point().to_str().unwrap_or("").to_string(),
total_space: disk.total_space(),
available_space: disk.available_space(),
is_removable: disk.is_removable(),
};
disks.push(disk_info);
}
// Network
let interfaces: Vec<NetworkInterface> = default_net::get_interfaces();
// OS
let load_avg = sys.load_average();
let load_average = LoadAverage {
one: load_avg.one,
five: load_avg.five,
fifteen: load_avg.fifteen,
};
let os_info = OsInfo {
name: sys.name().unwrap_or(String::new()),
kernel_version: sys.kernel_version().unwrap_or(String::new()),
os_version: sys.os_version().unwrap_or(String::new()),
long_os_version: sys.long_os_version().unwrap_or(String::new()),
host_name: sys.host_name().unwrap_or(String::new()),
uptime: sys.uptime(),
boot_time: sys.boot_time(),
load_average: load_average,
};
// User
let mut user_list: Vec<UserInfo> = vec![];
for user in sys.users() {
let user_info = UserInfo {
user_id: user.uid().to_string(),
user_name: user.name().to_string(),
group_id: user.gid().to_string(),
groups: user.groups().to_vec(),
};
user_list.push(user_info);
}
// Overview
let system_overview = SystemOverview {
cpu: cpu_info,
memory: mem_info,
disks: disks,
network_interfaces: interfaces,
os: os_info,
users: user_list,
};
return system_overview;
}
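/// Builds a map from PID to per-process information for all processes known
/// to the system.
///
/// Illustrative usage (a sketch; the PID shown is only an example):
///
/// ```ignore
/// let procs = get_process_map();
/// if let Some(p) = procs.get(&1) {
///     println!("{} -> {}", p.pid, p.name);
/// }
/// ```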
pub fn get_process_map() -> HashMap<u32, ProcessInfo> {
let mut process_map: HashMap<u32, ProcessInfo> = HashMap::new();
let mut sys = System::new_all();
sys.refresh_all();
for (pid, process) in sys.processes() {
let mut cmd: String = String::new();
for ele in process.cmd().to_vec() {
cmd = format!("{} {}",cmd,ele);
}
let mut env: String = String::new();
for ele in process.environ() {
env = format!("{} {}",env,ele);
}
let process_info = ProcessInfo {
pid: pid.as_u32(),
name: process.name().to_string(),
cmd: cmd,
full_path: process.exe().to_str().unwrap_or("").to_string(),
environ: env,
cw_dir: process.cwd().to_str().unwrap_or("").to_string(),
root_dir: process.root().to_str().unwrap_or("").to_string(),
memory: process.memory(),
virtual_memory: process.virtual_memory(),
parent: match process.parent() {
Some(p) => p.as_u32(),
None => 0,
},
status: process.status().to_string(),
start_time: process.start_time(),
cpu_usage: process.cpu_usage(),
disk_usage: process.disk_usage(),
};
process_map.insert(pid.as_u32(), process_info);
}
return process_map;
}
| 28.977186 | 109 | 0.611862 |
e8cdd4004065461d5050c6d944a1e1ff98c220c4 | 8,847 | use std::collections::VecDeque;
use std::fs::File;
use std::io::prelude::*;
use std::iter::FromIterator;
const TERMINATE: isize = 99;
fn extract_modes(instruction: isize) -> (u8, u8, u8, u8) {
let op = instruction % 100;
let first = (instruction % 1000) / 100;
let second = (instruction % 10000) / 1000;
let third = (instruction % 100000) / 10000;
(op as u8, first as u8, second as u8, third as u8)
}
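// For example, extract_modes(1002) yields (2, 0, 1, 0): opcode 2 (multiply)
// with its first parameter in position mode, its second in immediate mode,
// and its third in position mode.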
fn get_val(mode: u8, pointer: usize, memory: Vec<isize>) -> isize {
match mode {
0 => memory[memory[pointer] as usize],
1 => memory[pointer] as isize,
_ => panic!("bad mode {} @ {}", mode, pointer),
}
}
fn permutate(
input: &mut Vec<isize>,
l: usize,
r: usize,
output: &mut Vec<VecDeque<isize>>,
) -> Vec<VecDeque<isize>> {
if l == r {
output.push(VecDeque::from_iter(input.clone().into_iter()));
} else {
for i in l..r + 1 {
            // Swap the current element into position `l`, recurse on the rest,
            // then swap back so the caller sees the original ordering.
            input.swap(i, l);
            permutate(input, l + 1, r, output);
            input.swap(i, l);
}
}
return vec![VecDeque::new()];
}
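// Note: the permutations accumulate in `output`; the returned vector is a
// placeholder that callers ignore. For example, `permutate(&mut vec![0, 1, 2],
// 0, 2, &mut out)` leaves all six orderings of [0, 1, 2] in `out`.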
fn spawn_processes(
count: isize,
code: Vec<isize>,
mut inputs: VecDeque<isize>,
) -> Vec<ProcessState> {
let mut process_states = vec![];
    for _ in 0..count {
let mut fresh_state = ProcessState {
memory: code.clone(),
input: VecDeque::new(),
iptr: 0,
instruction: 0,
output: 0,
};
fresh_state.input.push_back(inputs.pop_front().unwrap());
process_states.push(fresh_state);
}
process_states
}
struct ProcessState {
memory: Vec<isize>,
input: VecDeque<isize>,
iptr: usize,
instruction: isize,
output: isize,
}
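// Each process runs until it either emits an output (opcode 4) or halts
// (opcode 99); `run_to_interrupt` returns at that point so the processes can
// be chained in a feedback loop, with each output fed into the next input.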
impl ProcessState {
fn run_to_interrupt(&mut self) {
while self.memory[self.iptr] != TERMINATE {
let instruction = self.memory[self.iptr];
let (op, mode_1, mode_2, mode_3) = extract_modes(instruction);
//println!("running {} @ {}", instruction, self.iptr);
match op {
1 => {
let a = get_val(mode_1, self.iptr + 1, self.memory.to_vec());
let b = get_val(mode_2, self.iptr + 2, self.memory.to_vec());
let out = get_val(1, self.iptr + 3, self.memory.to_vec());
self.memory[out as usize] = a + b;
//println!("1@{} {}+{}= {}=>{}", self.iptr, a, b, a + b, out);
self.iptr += 4;
}
2 => {
let a = get_val(mode_1, self.iptr + 1, self.memory.to_vec());
let b = get_val(mode_2, self.iptr + 2, self.memory.to_vec());
let out = get_val(1, self.iptr + 3, self.memory.to_vec());
self.memory[out as usize] = a * b;
//println!("2@{} {}*{}={}=>@{}", self.iptr, a, b, a * b, out);
self.iptr += 4;
}
3 => {
let out = get_val(1, self.iptr + 1, self.memory.to_vec());
self.memory[out as usize] = self.input.pop_front().unwrap();
println!(
"3 @ {} self.input> {} into {}",
self.iptr, self.memory[out as usize], out
);
self.iptr += 2;
}
4 => {
let a = get_val(mode_1, self.iptr + 1, self.memory.to_vec());
println!("4 @ {} output> {}", self.iptr, a);
self.output = a;
self.iptr += 2;
// interrupt out of here
break;
}
5 => {
let a = get_val(mode_1, self.iptr + 1, self.memory.to_vec());
if a != 0 {
let new_iptr =
get_val(mode_2, self.iptr + 2, self.memory.to_vec()) as usize;
//println!("5@{} -> jumping to {}", self.iptr, new_self.iptr);
self.iptr = new_iptr
} else {
//println!("5@{} -> not jumping", self.iptr);
self.iptr += 3;
}
}
6 => {
let a = get_val(mode_1, self.iptr + 1, self.memory.to_vec());
if a == 0 {
let new_iptr =
get_val(mode_2, self.iptr + 2, self.memory.to_vec()) as usize;
//println!("6@{} -> jumping to {}", self.iptr, new_self.iptr);
self.iptr = new_iptr;
} else {
self.iptr += 3;
//println!("6@{} -> not jumping", self.iptr);
}
}
7 => {
let a = get_val(mode_1, self.iptr + 1, self.memory.to_vec());
let b = get_val(mode_2, self.iptr + 2, self.memory.to_vec());
let out = get_val(1, self.iptr + 3, self.memory.to_vec());
//println!("7@{} {}<{} -> {}", self.iptr, a, b, out);
if a < b {
self.memory[out as usize] = 1;
} else {
self.memory[out as usize] = 0;
}
self.iptr += 4;
}
8 => {
let a = get_val(mode_1, self.iptr + 1, self.memory.to_vec());
let b = get_val(mode_2, self.iptr + 2, self.memory.to_vec());
let out = get_val(1, self.iptr + 3, self.memory.to_vec());
//println!("8@{} {}=={} -> {}", self.iptr, a, b, out);
if a == b {
self.memory[out as usize] = 1;
} else {
self.memory[out as usize] = 0;
}
self.iptr += 4;
}
_ => {
panic!("illegal instruction {} @ {}", instruction, self.iptr);
}
}
}
self.instruction = self.memory[self.iptr];
}
}
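// Note on the interrupt model above: run_to_interrupt returns either right after
// an output instruction (opcode 4) or with the instruction pointer parked on a
// halt (99). Callers tell the two apart by checking `instruction == TERMINATE`
// after the call, which is what the feedback loop in main() relies on.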
fn main() -> std::io::Result<()> {
let mut file = File::open("input.txt")?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let tokens: Vec<isize> = contents
.trim()
.split(',')
.map(|c| c.parse::<isize>().unwrap())
.collect();
// part 1
/*
let permutations = {
let mut output = vec![];
permutate(&mut vec![0, 1, 2, 3, 4], 0, 4, &mut output);
output
};
let mut best_score = 0;
for phase_state in permutations.iter() {
let mut prev_output = 0;
println!("permutation {:?}", phase_state);
let mut phase_state = phase_state.clone();
for _ in 0..5 {
let mut input = VecDeque::new();
input.push_back(phase_state.pop_front().unwrap());
input.push_back(prev_output);
prev_output = run_machine(tokens.clone(), input.clone());
}
if prev_output > best_score {
best_score = prev_output;
}
}
println!("p1 result: {}", best_score);
*/
// part 2
let phase_states = {
let mut output = vec![];
permutate(&mut vec![5, 6, 7, 8, 9], 0, 4, &mut output);
output
};
// initialize software states
let process_count: usize = 5;
let mut best_score = 0;
for phase_state in phase_states.iter() {
let mut processes =
spawn_processes(process_count as isize, tokens.clone(), phase_state.clone());
println!("testing phase state {:?}", phase_state);
let mut prev_output = 0;
let mut i = 0;
let mut terminations = 0;
loop {
println!("engine {}", i);
let process_state = &mut processes[i];
process_state.input.push_back(prev_output);
process_state.run_to_interrupt();
if process_state.instruction == TERMINATE {
terminations += 1;
println!("TERMINATE {}", terminations);
}
prev_output = process_state.output;
if terminations == process_count {
break;
}
i = (i + 1) % process_count;
}
if prev_output > best_score {
best_score = prev_output;
}
}
println!("p2 result: {}", best_score);
Ok(())
}
| 34.158301 | 90 | 0.450661 |
f5a956cf876538e694f60a8a7e5d0df439264465 | 1,123 | // This file is auto generated by [`cg`] from [`schema`].
//
// **DO NOT EDIT THIS FILE**,
//
// Edit `cg` or `schema` instead.
//
// [cg]: https://github.com/teloxide/cg
// [`schema`]: https://github.com/WaffleLapkin/tg-methods-schema
use serde::Serialize;
use crate::types::{ChatId, InlineKeyboardMarkup, Poll};
impl_payload! {
/// Use this method to stop a poll which was sent by the bot. On success, the stopped Poll with the final results is returned.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize)]
pub StopPoll (StopPollSetters) => Poll {
required {
/// Unique identifier for the target chat or username of the target channel (in the format `@channelusername`).
pub chat_id: ChatId [into],
/// Identifier of the message to edit
pub message_id: i32,
}
optional {
/// A JSON-serialized object for an [inline keyboard].
///
/// [inline keyboard]: https://core.telegram.org/bots#inline-keyboards-and-on-the-fly-updating
pub reply_markup: InlineKeyboardMarkup,
}
}
}
| 36.225806 | 130 | 0.62244 |
1dc9d91040868988b49dfe4e900f75fcde30b000 | 12,523 | use std::convert::TryFrom;
use std::error::Error;
use std::fmt;
use std::str::FromStr;
use crate::regexp;
/// NAME_TOTAL_LENGTH_MAX is the maximum total number of characters in a repository name.
const NAME_TOTAL_LENGTH_MAX: usize = 255;
/// Reasons that parsing a string as a Reference can fail.
#[derive(Debug, PartialEq, Eq)]
pub enum ParseError {
/// Invalid checksum digest format
DigestInvalidFormat,
/// Invalid checksum digest length
DigestInvalidLength,
/// Unsupported digest algorithm
DigestUnsupported,
/// Repository name must be lowercase
NameContainsUppercase,
/// Repository name must have at least one component
NameEmpty,
/// Repository name must not be more than NAME_TOTAL_LENGTH_MAX characters
NameTooLong,
/// Invalid reference format
ReferenceInvalidFormat,
/// Invalid tag format
TagInvalidFormat,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ParseError::DigestInvalidFormat => write!(f, "invalid checksum digest format"),
ParseError::DigestInvalidLength => write!(f, "invalid checksum digest length"),
ParseError::DigestUnsupported => write!(f, "unsupported digest algorithm"),
ParseError::NameContainsUppercase => write!(f, "repository name must be lowercase"),
ParseError::NameEmpty => write!(f, "repository name must have at least one component"),
ParseError::NameTooLong => write!(
f,
"repository name must not be more than {} characters",
NAME_TOTAL_LENGTH_MAX
),
ParseError::ReferenceInvalidFormat => write!(f, "invalid reference format"),
ParseError::TagInvalidFormat => write!(f, "invalid tag format"),
}
}
}
impl Error for ParseError {}
/// Reference provides a general type to represent any way of referencing images within an OCI registry.
///
/// # Examples
///
/// Parsing a tagged image reference:
///
/// ```
/// use oci_distribution::Reference;
///
/// let reference: Reference = "docker.io/library/hello-world:latest".parse().unwrap();
///
/// assert_eq!("docker.io/library/hello-world:latest", reference.whole().as_str());
/// assert_eq!("docker.io", reference.registry());
/// assert_eq!("library/hello-world", reference.repository());
/// assert_eq!(Some("latest"), reference.tag());
/// assert_eq!(None, reference.digest());
/// ```
#[derive(Clone, Hash, PartialEq, Eq)]
pub struct Reference {
registry: String,
repository: String,
tag: Option<String>,
digest: Option<String>,
}
impl Reference {
/// Resolve the registry address of a given `Reference`.
///
    /// Some registries, such as docker.io, use a different address for the actual
/// registry. This function implements such redirection.
pub fn resolve_registry(&self) -> &str {
let registry = self.registry();
        match registry {
            "docker.io" => "registry-1.docker.io",
            _ => registry,
        }
}
/// registry returns the name of the registry.
pub fn registry(&self) -> &str {
&self.registry
}
/// repository returns the name of the repository.
pub fn repository(&self) -> &str {
&self.repository
}
/// tag returns the object's tag, if present.
pub fn tag(&self) -> Option<&str> {
self.tag.as_deref()
}
/// digest returns the object's digest, if present.
pub fn digest(&self) -> Option<&str> {
self.digest.as_deref()
}
/// full_name returns the full repository name and path.
fn full_name(&self) -> String {
if self.registry() == "" {
self.repository().to_string()
} else {
format!("{}/{}", self.registry(), self.repository())
}
}
/// whole returns the whole reference.
pub fn whole(&self) -> String {
let mut s = self.full_name();
if let Some(t) = self.tag() {
if !s.is_empty() {
s.push(':');
}
s.push_str(t);
}
if let Some(d) = self.digest() {
if !s.is_empty() {
s.push('@');
}
s.push_str(d);
}
s
}
}
impl std::fmt::Debug for Reference {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.whole())
}
}
impl fmt::Display for Reference {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.whole())
}
}
impl FromStr for Reference {
type Err = ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Reference::try_from(s)
}
}
impl TryFrom<String> for Reference {
type Error = ParseError;
fn try_from(s: String) -> Result<Self, Self::Error> {
if s.is_empty() {
return Err(ParseError::NameEmpty);
}
lazy_static! {
static ref RE: regex::Regex = regexp::must_compile(regexp::REFERENCE_REGEXP);
};
        let captures = match RE.captures(&s) {
            Some(caps) => caps,
            None => return Err(ParseError::ReferenceInvalidFormat),
        };
let name = &captures[1];
let tag = captures.get(2).map(|m| m.as_str().to_owned());
let digest = captures.get(3).map(|m| m.as_str().to_owned());
let (registry, repository) = split_domain(name);
let reference = Reference {
registry,
repository,
tag,
digest,
};
if reference.repository().len() > NAME_TOTAL_LENGTH_MAX {
return Err(ParseError::NameTooLong);
}
        // Digests must always be hex-encoded, ensuring that their hex portion will always be
// size*2
        if let Some(d) = reference.digest() {
// FIXME: we should actually separate the algorithm from the digest
// using regular expressions. This won't hold up if we support an
// algorithm more or less than 6 characters like sha1024.
if d.len() < 8 {
return Err(ParseError::DigestInvalidFormat);
}
let algo = &d[0..6];
let digest = &d[7..];
match algo {
"sha256" => {
if digest.len() != 64 {
return Err(ParseError::DigestInvalidLength);
}
}
"sha384" => {
if digest.len() != 96 {
return Err(ParseError::DigestInvalidLength);
}
}
"sha512" => {
if digest.len() != 128 {
return Err(ParseError::DigestInvalidLength);
}
}
_ => return Err(ParseError::DigestUnsupported),
}
}
Ok(reference)
}
}
impl TryFrom<&str> for Reference {
type Error = ParseError;
fn try_from(string: &str) -> Result<Self, Self::Error> {
TryFrom::try_from(string.to_owned())
}
}
impl From<Reference> for String {
fn from(reference: Reference) -> Self {
reference.whole()
}
}
fn split_domain(name: &str) -> (String, String) {
lazy_static! {
static ref RE: regex::Regex = regexp::must_compile(regexp::ANCHORED_NAME_REGEXP);
};
    let captures = match RE.captures(name) {
        Some(caps) => caps,
        None => return ("".to_owned(), name.to_owned()),
    };
if let Some(repository) = captures.get(2).map(|m| m.as_str().to_owned()) {
let registry = captures
.get(1)
.map_or("".to_owned(), |m| m.as_str().to_owned());
return (registry, repository);
}
("".to_owned(), name.to_owned())
}
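// Sketch of how the capture groups split a name, consistent with the doc
// example on `Reference` above:
// split_domain("docker.io/library/hello-world") == ("docker.io".into(), "library/hello-world".into())
// split_domain("hello-world") == ("".into(), "hello-world".into())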
#[cfg(test)]
mod test {
use super::*;
mod parse {
use super::*;
use rstest::rstest;
#[rstest(input, registry, repository, tag, digest,
case("test_com", "", "test_com", None, None),
case("test.com:tag", "", "test.com", Some("tag"), None),
case("test.com:5000", "", "test.com", Some("5000"), None),
case("test.com/repo:tag", "test.com", "repo", Some("tag"), None),
case("test:5000/repo", "test:5000", "repo", None, None),
case("test:5000/repo:tag", "test:5000", "repo", Some("tag"), None),
case("test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "test:5000", "repo", None, Some("sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")),
case("test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "test:5000", "repo", Some("tag"), Some("sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")),
case("lowercase:Uppercase", "", "lowercase", Some("Uppercase"), None),
case("sub-dom1.foo.com/bar/baz/quux", "sub-dom1.foo.com", "bar/baz/quux", None, None),
case("sub-dom1.foo.com/bar/baz/quux:some-long-tag", "sub-dom1.foo.com", "bar/baz/quux", Some("some-long-tag"), None),
case("b.gcr.io/test.example.com/my-app:test.example.com", "b.gcr.io", "test.example.com/my-app", Some("test.example.com"), None),
// ☃.com in punycode
case("xn--n3h.com/myimage:xn--n3h.com", "xn--n3h.com", "myimage", Some("xn--n3h.com"), None),
// 🐳.com in punycode
case("xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "xn--7o8h.com", "myimage", Some("xn--7o8h.com"), Some("sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")),
case("foo_bar.com:8080", "", "foo_bar.com", Some("8080"), None),
case("foo/foo_bar.com:8080", "foo", "foo_bar.com", Some("8080"), None),
)]
fn parse_good_reference(
input: &str,
registry: &str,
repository: &str,
tag: Option<&str>,
digest: Option<&str>,
) {
let reference = Reference::try_from(input).expect("could not parse reference");
assert_eq!(registry, reference.registry());
assert_eq!(repository, reference.repository());
assert_eq!(tag, reference.tag());
assert_eq!(digest, reference.digest());
assert_eq!(input, reference.whole());
}
#[rstest(input, err,
case("", ParseError::NameEmpty),
case(":justtag", ParseError::ReferenceInvalidFormat),
case("@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", ParseError::ReferenceInvalidFormat),
case("repo@sha256:ffffffffffffffffffffffffffffffffff", ParseError::DigestInvalidLength),
case("validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", ParseError::DigestUnsupported),
// FIXME: should really pass a ParseError::NameContainsUppercase, but "invalid format" is good enough for now.
case("Uppercase:tag", ParseError::ReferenceInvalidFormat),
// FIXME: "Uppercase" is incorrectly handled as a domain-name here, and therefore passes.
// https://github.com/docker/distribution/blob/master/reference/reference_test.go#L104-L109
// case("Uppercase/lowercase:tag", ParseError::NameContainsUppercase),
// FIXME: should really pass a ParseError::NameContainsUppercase, but "invalid format" is good enough for now.
case("test:5000/Uppercase/lowercase:tag", ParseError::ReferenceInvalidFormat),
case("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", ParseError::NameTooLong),
case("aa/asdf$$^/aa", ParseError::ReferenceInvalidFormat)
)]
fn parse_bad_reference(input: &str, err: ParseError) {
assert_eq!(Reference::try_from(input).unwrap_err(), err)
}
}
}
| 39.012461 | 384 | 0.599377 |
7994f348c7c3663532cb7ec98752cdde6d4e7cb8 | 3,242 | use crate::common::Message;
use arrayvec::ArrayVec;
use bytes::{Bytes, BytesMut};
use derive_more::{Deref, DerefMut};
use ethnum::U256;
use getset::{Getters, MutGetters};
use serde::Serialize;
pub const STACK_SIZE: usize = 1024;
/// EVM stack.
#[derive(Clone, Debug, Default, Serialize)]
pub struct Stack(pub ArrayVec<U256, STACK_SIZE>);
impl Stack {
#[inline(always)]
pub const fn new() -> Self {
Self(ArrayVec::new_const())
}
#[inline(always)]
fn get_pos(&self, pos: usize) -> usize {
self.len() - 1 - pos
}
#[inline(always)]
pub fn get(&self, pos: usize) -> &U256 {
&self.0[self.get_pos(pos)]
}
#[inline(always)]
pub fn get_mut(&mut self, pos: usize) -> &mut U256 {
let pos = self.get_pos(pos);
&mut self.0[pos]
}
#[inline(always)]
pub fn len(&self) -> usize {
self.0.len()
}
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
#[inline(always)]
pub fn push(&mut self, v: U256) {
unsafe { self.0.push_unchecked(v) }
}
#[inline(always)]
pub fn pop(&mut self) -> U256 {
self.0.pop().expect("underflow")
}
#[inline(always)]
pub fn swap_top(&mut self, pos: usize) {
let top = self.0.len() - 1;
let pos = self.get_pos(pos);
self.0.swap(top, pos);
}
}
const PAGE_SIZE: usize = 4 * 1024;
#[derive(Clone, Debug, Deref, DerefMut)]
pub struct Memory(BytesMut);
impl Memory {
#[inline(always)]
pub fn new() -> Self {
Self(BytesMut::with_capacity(PAGE_SIZE))
}
#[inline(always)]
pub fn grow(&mut self, size: usize) {
let cap = self.0.capacity();
if size > cap {
let additional_pages = ((size - cap) + PAGE_SIZE - 1) / PAGE_SIZE;
self.0.reserve(PAGE_SIZE * additional_pages);
}
self.0.resize(size, 0);
}
}
impl Default for Memory {
fn default() -> Self {
Self::new()
}
}
/// EVM execution state.
#[derive(Clone, Debug, Getters, MutGetters)]
pub struct ExecutionState {
#[getset(get = "pub", get_mut = "pub")]
pub(crate) gas_left: i64,
#[getset(get = "pub", get_mut = "pub")]
pub(crate) stack: Stack,
#[getset(get = "pub", get_mut = "pub")]
pub(crate) memory: Memory,
pub(crate) message: Message,
#[getset(get = "pub", get_mut = "pub")]
pub(crate) return_data: Bytes,
pub(crate) output_data: Bytes,
}
impl ExecutionState {
pub fn new(message: Message) -> Self {
Self {
gas_left: message.gas,
stack: Stack::default(),
memory: Memory::new(),
message,
return_data: Default::default(),
output_data: Bytes::new(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn stack() {
let mut stack = Stack::default();
let items: [u128; 4] = [0xde, 0xad, 0xbe, 0xef];
for (i, item) in items.iter().copied().enumerate() {
stack.push(item.into());
assert_eq!(stack.len(), i + 1);
}
assert_eq!(*stack.get(2), 0xad);
assert_eq!(stack.pop(), 0xef);
assert_eq!(*stack.get(2), 0xde);
}
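    // A sketch of the expected grow() behaviour: the new region is zero-filled
    // and the length tracks the requested size exactly.
    #[test]
    fn memory_grow_zero_fills() {
        let mut mem = Memory::new();
        mem.grow(5000);
        assert_eq!(mem.len(), 5000);
        assert!(mem.iter().all(|b| *b == 0));
    }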
}
| 22.671329 | 78 | 0.54781 |
fe53879bf7616fd62e00f3d14cdefa267ee3ffdc | 10,260 | use std::{convert::TryInto, error::Error, fmt, str::FromStr};
use bech32::{self, FromBase32, ToBase32, Variant};
use crate::kind::unified::Encoding;
use crate::{kind::*, AddressKind, Network, ZcashAddress};
/// An error while attempting to parse a string as a Zcash address.
#[derive(Debug, PartialEq)]
pub enum ParseError {
/// The string is an invalid encoding.
InvalidEncoding,
/// The string is not a Zcash address.
NotZcash,
/// Errors specific to unified addresses.
Unified(unified::ParseError),
}
impl From<unified::ParseError> for ParseError {
fn from(e: unified::ParseError) -> Self {
match e {
unified::ParseError::InvalidEncoding(_) => Self::InvalidEncoding,
unified::ParseError::UnknownPrefix(_) => Self::NotZcash,
_ => Self::Unified(e),
}
}
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ParseError::InvalidEncoding => write!(f, "Invalid encoding"),
ParseError::NotZcash => write!(f, "Not a Zcash address"),
ParseError::Unified(e) => e.fmt(f),
}
}
}
impl Error for ParseError {}
impl FromStr for ZcashAddress {
type Err = ParseError;
/// Attempts to parse the given string as a Zcash address.
fn from_str(s: &str) -> Result<Self, Self::Err> {
// Remove leading and trailing whitespace, to handle copy-paste errors.
let s = s.trim();
// Try decoding as a unified address
match unified::Address::decode(s) {
Ok((net, data)) => {
return Ok(ZcashAddress {
net,
kind: AddressKind::Unified(data),
});
}
Err(unified::ParseError::NotUnified) => {
// allow decoding to fall through to Sapling/Transparent
}
Err(e) => {
return Err(ParseError::from(e));
}
}
// Try decoding as a Sapling address (Bech32)
if let Ok((hrp, data, Variant::Bech32)) = bech32::decode(s) {
// If we reached this point, the encoding is supposed to be valid Bech32.
let data = Vec::<u8>::from_base32(&data).map_err(|_| ParseError::InvalidEncoding)?;
let net = match hrp.as_str() {
sapling::MAINNET => Network::Main,
sapling::TESTNET => Network::Test,
sapling::REGTEST => Network::Regtest,
// We will not define new Bech32 address encodings.
_ => {
return Err(ParseError::NotZcash);
}
};
return data[..]
.try_into()
.map(AddressKind::Sapling)
.map_err(|_| ParseError::InvalidEncoding)
.map(|kind| ZcashAddress { net, kind });
}
// The rest use Base58Check.
        if let Ok(decoded) = bs58::decode(s).with_check(None).into_vec() {
            // Guard against payloads too short to carry a two-byte prefix, which
            // would otherwise panic on the slice below.
            if decoded.len() < 2 {
                return Err(ParseError::NotZcash);
            }
            let net = match decoded[..2].try_into().unwrap() {
sprout::MAINNET | p2pkh::MAINNET | p2sh::MAINNET => Network::Main,
sprout::TESTNET | p2pkh::TESTNET | p2sh::TESTNET => Network::Test,
// We will not define new Base58Check address encodings.
_ => return Err(ParseError::NotZcash),
};
return match decoded[..2].try_into().unwrap() {
sprout::MAINNET | sprout::TESTNET => {
decoded[2..].try_into().map(AddressKind::Sprout)
}
p2pkh::MAINNET | p2pkh::TESTNET => decoded[2..].try_into().map(AddressKind::P2pkh),
p2sh::MAINNET | p2sh::TESTNET => decoded[2..].try_into().map(AddressKind::P2sh),
_ => unreachable!(),
}
.map_err(|_| ParseError::InvalidEncoding)
.map(|kind| ZcashAddress { kind, net });
};
// If it's not valid Bech32, Bech32m, or Base58Check, it's not a Zcash address.
Err(ParseError::NotZcash)
}
}
fn encode_bech32(hrp: &str, data: &[u8]) -> String {
bech32::encode(hrp, data.to_base32(), Variant::Bech32).expect("hrp is invalid")
}
fn encode_b58(prefix: [u8; 2], data: &[u8]) -> String {
let mut bytes = Vec::with_capacity(2 + data.len());
bytes.extend_from_slice(&prefix);
bytes.extend_from_slice(data);
bs58::encode(bytes).with_check().into_string()
}
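// Sketch: encode_b58(p2pkh::MAINNET, &[0; 20]) produces the Base58Check string
// "t1Hsc1LR8yKnbbe3twRp88p6vFfC5t7DLbs" exercised by the transparent-address
// tests below.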
impl fmt::Display for ZcashAddress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded = match &self.kind {
AddressKind::Sprout(data) => encode_b58(
match self.net {
Network::Main => sprout::MAINNET,
Network::Test | Network::Regtest => sprout::TESTNET,
},
data,
),
AddressKind::Sapling(data) => encode_bech32(
match self.net {
Network::Main => sapling::MAINNET,
Network::Test => sapling::TESTNET,
Network::Regtest => sapling::REGTEST,
},
data,
),
AddressKind::Unified(addr) => addr.encode(&self.net),
AddressKind::P2pkh(data) => encode_b58(
match self.net {
Network::Main => p2pkh::MAINNET,
Network::Test | Network::Regtest => p2pkh::TESTNET,
},
data,
),
AddressKind::P2sh(data) => encode_b58(
match self.net {
Network::Main => p2sh::MAINNET,
Network::Test | Network::Regtest => p2sh::TESTNET,
},
data,
),
};
write!(f, "{}", encoded)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::kind::unified;
fn encoding(encoded: &str, decoded: ZcashAddress) {
assert_eq!(decoded.to_string(), encoded);
assert_eq!(encoded.parse(), Ok(decoded));
}
#[test]
fn sprout() {
encoding(
"zc8E5gYid86n4bo2Usdq1cpr7PpfoJGzttwBHEEgGhGkLUg7SPPVFNB2AkRFXZ7usfphup5426dt1buMmY3fkYeRrQGLa8y",
ZcashAddress { net: Network::Main, kind: AddressKind::Sprout([0; 64]) },
);
encoding(
"ztJ1EWLKcGwF2S4NA17pAJVdco8Sdkz4AQPxt1cLTEfNuyNswJJc2BbBqYrsRZsp31xbVZwhF7c7a2L9jsF3p3ZwRWpqqyS",
ZcashAddress { net: Network::Test, kind: AddressKind::Sprout([0; 64]) },
);
}
#[test]
fn sapling() {
encoding(
"zs1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpq6d8g",
ZcashAddress {
net: Network::Main,
kind: AddressKind::Sapling([0; 43]),
},
);
encoding(
"ztestsapling1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqfhgwqu",
ZcashAddress {
net: Network::Test,
kind: AddressKind::Sapling([0; 43]),
},
);
encoding(
"zregtestsapling1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqknpr3m",
ZcashAddress {
net: Network::Regtest,
kind: AddressKind::Sapling([0; 43]),
},
);
}
#[test]
fn unified() {
encoding(
"u1qpatys4zruk99pg59gcscrt7y6akvl9vrhcfyhm9yxvxz7h87q6n8cgrzzpe9zru68uq39uhmlpp5uefxu0su5uqyqfe5zp3tycn0ecl",
ZcashAddress {
net: Network::Main,
kind: AddressKind::Unified(unified::Address(vec![unified::address::Receiver::Sapling([0; 43])])),
},
);
encoding(
"utest10c5kutapazdnf8ztl3pu43nkfsjx89fy3uuff8tsmxm6s86j37pe7uz94z5jhkl49pqe8yz75rlsaygexk6jpaxwx0esjr8wm5ut7d5s",
ZcashAddress {
net: Network::Test,
kind: AddressKind::Unified(unified::Address(vec![unified::address::Receiver::Sapling([0; 43])])),
},
);
encoding(
"uregtest15xk7vj4grjkay6mnfl93dhsflc2yeunhxwdh38rul0rq3dfhzzxgm5szjuvtqdha4t4p2q02ks0jgzrhjkrav70z9xlvq0plpcjkd5z3",
ZcashAddress {
net: Network::Regtest,
kind: AddressKind::Unified(unified::Address(vec![unified::address::Receiver::Sapling([0; 43])])),
},
);
let badencoded = "uinvalid1ck5navqwcng43gvsxwrxsplc22p7uzlcag6qfa0zh09e87efq6rq8wsnv25umqjjravw70rl994n5ueuhza2fghge5gl7zrl2qp6cwmp";
assert_eq!(
badencoded.parse::<ZcashAddress>(),
Err(ParseError::NotZcash)
);
}
#[test]
fn transparent() {
encoding(
"t1Hsc1LR8yKnbbe3twRp88p6vFfC5t7DLbs",
ZcashAddress {
net: Network::Main,
kind: AddressKind::P2pkh([0; 20]),
},
);
encoding(
"tm9iMLAuYMzJ6jtFLcA7rzUmfreGuKvr7Ma",
ZcashAddress {
net: Network::Test,
kind: AddressKind::P2pkh([0; 20]),
},
);
encoding(
"t3JZcvsuaXE6ygokL4XUiZSTrQBUoPYFnXJ",
ZcashAddress {
net: Network::Main,
kind: AddressKind::P2sh([0; 20]),
},
);
encoding(
"t26YoyZ1iPgiMEWL4zGUm74eVWfhyDMXzY2",
ZcashAddress {
net: Network::Test,
kind: AddressKind::P2sh([0; 20]),
},
);
}
#[test]
fn whitespace() {
assert_eq!(
" t1Hsc1LR8yKnbbe3twRp88p6vFfC5t7DLbs".parse(),
Ok(ZcashAddress {
net: Network::Main,
kind: AddressKind::P2pkh([0; 20])
}),
);
assert_eq!(
"t1Hsc1LR8yKnbbe3twRp88p6vFfC5t7DLbs ".parse(),
Ok(ZcashAddress {
net: Network::Main,
kind: AddressKind::P2pkh([0; 20])
}),
);
assert_eq!(
"something t1Hsc1LR8yKnbbe3twRp88p6vFfC5t7DLbs".parse::<ZcashAddress>(),
Err(ParseError::NotZcash),
);
}
}
| 34.897959 | 141 | 0.537622 |
e5743f81457b369f8fa386cbaad60f8306825820 | 3,131 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_arrow::arrow::bitmap::MutableBitmap;
use crate::prelude::MutableColumn;
use crate::prelude::Scalar;
use crate::ColumnRef;
use crate::ConstColumn;
use crate::NullableColumn;
use crate::ScalarColumn;
use crate::ScalarColumnBuilder;
pub type NullableColumnBuilder<T> = ColumnBuilderBase<true, T>;
pub type ColumnBuilder<T> = ColumnBuilderBase<false, T>;
/// A builder for a column.
pub struct ColumnBuilderBase<const NULLABLE: bool, T: Scalar> {
builder: <T::ColumnType as ScalarColumn>::Builder,
validity: MutableBitmap,
}
impl<const NULLABLE: bool, T> ColumnBuilderBase<NULLABLE, T>
where T: Scalar
{
pub fn with_capacity(capacity: usize) -> Self {
Self {
builder: <<T::ColumnType as ScalarColumn>::Builder>::with_capacity(capacity),
validity: MutableBitmap::with_capacity(capacity),
}
}
fn build_nonull(&mut self, length: usize) -> ColumnRef {
let size = self.len();
let col = self.builder.to_column();
if length != size && size == 1 {
return Arc::new(ConstColumn::new(col, length));
}
col
}
#[inline]
pub fn len(&self) -> usize {
self.builder.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.builder.len() == 0
}
}
impl<T> ColumnBuilderBase<true, T>
where T: Scalar
{
#[inline]
pub fn build(&mut self, length: usize) -> ColumnRef {
let validity = std::mem::take(&mut self.validity).into();
let column = self.build_nonull(length);
NullableColumn::wrap_inner(column, Some(validity))
}
#[inline]
pub fn append_null(&mut self) {
self.builder.append_default();
self.validity.push(false);
}
#[inline]
pub fn append(&mut self, value: <T as Scalar>::RefType<'_>, valid: bool) {
self.builder.push(value);
self.validity.push(valid);
}
}
impl<T> ColumnBuilderBase<false, T>
where T: Scalar
{
#[inline]
pub fn build(&mut self, length: usize) -> ColumnRef {
self.build_nonull(length)
}
#[inline]
pub fn build_column(&mut self) -> <T as Scalar>::ColumnType {
self.builder.finish()
}
pub fn from_iterator<'a>(
it: impl Iterator<Item = <T as Scalar>::RefType<'a>>,
) -> <T as Scalar>::ColumnType {
<<T as Scalar>::ColumnType as ScalarColumn>::from_iterator(it)
}
#[inline]
pub fn append(&mut self, value: <T as Scalar>::RefType<'_>) {
self.builder.push(value);
}
}
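// Usage sketch (assumes a Scalar impl such as u64 whose RefType<'_> is u64, as
// for the primitive types in this crate):
// let mut builder = NullableColumnBuilder::<u64>::with_capacity(3);
// builder.append(1, true);
// builder.append_null();
// let column = builder.build(2); // nullable column with validity [true, false]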
| 27.707965 | 89 | 0.641967 |
f880b81998c6bfe663808f484c4719769187230a | 1,808 |
pub struct IconOutbond {
props: crate::Props,
}
impl yew::Component for IconOutbond {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24"><rect fill="none" height="24" width="24" x="0" y="0"/><path d="M12,4c-4.41,0-8,3.59-8,8c0,4.41,3.59,8,8,8s8-3.59,8-8C20,7.59,16.41,4,12,4z M13.88,11.54l-4.96,4.96 l-1.41-1.41l4.96-4.96L10.34,8l5.65,0.01L16,13.66L13.88,11.54z" opacity=".3"/><path d="M12,4c4.41,0,8,3.59,8,8s-3.59,8-8,8s-8-3.59-8-8S7.59,4,12,4 M12,2C6.48,2,2,6.48,2,12c0,5.52,4.48,10,10,10s10-4.48,10-10 C22,6.48,17.52,2,12,2L12,2z M13.88,11.54l-4.96,4.96l-1.41-1.41l4.96-4.96L10.34,8l5.65,0.01L16,13.66L13.88,11.54z"/></svg>
</svg>
}
}
}
| 39.304348 | 619 | 0.586283 |
1df6e2b6e59b56503170f803ceecc700fdc3b2b5 | 1,402 | use std::fs::File;
use std::io::BufReader;
use std::sync::Arc;
use arrow2::array::Array;
use arrow2::chunk::Chunk;
use arrow2::error::Result;
use arrow2::io::json::read;
fn read_path(path: &str, projection: Option<Vec<&str>>) -> Result<Chunk<Arc<dyn Array>>> {
    // Example of reading an NDJSON file.
let mut reader = BufReader::new(File::open(path)?);
let fields = read::infer_and_reset(&mut reader, None)?;
let fields = if let Some(projection) = projection {
fields
.into_iter()
.filter(|field| projection.contains(&field.name.as_ref()))
.collect()
} else {
fields
};
// at most 1024 rows. This container can be re-used across batches.
let mut rows = vec![String::default(); 1024];
// Reads up to 1024 rows.
// this is IO-intensive and performs minimal CPU work. In particular,
// no deserialization is performed.
let read = read::read_rows(&mut reader, &mut rows)?;
let rows = &rows[..read];
// deserialize `rows` into `Chunk`. This is CPU-intensive, has no IO,
// and can be performed on a different thread pool via a channel.
read::deserialize(rows, &fields)
}
fn main() -> Result<()> {
use std::env;
let args: Vec<String> = env::args().collect();
let file_path = &args[1];
let batch = read_path(file_path, None)?;
println!("{:#?}", batch);
Ok(())
}
| 28.612245 | 90 | 0.617689 |
16ef61025b33762193cc8a4bf957a5dc324ecc6a | 207 | use std::collections::HashMap;
fn main() {
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
println!("{:?}", scores);
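    // Reading a value back: `get` borrows the key and returns an Option.
    assert_eq!(scores.get("Blue"), Some(&10));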
}
| 18.818182 | 46 | 0.594203 |
f49b293300696ad45debec8022a958ce3dae6552 | 3,222 | use super::parameter::Parameter;
use indexmap::IndexMap;
use stdweb::traits::IEvent;
use yew::prelude::*;
use yew::virtual_dom::VNode;
#[derive(Clone, Default, PartialEq)]
pub struct Properties {
pub name: String,
pub is_expanded: bool,
pub parameters: IndexMap<String, bool>,
pub onexpandchange: Option<Callback<bool>>,
pub onchange: Option<Callback<(String, bool)>>,
}
pub struct Feature {
name: String,
is_expanded: bool,
parameters: IndexMap<String, bool>,
onexpandchange: Option<Callback<bool>>,
onchange: Option<Callback<(String, bool)>>,
}
pub enum Message {
Toggle,
SetParameter { name: String, is_selected: bool },
}
impl Component for Feature {
type Message = Message;
type Properties = Properties;
fn create(properties: Self::Properties, _link: ComponentLink<Self>) -> Self {
Self {
name: properties.name,
is_expanded: properties.is_expanded,
parameters: properties.parameters,
onexpandchange: properties.onexpandchange,
onchange: properties.onchange,
}
}
fn update(&mut self, message: Self::Message) -> ShouldRender {
match message {
Message::Toggle => {
if let Some(ref mut callback) = self.onexpandchange {
callback.emit(!self.is_expanded);
}
}
Message::SetParameter { name, is_selected } => {
if let Some(ref mut callback) = self.onchange {
callback.emit((name, is_selected));
}
}
}
false
}
fn change(&mut self, properties: Self::Properties) -> ShouldRender {
let changed = self.name != properties.name
|| self.is_expanded != properties.is_expanded
|| self.parameters != properties.parameters;
self.name = properties.name;
self.is_expanded = properties.is_expanded;
self.parameters = properties.parameters;
self.onchange = properties.onchange;
self.onexpandchange = properties.onexpandchange;
changed
}
}
impl Renderable<Feature> for Feature {
fn view(&self) -> Html<Self> {
let mut tag = html! {
<details>
<summary onclick=|e| { e.prevent_default(); Message::Toggle },>{ &self.name }</summary>
// capitalize `type` because otherwise yew eats it
<div Type="checkbox", class="sdpi-item",>
<div class="sdpi-item-value min100",>
{ for self.parameters.iter().map(|(name, is_selected)| {
let cb_name = name.clone();
html! {
<Parameter: name=name, is_selected=is_selected, onchange=move |is_selected| { Message::SetParameter { name: cb_name.clone(), is_selected } }, />
}
}) }
</div>
</div>
</details>
};
if self.is_expanded {
if let VNode::VTag(ref mut vtag) = tag {
vtag.add_attribute("open", &"");
}
}
tag
}
}
| 32.877551 | 176 | 0.55121 |
03b7b4c53ce1487ffc3ff81e74fa1d57f4a0325e | 1,046 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
contract_event::{ContractEvent, EventWithProof},
event::EventKey,
};
use lcs::test_helpers::assert_canonical_encode_decode;
use move_core_types::language_storage::TypeTag;
use proptest::prelude::*;
proptest! {
#[test]
fn event_lcs_roundtrip(event in any::<ContractEvent>()) {
assert_canonical_encode_decode(event);
}
#[test]
fn event_with_proof_lcs_roundtrip(event_with_proof in any::<EventWithProof>()) {
assert_canonical_encode_decode(event_with_proof);
}
}
#[test]
fn test_event_json_serialize() {
let event_key = EventKey::random();
let contract_event = ContractEvent::new(event_key, 0, TypeTag::Address, vec![0u8]);
let contract_json =
serde_json::to_string(&contract_event).expect("event serialize to json should succeed.");
let contract_event2: ContractEvent = serde_json::from_str(contract_json.as_str()).unwrap();
assert_eq!(contract_event, contract_event2)
}
| 31.69697 | 97 | 0.726577 |
11e691a7f204b5cac069f85442acd224905be98a | 109 | pub mod class;
pub mod control_flow;
pub mod definition;
pub mod error;
pub mod function;
pub mod operation;
| 15.571429 | 21 | 0.779817 |
8f29dd5bbe5a727bd0d384555da657e56dfec8f3 | 1,772 | #[doc = r" Value read from the register"]
pub struct R {
bits: u16,
}
impl super::PLASC {
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R { bits: self.register.get() }
}
}
#[doc = "Possible values of the field `ASC`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ASCR {
#[doc = "A bus slave connection to AXBS input port n is absent"]
_0,
#[doc = "A bus slave connection to AXBS input port n is present"]
_1,
#[doc = r" Reserved"]
_Reserved(u8),
}
impl ASCR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
ASCR::_0 => 0,
ASCR::_1 => 1,
ASCR::_Reserved(bits) => bits,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> ASCR {
match value {
0 => ASCR::_0,
1 => ASCR::_1,
i => ASCR::_Reserved(i),
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ASCR::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ASCR::_1
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
#[doc = "Bits 0:7 - Each bit in the ASC field indicates whether there is a corresponding connection to the crossbar switch's slave input port."]
#[inline]
pub fn asc(&self) -> ASCR {
ASCR::_from({
const MASK: u8 = 255;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u16) as u8
})
}
}
| 25.681159 | 148 | 0.51298 |
6ad5e6c04c9ce705e829879b756a4a6a0bf2ed7c | 31,145 | use serde::{Deserialize, Serialize};
use serde_json::{from_value, Value};
use std::convert::TryFrom;
use crate::errors::*;
#[derive(Deserialize, Clone)]
pub struct Empty {}
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ServerTime {
pub server_time: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ExchangeInformation {
pub timezone: String,
pub server_time: u64,
pub rate_limits: Vec<RateLimit>,
pub symbols: Vec<Symbol>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct RateLimit {
pub rate_limit_type: String,
pub interval: String,
pub interval_num: u16,
pub limit: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Symbol {
pub symbol: String,
pub status: String,
pub base_asset: String,
pub base_asset_precision: u64,
pub quote_asset: String,
pub quote_precision: u64,
pub order_types: Vec<String>,
pub iceberg_allowed: bool,
pub is_spot_trading_allowed: bool,
pub is_margin_trading_allowed: bool,
pub filters: Vec<Filters>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "filterType")]
pub enum Filters {
#[serde(rename = "PRICE_FILTER")]
#[serde(rename_all = "camelCase")]
PriceFilter {
min_price: String,
max_price: String,
tick_size: String,
},
#[serde(rename = "PERCENT_PRICE")]
#[serde(rename_all = "camelCase")]
PercentPrice {
multiplier_up: String,
multiplier_down: String,
avg_price_mins: Option<f64>,
},
#[serde(rename = "LOT_SIZE")]
#[serde(rename_all = "camelCase")]
LotSize {
min_qty: String,
max_qty: String,
step_size: String,
},
#[serde(rename = "MIN_NOTIONAL")]
#[serde(rename_all = "camelCase")]
MinNotional {
notional: Option<String>,
min_notional: Option<String>,
apply_to_market: Option<bool>,
avg_price_mins: Option<f64>,
},
#[serde(rename = "ICEBERG_PARTS")]
#[serde(rename_all = "camelCase")]
IcebergParts { limit: Option<u16> },
#[serde(rename = "MAX_NUM_ORDERS")]
#[serde(rename_all = "camelCase")]
MaxNumOrders { max_num_orders: Option<u16> },
#[serde(rename = "MAX_NUM_ALGO_ORDERS")]
#[serde(rename_all = "camelCase")]
MaxNumAlgoOrders { max_num_algo_orders: Option<u16> },
#[serde(rename = "MAX_NUM_ICEBERG_ORDERS")]
#[serde(rename_all = "camelCase")]
MaxNumIcebergOrders { max_num_iceberg_orders: u16 },
#[serde(rename = "MAX_POSITION")]
#[serde(rename_all = "camelCase")]
MaxPosition { max_position: String },
#[serde(rename = "MARKET_LOT_SIZE")]
#[serde(rename_all = "camelCase")]
MarketLotSize {
min_qty: String,
max_qty: String,
step_size: String,
},
}
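// Example of the JSON shape (sketch) that the PriceFilter variant above
// deserializes from, using the camelCase field names declared by serde:
// {"filterType": "PRICE_FILTER", "minPrice": "0.01000000", "maxPrice": "1000000.00000000", "tickSize": "0.01000000"}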
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct AccountInformation {
pub maker_commission: f32,
pub taker_commission: f32,
pub buyer_commission: f32,
pub seller_commission: f32,
pub can_trade: bool,
pub can_withdraw: bool,
pub can_deposit: bool,
pub balances: Vec<Balance>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Balance {
pub asset: String,
pub free: String,
pub locked: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Order {
pub symbol: String,
pub order_id: u64,
pub order_list_id: i64,
pub client_order_id: String,
#[serde(with = "string_or_float")]
pub price: f64,
pub orig_qty: String,
pub executed_qty: String,
pub cummulative_quote_qty: String,
pub status: String,
pub time_in_force: String,
#[serde(rename = "type")]
pub type_name: String,
pub side: String,
#[serde(with = "string_or_float")]
pub stop_price: f64,
pub iceberg_qty: String,
pub time: u64,
pub update_time: u64,
pub is_working: bool,
pub orig_quote_order_qty: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct OrderCanceled {
pub symbol: String,
pub orig_client_order_id: Option<String>,
pub order_id: Option<u64>,
pub client_order_id: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Transaction {
pub symbol: String,
pub order_id: u64,
pub order_list_id: Option<i64>,
pub client_order_id: String,
pub transact_time: u64,
#[serde(with = "string_or_float")]
pub price: f64,
#[serde(with = "string_or_float")]
pub orig_qty: f64,
#[serde(with = "string_or_float")]
pub executed_qty: f64,
#[serde(with = "string_or_float")]
pub cummulative_quote_qty: f64,
#[serde(with = "string_or_float", default = "default_stop_price")]
pub stop_price: f64,
pub status: String,
pub time_in_force: String,
#[serde(rename = "type")]
pub type_name: String,
pub side: String,
pub fills: Option<Vec<FillInfo>>,
}
fn default_stop_price() -> f64 {
0.0
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct FillInfo {
#[serde(with = "string_or_float")]
pub price: f64,
#[serde(with = "string_or_float")]
pub qty: f64,
#[serde(with = "string_or_float")]
pub commission: f64,
pub commission_asset: String,
pub trade_id: Option<u64>,
}
/// Response to a test order (endpoint /api/v3/order/test).
///
/// Currently, the API responds with `{}` on a successful test transaction,
/// hence this struct has no fields.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct TestResponse {}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct OrderBook {
pub last_update_id: u64,
pub bids: Vec<PriceLevel>,
pub asks: Vec<PriceLevel>,
}
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)]
pub struct PriceLevel {
#[serde(with = "string_or_float")]
pub price: f64,
#[serde(with = "string_or_float")]
pub qty: f64,
}
impl PriceLevel {
pub fn new(price: f64, qty: f64) -> PriceLevel {
PriceLevel { price, qty }
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct UserDataStream {
pub listen_key: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Success {}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
#[serde(untagged)]
pub enum Prices {
AllPrices(Vec<SymbolPrice>),
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct SymbolPrice {
pub symbol: String,
#[serde(with = "string_or_float")]
pub price: f64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct AveragePrice {
pub mins: u64,
#[serde(with = "string_or_float")]
pub price: f64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
#[serde(untagged)]
pub enum BookTickers {
AllBookTickers(Vec<Tickers>),
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum KlineSummaries {
AllKlineSummaries(Vec<KlineSummary>),
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Tickers {
pub symbol: String,
#[serde(with = "string_or_float")]
pub bid_price: f64,
#[serde(with = "string_or_float")]
pub bid_qty: f64,
#[serde(with = "string_or_float")]
pub ask_price: f64,
#[serde(with = "string_or_float")]
pub ask_qty: f64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct TradeHistory {
pub id: u64,
#[serde(with = "string_or_float")]
pub price: f64,
#[serde(with = "string_or_float")]
pub qty: f64,
pub commission: String,
pub commission_asset: String,
pub time: u64,
pub is_buyer: bool,
pub is_maker: bool,
pub is_best_match: bool,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct PriceStats {
pub symbol: String,
pub price_change: String,
pub price_change_percent: String,
pub weighted_avg_price: String,
#[serde(with = "string_or_float")]
pub prev_close_price: f64,
#[serde(with = "string_or_float")]
pub last_price: f64,
#[serde(with = "string_or_float")]
pub bid_price: f64,
#[serde(with = "string_or_float")]
pub ask_price: f64,
#[serde(with = "string_or_float")]
pub open_price: f64,
#[serde(with = "string_or_float")]
pub high_price: f64,
#[serde(with = "string_or_float")]
pub low_price: f64,
#[serde(with = "string_or_float")]
pub volume: f64,
pub open_time: u64,
pub close_time: u64,
pub first_id: i64,
pub last_id: i64,
pub count: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct AggTrade {
#[serde(rename = "T")]
pub time: u64,
#[serde(rename = "a")]
pub agg_id: u64,
#[serde(rename = "f")]
pub first_id: u64,
#[serde(rename = "l")]
pub last_id: u64,
#[serde(rename = "m")]
pub maker: bool,
#[serde(rename = "M")]
pub best_match: bool,
#[serde(rename = "p", with = "string_or_float")]
pub price: f64,
#[serde(rename = "q", with = "string_or_float")]
pub qty: f64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct AccountUpdateEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
m: u64,
t: u64,
b: u64,
s: u64,
#[serde(rename = "T")]
t_ignore: bool,
#[serde(rename = "W")]
w_ignore: bool,
#[serde(rename = "D")]
d_ignore: bool,
#[serde(rename = "B")]
pub balance: Vec<EventBalance>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct BalanceUpdateEvent {
#[serde(rename = "B")]
pub balance: Vec<EventBalance>,
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "u")]
pub last_account_update_time: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct EventBalance {
#[serde(rename = "a")]
pub asset: String,
#[serde(rename = "f")]
pub free: String,
#[serde(rename = "l")]
pub locked: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct OrderTradeEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "c")]
pub new_client_order_id: String,
#[serde(rename = "S")]
pub side: String,
#[serde(rename = "o")]
pub order_type: String,
#[serde(rename = "f")]
pub time_in_force: String,
#[serde(rename = "q")]
pub qty: String,
#[serde(rename = "p")]
pub price: String,
#[serde(skip, rename = "P")]
pub p_ignore: String,
#[serde(skip, rename = "F")]
pub f_ignore: String,
#[serde(skip)]
pub g: i32,
#[serde(skip, rename = "C")]
pub c_ignore: Option<String>,
#[serde(rename = "x")]
pub execution_type: String,
#[serde(rename = "X")]
pub order_status: String,
#[serde(rename = "r")]
pub order_reject_reason: String,
#[serde(rename = "i")]
pub order_id: u64,
#[serde(rename = "l")]
pub qty_last_filled_trade: String,
#[serde(rename = "z")]
pub accumulated_qty_filled_trades: String,
#[serde(rename = "L")]
pub price_last_filled_trade: String,
#[serde(rename = "n")]
pub commission: String,
#[serde(skip, rename = "N")]
pub asset_commisioned: Option<String>,
#[serde(rename = "T")]
pub trade_order_time: u64,
#[serde(rename = "t")]
pub trade_id: i64,
#[serde(skip, rename = "I")]
pub i_ignore: u64,
#[serde(skip)]
pub w: bool,
#[serde(rename = "m")]
pub is_buyer_maker: bool,
#[serde(skip, rename = "M")]
pub m_ignore: bool,
}
/// The Aggregate Trade Streams push trade information that is aggregated for a single taker order.
///
/// Stream Name: \<symbol\>@aggTrade
///
/// Update Speed: Real-time
///
/// https://github.com/binance/binance-spot-api-docs/blob/master/web-socket-streams.md#aggregate-trade-streams
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct AggrTradesEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "a")]
pub aggregated_trade_id: u64,
#[serde(rename = "p")]
pub price: String,
#[serde(rename = "q")]
pub qty: String,
#[serde(rename = "f")]
pub first_break_trade_id: u64,
#[serde(rename = "l")]
pub last_break_trade_id: u64,
#[serde(rename = "T")]
pub trade_order_time: u64,
#[serde(rename = "m")]
pub is_buyer_maker: bool,
#[serde(skip, rename = "M")]
pub m_ignore: bool,
}
/// The Trade Streams push raw trade information; each trade has a unique buyer and seller.
///
/// Stream Name: \<symbol\>@trade
///
/// Update Speed: Real-time
///
/// https://github.com/binance/binance-spot-api-docs/blob/master/web-socket-streams.md#trade-streams
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct TradeEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "t")]
pub trade_id: u64,
#[serde(rename = "p")]
pub price: String,
#[serde(rename = "q")]
pub qty: String,
#[serde(rename = "b")]
pub buyer_order_id: u64,
#[serde(rename = "a")]
pub seller_order_id: u64,
#[serde(rename = "T")]
pub trade_order_time: u64,
#[serde(rename = "m")]
pub is_buyer_maker: bool,
#[serde(skip, rename = "M")]
pub m_ignore: bool,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct IndexPriceEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "i")]
pub pair: String,
#[serde(rename = "p")]
pub price: String,
}
// https://binance-docs.github.io/apidocs/futures/en/#mark-price-stream
// https://binance-docs.github.io/apidocs/delivery/en/#mark-price-stream
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct MarkPriceEvent {
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "P")]
pub estimate_settle_price: String,
#[serde(rename = "T")]
pub next_funding_time: u64,
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "i")]
pub index_price: Option<String>,
#[serde(rename = "p")]
pub mark_price: String,
#[serde(rename = "r")]
pub funding_rate: String,
#[serde(rename = "s")]
pub symbol: String,
}
// Object({"E": Number(1626118018407), "e": String("forceOrder"), "o": Object({"S": String("SELL"), "T": Number(1626118018404), "X": String("FILLED"), "ap": String("33028.07"), "f": String("IOC"), "l": String("0.010"), "o": String("LIMIT"), "p": String("32896.00"), "q": String("0.010"), "s": String("BTCUSDT"), "z": String("0.010")})})
// https://binance-docs.github.io/apidocs/futures/en/#liquidation-order-streams
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct LiquidationEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "o")]
pub liquidation_order: LiquidationOrder,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct LiquidationOrder {
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "S")]
pub side: String,
#[serde(rename = "o")]
pub order_type: String,
#[serde(rename = "f")]
pub time_in_force: String,
#[serde(rename = "q")]
pub original_quantity: String,
#[serde(rename = "p")]
pub price: String,
#[serde(rename = "ap")]
pub average_price: String,
#[serde(rename = "X")]
pub order_status: String,
#[serde(rename = "l")]
pub order_last_filled_quantity: String,
#[serde(rename = "z")]
pub order_filled_accumulated_quantity: String,
#[serde(rename = "T")]
pub order_trade_time: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct BookTickerEvent {
#[serde(rename = "u")]
pub update_id: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "b")]
pub best_bid: String,
#[serde(rename = "B")]
pub best_bid_qty: String,
#[serde(rename = "a")]
pub best_ask: String,
#[serde(rename = "A")]
pub best_ask_qty: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DayTickerEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "p")]
pub price_change: String,
#[serde(rename = "P")]
pub price_change_percent: String,
#[serde(rename = "w")]
pub average_price: String,
#[serde(rename = "x")]
pub prev_close: String,
#[serde(rename = "c")]
pub current_close: String,
#[serde(rename = "Q")]
pub current_close_qty: String,
#[serde(rename = "b")]
pub best_bid: String,
#[serde(rename = "B")]
pub best_bid_qty: String,
#[serde(rename = "a")]
pub best_ask: String,
#[serde(rename = "A")]
pub best_ask_qty: String,
#[serde(rename = "o")]
pub open: String,
#[serde(rename = "h")]
pub high: String,
#[serde(rename = "l")]
pub low: String,
#[serde(rename = "v")]
pub volume: String,
#[serde(rename = "q")]
pub quote_volume: String,
#[serde(rename = "O")]
pub open_time: u64,
#[serde(rename = "C")]
pub close_time: u64,
#[serde(rename = "F")]
pub first_trade_id: i64,
#[serde(rename = "L")]
pub last_trade_id: i64,
#[serde(rename = "n")]
pub num_trades: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct MiniTickerEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "c")]
pub close: String,
#[serde(rename = "o")]
pub open: String,
#[serde(rename = "h")]
pub high: String,
#[serde(rename = "l")]
pub low: String,
#[serde(rename = "v")]
pub volume: String,
#[serde(rename = "q")]
pub quote_volume: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct KlineEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "k")]
pub kline: Kline,
}
// https://binance-docs.github.io/apidocs/futures/en/#continuous-contract-kline-candlestick-streams
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ContinuousKlineEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "ps")]
pub pair: String,
#[serde(rename = "ct")]
pub contract_type: String,
#[serde(rename = "k")]
pub kline: ContinuousKline,
}
// https://binance-docs.github.io/apidocs/delivery/en/#index-kline-candlestick-streams
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct IndexKlineEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "ps")]
pub pair: String,
#[serde(rename = "k")]
pub kline: IndexKline,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct KlineSummary {
pub open_time: i64,
pub open: String,
pub high: String,
pub low: String,
pub close: String,
pub volume: String,
pub close_time: i64,
pub quote_asset_volume: String,
pub number_of_trades: i64,
pub taker_buy_base_asset_volume: String,
pub taker_buy_quote_asset_volume: String,
}
fn get_value(row: &[Value], index: usize, name: &'static str) -> Result<Value> {
Ok(row
.get(index)
.ok_or(ErrorKind::KlineValueMissingError(index, name))?
.to_owned())
}
impl TryFrom<&Vec<Value>> for KlineSummary {
type Error = Error;
fn try_from(row: &Vec<Value>) -> Result<Self> {
Ok(Self {
open_time: from_value(get_value(row, 0, "open_time")?)?,
open: from_value(get_value(row, 1, "open")?)?,
high: from_value(get_value(row, 2, "high")?)?,
low: from_value(get_value(row, 3, "low")?)?,
close: from_value(get_value(row, 4, "close")?)?,
volume: from_value(get_value(row, 5, "volume")?)?,
close_time: from_value(get_value(row, 6, "close_time")?)?,
quote_asset_volume: from_value(get_value(row, 7, "quote_asset_volume")?)?,
number_of_trades: from_value(get_value(row, 8, "number_of_trades")?)?,
taker_buy_base_asset_volume: from_value(get_value(
row,
9,
"taker_buy_base_asset_volume",
)?)?,
taker_buy_quote_asset_volume: from_value(get_value(
row,
10,
"taker_buy_quote_asset_volume",
)?)?,
})
}
}
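// Sketch: the klines REST endpoint returns each candle as a positional JSON
// array (open time, open, high, low, close, volume, close time, ...); the
// TryFrom impl above maps indices 0..=10 onto the named fields and reports a
// KlineValueMissingError naming the missing column when a row is too short.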
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Kline {
#[serde(rename = "t")]
pub open_time: i64,
#[serde(rename = "T")]
pub close_time: i64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "i")]
pub interval: String,
#[serde(rename = "f")]
pub first_trade_id: i64,
#[serde(rename = "L")]
pub last_trade_id: i64,
#[serde(rename = "o")]
pub open: String,
#[serde(rename = "c")]
pub close: String,
#[serde(rename = "h")]
pub high: String,
#[serde(rename = "l")]
pub low: String,
#[serde(rename = "v")]
pub volume: String,
#[serde(rename = "n")]
pub number_of_trades: i64,
#[serde(rename = "x")]
pub is_final_bar: bool,
#[serde(rename = "q")]
pub quote_asset_volume: String,
#[serde(rename = "V")]
pub taker_buy_base_asset_volume: String,
#[serde(rename = "Q")]
pub taker_buy_quote_asset_volume: String,
#[serde(skip, rename = "B")]
pub ignore_me: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ContinuousKline {
#[serde(rename = "t")]
pub start_time: i64,
#[serde(rename = "T")]
pub end_time: i64,
#[serde(rename = "i")]
pub interval: String,
#[serde(rename = "f")]
pub first_trade_id: i64,
#[serde(rename = "L")]
pub last_trade_id: i64,
#[serde(rename = "o")]
pub open: String,
#[serde(rename = "c")]
pub close: String,
#[serde(rename = "h")]
pub high: String,
#[serde(rename = "l")]
pub low: String,
#[serde(rename = "v")]
pub volume: String,
#[serde(rename = "n")]
pub number_of_trades: i64,
#[serde(rename = "x")]
pub is_final_bar: bool,
#[serde(rename = "q")]
pub quote_volume: String,
#[serde(rename = "V")]
pub active_buy_volume: String,
#[serde(rename = "Q")]
pub active_volume_buy_quote: String,
#[serde(skip, rename = "B")]
pub ignore_me: String,
}
// https://binance-docs.github.io/apidocs/delivery/en/#index-kline-candlestick-streams
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct IndexKline {
#[serde(rename = "t")]
pub start_time: i64,
#[serde(rename = "T")]
pub end_time: i64,
#[serde(skip, rename = "s")]
pub ignore_me: String,
#[serde(rename = "i")]
pub interval: String,
#[serde(rename = "f")]
pub first_trade_id: i64,
#[serde(rename = "L")]
pub last_trade_id: i64,
#[serde(rename = "o")]
pub open: String,
#[serde(rename = "c")]
pub close: String,
#[serde(rename = "h")]
pub high: String,
#[serde(rename = "l")]
pub low: String,
#[serde(rename = "v")]
pub volume: String,
#[serde(rename = "n")]
pub number_of_trades: i64,
#[serde(rename = "x")]
pub is_final_bar: bool,
#[serde(skip, rename = "q")]
pub ignore_me2: String,
#[serde(skip, rename = "V")]
pub ignore_me3: String,
#[serde(skip, rename = "Q")]
pub ignore_me4: String,
#[serde(skip, rename = "B")]
pub ignore_me5: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DepthOrderBookEvent {
#[serde(rename = "e")]
pub event_type: String,
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "U")]
pub first_update_id: u64,
#[serde(rename = "u")]
pub final_update_id: u64,
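// "pu" is only sent on some streams (e.g. futures diff. depth updates), hence Option + default.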
#[serde(rename = "pu")]
#[serde(default)]
pub previous_final_update_id: Option<u64>,
#[serde(rename = "b")]
pub bids: Vec<PriceLevel>,
#[serde(rename = "a")]
pub asks: Vec<PriceLevel>,
}
/// Response to the Savings API get all coins request
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct CoinInfo {
pub coin: String,
pub deposit_all_enable: bool,
#[serde(with = "string_or_float")]
pub free: f64,
#[serde(with = "string_or_float")]
pub freeze: f64,
#[serde(with = "string_or_float")]
pub ipoable: f64,
#[serde(with = "string_or_float")]
pub ipoing: f64,
pub is_legal_money: bool,
#[serde(with = "string_or_float")]
pub locked: f64,
pub name: String,
pub network_list: Vec<Network>,
#[serde(with = "string_or_float")]
pub storage: f64,
pub trading: bool,
pub withdraw_all_enable: bool,
#[serde(with = "string_or_float")]
pub withdrawing: f64,
}
/// Part of the Savings API get all coins response
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Network {
pub address_regex: String,
pub coin: String,
/// shown only when "depositEnable" is false.
pub deposit_desc: Option<String>,
pub deposit_enable: bool,
pub is_default: bool,
pub memo_regex: String,
/// minimum number of confirmations before the deposited balance is credited
pub min_confirm: u32,
pub name: String,
pub network: String,
pub reset_address_status: bool,
pub special_tips: Option<String>,
/// number of confirmations required before the balance is unlocked
pub un_lock_confirm: u32,
/// shown only when "withdrawEnable" is false.
pub withdraw_desc: Option<String>,
pub withdraw_enable: bool,
#[serde(with = "string_or_float")]
pub withdraw_fee: f64,
#[serde(with = "string_or_float")]
pub withdraw_min: f64,
// pub insert_time: Option<u64>, //commented out for now, because they are not inside the actual response (only the api doc example)
// pub update_time: Option<u64>,
pub withdraw_integer_multiple: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct AssetDetail {
#[serde(with = "string_or_float")]
pub min_withdraw_amount: f64,
/// false only if deposits are disabled on ALL of the coin's networks
pub deposit_status: bool,
#[serde(with = "string_or_float")]
pub withdraw_fee: f64,
/// false only if withdrawals are disabled on ALL of the coin's networks
pub withdraw_status: bool,
/// reason
pub deposit_tip: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct DepositAddress {
pub address: String,
pub coin: String,
pub tag: String,
pub url: String,
}
pub(crate) mod string_or_float {
use std::fmt;
use serde::{de, Serializer, Deserialize, Deserializer};
pub fn serialize<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
where
T: fmt::Display,
S: Serializer,
{
serializer.collect_str(value)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<f64, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(untagged)]
enum StringOrFloat {
String(String),
Float(f64),
}
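// Numeric fields may arrive either as JSON numbers or as strings; the literal string "INF" maps to infinity.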
match StringOrFloat::deserialize(deserializer)? {
StringOrFloat::String(s) => {
if s == "INF" {
Ok(f64::INFINITY)
} else {
s.parse().map_err(de::Error::custom)
}
}
StringOrFloat::Float(i) => Ok(i),
}
}
}
pub(crate) mod string_or_float_opt {
use std::fmt;
use serde::{Serializer, Deserialize, Deserializer};
pub fn serialize<T, S>(value: &Option<T>, serializer: S) -> Result<S::Ok, S::Error>
where
T: fmt::Display,
S: Serializer,
{
match value {
Some(v) => crate::model::string_or_float::serialize(v, serializer),
None => serializer.serialize_none(),
}
}
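// Note: this never produces None by itself; a missing field only becomes None via
// #[serde(default)] on the field, and an explicit JSON null fails to deserialize here.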
pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<f64>, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(untagged)]
enum StringOrFloat {
String(String),
Float(f64),
}
Ok(Some(crate::model::string_or_float::deserialize(
deserializer,
)?))
}
}
pub(crate) mod string_or_bool {
use std::fmt;
use serde::{de, Serializer, Deserialize, Deserializer};
pub fn serialize<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
where
T: fmt::Display,
S: Serializer,
{
serializer.collect_str(value)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(untagged)]
enum StringOrBool {
String(String),
Bool(bool),
}
match StringOrBool::deserialize(deserializer)? {
StringOrBool::String(s) => s.parse().map_err(de::Error::custom),
StringOrBool::Bool(b) => Ok(b),
}
}
}
| 24.050193 | 336 | 0.613068 |
33b093482e33213d50681db0332d731a4bd7b878 | 308 | pub fn load(file: &'static str) -> Vec<u8> {
/*
// Debugging for file paths
let paths = std::fs::read_dir("./").unwrap();
for path in paths {
println!("Name: {}", path.unwrap().path().display())
}
*/
std::fs::read(file).unwrap()
}
| 20.533333 | 64 | 0.49026 |
76229f2ab6b84b21399955967e4d810651347f7c | 9,992 | use crate::app::App;
use crate::common::{ColorLegend, ColorNetwork, Colorer};
use crate::helpers::amenity_type;
use crate::layer::{Layer, LayerOutcome};
use abstutil::Counter;
use ezgui::{
hotkey, Btn, Color, Composite, Drawable, EventCtx, GfxCtx, HorizontalAlignment, Key, Line,
Outcome, Text, TextExt, VerticalAlignment, Widget,
};
use geom::{Distance, Time};
use map_model::LaneType;
use sim::TripMode;
pub struct BikeNetwork {
composite: Composite,
time: Time,
unzoomed: Drawable,
zoomed: Drawable,
}
impl Layer for BikeNetwork {
fn name(&self) -> Option<&'static str> {
Some("bike network")
}
fn event(
&mut self,
ctx: &mut EventCtx,
app: &mut App,
minimap: &Composite,
) -> Option<LayerOutcome> {
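// Rebuild the layer whenever the simulation time has advanced, so the throughput data stays current.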
if app.primary.sim.time() != self.time {
*self = BikeNetwork::new(ctx, app);
}
self.composite.align_above(ctx, minimap);
match self.composite.event(ctx) {
Some(Outcome::Clicked(x)) => match x.as_ref() {
"close" => {
return Some(LayerOutcome::Close);
}
_ => unreachable!(),
},
None => {}
}
None
}
fn draw(&self, g: &mut GfxCtx, app: &App) {
self.composite.draw(g);
if g.canvas.cam_zoom < app.opts.min_zoom_for_detail {
g.redraw(&self.unzoomed);
} else {
g.redraw(&self.zoomed);
}
}
fn draw_minimap(&self, g: &mut GfxCtx) {
g.redraw(&self.unzoomed);
}
}
impl BikeNetwork {
pub fn new(ctx: &mut EventCtx, app: &App) -> BikeNetwork {
let mut num_lanes = 0;
let mut total_dist = Distance::ZERO;
let mut on_bike_lanes = Counter::new();
let mut off_bike_lanes = Counter::new();
let mut intersections_on = Counter::new();
let mut intersections_off = Counter::new();
// Make sure all bike lanes show up no matter what
for l in app.primary.map.all_lanes() {
if l.is_biking() {
on_bike_lanes.add(l.parent, 0);
intersections_on.add(l.src_i, 0);
intersections_on.add(l.dst_i, 0);
num_lanes += 1;
total_dist += l.length();
}
}
// Show throughput, broken down by bike lanes or not
for ((r, mode, _), count) in &app.primary.sim.get_analytics().road_thruput.counts {
if *mode == TripMode::Bike {
let (fwd, back) = app.primary.map.get_r(*r).get_lane_types();
if fwd.contains(&LaneType::Biking) || back.contains(&LaneType::Biking) {
on_bike_lanes.add(*r, *count);
} else {
off_bike_lanes.add(*r, *count);
}
}
}
// Use intersection data too, but bin as on bike lanes or not based on connecting roads
for ((i, mode, _), count) in &app.primary.sim.get_analytics().intersection_thruput.counts {
if *mode == TripMode::Bike {
if app
.primary
.map
.get_i(*i)
.roads
.iter()
.any(|r| on_bike_lanes.get(*r) > 0)
{
intersections_on.add(*i, *count);
} else {
intersections_off.add(*i, *count);
}
}
}
let composite = Composite::new(
Widget::col(vec![
Widget::row(vec![
Widget::draw_svg(ctx, "../data/system/assets/tools/layers.svg")
.margin_right(10),
"Bike network".draw_text(ctx),
Btn::plaintext("X")
.build(ctx, "close", hotkey(Key::Escape))
.align_right(),
]),
Text::from_multiline(vec![
Line(format!("{} lanes", num_lanes)),
Line(format!("total distance of {}", total_dist)),
])
.draw(ctx)
.margin_below(10),
Line("Throughput on bike lanes").draw(ctx),
ColorLegend::gradient(ctx, &app.cs.good_to_bad_green, vec!["0%ile", "100%ile"]),
Line("Throughput on unprotected roads").draw(ctx),
ColorLegend::gradient(ctx, &app.cs.good_to_bad_red, vec!["0%ile", "100%ile"]),
])
.padding(5)
.bg(app.cs.panel_bg),
)
.aligned(HorizontalAlignment::Right, VerticalAlignment::Center)
.build(ctx);
let mut colorer = ColorNetwork::new(app);
colorer.road_percentiles(on_bike_lanes, &app.cs.good_to_bad_green);
colorer.road_percentiles(off_bike_lanes, &app.cs.good_to_bad_red);
colorer.intersection_percentiles(intersections_on, &app.cs.good_to_bad_green);
colorer.intersection_percentiles(intersections_off, &app.cs.good_to_bad_red);
let (unzoomed, zoomed) = colorer.build(ctx);
BikeNetwork {
composite,
time: app.primary.sim.time(),
unzoomed,
zoomed,
}
}
}
pub struct Static {
pub colorer: Colorer,
name: &'static str,
}
impl Layer for Static {
fn name(&self) -> Option<&'static str> {
Some(self.name)
}
fn event(
&mut self,
ctx: &mut EventCtx,
_: &mut App,
minimap: &Composite,
) -> Option<LayerOutcome> {
self.colorer.legend.align_above(ctx, minimap);
if self.colorer.event(ctx) {
return Some(LayerOutcome::Close);
}
None
}
fn draw(&self, g: &mut GfxCtx, app: &App) {
self.colorer.draw(g, app);
}
fn draw_minimap(&self, g: &mut GfxCtx) {
g.redraw(&self.colorer.unzoomed);
}
}
impl Static {
pub fn bus_network(ctx: &mut EventCtx, app: &App) -> Static {
// TODO Same color for both?
let mut colorer = Colorer::discrete(
ctx,
"Bus network",
Vec::new(),
vec![
("bus lanes", app.cs.bus_layer),
("bus stops", app.cs.bus_layer),
],
);
for l in app.primary.map.all_lanes() {
if l.is_bus() {
colorer.add_l(l.id, app.cs.bus_layer, &app.primary.map);
}
}
colorer.intersections_from_roads(&app.primary.map);
for bs in app.primary.map.all_bus_stops().keys() {
colorer.add_bs(*bs, app.cs.bus_layer);
}
Static {
colorer: colorer.build(ctx, app),
name: "bus network",
}
}
pub fn edits(ctx: &mut EventCtx, app: &App) -> Static {
let edits = app.primary.map.get_edits();
let mut colorer = Colorer::discrete(
ctx,
format!("Map edits ({})", edits.edits_name),
vec![
format!("{} lane types changed", edits.original_lts.len()),
format!("{} lanes reversed", edits.reversed_lanes.len()),
format!("{} speed limits changed", edits.changed_speed_limits.len()),
format!(
"{} intersections changed",
edits.original_intersections.len()
),
],
vec![("modified lane/intersection", app.cs.edits_layer)],
);
for l in edits.original_lts.keys().chain(&edits.reversed_lanes) {
colorer.add_l(*l, app.cs.edits_layer, &app.primary.map);
}
for i in edits.original_intersections.keys() {
colorer.add_i(*i, app.cs.edits_layer);
}
for r in &edits.changed_speed_limits {
colorer.add_r(*r, app.cs.edits_layer, &app.primary.map);
}
Static {
colorer: colorer.build(ctx, app),
name: "map edits",
}
}
pub fn amenities(ctx: &mut EventCtx, app: &App) -> Static {
let mut colorer = Colorer::discrete(
ctx,
"Amenities",
Vec::new(),
vec![
("groceries", Color::BLACK),
("food", Color::RED),
("bar", Color::BLUE),
("medical", Color::PURPLE),
("church / temple", Color::GREEN),
("education", Color::CYAN),
("bank / post office", Color::YELLOW),
("media", Color::PINK),
("childcare", Color::ORANGE),
("shopping", Color::WHITE),
("other", Color::hex("#96322F")),
],
);
for b in app.primary.map.all_buildings() {
let mut other = false;
for (_, a) in &b.amenities {
if let Some(t) = amenity_type(a) {
colorer.add_b(
b.id,
match t {
"groceries" => Color::BLACK,
"food" => Color::RED,
"bar" => Color::BLUE,
"medical" => Color::PURPLE,
"church / temple" => Color::GREEN,
"education" => Color::CYAN,
"bank / post office" => Color::YELLOW,
"media" => Color::PINK,
"childcare" => Color::ORANGE,
"shopping" => Color::WHITE,
_ => unreachable!(),
},
);
} else {
other = true;
}
}
if other {
colorer.add_b(b.id, Color::hex("#96322F"));
}
}
Static {
colorer: colorer.build(ctx, app),
name: "amenities",
}
}
}
| 33.530201 | 99 | 0.481285 |
3833ee428da0243ed199a9d95363d9aa722a1fda | 14,719 | #[macro_use]
extern crate clap;
extern crate env_logger;
#[macro_use]
extern crate log;
use clap::{App, AppSettings, Arg, ArgMatches};
use rustpython_compiler::compile;
use rustpython_vm::{
exceptions::print_exception,
match_class,
obj::{objint::PyInt, objtype},
pyobject::{ItemProtocol, PyResult},
scope::Scope,
util, InitParameter, PySettings, VirtualMachine,
};
use std::convert::TryInto;
use std::env;
use std::path::PathBuf;
use std::process;
use std::str::FromStr;
mod shell;
fn main() {
#[cfg(feature = "flame-it")]
let main_guard = flame::start_guard("RustPython main");
env_logger::init();
let app = App::new("RustPython");
let matches = parse_arguments(app);
let mut settings = create_settings(&matches);
// We only include the standard library bytecode in WASI when initializing
if cfg!(target_os = "wasi") {
settings.initialization_parameter = InitParameter::InitializeInternal;
}
let vm = VirtualMachine::new(settings);
let res = run_rustpython(&vm, &matches);
#[cfg(feature = "flame-it")]
{
main_guard.end();
if let Err(e) = write_profile(&matches) {
error!("Error writing profile information: {}", e);
}
}
// See if any exception leaked out:
if let Err(err) = res {
if objtype::isinstance(&err, &vm.ctx.exceptions.system_exit) {
let args = err.args();
match args.as_slice().len() {
0 => return,
1 => match_class!(match args.as_slice()[0].clone() {
i @ PyInt => {
use num_traits::cast::ToPrimitive;
process::exit(i.as_bigint().to_i32().unwrap());
}
arg => {
if vm.is_none(&arg) {
return;
}
if let Ok(s) = vm.to_str(&arg) {
println!("{}", s);
}
}
}),
_ => {
if let Ok(r) = vm.to_repr(args.as_object()) {
println!("{}", r);
}
}
}
} else {
print_exception(&vm, &err);
}
process::exit(1);
}
}
fn parse_arguments<'a>(app: App<'a, '_>) -> ArgMatches<'a> {
let app = app
.setting(AppSettings::TrailingVarArg)
.version(crate_version!())
.author(crate_authors!())
.about("Rust implementation of the Python language")
.usage("rustpython [OPTIONS] [-c CMD | -m MODULE | FILE] [PYARGS]...")
.arg(
Arg::with_name("script")
.required(false)
.allow_hyphen_values(true)
.multiple(true)
.value_name("script, args")
.min_values(1),
)
.arg(
Arg::with_name("c")
.short("c")
.takes_value(true)
.allow_hyphen_values(true)
.multiple(true)
.value_name("cmd, args")
.min_values(1)
.help("run the given string as a program"),
)
.arg(
Arg::with_name("m")
.short("m")
.takes_value(true)
.allow_hyphen_values(true)
.multiple(true)
.value_name("module, args")
.min_values(1)
.help("run library module as script"),
)
.arg(
Arg::with_name("optimize")
.short("O")
.multiple(true)
.help("Optimize. Set __debug__ to false. Remove debug statements."),
)
.arg(
Arg::with_name("verbose")
.short("v")
.multiple(true)
.help("Give the verbosity (can be applied multiple times)"),
)
.arg(Arg::with_name("debug").short("d").help("Debug the parser."))
.arg(
Arg::with_name("quiet")
.short("q")
.help("Be quiet at startup."),
)
.arg(
Arg::with_name("inspect")
.short("i")
.help("Inspect interactively after running the script."),
)
.arg(
Arg::with_name("no-user-site")
.short("s")
.help("don't add user site directory to sys.path."),
)
.arg(
Arg::with_name("no-site")
.short("S")
.help("don't imply 'import site' on initialization"),
)
.arg(
Arg::with_name("dont-write-bytecode")
.short("B")
.help("don't write .pyc files on import"),
)
.arg(
Arg::with_name("ignore-environment")
.short("E")
.help("Ignore environment variables PYTHON* such as PYTHONPATH"),
);
#[cfg(feature = "flame-it")]
let app = app
.arg(
Arg::with_name("profile_output")
.long("profile-output")
.takes_value(true)
.help("the file to output the profiling information to"),
)
.arg(
Arg::with_name("profile_format")
.long("profile-format")
.takes_value(true)
.help("the profile format to output the profiling information in"),
);
app.get_matches()
}
/// Create settings by examining command line arguments and environment
/// variables.
fn create_settings(matches: &ArgMatches) -> PySettings {
let ignore_environment = matches.is_present("ignore-environment");
let mut settings: PySettings = Default::default();
settings.ignore_environment = ignore_environment;
// add the current directory to sys.path
settings.path_list.push("".to_owned());
if !ignore_environment {
settings.path_list.append(&mut get_paths("RUSTPYTHONPATH"));
settings.path_list.append(&mut get_paths("PYTHONPATH"));
}
// Now process command line flags:
if matches.is_present("debug") || (!ignore_environment && env::var_os("PYTHONDEBUG").is_some())
{
settings.debug = true;
}
if matches.is_present("inspect")
|| (!ignore_environment && env::var_os("PYTHONINSPECT").is_some())
{
settings.inspect = true;
}
if matches.is_present("optimize") {
settings.optimize = matches.occurrences_of("optimize").try_into().unwrap();
} else if !ignore_environment {
if let Ok(value) = get_env_var_value("PYTHONOPTIMIZE") {
settings.optimize = value;
}
}
if matches.is_present("verbose") {
settings.verbose = matches.occurrences_of("verbose").try_into().unwrap();
} else if !ignore_environment {
if let Ok(value) = get_env_var_value("PYTHONVERBOSE") {
settings.verbose = value;
}
}
settings.no_site = matches.is_present("no-site");
if matches.is_present("no-user-site")
|| (!ignore_environment && env::var_os("PYTHONNOUSERSITE").is_some())
{
settings.no_user_site = true;
}
if matches.is_present("quiet") {
settings.quiet = true;
}
if matches.is_present("dont-write-bytecode")
|| (!ignore_environment && env::var_os("PYTHONDONTWRITEBYTECODE").is_some())
{
settings.dont_write_bytecode = true;
}
let argv = if let Some(script) = matches.values_of("script") {
script.map(ToOwned::to_owned).collect()
} else if let Some(module) = matches.values_of("m") {
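// argv[0] is a placeholder here; runpy later substitutes the module's real path (as CPython does).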
std::iter::once("PLACEHOLDER".to_owned())
.chain(module.skip(1).map(ToOwned::to_owned))
.collect()
} else if let Some(cmd) = matches.values_of("c") {
std::iter::once("-c".to_owned())
.chain(cmd.skip(1).map(ToOwned::to_owned))
.collect()
} else {
vec![]
};
settings.argv = argv;
settings
}
/// Get environment variable and turn it into integer.
fn get_env_var_value(name: &str) -> Result<u8, std::env::VarError> {
env::var(name).map(|value| {
if let Ok(value) = u8::from_str(&value) {
value
} else {
1
}
})
}
/// Helper function to retrieve a sequence of paths from an environment variable.
fn get_paths(env_variable_name: &str) -> Vec<String> {
let paths = env::var_os(env_variable_name);
match paths {
Some(paths) => env::split_paths(&paths)
.map(|path| {
path.into_os_string()
.into_string()
.unwrap_or_else(|_| panic!("{} isn't valid unicode", env_variable_name))
})
.collect(),
None => vec![],
}
}
#[cfg(feature = "flame-it")]
fn write_profile(matches: &ArgMatches) -> Result<(), Box<dyn std::error::Error>> {
use std::{fs, io};
enum ProfileFormat {
Html,
Text,
Speedscope,
}
let profile_output = matches.value_of_os("profile_output");
let profile_format = match matches.value_of("profile_format") {
Some("html") => ProfileFormat::Html,
Some("text") => ProfileFormat::Text,
None if profile_output == Some("-".as_ref()) => ProfileFormat::Text,
Some("speedscope") | None => ProfileFormat::Speedscope,
Some(other) => {
error!("Unknown profile format {}", other);
process::exit(1);
}
};
let profile_output = profile_output.unwrap_or_else(|| match profile_format {
ProfileFormat::Html => "flame-graph.html".as_ref(),
ProfileFormat::Text => "flame.txt".as_ref(),
ProfileFormat::Speedscope => "flamescope.json".as_ref(),
});
let profile_output: Box<dyn io::Write> = if profile_output == "-" {
Box::new(io::stdout())
} else {
Box::new(fs::File::create(profile_output)?)
};
let profile_output = io::BufWriter::new(profile_output);
match profile_format {
ProfileFormat::Html => flame::dump_html(profile_output)?,
ProfileFormat::Text => flame::dump_text_to_writer(profile_output)?,
ProfileFormat::Speedscope => flamescope::dump(profile_output)?,
}
Ok(())
}
fn run_rustpython(vm: &VirtualMachine, matches: &ArgMatches) -> PyResult<()> {
if let Some(paths) = option_env!("BUILDTIME_RUSTPYTHONPATH") {
let sys_path = vm.get_attribute(vm.sys_module.clone(), "path")?;
for (i, path) in std::env::split_paths(paths).enumerate() {
vm.call_method(
&sys_path,
"insert",
vec![
vm.ctx.new_int(i),
vm.ctx.new_str(
path.into_os_string()
.into_string()
.expect("Invalid UTF8 in BUILDTIME_RUSTPYTHONPATH"),
),
],
)?;
}
}
let scope = vm.new_scope_with_builtins();
let main_module = vm.new_module("__main__", scope.globals.clone());
vm.get_attribute(vm.sys_module.clone(), "modules")?
.set_item("__main__", main_module, vm)?;
let site_result = vm.import("site", &[], 0);
if site_result.is_err() {
warn!(
"Failed to import site, consider adding the Lib directory to your RUSTPYTHONPATH \
environment variable",
);
}
// Figure out if a -c option was given:
if let Some(command) = matches.value_of("c") {
run_command(&vm, scope, command.to_string())?;
} else if let Some(module) = matches.value_of("m") {
run_module(&vm, module)?;
} else if let Some(filename) = matches.value_of("script") {
run_script(&vm, scope, filename)?
} else {
shell::run_shell(&vm, scope)?;
}
Ok(())
}
fn _run_string(vm: &VirtualMachine, scope: Scope, source: &str, source_path: String) -> PyResult {
let code_obj = vm
.compile(source, compile::Mode::Exec, source_path.clone())
.map_err(|err| vm.new_syntax_error(&err))?;
// trace!("Code object: {:?}", code_obj.borrow());
scope
.globals
.set_item("__file__", vm.new_str(source_path), vm)?;
vm.run_code_obj(code_obj, scope)
}
fn run_command(vm: &VirtualMachine, scope: Scope, source: String) -> PyResult<()> {
debug!("Running command {}", source);
_run_string(vm, scope, &source, "<stdin>".to_string())?;
Ok(())
}
fn run_module(vm: &VirtualMachine, module: &str) -> PyResult<()> {
debug!("Running module {}", module);
let runpy = vm.import("runpy", &[], 0)?;
let run_module_as_main = vm.get_attribute(runpy, "_run_module_as_main")?;
vm.invoke(&run_module_as_main, vec![vm.new_str(module.to_owned())])?;
Ok(())
}
fn run_script(vm: &VirtualMachine, scope: Scope, script_file: &str) -> PyResult<()> {
debug!("Running file {}", script_file);
// Resolve the script path (a file, or a directory containing __main__.py):
let file_path = PathBuf::from(script_file);
let file_path = if file_path.is_file() {
file_path
} else if file_path.is_dir() {
let main_file_path = file_path.join("__main__.py");
if main_file_path.is_file() {
main_file_path
} else {
error!(
"can't find '__main__' module in '{}'",
file_path.to_str().unwrap()
);
process::exit(1);
}
} else {
error!(
"can't open file '{}': No such file or directory",
file_path.to_str().unwrap()
);
process::exit(1);
};
let dir = file_path.parent().unwrap().to_str().unwrap().to_string();
let sys_path = vm.get_attribute(vm.sys_module.clone(), "path").unwrap();
vm.call_method(&sys_path, "insert", vec![vm.new_int(0), vm.new_str(dir)])?;
match util::read_file(&file_path) {
Ok(source) => {
_run_string(vm, scope, &source, file_path.to_str().unwrap().to_string())?;
}
Err(err) => {
error!(
"Failed reading file '{}': {:?}",
file_path.to_str().unwrap(),
err.kind()
);
process::exit(1);
}
}
Ok(())
}
#[test]
fn test_run_script() {
let vm: VirtualMachine = Default::default();
// test file run
let r = run_script(
&vm,
vm.new_scope_with_builtins(),
"tests/snippets/dir_main/__main__.py",
);
assert!(r.is_ok());
// test module run
let r = run_script(&vm, vm.new_scope_with_builtins(), "tests/snippets/dir_main");
assert!(r.is_ok());
}
| 31.653763 | 99 | 0.539914 |
89b46b64310ee9a69d4d70dd13862da73f8b79be | 6,382 | use std::cell::Cell;
use std::rc::Rc;
use diesel::connection::StatementCache;
use diesel::connection::{Connection, MaybeCached, SimpleConnection, TransactionManager};
use diesel::deserialize::{Queryable, QueryableByName};
use diesel::migration::MigrationConnection;
use diesel::query_builder::bind_collector::RawBytesBindCollector;
use diesel::query_builder::QueryId;
use diesel::query_builder::{AsQuery, QueryFragment};
use diesel::result::*;
use diesel::sql_types::HasSqlType;
use self::cursor::{Cursor, NamedCursor};
use self::stmt::Statement;
use self::transaction::OCITransactionManager;
use super::backend::{HasSqlTypeExt, Oracle};
use diesel::RunQueryDsl;
mod oracle_value;
pub use self::oracle_value::OracleValue;
mod bind_context;
mod cursor;
mod raw;
mod row;
mod stmt;
mod transaction;
pub struct OciConnection {
raw: Rc<raw::RawConnection>,
transaction_manager: OCITransactionManager,
statement_cache: StatementCache<Oracle, Statement>,
has_open_test_transaction: Cell<bool>,
}
impl MigrationConnection for OciConnection {
fn setup(&self) -> QueryResult<usize> {
diesel::sql_query(include_str!("define_create_if_not_exists.sql")).execute(self)?;
diesel::sql_query(include_str!("create_migration_table.sql")).execute(self)
}
}
// This relies on the invariant that RawConnection or Statement are never
// leaked. If a reference to one of those was held on a different thread, this
// would not be thread safe.
// Similar to diesel::sqlite::SqliteConnection;
unsafe impl Send for OciConnection {}
impl SimpleConnection for OciConnection {
fn batch_execute(&self, query: &str) -> QueryResult<()> {
let mut stmt = Statement::prepare(&self.raw, query)?;
stmt.run(self.auto_commit(), &[])?;
stmt.bind_index = 0;
Ok(())
}
}
impl Connection for OciConnection {
type Backend = Oracle;
type TransactionManager = OCITransactionManager;
/// Establishes a new connection to the database at the given URL. The URL
/// should be a valid connection string for a given backend. See the
/// documentation for the specific backend for specifics.
fn establish(database_url: &str) -> ConnectionResult<Self> {
let r = raw::RawConnection::establish(database_url)?;
let ret = OciConnection {
raw: Rc::new(r),
transaction_manager: OCITransactionManager::new(),
statement_cache: StatementCache::new(),
has_open_test_transaction: Cell::new(false),
};
Ok(ret)
}
/// Creates a transaction that will never be committed. This is useful for
/// tests. Panics if called while inside of a transaction.
fn begin_test_transaction(&self) -> QueryResult<()> {
let transaction_manager = self.transaction_manager();
assert_eq!(transaction_manager.get_transaction_depth(), 0);
self.has_open_test_transaction.set(true);
transaction_manager.begin_transaction(self)
}
#[doc(hidden)]
fn execute(&self, query: &str) -> QueryResult<usize> {
let mut stmt = Statement::prepare(&self.raw, query)?;
stmt.run(self.auto_commit(), &[])?;
stmt.bind_index = 0;
Ok(stmt.get_affected_rows()?)
}
#[doc(hidden)]
fn execute_returning_count<T>(&self, source: &T) -> QueryResult<usize>
where
T: QueryFragment<Self::Backend> + QueryId,
{
// TODO: FIXME: this always returns 0 whereas the code looks proper
let mut stmt = self.prepare_query(source)?;
stmt.run(self.auto_commit(), &[])?;
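// Reset the bind index so the cached statement can be re-bound the next time it is used.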
stmt.bind_index = 0;
Ok(stmt.get_affected_rows()?)
}
fn transaction_manager(&self) -> &Self::TransactionManager {
&self.transaction_manager
}
fn query_by_index<T, U>(&self, source: T) -> QueryResult<Vec<U>>
where
T: AsQuery,
T::Query: QueryFragment<Self::Backend> + QueryId,
Self::Backend: HasSqlType<T::SqlType>,
U: Queryable<T::SqlType, Self::Backend>,
{
let mut stmt = self.prepare_query(&source.as_query())?;
let mut metadata = Vec::new();
Oracle::oci_row_metadata(&mut metadata);
let cursor: Cursor<T::SqlType, U> = stmt.run_with_cursor(self.auto_commit(), metadata)?;
cursor.collect()
}
fn query_by_name<T, U>(&self, source: &T) -> QueryResult<Vec<U>>
where
T: QueryFragment<Self::Backend> + QueryId,
U: QueryableByName<Self::Backend>,
{
let mut stmt = self.prepare_query(&source)?;
let mut metadata = Vec::new();
stmt.get_metadata(&mut metadata)?;
let mut cursor: NamedCursor = stmt.run_with_named_cursor(self.auto_commit(), metadata)?;
cursor.collect()
}
}
impl OciConnection {
fn prepare_query<T: QueryFragment<Oracle> + QueryId>(
&self,
source: &T,
) -> QueryResult<MaybeCached<Statement>> {
let mut statement = self.cached_prepared_statement(source)?;
let mut bind_collector = RawBytesBindCollector::<Oracle>::new();
source.collect_binds(&mut bind_collector, &())?;
let metadata = bind_collector.metadata;
let binds = bind_collector.binds;
for (tpe, value) in metadata.into_iter().zip(binds) {
let tpe = tpe.ok_or_else(|| diesel::result::Error::QueryBuilderError(
"Input binds need type information".into(),
))?;
statement.bind(tpe, value)?;
}
Ok(statement)
}
fn cached_prepared_statement<T: QueryFragment<Oracle> + QueryId>(
&self,
source: &T,
) -> QueryResult<MaybeCached<Statement>> {
self.statement_cache
.cached_statement(source, &[], |sql| Statement::prepare(&self.raw, sql))
}
fn auto_commit(&self) -> bool {
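// Individual statements auto-commit only while no explicit transaction is open.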
self.transaction_manager.get_transaction_depth() == 0
}
}
impl Drop for OciConnection {
fn drop(&mut self) {
if self.has_open_test_transaction.get() {
let tm = self.transaction_manager();
tm.rollback_transaction(&self)
.expect("This return Ok() for all paths anyway");
}
}
}
#[cfg(feature = "r2d2")]
use diesel::r2d2::R2D2Connection;
#[cfg(feature = "r2d2")]
impl R2D2Connection for OciConnection {
fn ping(&self) -> QueryResult<()> {
self.execute("SELECT 1 FROM DUAL").map(|_| ())
}
}
| 33.589474 | 96 | 0.653557 |
6aa806ccbd8bb312c61c3d3f1cc76ca5c948aeb3 | 13,930 | use crate::conversion::Wrap;
use crate::dataframe::PyDataFrame;
use crate::error::PyPolarsEr;
use crate::lazy::{dsl::PyExpr, utils::py_exprs_to_exprs};
use crate::prelude::NullValues;
use crate::utils::str_to_polarstype;
use polars::lazy::frame::{AllowedOptimizations, LazyCsvReader, LazyFrame, LazyGroupBy};
use polars::lazy::prelude::col;
use polars::prelude::{DataFrame, Field, JoinType, Schema};
use pyo3::prelude::*;
#[pyclass]
#[repr(transparent)]
pub struct PyLazyGroupBy {
// option because we cannot get a self by value in pyo3
pub lgb: Option<LazyGroupBy>,
}
#[pymethods]
impl PyLazyGroupBy {
pub fn agg(&mut self, aggs: Vec<PyExpr>) -> PyLazyFrame {
let lgb = self.lgb.take().unwrap();
let aggs = py_exprs_to_exprs(aggs);
lgb.agg(aggs).into()
}
pub fn head(&mut self, n: usize) -> PyLazyFrame {
let lgb = self.lgb.take().unwrap();
lgb.head(Some(n)).into()
}
pub fn tail(&mut self, n: usize) -> PyLazyFrame {
let lgb = self.lgb.take().unwrap();
lgb.tail(Some(n)).into()
}
pub fn apply(&mut self, lambda: PyObject) -> PyLazyFrame {
let lgb = self.lgb.take().unwrap();
let function = move |df: DataFrame| {
let gil = Python::acquire_gil();
let py = gil.python();
// get the pypolars module
let pypolars = PyModule::import(py, "polars").unwrap();
// create a PyDataFrame struct/object for Python
let pydf = PyDataFrame::new(df);
// Wrap this PySeries object in the python side DataFrame wrapper
let python_df_wrapper = pypolars.getattr("wrap_df").unwrap().call1((pydf,)).unwrap();
// call the lambda and get a python side DataFrame wrapper
let result_df_wrapper = match lambda.call1(py, (python_df_wrapper,)) {
Ok(pyobj) => pyobj,
Err(e) => panic!("UDF failed: {}", e.pvalue(py).to_string()),
};
// unpack the wrapper in a PyDataFrame
let py_pydf = result_df_wrapper.getattr(py, "_df").expect(
"Could net get DataFrame attribute '_df'. Make sure that you return a DataFrame object.",
);
// Downcast to Rust
let pydf = py_pydf.extract::<PyDataFrame>(py).unwrap();
// Finally get the actual DataFrame
Ok(pydf.df)
};
lgb.apply(function).into()
}
}
#[pyclass]
#[repr(transparent)]
#[derive(Clone)]
pub struct PyLazyFrame {
// option because we cannot get a self by value in pyo3
pub ldf: LazyFrame,
}
impl From<LazyFrame> for PyLazyFrame {
fn from(ldf: LazyFrame) -> Self {
PyLazyFrame { ldf }
}
}
#[pymethods]
#[allow(clippy::should_implement_trait)]
impl PyLazyFrame {
#[staticmethod]
#[allow(clippy::too_many_arguments)]
pub fn new_from_csv(
path: String,
sep: &str,
has_header: bool,
ignore_errors: bool,
skip_rows: usize,
stop_after_n_rows: Option<usize>,
cache: bool,
overwrite_dtype: Option<Vec<(&str, &PyAny)>>,
low_memory: bool,
comment_char: Option<&str>,
quote_char: Option<&str>,
null_values: Option<Wrap<NullValues>>,
) -> Self {
let null_values = null_values.map(|w| w.0);
let comment_char = comment_char.map(|s| s.as_bytes()[0]);
let quote_char = quote_char.map(|s| s.as_bytes()[0]);
let delimiter = sep.as_bytes()[0];
let overwrite_dtype = overwrite_dtype.map(|overwrite_dtype| {
let fields = overwrite_dtype
.iter()
.map(|(name, dtype)| {
let str_repr = dtype.str().unwrap().to_str().unwrap();
let dtype = str_to_polarstype(str_repr);
Field::new(name, dtype)
})
.collect();
Schema::new(fields)
});
LazyCsvReader::new(path)
.with_delimiter(delimiter)
.has_header(has_header)
.with_ignore_parser_errors(ignore_errors)
.with_skip_rows(skip_rows)
.with_stop_after_n_rows(stop_after_n_rows)
.with_cache(cache)
.with_dtype_overwrite(overwrite_dtype.as_ref())
.low_memory(low_memory)
.with_comment_char(comment_char)
.with_quote_char(quote_char)
.with_null_values(null_values)
.finish()
.into()
}
#[staticmethod]
#[cfg(feature = "parquet")]
pub fn new_from_parquet(path: String, stop_after_n_rows: Option<usize>, cache: bool) -> Self {
LazyFrame::new_from_parquet(path, stop_after_n_rows, cache).into()
}
pub fn describe_plan(&self) -> String {
self.ldf.describe_plan()
}
pub fn describe_optimized_plan(&self) -> PyResult<String> {
let result = self
.ldf
.describe_optimized_plan()
.map_err(PyPolarsEr::from)?;
Ok(result)
}
pub fn to_dot(&self, optimized: bool) -> PyResult<String> {
let result = self.ldf.to_dot(optimized).map_err(PyPolarsEr::from)?;
Ok(result)
}
pub fn optimization_toggle(
&self,
type_coercion: bool,
predicate_pushdown: bool,
projection_pushdown: bool,
simplify_expr: bool,
string_cache: bool,
) -> PyLazyFrame {
let ldf = self.ldf.clone();
let ldf = ldf
.with_type_coercion(type_coercion)
.with_predicate_pushdown(predicate_pushdown)
.with_simplify_expr(simplify_expr)
.with_string_cache(string_cache)
.with_projection_pushdown(projection_pushdown);
ldf.into()
}
pub fn sort(&self, by_column: &str, reverse: bool) -> PyLazyFrame {
let ldf = self.ldf.clone();
ldf.sort(by_column, reverse).into()
}
pub fn sort_by_exprs(&self, by_column: Vec<PyExpr>, reverse: Vec<bool>) -> PyLazyFrame {
let ldf = self.ldf.clone();
let exprs = py_exprs_to_exprs(by_column);
ldf.sort_by_exprs(exprs, reverse).into()
}
pub fn cache(&self) -> PyLazyFrame {
let ldf = self.ldf.clone();
ldf.cache().into()
}
pub fn collect(&self, py: Python) -> PyResult<PyDataFrame> {
// if we don't allow threads and we have udfs trying to acquire the gil from different
// threads we deadlock.
let df = py.allow_threads(|| {
let ldf = self.ldf.clone();
ldf.collect().map_err(PyPolarsEr::from)
})?;
Ok(df.into())
}
pub fn fetch(&self, n_rows: usize) -> PyResult<PyDataFrame> {
let ldf = self.ldf.clone();
let gil = Python::acquire_gil();
let py = gil.python();
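// As in `collect`, release the GIL while executing so UDFs running on other threads can acquire it.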
let df = py.allow_threads(|| ldf.fetch(n_rows).map_err(PyPolarsEr::from))?;
Ok(df.into())
}
pub fn filter(&mut self, predicate: PyExpr) -> PyLazyFrame {
let ldf = self.ldf.clone();
ldf.filter(predicate.inner).into()
}
pub fn select(&mut self, exprs: Vec<PyExpr>) -> PyLazyFrame {
let ldf = self.ldf.clone();
let exprs = py_exprs_to_exprs(exprs);
ldf.select(exprs).into()
}
pub fn groupby(&mut self, by: Vec<PyExpr>, maintain_order: bool) -> PyLazyGroupBy {
let ldf = self.ldf.clone();
let by = py_exprs_to_exprs(by);
let lazy_gb = if maintain_order {
ldf.stable_groupby(by)
} else {
ldf.groupby(by)
};
PyLazyGroupBy { lgb: Some(lazy_gb) }
}
pub fn join(
&mut self,
other: PyLazyFrame,
left_on: Vec<PyExpr>,
right_on: Vec<PyExpr>,
allow_parallel: bool,
force_parallel: bool,
how: &str,
suffix: String,
) -> PyLazyFrame {
let how = match how {
"left" => JoinType::Left,
"inner" => JoinType::Inner,
"outer" => JoinType::Outer,
"asof" => JoinType::AsOf,
"cross" => JoinType::Cross,
_ => panic!("not supported"),
};
let ldf = self.ldf.clone();
let other = other.ldf;
let left_on = left_on.into_iter().map(|pyexpr| pyexpr.inner).collect();
let right_on = right_on.into_iter().map(|pyexpr| pyexpr.inner).collect();
ldf.join_builder()
.with(other)
.left_on(left_on)
.right_on(right_on)
.allow_parallel(allow_parallel)
.force_parallel(force_parallel)
.how(how)
.suffix(suffix)
.finish()
.into()
}
pub fn with_column(&mut self, expr: PyExpr) -> PyLazyFrame {
let ldf = self.ldf.clone();
ldf.with_column(expr.inner).into()
}
pub fn with_columns(&mut self, exprs: Vec<PyExpr>) -> PyLazyFrame {
let ldf = self.ldf.clone();
ldf.with_columns(py_exprs_to_exprs(exprs)).into()
}
pub fn rename(&mut self, existing: Vec<String>, new: Vec<String>) -> PyLazyFrame {
let ldf = self.ldf.clone();
ldf.rename(existing, new).into()
}
pub fn with_column_renamed(&mut self, existing: &str, new: &str) -> PyLazyFrame {
let ldf = self.ldf.clone();
ldf.with_column_renamed(existing, new).into()
}
pub fn reverse(&self) -> Self {
let ldf = self.ldf.clone();
ldf.reverse().into()
}
pub fn shift(&self, periods: i64) -> Self {
let ldf = self.ldf.clone();
ldf.shift(periods).into()
}
pub fn shift_and_fill(&self, periods: i64, fill_value: PyExpr) -> Self {
let ldf = self.ldf.clone();
ldf.shift_and_fill(periods, fill_value.inner).into()
}
pub fn fill_null(&self, fill_value: PyExpr) -> Self {
let ldf = self.ldf.clone();
ldf.fill_null(fill_value.inner).into()
}
pub fn fill_nan(&self, fill_value: PyExpr) -> Self {
let ldf = self.ldf.clone();
ldf.fill_nan(fill_value.inner).into()
}
pub fn min(&self) -> Self {
let ldf = self.ldf.clone();
ldf.min().into()
}
pub fn max(&self) -> Self {
let ldf = self.ldf.clone();
ldf.max().into()
}
pub fn sum(&self) -> Self {
let ldf = self.ldf.clone();
ldf.sum().into()
}
pub fn mean(&self) -> Self {
let ldf = self.ldf.clone();
ldf.mean().into()
}
pub fn std(&self) -> Self {
let ldf = self.ldf.clone();
ldf.std().into()
}
pub fn var(&self) -> Self {
let ldf = self.ldf.clone();
ldf.var().into()
}
pub fn median(&self) -> Self {
let ldf = self.ldf.clone();
ldf.median().into()
}
pub fn quantile(&self, quantile: f64) -> Self {
let ldf = self.ldf.clone();
ldf.quantile(quantile).into()
}
pub fn explode(&self, column: Vec<PyExpr>) -> Self {
let ldf = self.ldf.clone();
let column = py_exprs_to_exprs(column);
ldf.explode(column).into()
}
pub fn drop_duplicates(&self, maintain_order: bool, subset: Option<Vec<String>>) -> Self {
let ldf = self.ldf.clone();
ldf.drop_duplicates(maintain_order, subset).into()
}
pub fn drop_nulls(&self, subset: Option<Vec<String>>) -> Self {
let ldf = self.ldf.clone();
ldf.drop_nulls(subset.map(|v| v.into_iter().map(|s| col(&s)).collect()))
.into()
}
pub fn slice(&self, offset: i64, len: usize) -> Self {
let ldf = self.ldf.clone();
ldf.slice(offset, len).into()
}
pub fn tail(&self, n: usize) -> Self {
let ldf = self.ldf.clone();
ldf.tail(n).into()
}
pub fn melt(&self, id_vars: Vec<String>, value_vars: Vec<String>) -> Self {
let ldf = self.ldf.clone();
ldf.melt(id_vars, value_vars).into()
}
pub fn map(&self, lambda: PyObject, predicate_pd: bool, projection_pd: bool) -> Self {
let opt = AllowedOptimizations {
predicate_pushdown: predicate_pd,
projection_pushdown: projection_pd,
..Default::default()
};
let function = move |s: DataFrame| {
let gil = Python::acquire_gil();
let py = gil.python();
// get the pypolars module
let pypolars = PyModule::import(py, "polars").unwrap();
// create a PyDataFrame struct/object for Python
let pydf = PyDataFrame::new(s);
// Wrap this PyDataFrame object in the python side DataFrame wrapper
let python_df_wrapper = pypolars.getattr("wrap_df").unwrap().call1((pydf,)).unwrap();
// call the lambda and get a python side DataFrame wrapper
let result_df_wrapper = match lambda.call1(py, (python_df_wrapper,)) {
Ok(pyobj) => pyobj,
Err(e) => panic!("UDF failed: {}", e.pvalue(py).to_string()),
};
// unpack the wrapper in a PyDataFrame
let py_pydf = result_df_wrapper.getattr(py, "_df").expect(
"Could net get DataFrame attribute '_s'. Make sure that you return a DataFrame object.",
);
// Downcast to Rust
let pydf = py_pydf.extract::<PyDataFrame>(py).unwrap();
// Finally get the actual DataFrame
Ok(pydf.df)
};
let ldf = self.ldf.clone();
ldf.map(function, Some(opt), None).into()
}
pub fn drop_columns(&self, cols: Vec<String>) -> Self {
let ldf = self.ldf.clone();
ldf.drop_columns(cols).into()
}
pub fn clone(&self) -> PyLazyFrame {
self.ldf.clone().into()
}
pub fn columns(&self) -> Vec<String> {
self.ldf
.schema()
.fields()
.iter()
.map(|fld| fld.name().to_string())
.collect()
}
}
| 31.803653 | 105 | 0.563532 |
564a67d2c8ef0bec81b7d9e53e8124cbd5b17607 | 1,522 |
#![allow(unused_variables, unused_assignments, similar_names, blacklisted_name)]
#![warn(useless_let_if_seq)]
fn f() -> bool { true }
fn g(x: i32) -> i32 { x + 1 }
fn issue985() -> i32 {
let mut x = 42;
if f() {
x = g(x);
}
x
}
fn issue985_alt() -> i32 {
let mut x = 42;
if f() {
f();
} else {
x = g(x);
}
x
}
fn issue975() -> String {
let mut udn = "dummy".to_string();
if udn.starts_with("uuid:") {
udn = String::from(&udn[5..]);
}
udn
}
fn early_return() -> u8 {
// FIXME: we could extend the lint to include such cases:
let foo;
if f() {
return 42;
} else {
foo = 0;
}
foo
}
fn main() {
early_return();
issue975();
issue985();
issue985_alt();
let mut foo = 0;
if f() {
foo = 42;
}
let mut bar = 0;
if f() {
f();
bar = 42;
}
else {
f();
}
let quz;
if f() {
quz = 42;
} else {
quz = 0;
}
// `toto` is used several times
let mut toto;
if f() {
toto = 42;
} else {
for i in &[1, 2] {
toto = *i;
}
toto = 2;
}
// found in libcore, the inner if is not a statement but the block's expr
let mut ch = b'x';
if f() {
ch = b'*';
if f() {
ch = b'?';
}
}
// baz needs to be mut
let mut baz = 0;
if f() {
baz = 42;
}
baz = 1337;
}
| 14.224299 | 80 | 0.417871 |
d5b1415ae6cc6bbdf89c28fb72d39674be3eb85b | 82,403 | use material::Material;
use triangle::NormalTriangle;
use hitable::{
Hitable,
HitRecord,
HitableList,
};
use ray::Ray3;
use aabb::AABB;
use cgmath::{
Point3,
Vector3,
EuclideanSpace,
Quaternion,
Rotation,
};
use std::time::Instant;
pub struct Teapot {
hitable_list: HitableList,
}
impl Teapot {
pub fn new(centre: Point3<f32>, scale: f32, rotation: Quaternion<f32>, material: Material) -> Self {
Teapot {
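// Build one NormalTriangle per index triple, scaling, then rotating, then translating
// the shared vertex and normal tables below.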
hitable_list: (0..INDICES.len() / 3).fold(HitableList::new(), |hitable_list, i| {
hitable_list.with_hitable(NormalTriangle::new(
[
rotation.rotate_point(VERTICES[INDICES[i * 3 + 0]] * scale) + centre.to_vec(),
rotation.rotate_point(VERTICES[INDICES[i * 3 + 1]] * scale) + centre.to_vec(),
rotation.rotate_point(VERTICES[INDICES[i * 3 + 2]] * scale) + centre.to_vec(),
],
[
rotation.rotate_vector(NORMALS[INDICES[i * 3 + 0]]),
rotation.rotate_vector(NORMALS[INDICES[i * 3 + 1]]),
rotation.rotate_vector(NORMALS[INDICES[i * 3 + 2]]),
],
material.clone(),
))
})
}
}
}
impl Hitable for Teapot {
fn hit(&self, r: &Ray3<f32>, t_min: f32, t_max: f32) -> Option<HitRecord> {
self.hitable_list.hit(r, t_min, t_max)
}
fn bounding_box(&self, t0: Instant, t1: Instant) -> Option<AABB> {
self.hitable_list.bounding_box(t0, t1)
}
}
pub const VERTICES: [Point3<f32>; 530] = [
Point3 { x: 0.4537252, y: 0.26190555, z: -0.01237479 },
Point3 { x: 0.44752464, y: 0.28534314, z: -0.01237479 },
Point3 { x: 0.45471805, y: 0.29315528, z: -0.01237479 },
Point3 { x: 0.4693506, y: 0.28534314, z: -0.01237479 },
Point3 { x: 0.48547187, y: 0.26190555, z: -0.01237479 },
Point3 { x: 0.41928142, y: 0.26190555, z: 0.16206928 },
Point3 { x: 0.41356108, y: 0.28534314, z: 0.15963574 },
Point3 { x: 0.42019612, y: 0.29315528, z: 0.16245906 },
Point3 { x: 0.43369508, y: 0.28534314, z: 0.16820285 },
Point3 { x: 0.44856662, y: 0.26190555, z: 0.17452963 },
Point3 { x: 0.32483658, y: 0.26190555, z: 0.30318037 },
Point3 { x: 0.3204341, y: 0.28534314, z: 0.29877898 },
Point3 { x: 0.3255413, y: 0.29315528, z: 0.30388507 },
Point3 { x: 0.33592993, y: 0.28534314, z: 0.31427482 },
Point3 { x: 0.3473762, y: 0.26190555, z: 0.32571998 },
Point3 { x: 0.1837255, y: 0.26190555, z: 0.3976252 },
Point3 { x: 0.18129197, y: 0.28534314, z: 0.39190486 },
Point3 { x: 0.18411528, y: 0.29315528, z: 0.39853987 },
Point3 { x: 0.18985796, y: 0.28534314, z: 0.41203886 },
Point3 { x: 0.19618584, y: 0.26190555, z: 0.4269104 },
Point3 { x: 0.009281037, y: 0.26190555, z: 0.43207008 },
Point3 { x: 0.009281037, y: 0.28534314, z: 0.42586952 },
Point3 { x: 0.009281037, y: 0.29315528, z: 0.43306184 },
Point3 { x: 0.009281037, y: 0.28534314, z: 0.4476944 },
Point3 { x: 0.009281037, y: 0.26190555, z: 0.46381566 },
Point3 { x: -0.1772167, y: 0.26190555, z: 0.3976252 },
Point3 { x: -0.16781531, y: 0.28534314, z: 0.39190486 },
Point3 { x: -0.16705923, y: 0.29315528, z: 0.39853987 },
Point3 { x: -0.17148407, y: 0.28534314, z: 0.41203886 },
Point3 { x: -0.17762321, y: 0.26190555, z: 0.4269104 },
Point3 { x: -0.3169887, y: 0.26190555, z: 0.30318037 },
Point3 { x: -0.30639234, y: 0.28534314, z: 0.29877898 },
Point3 { x: -0.30831772, y: 0.29315528, z: 0.30388507 },
Point3 { x: -0.31753594, y: 0.28534314, z: 0.31427482 },
Point3 { x: -0.3288147, y: 0.26190555, z: 0.32571998 },
Point3 { x: -0.4047371, y: 0.26190555, z: 0.16206928 },
Point3 { x: -0.39669377, y: 0.28534314, z: 0.15963574 },
Point3 { x: -0.40213603, y: 0.29315528, z: 0.16245906 },
Point3 { x: -0.4151961, y: 0.28534314, z: 0.16820285 },
Point3 { x: -0.4300051, y: 0.26190555, z: 0.17452963 },
Point3 { x: -0.43516368, y: 0.26190555, z: -0.01237479 },
Point3 { x: -0.42896312, y: 0.28534314, z: -0.01237479 },
Point3 { x: -0.4361554, y: 0.29315528, z: -0.01237479 },
Point3 { x: -0.450788, y: 0.28534314, z: -0.01237479 },
Point3 { x: -0.46690923, y: 0.26190555, z: -0.01237479 },
Point3 { x: -0.4007188, y: 0.26190555, z: -0.18681909 },
Point3 { x: -0.39499956, y: 0.28534314, z: -0.18438554 },
Point3 { x: -0.40163454, y: 0.29315528, z: -0.18720885 },
Point3 { x: -0.41513357, y: 0.28534314, z: -0.19295153 },
Point3 { x: -0.4300051, y: 0.26190555, z: -0.19927943 },
Point3 { x: -0.30627394, y: 0.26190555, z: -0.32793015 },
Point3 { x: -0.30187255, y: 0.28534314, z: -0.32352766 },
Point3 { x: -0.30697867, y: 0.29315528, z: -0.3286349 },
Point3 { x: -0.31736842, y: 0.28534314, z: -0.3390235 },
Point3 { x: -0.3288147, y: 0.26190555, z: -0.3504698 },
Point3 { x: -0.16516288, y: 0.26190555, z: -0.422375 },
Point3 { x: -0.16272932, y: 0.28534314, z: -0.41665468 },
Point3 { x: -0.16555263, y: 0.29315528, z: -0.4232897 },
Point3 { x: -0.17129643, y: 0.28534314, z: -0.43678865 },
Point3 { x: -0.17762321, y: 0.26190555, z: -0.45166022 },
Point3 { x: 0.009281037, y: 0.26190555, z: -0.4568188 },
Point3 { x: 0.009281037, y: 0.28534314, z: -0.4506182 },
Point3 { x: 0.009281037, y: 0.29315528, z: -0.45781165 },
Point3 { x: 0.009281037, y: 0.28534314, z: -0.47244418 },
Point3 { x: 0.009281037, y: 0.26190555, z: -0.48856547 },
Point3 { x: 0.1837255, y: 0.26190555, z: -0.422375 },
Point3 { x: 0.18129197, y: 0.28534314, z: -0.41665468 },
Point3 { x: 0.18411528, y: 0.29315528, z: -0.4232897 },
Point3 { x: 0.18985796, y: 0.28534314, z: -0.43678865 },
Point3 { x: 0.19618584, y: 0.26190555, z: -0.45166022 },
Point3 { x: 0.32483658, y: 0.26190555, z: -0.32793015 },
Point3 { x: 0.3204341, y: 0.28534314, z: -0.32352766 },
Point3 { x: 0.3255413, y: 0.29315528, z: -0.3286349 },
Point3 { x: 0.33592993, y: 0.28534314, z: -0.3390235 },
Point3 { x: 0.3473762, y: 0.26190555, z: -0.3504698 },
Point3 { x: 0.41928142, y: 0.26190555, z: -0.18681909 },
Point3 { x: 0.41356108, y: 0.28534314, z: -0.18438554 },
Point3 { x: 0.42019612, y: 0.29315528, z: -0.18720885 },
Point3 { x: 0.43369508, y: 0.28534314, z: -0.19295153 },
Point3 { x: 0.44856662, y: 0.26190555, z: -0.19927943 },
Point3 { x: 0.54375523, y: 0.13727762, z: -0.01237479 },
Point3 { x: 0.59459835, y: 0.014881478, z: -0.01237479 },
Point3 { x: 0.63056105, y: -0.10305001, z: -0.01237479 },
Point3 { x: 0.6442019, y: -0.21428554, z: -0.01237479 },
Point3 { x: 0.5023336, y: 0.13727762, z: 0.19740653 },
Point3 { x: 0.5492366, y: 0.014881478, z: 0.21736184 },
Point3 { x: 0.5824117, y: -0.10305001, z: 0.23147729 },
Point3 { x: 0.5949949, y: -0.21428554, z: 0.23683132 },
Point3 { x: 0.38875765, y: 0.13727762, z: 0.36710146 },
Point3 { x: 0.42485654, y: 0.014881478, z: 0.40320036 },
Point3 { x: 0.45038927, y: -0.10305001, z: 0.42873418 },
Point3 { x: 0.4600743, y: -0.21428554, z: 0.4384192 },
Point3 { x: 0.21906163, y: 0.13727762, z: 0.4806774 },
Point3 { x: 0.23901804, y: 0.014881478, z: 0.5275804 },
Point3 { x: 0.2531335, y: -0.10305001, z: 0.56075543 },
Point3 { x: 0.25848755, y: -0.21428554, z: 0.57333976 },
Point3 { x: 0.009281037, y: 0.13727762, z: 0.522099 },
Point3 { x: 0.009281037, y: 0.014881478, z: 0.5729422 },
Point3 { x: 0.009281037, y: -0.10305001, z: 0.60890484 },
Point3 { x: 0.009281037, y: -0.21428554, z: 0.62254566 },
Point3 { x: -0.20050012, y: 0.13727762, z: 0.4806774 },
Point3 { x: -0.22045654, y: 0.014881478, z: 0.5275804 },
Point3 { x: -0.23457088, y: -0.10305001, z: 0.56075543 },
Point3 { x: -0.23992491, y: -0.21428554, z: 0.57333976 },
Point3 { x: -0.37019613, y: 0.13727762, z: 0.36710146 },
Point3 { x: -0.40629396, y: 0.014881478, z: 0.40320036 },
Point3 { x: -0.43182775, y: -0.10305001, z: 0.42873418 },
Point3 { x: -0.4415128, y: -0.21428554, z: 0.4384192 },
Point3 { x: -0.48377094, y: 0.13727762, z: 0.19740653 },
Point3 { x: -0.530674, y: 0.014881478, z: 0.21736184 },
Point3 { x: -0.56384903, y: -0.10305001, z: 0.23147729 },
Point3 { x: -0.57643336, y: -0.21428554, z: 0.23683132 },
Point3 { x: -0.5251926, y: 0.13727762, z: -0.01237479 },
Point3 { x: -0.5760369, y: 0.014881478, z: -0.01237479 },
Point3 { x: -0.61199844, y: -0.10305001, z: -0.01237479 },
Point3 { x: -0.6256392, y: -0.21428554, z: -0.01237479 },
Point3 { x: -0.48377094, y: 0.13727762, z: -0.22215632 },
Point3 { x: -0.530674, y: 0.014881478, z: -0.24211162 },
Point3 { x: -0.56384903, y: -0.10305001, z: -0.2562271 },
Point3 { x: -0.57643336, y: -0.21428554, z: -0.26158112 },
Point3 { x: -0.37019613, y: 0.13727762, z: -0.39185125 },
Point3 { x: -0.40629396, y: 0.014881478, z: -0.42795014 },
Point3 { x: -0.43182775, y: -0.10305001, z: -0.45348287 },
Point3 { x: -0.4415128, y: -0.21428554, z: -0.46316788 },
Point3 { x: -0.20050012, y: 0.13727762, z: -0.5054272 },
Point3 { x: -0.22045654, y: 0.014881478, z: -0.5523302 },
Point3 { x: -0.23457088, y: -0.10305001, z: -0.58550525 },
Point3 { x: -0.23992491, y: -0.21428554, z: -0.59808844 },
Point3 { x: 0.009281037, y: 0.13727762, z: -0.54684883 },
Point3 { x: 0.009281037, y: 0.014881478, z: -0.59769195 },
Point3 { x: 0.009281037, y: -0.10305001, z: -0.63365465 },
Point3 { x: 0.009281037, y: -0.21428554, z: -0.6472955 },
Point3 { x: 0.21906163, y: 0.13727762, z: -0.5054272 },
Point3 { x: 0.23901804, y: 0.014881478, z: -0.5523302 },
Point3 { x: 0.2531335, y: -0.10305001, z: -0.58550525 },
Point3 { x: 0.25848755, y: -0.21428554, z: -0.59808844 },
Point3 { x: 0.38875765, y: 0.13727762, z: -0.39185125 },
Point3 { x: 0.42485654, y: 0.014881478, z: -0.42795014 },
Point3 { x: 0.45038927, y: -0.10305001, z: -0.45348287 },
Point3 { x: 0.4600743, y: -0.21428554, z: -0.46316788 },
Point3 { x: 0.5023336, y: 0.13727762, z: -0.22215632 },
Point3 { x: 0.5492366, y: 0.014881478, z: -0.24211162 },
Point3 { x: 0.5824117, y: -0.10305001, z: -0.2562271 },
Point3 { x: 0.5949949, y: -0.21428554, z: -0.26158112 },
Point3 { x: 0.6193996, y: -0.3084076, z: -0.01237479 },
Point3 { x: 0.5648363, y: -0.3779753, z: -0.01237479 },
Point3 { x: 0.51027304, y: -0.4252223, z: -0.01237479 },
Point3 { x: 0.48547187, y: -0.45238, z: -0.01237479 },
Point3 { x: 0.5721157, y: -0.3084076, z: 0.22709712 },
Point3 { x: 0.5217807, y: -0.3779753, z: 0.20568104 },
Point3 { x: 0.47144574, y: -0.4252223, z: 0.18426493 },
Point3 { x: 0.44856662, y: -0.45238, z: 0.17452963 },
Point3 { x: 0.44246545, y: -0.3084076, z: 0.4208092 },
Point3 { x: 0.40372527, y: -0.3779753, z: 0.38207018 },
Point3 { x: 0.36498508, y: -0.4252223, z: 0.34332997 },
Point3 { x: 0.3473762, y: -0.45238, z: 0.32571998 },
Point3 { x: 0.24875224, y: -0.3084076, z: 0.5504595 },
Point3 { x: 0.22733612, y: -0.3779753, z: 0.50012565 },
Point3 { x: 0.20592004, y: -0.4252223, z: 0.44979066 },
Point3 { x: 0.19618584, y: -0.45238, z: 0.4269104 },
Point3 { x: 0.009281037, y: -0.3084076, z: 0.59774446 },
Point3 { x: 0.009281037, y: -0.3779753, z: 0.54318124 },
Point3 { x: 0.009281037, y: -0.4252223, z: 0.48861685 },
Point3 { x: 0.009281037, y: -0.45238, z: 0.46381566 },
Point3 { x: -0.23019071, y: -0.3084076, z: 0.5504595 },
Point3 { x: -0.20877463, y: -0.3779753, z: 0.50012565 },
Point3 { x: -0.18735851, y: -0.4252223, z: 0.44979066 },
Point3 { x: -0.17762321, y: -0.45238, z: 0.4269104 },
Point3 { x: -0.42390394, y: -0.3084076, z: 0.4208092 },
Point3 { x: -0.38516372, y: -0.3779753, z: 0.38207018 },
Point3 { x: -0.34642357, y: -0.4252223, z: 0.34332997 },
Point3 { x: -0.3288147, y: -0.45238, z: 0.32571998 },
Point3 { x: -0.55355424, y: -0.3084076, z: 0.22709712 },
Point3 { x: -0.50321925, y: -0.3779753, z: 0.20568104 },
Point3 { x: -0.45288423, y: -0.4252223, z: 0.18426493 },
Point3 { x: -0.4300051, y: -0.45238, z: 0.17452963 },
Point3 { x: -0.60083807, y: -0.3084076, z: -0.01237479 },
Point3 { x: -0.54627484, y: -0.3779753, z: -0.01237479 },
Point3 { x: -0.49171156, y: -0.4252223, z: -0.01237479 },
Point3 { x: -0.46690923, y: -0.45238, z: -0.01237479 },
Point3 { x: -0.55355424, y: -0.3084076, z: -0.25184694 },
Point3 { x: -0.50321925, y: -0.3779753, z: -0.23042972 },
Point3 { x: -0.45288423, y: -0.4252223, z: -0.20901361 },
Point3 { x: -0.4300051, y: -0.45238, z: -0.19927943 },
Point3 { x: -0.42390394, y: -0.3084076, z: -0.44555902 },
Point3 { x: -0.38516372, y: -0.3779753, z: -0.40681887 },
Point3 { x: -0.34642357, y: -0.4252223, z: -0.36807868 },
Point3 { x: -0.3288147, y: -0.45238, z: -0.3504698 },
Point3 { x: -0.23019071, y: -0.3084076, z: -0.5752093 },
Point3 { x: -0.20877463, y: -0.3779753, z: -0.5248743 },
Point3 { x: -0.18735851, y: -0.4252223, z: -0.47453934 },
Point3 { x: -0.17762321, y: -0.45238, z: -0.45166022 },
Point3 { x: 0.009281037, y: -0.3084076, z: -0.6224943 },
Point3 { x: 0.009281037, y: -0.3779753, z: -0.5679299 },
Point3 { x: 0.009281037, y: -0.4252223, z: -0.51336664 },
Point3 { x: 0.009281037, y: -0.45238, z: -0.48856547 },
Point3 { x: 0.24875224, y: -0.3084076, z: -0.5752093 },
Point3 { x: 0.22733612, y: -0.3779753, z: -0.5248743 },
Point3 { x: 0.20592004, y: -0.4252223, z: -0.47453934 },
Point3 { x: 0.19618584, y: -0.45238, z: -0.45166022 },
Point3 { x: 0.44246545, y: -0.3084076, z: -0.44555902 },
Point3 { x: 0.40372527, y: -0.3779753, z: -0.40681887 },
Point3 { x: 0.36498508, y: -0.4252223, z: -0.36807868 },
Point3 { x: 0.3473762, y: -0.45238, z: -0.3504698 },
Point3 { x: 0.5721157, y: -0.3084076, z: -0.25184694 },
Point3 { x: 0.5217807, y: -0.3779753, z: -0.23042972 },
Point3 { x: 0.47144574, y: -0.4252223, z: -0.20901361 },
Point3 { x: 0.44856662, y: -0.45238, z: -0.19927943 },
Point3 { x: 0.4746823, y: -0.469866, z: -0.01237479 },
Point3 { x: 0.41701874, y: -0.4851184, z: -0.01237479 },
Point3 { x: 0.274534, y: -0.49590686, z: -0.01237479 },
Point3 { x: 0.009281037, y: -0.5, z: -0.01237479 },
Point3 { x: 0.43861467, y: -0.469866, z: 0.17029576 },
Point3 { x: 0.38541952, y: -0.4851184, z: 0.14766233 },
Point3 { x: 0.2539767, y: -0.49590686, z: 0.091736995 },
Point3 { x: 0.33971596, y: -0.469866, z: 0.31806085 },
Point3 { x: 0.2987745, y: -0.4851184, z: 0.2771194 },
Point3 { x: 0.1976109, y: -0.49590686, z: 0.17595468 },
Point3 { x: 0.19195087, y: -0.469866, z: 0.41695842 },
Point3 { x: 0.16931856, y: -0.4851184, z: 0.36376327 },
Point3 { x: 0.11339277, y: -0.49590686, z: 0.2323216 },
Point3 { x: 0.009281037, y: -0.469866, z: 0.45302716 },
Point3 { x: 0.009281037, y: -0.4851184, z: 0.39536366 },
Point3 { x: 0.009281037, y: -0.49590686, z: 0.25287777 },
Point3 { x: -0.17338936, y: -0.469866, z: 0.41695842 },
Point3 { x: -0.15075593, y: -0.4851184, z: 0.36376327 },
Point3 { x: -0.09483069, y: -0.49590686, z: 0.2323216 },
Point3 { x: -0.32115445, y: -0.469866, z: 0.31806085 },
Point3 { x: -0.280213, y: -0.4851184, z: 0.2771194 },
Point3 { x: -0.17904827, y: -0.49590686, z: 0.17595468 },
Point3 { x: -0.42005202, y: -0.469866, z: 0.17029576 },
Point3 { x: -0.36685687, y: -0.4851184, z: 0.14766233 },
Point3 { x: -0.23541519, y: -0.49590686, z: 0.091736995 },
Point3 { x: -0.45612076, y: -0.469866, z: -0.01237479 },
Point3 { x: -0.3984572, y: -0.4851184, z: -0.01237479 },
Point3 { x: -0.25597247, y: -0.49590686, z: -0.01237479 },
Point3 { x: -0.42005202, y: -0.469866, z: -0.19504446 },
Point3 { x: -0.36685687, y: -0.4851184, z: -0.17241214 },
Point3 { x: -0.23541519, y: -0.49590686, z: -0.116486356 },
Point3 { x: -0.32115445, y: -0.469866, z: -0.34280953 },
Point3 { x: -0.280213, y: -0.4851184, z: -0.3018692 },
Point3 { x: -0.17904827, y: -0.49590686, z: -0.20070449 },
Point3 { x: -0.17338936, y: -0.469866, z: -0.4417082 },
Point3 { x: -0.15075593, y: -0.4851184, z: -0.38851306 },
Point3 { x: -0.09483069, y: -0.49590686, z: -0.2570703 },
Point3 { x: 0.009281037, y: -0.469866, z: -0.477777 },
Point3 { x: 0.009281037, y: -0.4851184, z: -0.42011234 },
Point3 { x: 0.009281037, y: -0.49590686, z: -0.2776276 },
Point3 { x: 0.19195087, y: -0.469866, z: -0.4417082 },
Point3 { x: 0.16931856, y: -0.4851184, z: -0.38851306 },
Point3 { x: 0.11339277, y: -0.49590686, z: -0.2570703 },
Point3 { x: 0.33971596, y: -0.469866, z: -0.34280953 },
Point3 { x: 0.2987745, y: -0.4851184, z: -0.3018692 },
Point3 { x: 0.1976109, y: -0.49590686, z: -0.20070449 },
Point3 { x: 0.43861467, y: -0.469866, z: -0.19504446 },
Point3 { x: 0.38541952, y: -0.4851184, z: -0.17241214 },
Point3 { x: 0.2539767, y: -0.49590686, z: -0.116486356 },
Point3 { x: -0.49865592, y: 0.14285722, z: -0.01237479 },
Point3 { x: -0.6469693, y: 0.14174153, z: -0.01237479 },
Point3 { x: -0.75659174, y: 0.13392939, z: -0.01237479 },
Point3 { x: -0.8245479, y: 0.11272324, z: -0.01237479 },
Point3 { x: -0.84786147, y: 0.07142891, z: -0.01237479 },
Point3 { x: -0.49369502, y: 0.15401867, z: 0.041196737 },
Point3 { x: -0.6513864, y: 0.15272768, z: 0.041196737 },
Point3 { x: -0.7671323, y: 0.14369485, z: 0.041196737 },
Point3 { x: -0.8384221, y: 0.119176224, z: 0.041196737 },
Point3 { x: -0.8627431, y: 0.07142891, z: 0.041196737 },
Point3 { x: -0.48278257, y: 0.17857194, z: 0.05905388 },
Point3 { x: -0.66110605, y: 0.17689784, z: 0.05905388 },
Point3 { x: -0.7903219, y: 0.16517906, z: 0.05905388 },
Point3 { x: -0.86894363, y: 0.13337098, z: 0.05905388 },
Point3 { x: -0.89548033, y: 0.07142891, z: 0.05905388 },
Point3 { x: -0.47187015, y: 0.20312521, z: 0.041196737 },
Point3 { x: -0.6708246, y: 0.20106801, z: 0.041196737 },
Point3 { x: -0.81351155, y: 0.18666331, z: 0.041196737 },
Point3 { x: -0.8994641, y: 0.14756574, z: 0.041196737 },
Point3 { x: -0.9282188, y: 0.07142891, z: 0.041196737 },
Point3 { x: -0.46690923, y: 0.21428666, z: -0.01237479 },
Point3 { x: -0.6752427, y: 0.21205416, z: -0.01237479 },
Point3 { x: -0.824052, y: 0.19642875, z: -0.01237479 },
Point3 { x: -0.9133383, y: 0.15401867, z: -0.01237479 },
Point3 { x: -0.94310033, y: 0.07142891, z: -0.01237479 },
Point3 { x: -0.47187015, y: 0.20312521, z: -0.06594621 },
Point3 { x: -0.6708246, y: 0.20106801, z: -0.06594621 },
Point3 { x: -0.81351155, y: 0.18666331, z: -0.06594621 },
Point3 { x: -0.8994641, y: 0.14756574, z: -0.06594621 },
Point3 { x: -0.9282188, y: 0.07142891, z: -0.06594621 },
Point3 { x: -0.48278257, y: 0.17857194, z: -0.08380335 },
Point3 { x: -0.66110605, y: 0.17689784, z: -0.08380335 },
Point3 { x: -0.7903219, y: 0.16517906, z: -0.08380335 },
Point3 { x: -0.86894363, y: 0.13337098, z: -0.08380335 },
Point3 { x: -0.89548033, y: 0.07142891, z: -0.08380335 },
Point3 { x: -0.49369502, y: 0.15401867, z: -0.06594621 },
Point3 { x: -0.6513864, y: 0.15272768, z: -0.06594621 },
Point3 { x: -0.7671323, y: 0.14369485, z: -0.06594621 },
Point3 { x: -0.8384221, y: 0.119176224, z: -0.06594621 },
Point3 { x: -0.8627431, y: 0.07142891, z: -0.06594621 },
Point3 { x: -0.8354614, y: 0.0066970144, z: -0.01237479 },
Point3 { x: -0.79627454, y: -0.071428105, z: -0.01237479 },
Point3 { x: -0.72732556, y: -0.14955299, z: -0.01237479 },
Point3 { x: -0.6256392, y: -0.21428554, z: -0.01237479 },
Point3 { x: -0.84898615, y: 0.00044805816, z: 0.041196737 },
Point3 { x: -0.8058847, y: -0.08096113, z: 0.041196737 },
Point3 { x: -0.73069715, y: -0.16132416, z: 0.041196737 },
Point3 { x: -0.62067944, y: -0.22916605, z: 0.041196737 },
Point3 { x: -0.8787393, y: -0.013299279, z: 0.05905388 },
Point3 { x: -0.82702833, y: -0.10193397, z: 0.05905388 },
Point3 { x: -0.7381151, y: -0.18722056, z: 0.05905388 },
Point3 { x: -0.609767, y: -0.26190442, z: 0.05905388 },
Point3 { x: -0.9084936, y: -0.027046744, z: 0.041196737 },
Point3 { x: -0.8481719, y: -0.12290682, z: 0.041196737 },
Point3 { x: -0.74553186, y: -0.21311626, z: 0.041196737 },
Point3 { x: -0.59885347, y: -0.29464284, z: 0.041196737 },
Point3 { x: -0.9220182, y: -0.03329557, z: -0.01237479 },
Point3 { x: -0.8577822, y: -0.13243997, z: -0.01237479 },
Point3 { x: -0.7489036, y: -0.2248875, z: -0.01237479 },
Point3 { x: -0.5938937, y: -0.30952334, z: -0.01237479 },
Point3 { x: -0.9084936, y: -0.027046744, z: -0.06594621 },
Point3 { x: -0.8481719, y: -0.12290682, z: -0.06594621 },
Point3 { x: -0.74553186, y: -0.21311626, z: -0.06594621 },
Point3 { x: -0.59885347, y: -0.29464284, z: -0.06594621 },
Point3 { x: -0.8787393, y: -0.013299279, z: -0.08380335 },
Point3 { x: -0.82702833, y: -0.10193397, z: -0.08380335 },
Point3 { x: -0.7381151, y: -0.18722056, z: -0.08380335 },
Point3 { x: -0.609767, y: -0.26190442, z: -0.08380335 },
Point3 { x: -0.84898615, y: 0.00044805816, z: -0.06594621 },
Point3 { x: -0.8058847, y: -0.08096113, z: -0.06594621 },
Point3 { x: -0.73069715, y: -0.16132416, z: -0.06594621 },
Point3 { x: -0.62067944, y: -0.22916605, z: -0.06594621 },
Point3 { x: 0.5489641, y: -0.047618523, z: -0.01237479 },
Point3 { x: 0.7012456, y: -0.012648302, z: -0.01237479 },
Point3 { x: 0.76721716, y: 0.07142891, z: -0.01237479 },
Point3 { x: 0.80342776, y: 0.1733631, z: -0.01237479 },
Point3 { x: 0.8664241, y: 0.26190555, z: -0.01237479 },
Point3 { x: 0.5489641, y: -0.0885411, z: 0.10548234 },
Point3 { x: 0.71287054, y: -0.04299147, z: 0.09404267 },
Point3 { x: 0.7820987, y: 0.053757947, z: 0.06887528 },
Point3 { x: 0.8215637, y: 0.16696933, z: 0.043707903 },
Point3 { x: 0.8961862, y: 0.26190555, z: 0.032268114 },
Point3 { x: 0.5489641, y: -0.17857084, z: 0.14476866 },
Point3 { x: 0.7384479, y: -0.10974647, z: 0.12951516 },
Point3 { x: 0.8148361, y: 0.014881478, z: 0.095958576 },
Point3 { x: 0.8614632, y: 0.15290187, z: 0.0624021 },
Point3 { x: 0.9616619, y: 0.26190555, z: 0.047149044 },
Point3 { x: 0.5489641, y: -0.26860088, z: 0.10548234 },
Point3 { x: 0.76402414, y: -0.17650136, z: 0.09404267 },
Point3 { x: 0.8475744, y: -0.023994947, z: 0.06887528 },
Point3 { x: 0.9013626, y: 0.13883556, z: 0.043707903 },
Point3 { x: 1.0271375, y: 0.26190555, z: 0.032268114 },
Point3 { x: 0.5489641, y: -0.30952334, z: -0.01237479 },
Point3 { x: 0.7756502, y: -0.20684418, z: -0.01237479 },
Point3 { x: 0.8624561, y: -0.041666128, z: -0.01237479 },
Point3 { x: 0.91949975, y: 0.13244066, z: -0.01237479 },
Point3 { x: 1.0568997, y: 0.26190555, z: -0.01237479 },
Point3 { x: 0.5489641, y: -0.26860088, z: -0.13023216 },
Point3 { x: 0.76402414, y: -0.17650136, z: -0.11879258 },
Point3 { x: 0.8475744, y: -0.023994947, z: -0.093624756 },
Point3 { x: 0.9013626, y: 0.13883556, z: -0.06845737 },
Point3 { x: 1.0271375, y: 0.26190555, z: -0.057017583 },
Point3 { x: 0.5489641, y: -0.17857084, z: -0.16951735 },
Point3 { x: 0.7384479, y: -0.10974647, z: -0.15426496 },
Point3 { x: 0.8148361, y: 0.014881478, z: -0.12070793 },
Point3 { x: 0.8614632, y: 0.15290187, z: -0.087151565 },
Point3 { x: 0.9616619, y: 0.26190555, z: -0.07189851 },
Point3 { x: 0.5489641, y: -0.0885411, z: -0.13023216 },
Point3 { x: 0.71287054, y: -0.04299147, z: -0.11879258 },
Point3 { x: 0.7820987, y: 0.053757947, z: -0.093624756 },
Point3 { x: 0.8215637, y: 0.16696933, z: -0.06845737 },
Point3 { x: 0.8961862, y: 0.26190555, z: -0.057017583 },
Point3 { x: 0.88924074, y: 0.27529848, z: -0.01237479 },
Point3 { x: 0.9061068, y: 0.27976236, z: -0.01237479 },
Point3 { x: 0.9110667, y: 0.27529848, z: -0.01237479 },
Point3 { x: 0.8981697, y: 0.26190555, z: -0.01237479 },
Point3 { x: 0.9211147, y: 0.27595174, z: 0.029477967 },
Point3 { x: 0.93602306, y: 0.28080878, z: 0.023339598 },
Point3 { x: 0.9364687, y: 0.27621308, z: 0.01720112 },
Point3 { x: 0.918011, y: 0.26190555, z: 0.0144109735 },
Point3 { x: 0.99123746, y: 0.2773903, z: 0.04342881 },
Point3 { x: 1.0018406, y: 0.28311056, z: 0.035244323 },
Point3 { x: 0.9923543, y: 0.27822787, z: 0.02705983 },
Point3 { x: 0.9616619, y: 0.26190555, z: 0.023339598 },
Point3 { x: 1.0613602, y: 0.27882978, z: 0.029477967 },
Point3 { x: 1.067658, y: 0.2854123, z: 0.023339598 },
Point3 { x: 1.0482388, y: 0.2802415, z: 0.01720112 },
Point3 { x: 1.0053127, y: 0.26190555, z: 0.0144109735 },
Point3 { x: 1.0932342, y: 0.27948314, z: -0.01237479 },
Point3 { x: 1.0975741, y: 0.28645882, z: -0.01237479 },
Point3 { x: 1.0736408, y: 0.28115728, z: -0.01237479 },
Point3 { x: 1.025154, y: 0.26190555, z: -0.01237479 },
Point3 { x: 1.0613602, y: 0.27882978, z: -0.054227434 },
Point3 { x: 1.067658, y: 0.2854123, z: -0.04808907 },
Point3 { x: 1.0482388, y: 0.2802415, z: -0.041950587 },
Point3 { x: 1.0053127, y: 0.26190555, z: -0.03916044 },
Point3 { x: 0.99123746, y: 0.2773903, z: -0.06817828 },
Point3 { x: 1.0018406, y: 0.28311056, z: -0.059993792 },
Point3 { x: 0.9923543, y: 0.27822787, z: -0.0518093 },
Point3 { x: 0.9616619, y: 0.26190555, z: -0.04808907 },
Point3 { x: 0.9211147, y: 0.27595174, z: -0.054227434 },
Point3 { x: 0.93602306, y: 0.28080878, z: -0.04808907 },
Point3 { x: 0.9364687, y: 0.27621308, z: -0.041950587 },
Point3 { x: 0.918011, y: 0.26190555, z: -0.03916044 },
Point3 { x: 0.009281037, y: 0.5, z: -0.01237479 },
Point3 { x: 0.11741555, y: 0.4843757, z: -0.01237479 },
Point3 { x: 0.112455755, y: 0.44642958, z: -0.01237479 },
Point3 { x: 0.071781024, y: 0.39955455, z: -0.01237479 },
Point3 { x: 0.07277309, y: 0.35714337, z: -0.01237479 },
Point3 { x: 0.10907315, y: 0.4843757, z: 0.030181225 },
Point3 { x: 0.104493074, y: 0.44642958, z: 0.028221767 },
Point3 { x: 0.06694989, y: 0.39955455, z: 0.0121942 },
Point3 { x: 0.067852505, y: 0.35714337, z: 0.012545887 },
Point3 { x: 0.08615726, y: 0.4843757, z: 0.06450149 },
Point3 { x: 0.08262432, y: 0.44642958, z: 0.060968548 },
Point3 { x: 0.053689465, y: 0.39955455, z: 0.032033693 },
Point3 { x: 0.05436045, y: 0.35714337, z: 0.032704677 },
Point3 { x: 0.051836997, y: 0.4843757, z: 0.08741737 },
Point3 { x: 0.049877543, y: 0.44642958, z: 0.0828373 },
Point3 { x: 0.033849973, y: 0.39955455, z: 0.04529412 },
Point3 { x: 0.03420166, y: 0.35714337, z: 0.04619673 },
Point3 { x: 0.009281037, y: 0.4843757, z: 0.095760226 },
Point3 { x: 0.009281037, y: 0.44642958, z: 0.090799876 },
Point3 { x: 0.009281037, y: 0.39955455, z: 0.050125252 },
Point3 { x: 0.009281037, y: 0.35714337, z: 0.051117323 },
Point3 { x: -0.033274923, y: 0.4843757, z: 0.08741737 },
Point3 { x: -0.031315465, y: 0.44642958, z: 0.0828373 },
Point3 { x: -0.015287899, y: 0.39955455, z: 0.04529412 },
Point3 { x: -0.015639585, y: 0.35714337, z: 0.04619673 },
Point3 { x: -0.06759519, y: 0.4843757, z: 0.06450149 },
Point3 { x: -0.064062245, y: 0.44642958, z: 0.060968548 },
Point3 { x: -0.03512739, y: 0.39955455, z: 0.032033693 },
Point3 { x: -0.035798375, y: 0.35714337, z: 0.032704677 },
Point3 { x: -0.09051107, y: 0.4843757, z: 0.030181225 },
Point3 { x: -0.085930996, y: 0.44642958, z: 0.028221767 },
Point3 { x: -0.048387818, y: 0.39955455, z: 0.0121942 },
Point3 { x: -0.049290426, y: 0.35714337, z: 0.012545887 },
Point3 { x: -0.09885392, y: 0.4843757, z: -0.01237479 },
Point3 { x: -0.09389357, y: 0.44642958, z: -0.01237479 },
Point3 { x: -0.05321895, y: 0.39955455, z: -0.01237479 },
Point3 { x: -0.05421102, y: 0.35714337, z: -0.01237479 },
Point3 { x: -0.09051107, y: 0.4843757, z: -0.054930694 },
Point3 { x: -0.085930996, y: 0.44642958, z: -0.05297124 },
Point3 { x: -0.048387818, y: 0.39955455, z: -0.03694367 },
Point3 { x: -0.049290426, y: 0.35714337, z: -0.037295356 },
Point3 { x: -0.06759519, y: 0.4843757, z: -0.08925096 },
Point3 { x: -0.064062245, y: 0.44642958, z: -0.08571802 },
Point3 { x: -0.03512739, y: 0.39955455, z: -0.056783162 },
Point3 { x: -0.035798375, y: 0.35714337, z: -0.057454146 },
Point3 { x: -0.033274923, y: 0.4843757, z: -0.1121665 },
Point3 { x: -0.031315465, y: 0.44642958, z: -0.10758677 },
Point3 { x: -0.015287899, y: 0.39955455, z: -0.070043586 },
Point3 { x: -0.015639585, y: 0.35714337, z: -0.0709462 },
Point3 { x: 0.009281037, y: 0.4843757, z: -0.12050913 },
Point3 { x: 0.009281037, y: 0.44642958, z: -0.11554935 },
Point3 { x: 0.009281037, y: 0.39955455, z: -0.07487472 },
Point3 { x: 0.009281037, y: 0.35714337, z: -0.07586679 },
Point3 { x: 0.051836997, y: 0.4843757, z: -0.1121665 },
Point3 { x: 0.049877543, y: 0.44642958, z: -0.10758677 },
Point3 { x: 0.033849973, y: 0.39955455, z: -0.070043586 },
Point3 { x: 0.03420166, y: 0.35714337, z: -0.0709462 },
Point3 { x: 0.08615726, y: 0.4843757, z: -0.08925096 },
Point3 { x: 0.08262432, y: 0.44642958, z: -0.08571802 },
Point3 { x: 0.053689465, y: 0.39955455, z: -0.056783162 },
Point3 { x: 0.05436045, y: 0.35714337, z: -0.057454146 },
Point3 { x: 0.10907315, y: 0.4843757, z: -0.054930694 },
Point3 { x: 0.104493074, y: 0.44642958, z: -0.05297124 },
Point3 { x: 0.06694989, y: 0.39955455, z: -0.03694367 },
Point3 { x: 0.067852505, y: 0.35714337, z: -0.037295356 },
Point3 { x: 0.15412201, y: 0.32887, z: -0.01237479 },
Point3 { x: 0.27118576, y: 0.30952448, z: -0.01237479 },
Point3 { x: 0.3763442, y: 0.29017892, z: -0.01237479 },
Point3 { x: 0.42197964, y: 0.26190555, z: -0.01237479 },
Point3 { x: 0.14289688, y: 0.32887, z: 0.044475492 },
Point3 { x: 0.2508887, y: 0.30952448, z: 0.09042284 },
Point3 { x: 0.34789664, y: 0.29017892, z: 0.13169742 },
Point3 { x: 0.3899951, y: 0.26190555, z: 0.14960894 },
Point3 { x: 0.112118475, y: 0.32887, z: 0.090462595 },
Point3 { x: 0.1952332, y: 0.30952448, z: 0.1735781 },
Point3 { x: 0.26989582, y: 0.29017892, z: 0.24824074 },
Point3 { x: 0.30229697, y: 0.26190555, z: 0.28064072 },
Point3 { x: 0.066131264, y: 0.32887, z: 0.12124176 },
Point3 { x: 0.11207827, y: 0.30952448, z: 0.22923248 },
Point3 { x: 0.15335365, y: 0.29017892, z: 0.32624155 },
Point3 { x: 0.17126517, y: 0.26190555, z: 0.36834002 },
Point3 { x: 0.009281037, y: 0.32887, z: 0.13246691 },
Point3 { x: 0.009281037, y: 0.30952448, z: 0.24952953 },
Point3 { x: 0.009281037, y: 0.29017892, z: 0.35468912 },
Point3 { x: 0.009281037, y: 0.26190555, z: 0.40032345 },
Point3 { x: -0.04756919, y: 0.32887, z: 0.12124176 },
Point3 { x: -0.093516536, y: 0.30952448, z: 0.22923248 },
Point3 { x: -0.134791, y: 0.29017892, z: 0.32624155 },
Point3 { x: -0.15270254, y: 0.26190555, z: 0.36834002 },
Point3 { x: -0.09355629, y: 0.32887, z: 0.090462595 },
Point3 { x: -0.17667167, y: 0.30952448, z: 0.1735781 },
Point3 { x: -0.2513343, y: 0.29017892, z: 0.24824074 },
Point3 { x: -0.28373432, y: 0.26190555, z: 0.28064072 },
Point3 { x: -0.124335356, y: 0.32887, z: 0.044475492 },
Point3 { x: -0.23232608, y: 0.30952448, z: 0.09042284 },
Point3 { x: -0.32933512, y: 0.29017892, z: 0.13169742 },
Point3 { x: -0.3714336, y: 0.26190555, z: 0.14960894 },
Point3 { x: -0.13556048, y: 0.32887, z: -0.01237479 },
Point3 { x: -0.25262424, y: 0.30952448, z: -0.01237479 },
Point3 { x: -0.35778272, y: 0.29017892, z: -0.01237479 },
Point3 { x: -0.40341702, y: 0.26190555, z: -0.01237479 },
Point3 { x: -0.124335356, y: 0.32887, z: -0.06922496 },
Point3 { x: -0.23232608, y: 0.30952448, z: -0.115171865 },
Point3 { x: -0.32933512, y: 0.29017892, z: -0.15644722 },
Point3 { x: -0.3714336, y: 0.26190555, z: -0.17435874 },
Point3 { x: -0.09355629, y: 0.32887, z: -0.11521207 },
Point3 { x: -0.17667167, y: 0.30952448, z: -0.19832678 },
Point3 { x: -0.2513343, y: 0.29017892, z: -0.27298942 },
Point3 { x: -0.28373432, y: 0.26190555, z: -0.30539054 },
Point3 { x: -0.04756919, y: 0.32887, z: -0.14599045 },
Point3 { x: -0.093516536, y: 0.30952448, z: -0.2539823 },
Point3 { x: -0.134791, y: 0.29017892, z: -0.35099024 },
Point3 { x: -0.15270254, y: 0.26190555, z: -0.3930887 },
Point3 { x: 0.009281037, y: 0.32887, z: -0.1572156 },
Point3 { x: 0.009281037, y: 0.30952448, z: -0.27427936 },
Point3 { x: 0.009281037, y: 0.29017892, z: -0.3794378 },
Point3 { x: 0.009281037, y: 0.26190555, z: -0.42507324 },
Point3 { x: 0.066131264, y: 0.32887, z: -0.14599045 },
Point3 { x: 0.11207827, y: 0.30952448, z: -0.2539823 },
Point3 { x: 0.15335365, y: 0.29017892, z: -0.35099024 },
Point3 { x: 0.17126517, y: 0.26190555, z: -0.3930887 },
Point3 { x: 0.112118475, y: 0.32887, z: -0.11521207 },
Point3 { x: 0.1952332, y: 0.30952448, z: -0.19832678 },
Point3 { x: 0.26989582, y: 0.29017892, z: -0.27298942 },
Point3 { x: 0.30229697, y: 0.26190555, z: -0.30539054 },
Point3 { x: 0.14289688, y: 0.32887, z: -0.06922496 },
Point3 { x: 0.2508887, y: 0.30952448, z: -0.115171865 },
Point3 { x: 0.34789664, y: 0.29017892, z: -0.15644722 },
Point3 { x: 0.3899951, y: 0.26190555, z: -0.17435874 },
];
pub const NORMALS: [Vector3<f32>; 530] = [
Vector3 { x: -0.966742, y: -0.255752, z: 0.0 },
Vector3 { x: -0.966824, y: 0.255443, z: 0.0 },
Vector3 { x: -0.092052, y: 0.995754, z: 0.0 },
Vector3 { x: 0.68205, y: 0.731305, z: 0.0 },
Vector3 { x: 0.870301, y: 0.492521, z: -0.0 },
Vector3 { x: -0.893014, y: -0.256345, z: -0.369882 },
Vector3 { x: -0.893437, y: 0.255997, z: -0.369102 },
Vector3 { x: -0.0838771, y: 0.995843, z: -0.0355068 },
Vector3 { x: 0.629724, y: 0.73186, z: 0.260439 },
Vector3 { x: 0.803725, y: 0.49337, z: 0.332584 },
Vector3 { x: -0.683407, y: -0.256729, z: -0.683407 },
Vector3 { x: -0.683531, y: 0.256067, z: -0.683531 },
Vector3 { x: -0.0649249, y: 0.995776, z: -0.0649248 },
Vector3 { x: 0.481398, y: 0.732469, z: 0.481398 },
Vector3 { x: 0.614804, y: 0.493997, z: 0.614804 },
Vector3 { x: -0.369882, y: -0.256345, z: -0.893014 },
Vector3 { x: -0.369102, y: 0.255997, z: -0.893437 },
Vector3 { x: -0.0355067, y: 0.995843, z: -0.0838772 },
Vector3 { x: 0.260439, y: 0.73186, z: 0.629724 },
Vector3 { x: 0.332584, y: 0.49337, z: 0.803725 },
Vector3 { x: -0.00284834, y: -0.257863, z: -0.966177 },
Vector3 { x: -0.00192311, y: 0.254736, z: -0.967009 },
Vector3 { x: -0.000266114, y: 0.995734, z: -0.0922702 },
Vector3 { x: 0.0, y: 0.731295, z: 0.682061 },
Vector3 { x: 0.0, y: 0.492521, z: 0.870301 },
Vector3 { x: 0.379058, y: -0.3593, z: -0.852771 },
Vector3 { x: 0.37711, y: 0.149086, z: -0.914091 },
Vector3 { x: 0.0275022, y: 0.992081, z: -0.122551 },
Vector3 { x: -0.26101, y: 0.726762, z: 0.635367 },
Vector3 { x: -0.332485, y: 0.492546, z: 0.804271 },
Vector3 { x: 0.663548, y: -0.410791, z: -0.625264 },
Vector3 { x: 0.712664, y: 0.0737216, z: -0.697621 },
Vector3 { x: 0.0997268, y: 0.987509, z: -0.121984 },
Vector3 { x: -0.48732, y: 0.723754, z: 0.488568 },
Vector3 { x: -0.615242, y: 0.492602, z: 0.615484 },
Vector3 { x: 0.880028, y: -0.332908, z: -0.338709 },
Vector3 { x: 0.917276, y: 0.167113, z: -0.361493 },
Vector3 { x: 0.113584, y: 0.992365, z: -0.0480695 },
Vector3 { x: -0.63415, y: 0.727508, z: 0.261889 },
Vector3 { x: -0.804126, y: 0.492634, z: 0.332705 },
Vector3 { x: 0.96669, y: -0.255738, z: 0.0104537 },
Vector3 { x: 0.967442, y: 0.252962, z: 0.00810329 },
Vector3 { x: 0.0934365, y: 0.995624, z: 0.00128063 },
Vector3 { x: -0.682167, y: 0.731196, z: -0.00034353 },
Vector3 { x: -0.870322, y: 0.492483, z: -0.0 },
Vector3 { x: 0.893014, y: -0.256345, z: 0.369882 },
Vector3 { x: 0.893437, y: 0.255997, z: 0.369102 },
Vector3 { x: 0.0838768, y: 0.995843, z: 0.0355066 },
Vector3 { x: -0.629724, y: 0.73186, z: -0.260439 },
Vector3 { x: -0.803725, y: 0.49337, z: -0.332584 },
Vector3 { x: 0.683407, y: -0.256729, z: 0.683407 },
Vector3 { x: 0.683531, y: 0.256067, z: 0.683531 },
Vector3 { x: 0.0649249, y: 0.995776, z: 0.0649249 },
Vector3 { x: -0.481398, y: 0.732469, z: -0.481398 },
Vector3 { x: -0.614804, y: 0.493997, z: -0.614804 },
Vector3 { x: 0.369882, y: -0.256345, z: 0.893014 },
Vector3 { x: 0.369102, y: 0.255997, z: 0.893437 },
Vector3 { x: 0.0355067, y: 0.995843, z: 0.083877 },
Vector3 { x: -0.260439, y: 0.73186, z: -0.629724 },
Vector3 { x: -0.332584, y: 0.49337, z: -0.803725 },
Vector3 { x: 0.0, y: -0.255752, z: 0.966742 },
Vector3 { x: 0.0, y: 0.255443, z: 0.966824 },
Vector3 { x: 0.0, y: 0.995754, z: 0.092052 },
Vector3 { x: 0.0, y: 0.731305, z: -0.68205 },
Vector3 { x: -0.0, y: 0.492521, z: -0.870301 },
Vector3 { x: -0.369882, y: -0.256345, z: 0.893014 },
Vector3 { x: -0.369102, y: 0.255996, z: 0.893437 },
Vector3 { x: -0.0355068, y: 0.995843, z: 0.0838771 },
Vector3 { x: 0.260439, y: 0.73186, z: -0.629724 },
Vector3 { x: 0.332584, y: 0.49337, z: -0.803725 },
Vector3 { x: -0.683407, y: -0.256729, z: 0.683407 },
Vector3 { x: -0.683531, y: 0.256067, z: 0.683531 },
Vector3 { x: -0.0649249, y: 0.995776, z: 0.064925 },
Vector3 { x: 0.481398, y: 0.732469, z: -0.481398 },
Vector3 { x: 0.614804, y: 0.493997, z: -0.614804 },
Vector3 { x: -0.893014, y: -0.256345, z: 0.369882 },
Vector3 { x: -0.893437, y: 0.255997, z: 0.369102 },
Vector3 { x: -0.0838767, y: 0.995843, z: 0.0355066 },
Vector3 { x: 0.629724, y: 0.73186, z: -0.260439 },
Vector3 { x: 0.803725, y: 0.49337, z: -0.332584 },
Vector3 { x: 0.915321, y: 0.402725, z: 0.0 },
Vector3 { x: 0.941808, y: 0.336151, z: -0.0 },
Vector3 { x: 0.97869, y: 0.205342, z: 0.0 },
Vector3 { x: 0.997804, y: -0.0662397, z: 0.0 },
Vector3 { x: 0.845438, y: 0.403546, z: 0.349835 },
Vector3 { x: 0.869996, y: 0.336859, z: 0.360047 },
Vector3 { x: 0.904193, y: 0.205791, z: 0.37428 },
Vector3 { x: 0.921879, y: -0.0663697, z: 0.381752 },
Vector3 { x: 0.646802, y: 0.404096, z: 0.646802 },
Vector3 { x: 0.665655, y: 0.337351, z: 0.665655 },
Vector3 { x: 0.691923, y: 0.20612, z: 0.691923 },
Vector3 { x: 0.705542, y: -0.0664796, z: 0.705543 },
Vector3 { x: 0.349835, y: 0.403546, z: 0.845438 },
Vector3 { x: 0.360047, y: 0.336859, z: 0.869996 },
Vector3 { x: 0.37428, y: 0.205791, z: 0.904193 },
Vector3 { x: 0.381752, y: -0.0663697, z: 0.921879 },
Vector3 { x: -0.0, y: 0.402725, z: 0.915321 },
Vector3 { x: 0.0, y: 0.336151, z: 0.941808 },
Vector3 { x: -0.0, y: 0.205342, z: 0.97869 },
Vector3 { x: -0.0, y: -0.0662397, z: 0.997804 },
Vector3 { x: -0.349835, y: 0.403546, z: 0.845438 },
Vector3 { x: -0.360047, y: 0.336859, z: 0.869996 },
Vector3 { x: -0.37428, y: 0.205791, z: 0.904193 },
Vector3 { x: -0.381752, y: -0.0663697, z: 0.921879 },
Vector3 { x: -0.646802, y: 0.404096, z: 0.646802 },
Vector3 { x: -0.665655, y: 0.337351, z: 0.665655 },
Vector3 { x: -0.691923, y: 0.20612, z: 0.691923 },
Vector3 { x: -0.705543, y: -0.0664796, z: 0.705543 },
Vector3 { x: -0.845438, y: 0.403546, z: 0.349835 },
Vector3 { x: -0.869996, y: 0.336859, z: 0.360047 },
Vector3 { x: -0.904193, y: 0.205791, z: 0.37428 },
Vector3 { x: -0.921879, y: -0.0663697, z: 0.381752 },
Vector3 { x: -0.915321, y: 0.402725, z: -0.0 },
Vector3 { x: -0.941808, y: 0.336151, z: -0.0 },
Vector3 { x: -0.97869, y: 0.205342, z: -0.0 },
Vector3 { x: -0.997804, y: -0.0662397, z: -0.0 },
Vector3 { x: -0.845438, y: 0.403546, z: -0.349835 },
Vector3 { x: -0.869996, y: 0.336859, z: -0.360047 },
Vector3 { x: -0.904193, y: 0.205791, z: -0.37428 },
Vector3 { x: -0.921879, y: -0.0663697, z: -0.381752 },
Vector3 { x: -0.646802, y: 0.404096, z: -0.646802 },
Vector3 { x: -0.665655, y: 0.337351, z: -0.665655 },
Vector3 { x: -0.691923, y: 0.20612, z: -0.691923 },
Vector3 { x: -0.705542, y: -0.0664796, z: -0.705543 },
Vector3 { x: -0.349835, y: 0.403546, z: -0.845438 },
Vector3 { x: -0.360047, y: 0.336859, z: -0.869996 },
Vector3 { x: -0.37428, y: 0.205791, z: -0.904193 },
Vector3 { x: -0.381752, y: -0.0663697, z: -0.921879 },
Vector3 { x: 0.0, y: 0.402725, z: -0.915321 },
Vector3 { x: -0.0, y: 0.336151, z: -0.941808 },
Vector3 { x: 0.0, y: 0.205342, z: -0.97869 },
Vector3 { x: 0.0, y: -0.0662397, z: -0.997804 },
Vector3 { x: 0.349835, y: 0.403546, z: -0.845438 },
Vector3 { x: 0.360047, y: 0.336859, z: -0.869996 },
Vector3 { x: 0.37428, y: 0.205791, z: -0.904193 },
Vector3 { x: 0.381752, y: -0.0663697, z: -0.921879 },
Vector3 { x: 0.646802, y: 0.404096, z: -0.646802 },
Vector3 { x: 0.665655, y: 0.337351, z: -0.665655 },
Vector3 { x: 0.691923, y: 0.20612, z: -0.691923 },
Vector3 { x: 0.705543, y: -0.0664796, z: -0.705542 },
Vector3 { x: 0.845438, y: 0.403546, z: -0.349835 },
Vector3 { x: 0.869996, y: 0.336859, z: -0.360047 },
Vector3 { x: 0.904193, y: 0.205791, z: -0.37428 },
Vector3 { x: 0.921879, y: -0.0663697, z: -0.381752 },
Vector3 { x: 0.900182, y: -0.435513, z: -0.0 },
Vector3 { x: 0.729611, y: -0.683863, z: -0.0 },
Vector3 { x: 0.693951, y: -0.720022, z: -0.0 },
Vector3 { x: 0.79395, y: -0.607984, z: 0.0 },
Vector3 { x: 0.831437, y: -0.43618, z: 0.344179 },
Vector3 { x: 0.673512, y: -0.684665, z: 0.278594 },
Vector3 { x: 0.640399, y: -0.720924, z: 0.264874 },
Vector3 { x: 0.732949, y: -0.608996, z: 0.303166 },
Vector3 { x: 0.636092, y: -0.436777, z: 0.636092 },
Vector3 { x: 0.514965, y: -0.685289, z: 0.514965 },
Vector3 { x: 0.489651, y: -0.721446, z: 0.489651 },
Vector3 { x: 0.560555, y: -0.609554, z: 0.560555 },
Vector3 { x: 0.344179, y: -0.43618, z: 0.831437 },
Vector3 { x: 0.278594, y: -0.684665, z: 0.673512 },
Vector3 { x: 0.264874, y: -0.720924, z: 0.640399 },
Vector3 { x: 0.303166, y: -0.608996, z: 0.732949 },
Vector3 { x: 0.0, y: -0.435513, z: 0.900182 },
Vector3 { x: -0.0, y: -0.683863, z: 0.729611 },
Vector3 { x: 0.0, y: -0.720022, z: 0.693951 },
Vector3 { x: -0.0, y: -0.607984, z: 0.79395 },
Vector3 { x: -0.344179, y: -0.43618, z: 0.831437 },
Vector3 { x: -0.278594, y: -0.684665, z: 0.673512 },
Vector3 { x: -0.264874, y: -0.720924, z: 0.640399 },
Vector3 { x: -0.303166, y: -0.608996, z: 0.732949 },
Vector3 { x: -0.636092, y: -0.436777, z: 0.636092 },
Vector3 { x: -0.514965, y: -0.685289, z: 0.514965 },
Vector3 { x: -0.489651, y: -0.721446, z: 0.489651 },
Vector3 { x: -0.560555, y: -0.609554, z: 0.560555 },
Vector3 { x: -0.831437, y: -0.43618, z: 0.344179 },
Vector3 { x: -0.673512, y: -0.684665, z: 0.278595 },
Vector3 { x: -0.640399, y: -0.720924, z: 0.264874 },
Vector3 { x: -0.732949, y: -0.608996, z: 0.303166 },
Vector3 { x: -0.900182, y: -0.435513, z: -0.0 },
Vector3 { x: -0.729611, y: -0.683863, z: -0.0 },
Vector3 { x: -0.693951, y: -0.720022, z: 0.0 },
Vector3 { x: -0.79395, y: -0.607983, z: -0.0 },
Vector3 { x: -0.831437, y: -0.43618, z: -0.344179 },
Vector3 { x: -0.673512, y: -0.684665, z: -0.278594 },
Vector3 { x: -0.640399, y: -0.720924, z: -0.264874 },
Vector3 { x: -0.732949, y: -0.608996, z: -0.303166 },
Vector3 { x: -0.636092, y: -0.436777, z: -0.636092 },
Vector3 { x: -0.514965, y: -0.685289, z: -0.514965 },
Vector3 { x: -0.489651, y: -0.721446, z: -0.489651 },
Vector3 { x: -0.560555, y: -0.609554, z: -0.560555 },
Vector3 { x: -0.344179, y: -0.43618, z: -0.831437 },
Vector3 { x: -0.278594, y: -0.684665, z: -0.673512 },
Vector3 { x: -0.264874, y: -0.720924, z: -0.640399 },
Vector3 { x: -0.303166, y: -0.608996, z: -0.732949 },
Vector3 { x: -0.0, y: -0.435513, z: -0.900182 },
Vector3 { x: 0.0, y: -0.683863, z: -0.729611 },
Vector3 { x: -0.0, y: -0.720022, z: -0.693951 },
Vector3 { x: 0.0, y: -0.607984, z: -0.79395 },
Vector3 { x: 0.344179, y: -0.43618, z: -0.831437 },
Vector3 { x: 0.278594, y: -0.684665, z: -0.673512 },
Vector3 { x: 0.264874, y: -0.720924, z: -0.640399 },
Vector3 { x: 0.303167, y: -0.608996, z: -0.732949 },
Vector3 { x: 0.636092, y: -0.436777, z: -0.636092 },
Vector3 { x: 0.514965, y: -0.685289, z: -0.514965 },
Vector3 { x: 0.489651, y: -0.721446, z: -0.489651 },
Vector3 { x: 0.560555, y: -0.609554, z: -0.560555 },
Vector3 { x: 0.831437, y: -0.43618, z: -0.344179 },
Vector3 { x: 0.673512, y: -0.684665, z: -0.278595 },
Vector3 { x: 0.640399, y: -0.720924, z: -0.264874 },
Vector3 { x: 0.732949, y: -0.608996, z: -0.303166 },
Vector3 { x: 0.62386, y: -0.781536, z: 0.0 },
Vector3 { x: 0.177291, y: -0.984159, z: -0.0 },
Vector3 { x: 0.0492072, y: -0.998789, z: 0.0 },
Vector3 { x: 0.0, y: -1.0, z: -0.0 },
Vector3 { x: 0.576229, y: -0.781801, z: 0.238217 },
Vector3 { x: 0.163629, y: -0.984208, z: 0.0675273 },
Vector3 { x: 0.0454217, y: -0.998792, z: 0.0187357 },
Vector3 { x: 0.440416, y: -0.782348, z: 0.440416 },
Vector3 { x: 0.124903, y: -0.984276, z: 0.124903 },
Vector3 { x: 0.0346621, y: -0.998798, z: 0.0346621 },
Vector3 { x: 0.238217, y: -0.781801, z: 0.576229 },
Vector3 { x: 0.0675273, y: -0.984208, z: 0.163629 },
Vector3 { x: 0.0187357, y: -0.998792, z: 0.0454217 },
Vector3 { x: -0.0, y: -0.781536, z: 0.62386 },
Vector3 { x: 0.0, y: -0.984159, z: 0.177291 },
Vector3 { x: -0.0, y: -0.998789, z: 0.0492072 },
Vector3 { x: -0.238216, y: -0.781801, z: 0.576229 },
Vector3 { x: -0.0675273, y: -0.984208, z: 0.163629 },
Vector3 { x: -0.0187357, y: -0.998792, z: 0.0454217 },
Vector3 { x: -0.440416, y: -0.782348, z: 0.440416 },
Vector3 { x: -0.124903, y: -0.984276, z: 0.124903 },
Vector3 { x: -0.0346621, y: -0.998798, z: 0.0346621 },
Vector3 { x: -0.576229, y: -0.781801, z: 0.238217 },
Vector3 { x: -0.163629, y: -0.984208, z: 0.0675273 },
Vector3 { x: -0.0454217, y: -0.998792, z: 0.0187357 },
Vector3 { x: -0.62386, y: -0.781536, z: -0.0 },
Vector3 { x: -0.177291, y: -0.984159, z: 0.0 },
Vector3 { x: -0.0492072, y: -0.998789, z: -0.0 },
Vector3 { x: -0.576229, y: -0.781801, z: -0.238217 },
Vector3 { x: -0.163629, y: -0.984208, z: -0.0675273 },
Vector3 { x: -0.0454217, y: -0.998792, z: -0.0187357 },
Vector3 { x: -0.440416, y: -0.782348, z: -0.440416 },
Vector3 { x: -0.124903, y: -0.984276, z: -0.124903 },
Vector3 { x: -0.0346621, y: -0.998798, z: -0.0346621 },
Vector3 { x: -0.238217, y: -0.781801, z: -0.576229 },
Vector3 { x: -0.0675273, y: -0.984208, z: -0.163629 },
Vector3 { x: -0.0187357, y: -0.998792, z: -0.0454217 },
Vector3 { x: 0.0, y: -0.781536, z: -0.62386 },
Vector3 { x: -0.0, y: -0.984159, z: -0.177291 },
Vector3 { x: 0.0, y: -0.998789, z: -0.0492072 },
Vector3 { x: 0.238217, y: -0.781801, z: -0.576229 },
Vector3 { x: 0.0675273, y: -0.984208, z: -0.163629 },
Vector3 { x: 0.0187357, y: -0.998792, z: -0.0454217 },
Vector3 { x: 0.440416, y: -0.782348, z: -0.440416 },
Vector3 { x: 0.124903, y: -0.984276, z: -0.124903 },
Vector3 { x: 0.0346621, y: -0.998798, z: -0.0346621 },
Vector3 { x: 0.576229, y: -0.781801, z: -0.238217 },
Vector3 { x: 0.163629, y: -0.984208, z: -0.0675273 },
Vector3 { x: 0.0454217, y: -0.998792, z: -0.0187357 },
Vector3 { x: 0.00778619, y: -0.99997, z: -0.000215809 },
Vector3 { x: 0.0391385, y: -0.999233, z: -0.000988567 },
Vector3 { x: 0.179511, y: -0.983746, z: -0.00436856 },
Vector3 { x: 0.6123, y: -0.790556, z: -0.0104598 },
Vector3 { x: 0.986152, y: -0.165707, z: -0.00666949 },
Vector3 { x: 0.00703893, y: -0.812495, z: 0.582926 },
Vector3 { x: 0.0361273, y: -0.837257, z: 0.545614 },
Vector3 { x: 0.161845, y: -0.810421, z: 0.563048 },
Vector3 { x: 0.482365, y: -0.595148, z: 0.642746 },
Vector3 { x: 0.73872, y: -0.114593, z: 0.664199 },
Vector3 { x: -0.00190867, y: 0.162121, z: 0.986769 },
Vector3 { x: 0.0027616, y: 0.0171073, z: 0.99985 },
Vector3 { x: 0.0105326, y: 0.0733989, z: 0.997247 },
Vector3 { x: -0.0660406, y: 0.130069, z: 0.989303 },
Vector3 { x: -0.0944272, y: 0.0165946, z: 0.995393 },
Vector3 { x: -0.009203, y: 0.871509, z: 0.490293 },
Vector3 { x: -0.0486064, y: 0.840609, z: 0.539457 },
Vector3 { x: -0.223298, y: 0.802881, z: 0.552739 },
Vector3 { x: -0.596365, y: 0.559971, z: 0.575135 },
Vector3 { x: -0.803337, y: 0.0682361, z: 0.591602 },
Vector3 { x: -0.0105609, y: 0.999944, z: 0.000103364 },
Vector3 { x: -0.0587986, y: 0.99827, z: 0.000709759 },
Vector3 { x: -0.28071, y: 0.959787, z: 0.00326876 },
Vector3 { x: -0.749723, y: 0.661738, z: 0.0042684 },
Vector3 { x: -0.997351, y: 0.0727144, z: 0.00205923 },
Vector3 { x: -0.00879197, y: 0.871493, z: -0.49033 },
Vector3 { x: -0.0464937, y: 0.841178, z: -0.538756 },
Vector3 { x: -0.217909, y: 0.806807, z: -0.549161 },
Vector3 { x: -0.597291, y: 0.560026, z: -0.574121 },
Vector3 { x: -0.804, y: 0.0629127, z: -0.591291 },
Vector3 { x: -0.00180555, y: 0.161691, z: -0.98684 },
Vector3 { x: 0.00203087, y: 0.014555, z: -0.999892 },
Vector3 { x: 0.00921499, y: 0.0600698, z: -0.998152 },
Vector3 { x: -0.0593333, y: 0.113865, z: -0.991723 },
Vector3 { x: -0.0868992, y: 0.0122903, z: -0.996141 },
Vector3 { x: 0.00641779, y: -0.812379, z: -0.583094 },
Vector3 { x: 0.0337833, y: -0.837512, z: -0.545373 },
Vector3 { x: 0.157112, y: -0.811947, z: -0.56219 },
Vector3 { x: 0.484407, y: -0.589365, z: -0.646528 },
Vector3 { x: 0.73887, y: -0.10132, z: -0.666187 },
Vector3 { x: 0.946512, y: 0.32265, z: -0.0033571 },
Vector3 { x: 0.82583, y: 0.56387, z: -0.00745213 },
Vector3 { x: 0.650011, y: 0.759893, z: -0.00693681 },
Vector3 { x: 0.532429, y: 0.846458, z: -0.00524544 },
Vector3 { x: 0.725608, y: 0.259351, z: 0.637362 },
Vector3 { x: 0.645945, y: 0.461988, z: 0.607719 },
Vector3 { x: 0.531614, y: 0.63666, z: 0.558615 },
Vector3 { x: 0.424964, y: 0.681717, z: 0.59554 },
Vector3 { x: -0.0495616, y: -0.019755, z: 0.998576 },
Vector3 { x: -0.0378162, y: -0.0356243, z: 0.99865 },
Vector3 { x: -0.0379139, y: -0.0365122, z: 0.998614 },
Vector3 { x: -0.168854, y: -0.297946, z: 0.93953 },
Vector3 { x: -0.742342, y: -0.299166, z: 0.599523 },
Vector3 { x: -0.619602, y: -0.529406, z: 0.579503 },
Vector3 { x: -0.483708, y: -0.685761, z: 0.543837 },
Vector3 { x: -0.445293, y: -0.794355, z: 0.413176 },
Vector3 { x: -0.926513, y: -0.376257, z: 0.00199587 },
Vector3 { x: -0.75392, y: -0.656952, z: 0.00431723 },
Vector3 { x: -0.566224, y: -0.824244, z: 0.00346105 },
Vector3 { x: -0.481804, y: -0.876277, z: 0.00185047 },
Vector3 { x: -0.744675, y: -0.294424, z: -0.598977 },
Vector3 { x: -0.621949, y: -0.528114, z: -0.578165 },
Vector3 { x: -0.481171, y: -0.68834, z: -0.542828 },
Vector3 { x: -0.438055, y: -0.797035, z: -0.415744 },
Vector3 { x: -0.0443368, y: -0.0170558, z: -0.998871 },
Vector3 { x: -0.0261761, y: -0.0281665, z: -0.99926 },
Vector3 { x: -0.0252939, y: -0.0283323, z: -0.999278 },
Vector3 { x: -0.157482, y: -0.289392, z: -0.944167 },
Vector3 { x: 0.728244, y: 0.25241, z: -0.637142 },
Vector3 { x: 0.647055, y: 0.459725, z: -0.608254 },
Vector3 { x: 0.522994, y: 0.640657, z: -0.562171 },
Vector3 { x: 0.409978, y: 0.682857, z: -0.604669 },
Vector3 { x: -0.230787, y: 0.972982, z: -0.00652338 },
Vector3 { x: -0.548936, y: 0.835863, z: -0.00151111 },
Vector3 { x: -0.875671, y: 0.482807, z: 0.00989278 },
Vector3 { x: -0.877554, y: 0.479097, z: 0.0190923 },
Vector3 { x: -0.69619, y: 0.717439, z: 0.024497 },
Vector3 { x: -0.152878, y: 0.687211, z: 0.71019 },
Vector3 { x: -0.316721, y: 0.63775, z: 0.702113 },
Vector3 { x: -0.601067, y: 0.471452, z: 0.64533 },
Vector3 { x: -0.635889, y: 0.44609, z: 0.6298 },
Vector3 { x: -0.435746, y: 0.601008, z: 0.670011 },
Vector3 { x: 0.111112, y: -0.0850694, z: 0.99016 },
Vector3 { x: 0.22331, y: 0.00654036, z: 0.974726 },
Vector3 { x: 0.190097, y: 0.154964, z: 0.969458 },
Vector3 { x: 0.00527077, y: 0.189482, z: 0.98187 },
Vector3 { x: -0.0117518, y: 0.246688, z: 0.969024 },
Vector3 { x: 0.343906, y: -0.722796, z: 0.599412 },
Vector3 { x: 0.572489, y: -0.567656, z: 0.591627 },
Vector3 { x: 0.787436, y: -0.256459, z: 0.560512 },
Vector3 { x: 0.647097, y: -0.306374, z: 0.698141 },
Vector3 { x: 0.427528, y: -0.499343, z: 0.753576 },
Vector3 { x: 0.410926, y: -0.911668, z: 0.00128446 },
Vector3 { x: 0.67152, y: -0.740986, z: -0.000899122 },
Vector3 { x: 0.922026, y: -0.38706, z: -0.00725269 },
Vector3 { x: 0.84691, y: -0.531556, z: -0.0138542 },
Vector3 { x: 0.535925, y: -0.8442, z: -0.0105045 },
Vector3 { x: 0.341188, y: -0.722822, z: -0.600931 },
Vector3 { x: 0.578664, y: -0.561139, z: -0.591838 },
Vector3 { x: 0.784869, y: -0.25102, z: -0.566542 },
Vector3 { x: 0.642681, y: -0.302257, z: -0.70399 },
Vector3 { x: 0.418589, y: -0.500042, z: -0.758117 },
Vector3 { x: 0.115806, y: -0.0791394, z: -0.990114 },
Vector3 { x: 0.232811, y: 0.0125652, z: -0.972441 },
Vector3 { x: 0.206662, y: 0.153601, z: -0.96628 },
Vector3 { x: 0.0244996, y: 0.161443, z: -0.986578 },
Vector3 { x: 0.00338193, y: 0.211115, z: -0.977455 },
Vector3 { x: -0.134912, y: 0.687491, z: -0.713551 },
Vector3 { x: -0.31954, y: 0.633073, z: -0.705062 },
Vector3 { x: -0.603902, y: 0.461442, z: -0.649903 },
Vector3 { x: -0.631816, y: 0.437169, z: -0.640072 },
Vector3 { x: -0.424306, y: 0.612706, z: -0.66675 },
Vector3 { x: -0.4258, y: 0.904753, z: 0.0108049 },
Vector3 { x: 0.0220472, y: 0.999756, z: 0.00162273 },
Vector3 { x: 0.999599, y: 0.0258705, z: 0.0115556 },
Vector3 { x: 0.709585, y: -0.704553, z: 0.00967183 },
Vector3 { x: -0.259858, y: 0.791936, z: 0.552549 },
Vector3 { x: 0.00953916, y: 0.99972, z: -0.0216718 },
Vector3 { x: 0.410156, y: 0.332912, z: -0.849083 },
Vector3 { x: 0.541523, y: -0.54862, z: -0.637 },
Vector3 { x: 0.0463104, y: 0.455224, z: 0.889172 },
Vector3 { x: -0.0106883, y: 0.988794, z: 0.148901 },
Vector3 { x: -0.0443756, y: 0.682947, z: -0.729118 },
Vector3 { x: 0.122825, y: 0.00923214, z: -0.992385 },
Vector3 { x: 0.481839, y: -0.180439, z: 0.85748 },
Vector3 { x: 0.455272, y: 0.736752, z: 0.499925 },
Vector3 { x: -0.220542, y: 0.907193, z: -0.358276 },
Vector3 { x: -0.23592, y: 0.657249, z: -0.715797 },
Vector3 { x: 0.728092, y: -0.685302, z: -0.0155853 },
Vector3 { x: 0.888739, y: 0.45811, z: -0.0166791 },
Vector3 { x: -0.260097, y: 0.965582, z: 0.000800195 },
Vector3 { x: -0.371612, y: 0.928378, z: -0.00441745 },
Vector3 { x: 0.480166, y: -0.17836, z: -0.858853 },
Vector3 { x: 0.488103, y: 0.716801, z: -0.497947 },
Vector3 { x: -0.222004, y: 0.905399, z: 0.361893 },
Vector3 { x: -0.235405, y: 0.66318, z: 0.710477 },
Vector3 { x: 0.0587203, y: 0.437704, z: -0.8972 },
Vector3 { x: 0.00132612, y: 0.986459, z: -0.164003 },
Vector3 { x: -0.0441901, y: 0.681677, z: 0.730317 },
Vector3 { x: 0.138801, y: -0.0341896, z: 0.98973 },
Vector3 { x: -0.25889, y: 0.797206, z: -0.54538 },
Vector3 { x: 0.0122703, y: 0.999739, z: 0.0192865 },
Vector3 { x: 0.39863, y: 0.35489, z: 0.845663 },
Vector3 { x: 0.537564, y: -0.5814, z: 0.610737 },
Vector3 { x: -0.0, y: 1.0, z: 0.0 },
Vector3 { x: 0.82454, y: 0.565804, z: 0.0 },
Vector3 { x: 0.917701, y: -0.397272, z: 0.0 },
Vector3 { x: 0.935269, y: -0.353939, z: 0.000112842 },
Vector3 { x: 0.780712, y: 0.624891, z: 0.0 },
Vector3 { x: 0.762641, y: 0.565035, z: 0.314825 },
Vector3 { x: 0.847982, y: -0.397998, z: 0.350034 },
Vector3 { x: 0.864141, y: -0.355261, z: 0.356441 },
Vector3 { x: 0.720991, y: 0.625625, z: 0.297933 },
Vector3 { x: 0.583357, y: 0.565165, z: 0.583338 },
Vector3 { x: 0.648485, y: -0.398726, z: 0.648448 },
Vector3 { x: 0.660872, y: -0.355894, z: 0.660748 },
Vector3 { x: 0.551862, y: 0.62529, z: 0.55178 },
Vector3 { x: 0.314824, y: 0.565051, z: 0.762629 },
Vector3 { x: 0.350045, y: -0.397976, z: 0.847988 },
Vector3 { x: 0.356474, y: -0.3552, z: 0.864153 },
Vector3 { x: 0.297983, y: 0.625515, z: 0.721067 },
Vector3 { x: -0.0, y: 0.565804, z: 0.82454 },
Vector3 { x: -0.0, y: -0.397272, z: 0.917701 },
Vector3 { x: -0.000112839, y: -0.353939, z: 0.935269 },
Vector3 { x: -0.0, y: 0.624891, z: 0.780712 },
Vector3 { x: -0.314825, y: 0.565035, z: 0.762641 },
Vector3 { x: -0.350034, y: -0.397998, z: 0.847982 },
Vector3 { x: -0.356441, y: -0.355261, z: 0.864141 },
Vector3 { x: -0.297933, y: 0.625625, z: 0.720991 },
Vector3 { x: -0.583338, y: 0.565165, z: 0.583357 },
Vector3 { x: -0.648448, y: -0.398726, z: 0.648485 },
Vector3 { x: -0.660748, y: -0.355894, z: 0.660872 },
Vector3 { x: -0.55178, y: 0.62529, z: 0.551862 },
Vector3 { x: -0.762629, y: 0.565051, z: 0.314824 },
Vector3 { x: -0.847988, y: -0.397976, z: 0.350045 },
Vector3 { x: -0.864153, y: -0.3552, z: 0.356474 },
Vector3 { x: -0.721067, y: 0.625515, z: 0.297983 },
Vector3 { x: -0.82454, y: 0.565804, z: -0.0 },
Vector3 { x: -0.917701, y: -0.397272, z: -0.0 },
Vector3 { x: -0.935269, y: -0.353939, z: -0.000112839 },
Vector3 { x: -0.780712, y: 0.624891, z: -0.0 },
Vector3 { x: -0.76264, y: 0.565035, z: -0.314825 },
Vector3 { x: -0.847982, y: -0.397998, z: -0.350034 },
Vector3 { x: -0.864141, y: -0.355261, z: -0.356441 },
Vector3 { x: -0.720991, y: 0.625625, z: -0.297933 },
Vector3 { x: -0.583357, y: 0.565165, z: -0.583338 },
Vector3 { x: -0.648485, y: -0.398726, z: -0.648448 },
Vector3 { x: -0.660872, y: -0.355894, z: -0.660748 },
Vector3 { x: -0.551862, y: 0.62529, z: -0.55178 },
Vector3 { x: -0.314824, y: 0.565051, z: -0.762629 },
Vector3 { x: -0.350045, y: -0.397976, z: -0.847988 },
Vector3 { x: -0.356474, y: -0.3552, z: -0.864153 },
Vector3 { x: -0.297983, y: 0.625515, z: -0.721067 },
Vector3 { x: 0.0, y: 0.565804, z: -0.82454 },
Vector3 { x: 0.0, y: -0.397272, z: -0.917701 },
Vector3 { x: 0.000112839, y: -0.353939, z: -0.935269 },
Vector3 { x: 0.0, y: 0.624891, z: -0.780712 },
Vector3 { x: 0.314825, y: 0.565035, z: -0.762641 },
Vector3 { x: 0.350034, y: -0.397998, z: -0.847982 },
Vector3 { x: 0.356441, y: -0.355261, z: -0.864141 },
Vector3 { x: 0.297933, y: 0.625625, z: -0.720991 },
Vector3 { x: 0.583338, y: 0.565165, z: -0.583357 },
Vector3 { x: 0.648448, y: -0.398726, z: -0.648485 },
Vector3 { x: 0.660748, y: -0.355894, z: -0.660872 },
Vector3 { x: 0.55178, y: 0.62529, z: -0.551862 },
Vector3 { x: 0.762629, y: 0.565051, z: -0.314824 },
Vector3 { x: 0.847988, y: -0.397976, z: -0.350045 },
Vector3 { x: 0.864153, y: -0.3552, z: -0.356474 },
Vector3 { x: 0.721067, y: 0.625515, z: -0.297983 },
Vector3 { x: 0.236584, y: 0.971611, z: 0.0 },
Vector3 { x: 0.173084, y: 0.984907, z: -0.0 },
Vector3 { x: 0.379703, y: 0.925108, z: 0.0 },
Vector3 { x: 0.526673, y: 0.850068, z: 0.0 },
Vector3 { x: 0.217978, y: 0.971775, z: 0.0902162 },
Vector3 { x: 0.15959, y: 0.984977, z: 0.0659615 },
Vector3 { x: 0.350498, y: 0.925312, z: 0.14474 },
Vector3 { x: 0.48559, y: 0.850653, z: 0.201474 },
Vector3 { x: 0.166631, y: 0.971838, z: 0.166631 },
Vector3 { x: 0.121908, y: 0.985026, z: 0.121908 },
Vector3 { x: 0.267668, y: 0.925585, z: 0.267668 },
Vector3 { x: 0.371315, y: 0.851029, z: 0.371315 },
Vector3 { x: 0.0902162, y: 0.971775, z: 0.217978 },
Vector3 { x: 0.0659615, y: 0.984977, z: 0.15959 },
Vector3 { x: 0.14474, y: 0.925312, z: 0.350498 },
Vector3 { x: 0.201474, y: 0.850653, z: 0.48559 },
Vector3 { x: -0.0, y: 0.971611, z: 0.236584 },
Vector3 { x: 0.0, y: 0.984907, z: 0.173084 },
Vector3 { x: 0.0, y: 0.925108, z: 0.379703 },
Vector3 { x: 0.0, y: 0.850068, z: 0.526673 },
Vector3 { x: -0.0902162, y: 0.971775, z: 0.217978 },
Vector3 { x: -0.0659615, y: 0.984977, z: 0.15959 },
Vector3 { x: -0.14474, y: 0.925312, z: 0.350498 },
Vector3 { x: -0.201474, y: 0.850653, z: 0.48559 },
Vector3 { x: -0.166631, y: 0.971838, z: 0.166631 },
Vector3 { x: -0.121908, y: 0.985026, z: 0.121908 },
Vector3 { x: -0.267668, y: 0.925585, z: 0.267668 },
Vector3 { x: -0.371315, y: 0.851029, z: 0.371315 },
Vector3 { x: -0.217978, y: 0.971775, z: 0.0902162 },
Vector3 { x: -0.15959, y: 0.984977, z: 0.0659615 },
Vector3 { x: -0.350498, y: 0.925312, z: 0.14474 },
Vector3 { x: -0.48559, y: 0.850653, z: 0.201474 },
Vector3 { x: -0.236583, y: 0.971611, z: -0.0 },
Vector3 { x: -0.173084, y: 0.984907, z: 0.0 },
Vector3 { x: -0.379703, y: 0.925108, z: -0.0 },
Vector3 { x: -0.526673, y: 0.850068, z: 0.0 },
Vector3 { x: -0.217978, y: 0.971775, z: -0.0902162 },
Vector3 { x: -0.15959, y: 0.984977, z: -0.0659615 },
Vector3 { x: -0.350498, y: 0.925312, z: -0.14474 },
Vector3 { x: -0.48559, y: 0.850653, z: -0.201474 },
Vector3 { x: -0.166631, y: 0.971838, z: -0.166631 },
Vector3 { x: -0.121908, y: 0.985026, z: -0.121908 },
Vector3 { x: -0.267668, y: 0.925585, z: -0.267668 },
Vector3 { x: -0.371315, y: 0.851029, z: -0.371315 },
Vector3 { x: -0.0902162, y: 0.971775, z: -0.217978 },
Vector3 { x: -0.0659615, y: 0.984977, z: -0.15959 },
Vector3 { x: -0.14474, y: 0.925312, z: -0.350498 },
Vector3 { x: -0.201474, y: 0.850653, z: -0.485589 },
Vector3 { x: 0.0, y: 0.971611, z: -0.236584 },
Vector3 { x: -0.0, y: 0.984907, z: -0.173084 },
Vector3 { x: -0.0, y: 0.925108, z: -0.379703 },
Vector3 { x: -0.0, y: 0.850068, z: -0.526673 },
Vector3 { x: 0.0902162, y: 0.971775, z: -0.217978 },
Vector3 { x: 0.0659615, y: 0.984977, z: -0.15959 },
Vector3 { x: 0.14474, y: 0.925312, z: -0.350498 },
Vector3 { x: 0.201474, y: 0.850653, z: -0.48559 },
Vector3 { x: 0.166631, y: 0.971838, z: -0.166631 },
Vector3 { x: 0.121908, y: 0.985026, z: -0.121908 },
Vector3 { x: 0.267668, y: 0.925585, z: -0.267668 },
Vector3 { x: 0.371315, y: 0.851029, z: -0.371315 },
Vector3 { x: 0.217978, y: 0.971775, z: -0.0902162 },
Vector3 { x: 0.15959, y: 0.984977, z: -0.0659615 },
Vector3 { x: 0.350498, y: 0.925312, z: -0.14474 },
Vector3 { x: 0.48559, y: 0.850653, z: -0.201474 },
];
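// Usage sketch (assumption, not part of the original generated data): the flat
// INDICES list below encodes one triangle per consecutive triple of vertex
// indices, i.e. 3072 entries = 1024 triangles. This hypothetical helper only
// groups the triples; pairing each index with the corresponding entry of the
// position/normal arrays above is left to the caller.
#[allow(dead_code)]
pub fn triangle_indices() -> Vec<[usize; 3]> {
    INDICES
        .chunks_exact(3) // walk the slice three indices at a time
        .map(|t| [t[0], t[1], t[2]])
        .collect()
}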
pub const INDICES: [usize; 3072] = [
6, 5, 0,
0, 1, 6,
7, 6, 1,
1, 2, 7,
8, 7, 2,
2, 3, 8,
9, 8, 3,
3, 4, 9,
11, 10, 5,
5, 6, 11,
12, 11, 6,
6, 7, 12,
13, 12, 7,
7, 8, 13,
14, 13, 8,
8, 9, 14,
16, 15, 10,
10, 11, 16,
17, 16, 11,
11, 12, 17,
18, 17, 12,
12, 13, 18,
19, 18, 13,
13, 14, 19,
21, 20, 15,
15, 16, 21,
22, 21, 16,
16, 17, 22,
23, 22, 17,
17, 18, 23,
24, 23, 18,
18, 19, 24,
26, 25, 20,
20, 21, 26,
27, 26, 21,
21, 22, 27,
28, 27, 22,
22, 23, 28,
29, 28, 23,
23, 24, 29,
31, 30, 25,
25, 26, 31,
32, 31, 26,
26, 27, 32,
33, 32, 27,
27, 28, 33,
34, 33, 28,
28, 29, 34,
36, 35, 30,
30, 31, 36,
37, 36, 31,
31, 32, 37,
38, 37, 32,
32, 33, 38,
39, 38, 33,
33, 34, 39,
41, 40, 35,
35, 36, 41,
42, 41, 36,
36, 37, 42,
43, 42, 37,
37, 38, 43,
44, 43, 38,
38, 39, 44,
46, 45, 40,
40, 41, 46,
47, 46, 41,
41, 42, 47,
48, 47, 42,
42, 43, 48,
49, 48, 43,
43, 44, 49,
51, 50, 45,
45, 46, 51,
52, 51, 46,
46, 47, 52,
53, 52, 47,
47, 48, 53,
54, 53, 48,
48, 49, 54,
56, 55, 50,
50, 51, 56,
57, 56, 51,
51, 52, 57,
58, 57, 52,
52, 53, 58,
59, 58, 53,
53, 54, 59,
61, 60, 55,
55, 56, 61,
62, 61, 56,
56, 57, 62,
63, 62, 57,
57, 58, 63,
64, 63, 58,
58, 59, 64,
66, 65, 60,
60, 61, 66,
67, 66, 61,
61, 62, 67,
68, 67, 62,
62, 63, 68,
69, 68, 63,
63, 64, 69,
71, 70, 65,
65, 66, 71,
72, 71, 66,
66, 67, 72,
73, 72, 67,
67, 68, 73,
74, 73, 68,
68, 69, 74,
76, 75, 70,
70, 71, 76,
77, 76, 71,
71, 72, 77,
78, 77, 72,
72, 73, 78,
79, 78, 73,
73, 74, 79,
1, 0, 75,
75, 76, 1,
2, 1, 76,
76, 77, 2,
3, 2, 77,
77, 78, 3,
4, 3, 78,
78, 79, 4,
84, 9, 4,
4, 80, 84,
85, 84, 80,
80, 81, 85,
86, 85, 81,
81, 82, 86,
87, 86, 82,
82, 83, 87,
88, 14, 9,
9, 84, 88,
89, 88, 84,
84, 85, 89,
90, 89, 85,
85, 86, 90,
91, 90, 86,
86, 87, 91,
92, 19, 14,
14, 88, 92,
93, 92, 88,
88, 89, 93,
94, 93, 89,
89, 90, 94,
95, 94, 90,
90, 91, 95,
96, 24, 19,
19, 92, 96,
97, 96, 92,
92, 93, 97,
98, 97, 93,
93, 94, 98,
99, 98, 94,
94, 95, 99,
100, 29, 24,
24, 96, 100,
101, 100, 96,
96, 97, 101,
102, 101, 97,
97, 98, 102,
103, 102, 98,
98, 99, 103,
104, 34, 29,
29, 100, 104,
105, 104, 100,
100, 101, 105,
106, 105, 101,
101, 102, 106,
107, 106, 102,
102, 103, 107,
108, 39, 34,
34, 104, 108,
109, 108, 104,
104, 105, 109,
110, 109, 105,
105, 106, 110,
111, 110, 106,
106, 107, 111,
112, 44, 39,
39, 108, 112,
113, 112, 108,
108, 109, 113,
114, 113, 109,
109, 110, 114,
115, 114, 110,
110, 111, 115,
116, 49, 44,
44, 112, 116,
117, 116, 112,
112, 113, 117,
118, 117, 113,
113, 114, 118,
119, 118, 114,
114, 115, 119,
120, 54, 49,
49, 116, 120,
121, 120, 116,
116, 117, 121,
122, 121, 117,
117, 118, 122,
123, 122, 118,
118, 119, 123,
124, 59, 54,
54, 120, 124,
125, 124, 120,
120, 121, 125,
126, 125, 121,
121, 122, 126,
127, 126, 122,
122, 123, 127,
128, 64, 59,
59, 124, 128,
129, 128, 124,
124, 125, 129,
130, 129, 125,
125, 126, 130,
131, 130, 126,
126, 127, 131,
132, 69, 64,
64, 128, 132,
133, 132, 128,
128, 129, 133,
134, 133, 129,
129, 130, 134,
135, 134, 130,
130, 131, 135,
136, 74, 69,
69, 132, 136,
137, 136, 132,
132, 133, 137,
138, 137, 133,
133, 134, 138,
139, 138, 134,
134, 135, 139,
140, 79, 74,
74, 136, 140,
141, 140, 136,
136, 137, 141,
142, 141, 137,
137, 138, 142,
143, 142, 138,
138, 139, 143,
80, 4, 79,
79, 140, 80,
81, 80, 140,
140, 141, 81,
82, 81, 141,
141, 142, 82,
83, 82, 142,
142, 143, 83,
148, 87, 83,
83, 144, 148,
149, 148, 144,
144, 145, 149,
150, 149, 145,
145, 146, 150,
151, 150, 146,
146, 147, 151,
152, 91, 87,
87, 148, 152,
153, 152, 148,
148, 149, 153,
154, 153, 149,
149, 150, 154,
155, 154, 150,
150, 151, 155,
156, 95, 91,
91, 152, 156,
157, 156, 152,
152, 153, 157,
158, 157, 153,
153, 154, 158,
159, 158, 154,
154, 155, 159,
160, 99, 95,
95, 156, 160,
161, 160, 156,
156, 157, 161,
162, 161, 157,
157, 158, 162,
163, 162, 158,
158, 159, 163,
164, 103, 99,
99, 160, 164,
165, 164, 160,
160, 161, 165,
166, 165, 161,
161, 162, 166,
167, 166, 162,
162, 163, 167,
168, 107, 103,
103, 164, 168,
169, 168, 164,
164, 165, 169,
170, 169, 165,
165, 166, 170,
171, 170, 166,
166, 167, 171,
172, 111, 107,
107, 168, 172,
173, 172, 168,
168, 169, 173,
174, 173, 169,
169, 170, 174,
175, 174, 170,
170, 171, 175,
176, 115, 111,
111, 172, 176,
177, 176, 172,
172, 173, 177,
178, 177, 173,
173, 174, 178,
179, 178, 174,
174, 175, 179,
180, 119, 115,
115, 176, 180,
181, 180, 176,
176, 177, 181,
182, 181, 177,
177, 178, 182,
183, 182, 178,
178, 179, 183,
184, 123, 119,
119, 180, 184,
185, 184, 180,
180, 181, 185,
186, 185, 181,
181, 182, 186,
187, 186, 182,
182, 183, 187,
188, 127, 123,
123, 184, 188,
189, 188, 184,
184, 185, 189,
190, 189, 185,
185, 186, 190,
191, 190, 186,
186, 187, 191,
192, 131, 127,
127, 188, 192,
193, 192, 188,
188, 189, 193,
194, 193, 189,
189, 190, 194,
195, 194, 190,
190, 191, 195,
196, 135, 131,
131, 192, 196,
197, 196, 192,
192, 193, 197,
198, 197, 193,
193, 194, 198,
199, 198, 194,
194, 195, 199,
200, 139, 135,
135, 196, 200,
201, 200, 196,
196, 197, 201,
202, 201, 197,
197, 198, 202,
203, 202, 198,
198, 199, 203,
204, 143, 139,
139, 200, 204,
205, 204, 200,
200, 201, 205,
206, 205, 201,
201, 202, 206,
207, 206, 202,
202, 203, 207,
144, 83, 143,
143, 204, 144,
145, 144, 204,
204, 205, 145,
146, 145, 205,
205, 206, 146,
147, 146, 206,
206, 207, 147,
212, 151, 147,
147, 208, 212,
213, 212, 208,
208, 209, 213,
214, 213, 209,
209, 210, 214,
211, 214, 210,
210, 211, 211,
215, 155, 151,
151, 212, 215,
216, 215, 212,
212, 213, 216,
217, 216, 213,
213, 214, 217,
211, 217, 214,
214, 211, 211,
218, 159, 155,
155, 215, 218,
219, 218, 215,
215, 216, 219,
220, 219, 216,
216, 217, 220,
211, 220, 217,
217, 211, 211,
221, 163, 159,
159, 218, 221,
222, 221, 218,
218, 219, 222,
223, 222, 219,
219, 220, 223,
211, 223, 220,
220, 211, 211,
224, 167, 163,
163, 221, 224,
225, 224, 221,
221, 222, 225,
226, 225, 222,
222, 223, 226,
211, 226, 223,
223, 211, 211,
227, 171, 167,
167, 224, 227,
228, 227, 224,
224, 225, 228,
229, 228, 225,
225, 226, 229,
211, 229, 226,
226, 211, 211,
230, 175, 171,
171, 227, 230,
231, 230, 227,
227, 228, 231,
232, 231, 228,
228, 229, 232,
211, 232, 229,
229, 211, 211,
233, 179, 175,
175, 230, 233,
234, 233, 230,
230, 231, 234,
235, 234, 231,
231, 232, 235,
211, 235, 232,
232, 211, 211,
236, 183, 179,
179, 233, 236,
237, 236, 233,
233, 234, 237,
238, 237, 234,
234, 235, 238,
211, 238, 235,
235, 211, 211,
239, 187, 183,
183, 236, 239,
240, 239, 236,
236, 237, 240,
241, 240, 237,
237, 238, 241,
211, 241, 238,
238, 211, 211,
242, 191, 187,
187, 239, 242,
243, 242, 239,
239, 240, 243,
244, 243, 240,
240, 241, 244,
211, 244, 241,
241, 211, 211,
245, 195, 191,
191, 242, 245,
246, 245, 242,
242, 243, 246,
247, 246, 243,
243, 244, 247,
211, 247, 244,
244, 211, 211,
248, 199, 195,
195, 245, 248,
249, 248, 245,
245, 246, 249,
250, 249, 246,
246, 247, 250,
211, 250, 247,
247, 211, 211,
251, 203, 199,
199, 248, 251,
252, 251, 248,
248, 249, 252,
253, 252, 249,
249, 250, 253,
211, 253, 250,
250, 211, 211,
254, 207, 203,
203, 251, 254,
255, 254, 251,
251, 252, 255,
256, 255, 252,
252, 253, 256,
211, 256, 253,
253, 211, 211,
208, 147, 207,
207, 254, 208,
209, 208, 254,
254, 255, 209,
210, 209, 255,
255, 256, 210,
211, 210, 256,
256, 211, 211,
263, 262, 257,
257, 258, 263,
264, 263, 258,
258, 259, 264,
265, 264, 259,
259, 260, 265,
266, 265, 260,
260, 261, 266,
268, 267, 262,
262, 263, 268,
269, 268, 263,
263, 264, 269,
270, 269, 264,
264, 265, 270,
271, 270, 265,
265, 266, 271,
273, 272, 267,
267, 268, 273,
274, 273, 268,
268, 269, 274,
275, 274, 269,
269, 270, 275,
276, 275, 270,
270, 271, 276,
278, 277, 272,
272, 273, 278,
279, 278, 273,
273, 274, 279,
280, 279, 274,
274, 275, 280,
281, 280, 275,
275, 276, 281,
283, 282, 277,
277, 278, 283,
284, 283, 278,
278, 279, 284,
285, 284, 279,
279, 280, 285,
286, 285, 280,
280, 281, 286,
288, 287, 282,
282, 283, 288,
289, 288, 283,
283, 284, 289,
290, 289, 284,
284, 285, 290,
291, 290, 285,
285, 286, 291,
293, 292, 287,
287, 288, 293,
294, 293, 288,
288, 289, 294,
295, 294, 289,
289, 290, 295,
296, 295, 290,
290, 291, 296,
258, 257, 292,
292, 293, 258,
259, 258, 293,
293, 294, 259,
260, 259, 294,
294, 295, 260,
261, 260, 295,
295, 296, 261,
301, 266, 261,
261, 297, 301,
302, 301, 297,
297, 298, 302,
303, 302, 298,
298, 299, 303,
304, 303, 299,
299, 300, 304,
305, 271, 266,
266, 301, 305,
306, 305, 301,
301, 302, 306,
307, 306, 302,
302, 303, 307,
308, 307, 303,
303, 304, 308,
309, 276, 271,
271, 305, 309,
310, 309, 305,
305, 306, 310,
311, 310, 306,
306, 307, 311,
312, 311, 307,
307, 308, 312,
313, 281, 276,
276, 309, 313,
314, 313, 309,
309, 310, 314,
315, 314, 310,
310, 311, 315,
316, 315, 311,
311, 312, 316,
317, 286, 281,
281, 313, 317,
318, 317, 313,
313, 314, 318,
319, 318, 314,
314, 315, 319,
320, 319, 315,
315, 316, 320,
321, 291, 286,
286, 317, 321,
322, 321, 317,
317, 318, 322,
323, 322, 318,
318, 319, 323,
324, 323, 319,
319, 320, 324,
325, 296, 291,
291, 321, 325,
326, 325, 321,
321, 322, 326,
327, 326, 322,
322, 323, 327,
328, 327, 323,
323, 324, 328,
297, 261, 296,
296, 325, 297,
298, 297, 325,
325, 326, 298,
299, 298, 326,
326, 327, 299,
300, 299, 327,
327, 328, 300,
335, 334, 329,
329, 330, 335,
336, 335, 330,
330, 331, 336,
337, 336, 331,
331, 332, 337,
338, 337, 332,
332, 333, 338,
340, 339, 334,
334, 335, 340,
341, 340, 335,
335, 336, 341,
342, 341, 336,
336, 337, 342,
343, 342, 337,
337, 338, 343,
345, 344, 339,
339, 340, 345,
346, 345, 340,
340, 341, 346,
347, 346, 341,
341, 342, 347,
348, 347, 342,
342, 343, 348,
350, 349, 344,
344, 345, 350,
351, 350, 345,
345, 346, 351,
352, 351, 346,
346, 347, 352,
353, 352, 347,
347, 348, 353,
355, 354, 349,
349, 350, 355,
356, 355, 350,
350, 351, 356,
357, 356, 351,
351, 352, 357,
358, 357, 352,
352, 353, 358,
360, 359, 354,
354, 355, 360,
361, 360, 355,
355, 356, 361,
362, 361, 356,
356, 357, 362,
363, 362, 357,
357, 358, 363,
365, 364, 359,
359, 360, 365,
366, 365, 360,
360, 361, 366,
367, 366, 361,
361, 362, 367,
368, 367, 362,
362, 363, 368,
330, 329, 364,
364, 365, 330,
331, 330, 365,
365, 366, 331,
332, 331, 366,
366, 367, 332,
333, 332, 367,
367, 368, 333,
373, 338, 333,
333, 369, 373,
374, 373, 369,
369, 370, 374,
375, 374, 370,
370, 371, 375,
376, 375, 371,
371, 372, 376,
377, 343, 338,
338, 373, 377,
378, 377, 373,
373, 374, 378,
379, 378, 374,
374, 375, 379,
380, 379, 375,
375, 376, 380,
381, 348, 343,
343, 377, 381,
382, 381, 377,
377, 378, 382,
383, 382, 378,
378, 379, 383,
384, 383, 379,
379, 380, 384,
385, 353, 348,
348, 381, 385,
386, 385, 381,
381, 382, 386,
387, 386, 382,
382, 383, 387,
388, 387, 383,
383, 384, 388,
389, 358, 353,
353, 385, 389,
390, 389, 385,
385, 386, 390,
391, 390, 386,
386, 387, 391,
392, 391, 387,
387, 388, 392,
393, 363, 358,
358, 389, 393,
394, 393, 389,
389, 390, 394,
395, 394, 390,
390, 391, 395,
396, 395, 391,
391, 392, 396,
397, 368, 363,
363, 393, 397,
398, 397, 393,
393, 394, 398,
399, 398, 394,
394, 395, 399,
400, 399, 395,
395, 396, 400,
369, 333, 368,
368, 397, 369,
370, 369, 397,
397, 398, 370,
371, 370, 398,
398, 399, 371,
372, 371, 399,
399, 400, 372,
406, 401, 401,
401, 402, 406,
407, 406, 402,
402, 403, 407,
408, 407, 403,
403, 404, 408,
409, 408, 404,
404, 405, 409,
410, 401, 401,
401, 406, 410,
411, 410, 406,
406, 407, 411,
412, 411, 407,
407, 408, 412,
413, 412, 408,
408, 409, 413,
414, 401, 401,
401, 410, 414,
415, 414, 410,
410, 411, 415,
416, 415, 411,
411, 412, 416,
417, 416, 412,
412, 413, 417,
418, 401, 401,
401, 414, 418,
419, 418, 414,
414, 415, 419,
420, 419, 415,
415, 416, 420,
421, 420, 416,
416, 417, 421,
422, 401, 401,
401, 418, 422,
423, 422, 418,
418, 419, 423,
424, 423, 419,
419, 420, 424,
425, 424, 420,
420, 421, 425,
426, 401, 401,
401, 422, 426,
427, 426, 422,
422, 423, 427,
428, 427, 423,
423, 424, 428,
429, 428, 424,
424, 425, 429,
430, 401, 401,
401, 426, 430,
431, 430, 426,
426, 427, 431,
432, 431, 427,
427, 428, 432,
433, 432, 428,
428, 429, 433,
434, 401, 401,
401, 430, 434,
435, 434, 430,
430, 431, 435,
436, 435, 431,
431, 432, 436,
437, 436, 432,
432, 433, 437,
438, 401, 401,
401, 434, 438,
439, 438, 434,
434, 435, 439,
440, 439, 435,
435, 436, 440,
441, 440, 436,
436, 437, 441,
442, 401, 401,
401, 438, 442,
443, 442, 438,
438, 439, 443,
444, 443, 439,
439, 440, 444,
445, 444, 440,
440, 441, 445,
446, 401, 401,
401, 442, 446,
447, 446, 442,
442, 443, 447,
448, 447, 443,
443, 444, 448,
449, 448, 444,
444, 445, 449,
450, 401, 401,
401, 446, 450,
451, 450, 446,
446, 447, 451,
452, 451, 447,
447, 448, 452,
453, 452, 448,
448, 449, 453,
454, 401, 401,
401, 450, 454,
455, 454, 450,
450, 451, 455,
456, 455, 451,
451, 452, 456,
457, 456, 452,
452, 453, 457,
458, 401, 401,
401, 454, 458,
459, 458, 454,
454, 455, 459,
460, 459, 455,
455, 456, 460,
461, 460, 456,
456, 457, 461,
462, 401, 401,
401, 458, 462,
463, 462, 458,
458, 459, 463,
464, 463, 459,
459, 460, 464,
465, 464, 460,
460, 461, 465,
402, 401, 401,
401, 462, 402,
403, 402, 462,
462, 463, 403,
404, 403, 463,
463, 464, 404,
405, 404, 464,
464, 465, 405,
470, 409, 405,
405, 466, 470,
471, 470, 466,
466, 467, 471,
472, 471, 467,
467, 468, 472,
473, 472, 468,
468, 469, 473,
474, 413, 409,
409, 470, 474,
475, 474, 470,
470, 471, 475,
476, 475, 471,
471, 472, 476,
477, 476, 472,
472, 473, 477,
478, 417, 413,
413, 474, 478,
479, 478, 474,
474, 475, 479,
480, 479, 475,
475, 476, 480,
481, 480, 476,
476, 477, 481,
482, 421, 417,
417, 478, 482,
483, 482, 478,
478, 479, 483,
484, 483, 479,
479, 480, 484,
485, 484, 480,
480, 481, 485,
486, 425, 421,
421, 482, 486,
487, 486, 482,
482, 483, 487,
488, 487, 483,
483, 484, 488,
489, 488, 484,
484, 485, 489,
490, 429, 425,
425, 486, 490,
491, 490, 486,
486, 487, 491,
492, 491, 487,
487, 488, 492,
493, 492, 488,
488, 489, 493,
494, 433, 429,
429, 490, 494,
495, 494, 490,
490, 491, 495,
496, 495, 491,
491, 492, 496,
497, 496, 492,
492, 493, 497,
498, 437, 433,
433, 494, 498,
499, 498, 494,
494, 495, 499,
500, 499, 495,
495, 496, 500,
501, 500, 496,
496, 497, 501,
502, 441, 437,
437, 498, 502,
503, 502, 498,
498, 499, 503,
504, 503, 499,
499, 500, 504,
505, 504, 500,
500, 501, 505,
506, 445, 441,
441, 502, 506,
507, 506, 502,
502, 503, 507,
508, 507, 503,
503, 504, 508,
509, 508, 504,
504, 505, 509,
510, 449, 445,
445, 506, 510,
511, 510, 506,
506, 507, 511,
512, 511, 507,
507, 508, 512,
513, 512, 508,
508, 509, 513,
514, 453, 449,
449, 510, 514,
515, 514, 510,
510, 511, 515,
516, 515, 511,
511, 512, 516,
517, 516, 512,
512, 513, 517,
518, 457, 453,
453, 514, 518,
519, 518, 514,
514, 515, 519,
520, 519, 515,
515, 516, 520,
521, 520, 516,
516, 517, 521,
522, 461, 457,
457, 518, 522,
523, 522, 518,
518, 519, 523,
524, 523, 519,
519, 520, 524,
525, 524, 520,
520, 521, 525,
526, 465, 461,
461, 522, 526,
527, 526, 522,
522, 523, 527,
528, 527, 523,
523, 524, 528,
529, 528, 524,
524, 525, 529,
466, 405, 465,
465, 526, 466,
467, 466, 526,
526, 527, 467,
468, 467, 527,
527, 528, 468,
469, 468, 528,
528, 529, 469,
]; | 38.362663 | 104 | 0.538912 |
ff1ee11a5f5e4339271625845e7f1e4aff6a5250 | 492 | use gw_db::schema::Col;
use gw_db::ReadOptions;
use gw_db::{error::Error, iter::DBIter, IteratorMode};
pub trait KVStore {
fn get(&self, col: Col, key: &[u8]) -> Option<Box<[u8]>>;
fn get_iter(&self, col: Col, mode: IteratorMode) -> DBIter;
fn get_iter_opts(&self, col: Col, mode: IteratorMode, opts: &ReadOptions) -> DBIter;
fn insert_raw(&self, col: Col, key: &[u8], value: &[u8]) -> Result<(), Error>;
fn delete(&self, col: Col, key: &[u8]) -> Result<(), Error>;
}
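// Illustrative helper (added as an example sketch; `get_utf8` is a hypothetical
// name and not part of the original trait): fetch a key from a column and decode
// the value as UTF-8, returning `None` when the key is absent or not valid UTF-8.
pub fn get_utf8<S: KVStore>(store: &S, col: Col, key: &[u8]) -> Option<String> {
    store
        .get(col, key)
        .and_then(|bytes| String::from_utf8(bytes.into_vec()).ok())
}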
| 32.8 | 88 | 0.619919 |
1473227c4c2631956d32f1440db44ed3825c7a52 | 57,926 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! This module translates the bytecode of a module to Boogie code.
use std::collections::BTreeMap;
use itertools::Itertools;
#[allow(unused_imports)]
use log::{debug, info, log, warn, Level};
use bytecode::{
function_target::FunctionTarget,
function_target_pipeline::FunctionTargetsHolder,
stackless_bytecode::{BorrowEdge, BorrowNode, Bytecode, Constant, Operation, StrongEdge},
verification_analysis,
};
use move_model::{
code_writer::CodeWriter,
emit, emitln,
model::{GlobalEnv, ModuleEnv, StructEnv, TypeParameter},
pragmas::{ADDITION_OVERFLOW_UNCHECKED_PRAGMA, SEED_PRAGMA, TIMEOUT_PRAGMA},
ty::{PrimitiveType, Type},
};
use crate::{
boogie_helpers::{
boogie_byte_blob, boogie_debug_track_abort, boogie_debug_track_local,
boogie_debug_track_return, boogie_field_name, boogie_function_name, boogie_local_type,
boogie_modifies_memory_name, boogie_resource_memory_name, boogie_struct_name,
boogie_type_value, boogie_type_value_array, boogie_type_values, boogie_well_formed_check,
},
options::BoogieOptions,
spec_translator::SpecTranslator,
};
use bytecode::{
function_target_pipeline::FunctionVariant,
stackless_bytecode::{AbortAction, PropKind},
};
use codespan::LineIndex;
use move_model::{ast::TempIndex, model::Loc};
pub struct BoogieTranslator<'env> {
env: &'env GlobalEnv,
options: &'env BoogieOptions,
writer: &'env CodeWriter,
targets: &'env FunctionTargetsHolder,
}
pub struct ModuleTranslator<'env> {
writer: &'env CodeWriter,
module_env: ModuleEnv<'env>,
spec_translator: SpecTranslator<'env>,
options: &'env BoogieOptions,
targets: &'env FunctionTargetsHolder,
}
impl<'env> BoogieTranslator<'env> {
pub fn new(
env: &'env GlobalEnv,
options: &'env BoogieOptions,
targets: &'env FunctionTargetsHolder,
writer: &'env CodeWriter,
) -> Self {
Self {
env,
options,
targets,
writer,
}
}
pub fn translate(&mut self) {
// generate definitions for all modules.
for module_env in self.env.get_modules() {
ModuleTranslator::new(self, module_env).translate();
}
}
}
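// Illustrative usage (added as an example; the variable names are assumptions and
// the setup of `GlobalEnv`, `BoogieOptions`, `FunctionTargetsHolder`, and
// `CodeWriter` is elided): create a translator over a populated environment and
// emit Boogie for all modules.
//
//     let mut translator = BoogieTranslator::new(&env, &options, &targets, &writer);
//     translator.translate();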
impl<'env> ModuleTranslator<'env> {
/// Creates a new module translator.
fn new(parent: &'env BoogieTranslator, module: ModuleEnv<'env>) -> Self {
Self {
writer: parent.writer,
options: parent.options,
module_env: module,
spec_translator: SpecTranslator::new(parent.writer, &parent.env, parent.options),
targets: &parent.targets,
}
}
/// Translates this module.
fn translate(&mut self) {
log!(
if !self.module_env.is_target() {
Level::Debug
} else {
Level::Info
},
"translating module {}",
self.module_env
.get_name()
.display(self.module_env.symbol_pool())
);
self.writer
.set_location(&self.module_env.env.internal_loc());
self.spec_translator.translate_spec_vars(&self.module_env);
self.spec_translator.translate_spec_funs(&self.module_env);
self.translate_structs();
self.translate_functions();
}
/// Translates all structs in the module.
fn translate_structs(&self) {
emitln!(
self.writer,
"\n\n// ** structs of module {}\n",
self.module_env
.get_name()
.display(self.module_env.symbol_pool())
);
for struct_env in self.module_env.get_structs() {
// Set the location to internal so we don't see locations of pack/unpack
// in execution traces.
self.writer
.set_location(&self.module_env.env.internal_loc());
self.translate_struct_type(&struct_env);
}
}
/// Translates the given struct.
fn translate_struct_type(&self, struct_env: &StructEnv<'_>) {
// Emit TypeName
let struct_name = boogie_struct_name(&struct_env);
emitln!(self.writer, "const unique {}: $TypeName;", struct_name);
// Emit FieldNames
for (i, field_env) in struct_env.get_fields().enumerate() {
let field_name = boogie_field_name(&field_env);
emitln!(
self.writer,
"const {}: $FieldName;\naxiom {} == {};",
field_name,
field_name,
i
);
}
// Emit TypeValue constructor function.
let type_params = struct_env
.get_type_parameters()
.iter()
.enumerate()
.map(|(i, _)| format!("$tv{}: $TypeValue", i))
.join(", ");
let type_args = struct_env
.get_type_parameters()
.iter()
.enumerate()
.map(|(i, _)| Type::TypeParameter(i as u16))
.collect_vec();
let type_args_array = boogie_type_value_array(struct_env.module_env.env, &type_args);
let type_value = format!("$StructType({}, {})", struct_name, type_args_array);
emitln!(
self.writer,
"function {}_type_value({}): $TypeValue {{\n {}\n}}",
struct_name,
type_params,
type_value
);
// Emit memory variable.
if struct_env.is_resource() {
let memory_name = boogie_resource_memory_name(
struct_env.module_env.env,
struct_env.get_qualified_id(),
&None,
);
emitln!(self.writer, "var {}: $Memory;", memory_name);
}
// Emit type assumption function.
self.spec_translator
.translate_assume_well_formed(&struct_env);
}
/// Translates all functions in the module.
fn translate_functions(&self) {
emitln!(
self.writer,
"\n\n// ** functions of module {}\n",
self.module_env
.get_name()
.display(self.module_env.symbol_pool())
);
for func_env in self.module_env.get_functions() {
if func_env.is_native() || func_env.is_intrinsic() {
continue;
}
let verification_info = verification_analysis::get_info(
&self
.targets
.get_target(&func_env, FunctionVariant::Baseline),
);
for variant in self.targets.get_target_variants(&func_env) {
if verification_info.verified && variant.is_verified()
|| verification_info.inlined && !variant.is_verified()
{
self.translate_function(variant, &self.targets.get_target(&func_env, variant));
}
}
}
}
}
impl<'env> ModuleTranslator<'env> {
/// Translates the given function.
fn translate_function(&self, variant: FunctionVariant, fun_target: &FunctionTarget<'_>) {
self.generate_function_sig(variant, &fun_target);
self.generate_function_body(variant, &fun_target);
emitln!(self.writer);
}
    /// Emits the boogie procedure header. Uses the inline attribute and name
    /// suffix indicated by `variant`.
fn generate_function_sig(&self, variant: FunctionVariant, fun_target: &FunctionTarget<'_>) {
let (args, rets) = self.generate_function_args_and_returns(fun_target);
let (suffix, attribs) = match variant {
FunctionVariant::Baseline => ("".to_string(), "{:inline 1} ".to_string()),
FunctionVariant::Verification(flavor) => {
let timeout = fun_target
.func_env
.get_num_pragma(TIMEOUT_PRAGMA, || self.options.vc_timeout);
let mut attribs = vec![format!("{{:timeLimit {}}} ", timeout)];
if fun_target.func_env.is_num_pragma_set(SEED_PRAGMA) {
let seed = fun_target
.func_env
.get_num_pragma(SEED_PRAGMA, || self.options.random_seed);
attribs.push(format!("{{:random_seed {}}} ", seed));
};
if flavor == "inconsistency" {
attribs.push(format!(
"{{:msg_if_verifies \"inconsistency_detected{}\"}} ",
self.loc_str(&fun_target.get_loc())
));
}
if flavor.is_empty() {
("$verify".to_string(), attribs.join(""))
} else {
(format!("$verify_{}", flavor), attribs.join(""))
}
}
};
self.writer.set_location(&fun_target.get_loc());
emitln!(
self.writer,
"procedure {}{}{}({}) returns ({})",
attribs,
boogie_function_name(fun_target.func_env),
suffix,
args,
rets,
)
}
/// Generate boogie representation of function args and return args.
fn generate_function_args_and_returns(
&self,
fun_target: &FunctionTarget<'_>,
) -> (String, String) {
let args = fun_target
.get_type_parameters()
.iter()
.map(|TypeParameter(s, _)| {
format!("{}: $TypeValue", s.display(fun_target.symbol_pool()))
})
.chain((0..fun_target.get_parameter_count()).map(|i| {
let ty = fun_target.get_local_type(i);
                // Boogie does not allow assigning to parameters, so we need to proxy them.
let prefix = if self.parameter_needs_to_be_mutable(fun_target, i) {
"_$"
} else {
"$"
};
format!("{}t{}: {}", prefix, i, boogie_local_type(ty))
}))
.join(", ");
let mut_ref_count = (0..fun_target.get_parameter_count())
.filter(|idx| fun_target.get_local_type(*idx).is_mutable_reference())
.count();
let rets = fun_target
.get_return_types()
.iter()
.enumerate()
.map(|(i, ref s)| format!("$ret{}: {}", i, boogie_local_type(s)))
// Add implicit return parameters for &mut
.chain(
(0..mut_ref_count)
.map(|i| format!("$ret{}: $Mutation", fun_target.get_return_count() + i)),
)
.join(", ");
(args, rets)
}
/// Generates boogie implementation body.
fn generate_function_body(&self, variant: FunctionVariant, fun_target: &FunctionTarget<'_>) {
// Be sure to set back location to the whole function definition as a default.
self.writer.set_location(&fun_target.get_loc().at_start());
emitln!(self.writer, "{");
self.writer.indent();
// Generate local variable declarations. They need to appear first in boogie.
emitln!(self.writer, "// declare local variables");
let num_args = fun_target.get_parameter_count();
for i in num_args..fun_target.get_local_count() {
let local_type = fun_target.get_local_type(i);
emitln!(
self.writer,
"var $t{}: {}; // {}",
i,
boogie_local_type(local_type),
boogie_type_value(self.module_env.env, local_type)
);
}
// Generate declarations for renamed parameters.
let proxied_parameters = self.get_mutable_parameters(fun_target);
for (idx, ty) in &proxied_parameters {
emitln!(self.writer, "var $t{}: {};", idx, boogie_local_type(ty));
}
// Generate declarations for modifies condition.
fun_target.get_modify_targets().keys().for_each(|ty| {
emitln!(
self.writer,
"var {}: {}",
boogie_modifies_memory_name(fun_target.global_env(), *ty),
"[$TypeValueArray, int]bool;"
);
});
// Declare temporaries for debug tracing.
emitln!(self.writer, "var $trace_abort_temp: int;");
emitln!(self.writer, "var $trace_local_temp: $Value;");
// Generate memory snapshot variable declarations.
let code = fun_target.get_bytecode();
let labels = code
.iter()
.filter_map(|bc| {
use Bytecode::*;
match bc {
SaveMem(_, lab, mem) => Some((lab, mem)),
SaveSpecVar(..) => panic!("spec var memory snapshots NYI"),
_ => None,
}
})
.collect::<BTreeMap<_, _>>();
for (lab, mem) in labels {
let name = boogie_resource_memory_name(self.module_env.env, *mem, &Some(*lab));
emitln!(self.writer, "var {}: $Memory;", name);
}
// Initialize renamed parameters.
for (idx, _) in proxied_parameters {
emitln!(self.writer, "$t{} := _$t{};", idx, idx);
}
// Initial assumptions
if variant.is_verified() {
self.translate_verify_entry_assumptions(fun_target);
}
// Generate bytecode
emitln!(self.writer, "\n// bytecode translation starts here");
let mut last_tracked_loc = None;
for bytecode in code.iter() {
self.translate_bytecode(fun_target, &mut last_tracked_loc, bytecode);
}
self.writer.unindent();
emitln!(self.writer, "}");
}
fn get_mutable_parameters(&self, fun_target: &FunctionTarget<'_>) -> Vec<(TempIndex, Type)> {
(0..fun_target.get_parameter_count())
.filter_map(|i| {
if self.parameter_needs_to_be_mutable(fun_target, i) {
Some((i, fun_target.get_local_type(i).clone()))
} else {
None
}
})
.collect_vec()
}
/// Determines whether the parameter of a function needs to be mutable.
    /// Boogie does not allow assigning to procedure parameters. In some cases
/// (e.g. for memory instrumentation, but also as a result of copy propagation),
/// we may need to assign to parameters.
fn parameter_needs_to_be_mutable(
&self,
_fun_target: &FunctionTarget<'_>,
_idx: TempIndex,
) -> bool {
// For now, we just always say true. This could be optimized because the actual (known
// so far) sources for mutability are parameters which are used in WriteBack(LocalRoot(p))
// position.
true
}
fn translate_verify_entry_assumptions(&self, fun_target: &FunctionTarget<'_>) {
emitln!(self.writer, "\n// verification entrypoint assumptions");
// Prelude initialization
emitln!(self.writer, "call $InitVerification();");
// Assume reference parameters to be based on the Param(i) Location, ensuring
// they are disjoint from all other references. This prevents aliasing and is justified as
// follows:
        // - for mutable references, by their exclusive access in Move.
        // - for immutable references, because mutation is not possible, and they are
        //   equivalent to some given but arbitrary value.
for i in 0..fun_target.get_parameter_count() {
let ty = fun_target.get_local_type(i);
if ty.is_reference() {
emitln!(self.writer, "assume l#$Mutation($t{}) == $Param({});", i, i);
emitln!(self.writer, "assume size#$Path(p#$Mutation($t{})) == 0;", i);
}
}
// Initialize modify permissions.
self.initialize_modifies_permissions(fun_target);
}
/// Initializes modifies permissions.
fn initialize_modifies_permissions(&self, fun_target: &FunctionTarget<'_>) {
let env = fun_target.global_env();
for (ty, targets) in fun_target.get_modify_targets() {
emit!(
self.writer,
"{} := {}",
boogie_modifies_memory_name(fun_target.global_env(), *ty),
"$ConstMemoryDomain(false)"
);
for target in targets {
let node_id = target.node_id();
let args = target.call_args();
let rty = &env.get_node_instantiation(node_id)[0];
let (_, _, targs) = rty.require_struct();
let type_args = boogie_type_value_array(env, targs);
emit!(self.writer, "[{}, a#$Address(", type_args);
self.spec_translator.translate(&args[0]);
emit!(self.writer, ") := true]");
}
emitln!(self.writer, ";");
}
}
/// Translates one bytecode instruction.
fn translate_bytecode(
&'env self,
fun_target: &FunctionTarget<'_>,
last_tracked_loc: &mut Option<(Loc, LineIndex)>,
bytecode: &Bytecode,
) {
use Bytecode::*;
// Set location of this code in the CodeWriter.
let attr_id = bytecode.get_attr_id();
let loc = fun_target.get_bytecode_loc(attr_id);
self.writer.set_location(&loc);
// Print location.
emitln!(
self.writer,
"// {} {}",
bytecode.display(fun_target, &BTreeMap::default()),
loc.display(self.module_env.env)
);
// Print debug comments.
if let Some(comment) = fun_target.get_debug_comment(attr_id) {
emitln!(self.writer, "// {}", comment);
}
// Track location for execution traces.
if matches!(bytecode, Call(_, _, Operation::TraceAbort, ..)) {
            // Ensure that aborts always have the precise location instead of the
            // line-approximated one.
*last_tracked_loc = None;
}
self.track_loc(fun_target, last_tracked_loc, &loc);
        // Helper function to get a string for a local
let str_local = |idx: usize| format!("$t{}", idx);
// Translate the bytecode instruction.
match bytecode {
SpecBlock(..) => panic!("deprecated"),
SaveMem(_, label, mem) => {
let snapshot =
boogie_resource_memory_name(self.module_env.env, *mem, &Some(*label));
let current = boogie_resource_memory_name(self.module_env.env, *mem, &None);
emitln!(self.writer, "{} := {};", snapshot, current);
}
SaveSpecVar(_, _label, _var) => {
panic!("spec var snapshot NYI")
}
Prop(id, kind, exp) => match kind {
PropKind::Assert => {
emit!(self.writer, "assert ");
let info = fun_target
.get_vc_info(*id)
.map(|s| s.as_str())
.unwrap_or("unknown assertion failed");
emit!(
self.writer,
"{{:msg \"assert_failed{}: {}\"}} ",
self.loc_str(&loc),
info
);
self.spec_translator.translate_unboxed(exp);
emitln!(self.writer, ";");
}
PropKind::Assume => {
emit!(self.writer, "assume ");
self.spec_translator.translate_unboxed(exp);
emitln!(self.writer, ";");
}
PropKind::Modifies => {
let ty = self.module_env.env.get_node_type(exp.node_id());
let (mid, sid, type_args) = ty.require_struct();
let boogie_mem =
boogie_resource_memory_name(self.module_env.env, mid.qualified(sid), &None);
let boogie_type_args = boogie_type_value_array(self.module_env.env, type_args);
emit!(
self.writer,
"call {} := $Modifies({}, {}, ",
boogie_mem,
boogie_mem,
boogie_type_args
);
self.spec_translator.translate_unboxed(&exp.call_args()[0]);
emitln!(self.writer, ");");
}
},
Label(_, label) => {
self.writer.unindent();
emitln!(self.writer, "L{}:", label.as_usize());
/*
// TODO: revisit whether we can express what is needed here on bytecode level
let annotated_func_target = self.targets.get_annotated_target(fun_target.func_env);
let loop_annotation = annotated_func_target
.get_annotations()
.get::<LoopAnnotation>()
.expect("loop annotation");
if loop_annotation.loop_targets.contains_key(label) {
let targets = &loop_annotation.loop_targets[label];
for idx in 0..fun_target.get_local_count() {
if let Some(ref_proxy_idx) = fun_target.get_ref_proxy_index(idx) {
if targets.contains(ref_proxy_idx) {
let ref_proxy_var_name = str_local(*ref_proxy_idx);
let proxy_idx = fun_target.get_proxy_index(idx).unwrap();
emitln!(
self.writer,
"assume l#$Mutation({}) == $Local({}) && p#$Mutation({}) == $EmptyPath;",
ref_proxy_var_name,
proxy_idx,
ref_proxy_var_name);
}
}
}
}
*/
self.writer.indent();
}
Jump(_, target) => emitln!(self.writer, "goto L{};", target.as_usize()),
Branch(_, then_target, else_target, idx) => emitln!(
self.writer,
"if (b#$Boolean({})) {{ goto L{}; }} else {{ goto L{}; }}",
str_local(*idx),
then_target.as_usize(),
else_target.as_usize(),
),
Assign(_, dest, src, _) => {
if fun_target.get_local_type(*dest).is_reference() {
emitln!(
self.writer,
"call {} := $CopyOrMoveRef({});",
str_local(*dest),
str_local(*src)
);
} else {
emitln!(
self.writer,
"call {} := $CopyOrMoveValue({});",
str_local(*dest),
str_local(*src)
);
}
}
Ret(_, rets) => {
for (i, r) in rets.iter().enumerate() {
emitln!(self.writer, "$ret{} := {};", i, str_local(*r));
}
// Also assign input to output $mut parameters
let mut ret_idx = rets.len();
for i in 0..fun_target.get_parameter_count() {
if fun_target.get_local_type(i).is_mutable_reference() {
emitln!(self.writer, "$ret{} := {};", ret_idx, str_local(i));
ret_idx = usize::saturating_add(ret_idx, 1);
}
}
emitln!(self.writer, "return;");
}
Load(_, idx, c) => {
let value = match c {
Constant::Bool(true) => "$Boolean(true)".to_string(),
Constant::Bool(false) => "$Boolean(false)".to_string(),
Constant::U8(num) => format!("$Integer({})", num),
Constant::U64(num) => format!("$Integer({})", num),
Constant::U128(num) => format!("$Integer({})", num),
Constant::Address(val) => format!("$Address({})", val),
Constant::ByteArray(val) => {
format!("$Vector({})", boogie_byte_blob(self.options, val))
}
};
emitln!(self.writer, "{} := {};", str_local(*idx), value);
}
Call(_, dests, oper, srcs, aa) => {
use Operation::*;
match oper {
FreezeRef => unreachable!(),
UnpackRef | UnpackRefDeep | PackRef | PackRefDeep => {
// No effect
}
WriteBack(dest, edge) => {
use BorrowNode::*;
let src = srcs[0];
match dest {
GlobalRoot(struct_decl) => {
let memory = struct_decl.module_id.qualified(struct_decl.id);
let memory_name = boogie_resource_memory_name(
fun_target.global_env(),
memory,
&None,
);
let func = match edge {
BorrowEdge::Weak => "WritebackToGlobalWeak",
BorrowEdge::Strong(StrongEdge::Direct) => {
"WritebackToGlobalStrong"
}
_ => {
panic!("Strong global writeback cannot have field")
}
};
emitln!(
self.writer,
"call {} := ${}({}, {});",
memory_name,
func,
memory_name,
str_local(src),
);
}
LocalRoot(idx) => {
let func = match edge {
BorrowEdge::Weak => "WritebackToValueWeak",
BorrowEdge::Strong(StrongEdge::Direct) => {
"WritebackToValueStrong"
}
_ => {
panic!("Strong local writeback cannot have field")
}
};
emitln!(
self.writer,
"call {} := ${}({}, {}, {});",
str_local(*idx),
func,
str_local(src),
idx,
str_local(*idx)
);
}
Reference(idx) => {
let (func, thirdarg): (&str, String) = match edge {
BorrowEdge::Weak => {
("WritebackToReferenceWeak", "".to_string())
}
BorrowEdge::Strong(StrongEdge::Direct) => {
("WritebackToReferenceStrongDirect", "".to_string())
}
BorrowEdge::Strong(StrongEdge::Field(field)) => {
("WritebackToReferenceStrongField", format!(", {}", field))
}
BorrowEdge::Strong(StrongEdge::FieldUnknown) => {
("WritebackToVec", "".to_string())
}
};
emitln!(
self.writer,
"call {} := ${}({}, {}{});",
str_local(*idx),
func,
str_local(src),
str_local(*idx),
thirdarg
);
}
}
}
Splice(map) => {
let src = srcs[0];
assert!(!map.is_empty());
emitln!(
self.writer,
"call {} := $Splice{}({}, {});",
str_local(src),
map.len(),
map.iter()
.map(|(pos, idx)| format!("{}, {}", pos, str_local(*idx)))
.join(", "),
str_local(src)
);
}
BorrowLoc => {
let src = srcs[0];
let dest = dests[0];
emitln!(
self.writer,
"call {} := $BorrowLoc({}, {});",
str_local(dest),
src,
str_local(src)
);
}
ReadRef => {
let src = srcs[0];
let dest = dests[0];
emitln!(
self.writer,
"call {} := $ReadRef({});",
str_local(dest),
str_local(src)
);
}
WriteRef => {
let reference = srcs[0];
let value = srcs[1];
emitln!(
self.writer,
"call {} := $WriteRef({}, {});",
str_local(reference),
str_local(reference),
str_local(value),
);
}
Function(mid, fid, type_actuals) => {
let callee_env = self.module_env.env.get_module(*mid).into_function(*fid);
let args_str = std::iter::once(boogie_type_values(
fun_target.func_env.module_env.env,
type_actuals,
))
.chain(srcs.iter().map(|arg_idx| str_local(*arg_idx)))
.filter(|s| !s.is_empty())
.join(", ");
let dest_str = dests
.iter()
                        // Add implicit dest returns for &mut srcs:
// f(x) --> x := f(x) with t(x) = &mut_
.chain(srcs.iter().filter(|idx| {
fun_target.get_local_type(**idx).is_mutable_reference()
}))
.map(|idx| str_local(*idx))
.join(",");
if dest_str.is_empty() {
emitln!(
self.writer,
"call {}({});",
boogie_function_name(&callee_env),
args_str
);
} else {
emitln!(
self.writer,
"call {} := {}({});",
dest_str,
boogie_function_name(&callee_env),
args_str
);
}
                        // Clear the last tracked location after the function call, as the call
                        // inserts location tracking before it returns.
*last_tracked_loc = None;
}
Pack(mid, sid, _type_actuals) => {
let struct_env = fun_target
.func_env
.module_env
.env
.get_module(*mid)
.into_struct(*sid);
let mut ctor_expr = "$MapConstValue($DefaultValue())".to_owned();
for (i, field_env) in struct_env.get_fields().enumerate() {
ctor_expr = format!(
"{}[{} := {}]",
ctor_expr,
boogie_field_name(&field_env),
str_local(srcs[i])
);
}
emitln!(
self.writer,
"{} := $Vector($ValueArray({}, {}));",
str_local(dests[0]),
ctor_expr,
struct_env.get_field_count()
);
}
Unpack(mid, sid, _type_actuals) => {
let struct_env = fun_target
.func_env
.module_env
.env
.get_module(*mid)
.into_struct(*sid);
for (i, field_env) in struct_env.get_fields().enumerate() {
emitln!(
self.writer,
"{} := $SelectField({}, {});",
str_local(dests[i]),
str_local(srcs[0]),
boogie_field_name(&field_env)
);
let type_check = boogie_well_formed_check(
self.module_env.env,
&str_local(dests[i]),
&field_env.get_type(),
);
emit!(self.writer, &type_check);
}
}
BorrowField(mid, sid, _, field_offset) => {
let src = srcs[0];
let dest = dests[0];
let struct_env = fun_target
.func_env
.module_env
.env
.get_module(*mid)
.into_struct(*sid);
let field_env = &struct_env.get_field_by_offset(*field_offset);
emitln!(
self.writer,
"call {} := $BorrowField({}, {});",
str_local(dest),
str_local(src),
boogie_field_name(field_env)
);
}
GetField(mid, sid, _, field_offset) => {
let src = srcs[0];
let dest = dests[0];
let struct_env = fun_target
.func_env
.module_env
.env
.get_module(*mid)
.into_struct(*sid);
let field_env = &struct_env.get_field_by_offset(*field_offset);
let is_ref = fun_target.get_local_type(src).is_reference();
emitln!(
self.writer,
"call {} := {}({}, {});",
str_local(dest),
if is_ref {
"$GetFieldFromReference"
} else {
"$GetFieldFromValue"
},
str_local(src),
boogie_field_name(field_env)
);
}
Exists(mid, sid, type_actuals) => {
let addr = srcs[0];
let dest = dests[0];
let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
let memory = boogie_resource_memory_name(
self.module_env.env,
mid.qualified(*sid),
&None,
);
emitln!(
self.writer,
"{} := $ResourceExists({}, {}, {});",
str_local(dest),
memory,
type_args,
str_local(addr),
);
}
BorrowGlobal(mid, sid, type_actuals) => {
let addr = srcs[0];
let dest = dests[0];
let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
let addr_name = str_local(addr);
let memory = mid.qualified(*sid);
let memory_name =
boogie_resource_memory_name(self.module_env.env, memory, &None);
emitln!(
self.writer,
"call {} := $BorrowGlobal({}, {}, {});",
str_local(dest),
memory_name,
addr_name,
type_args,
);
}
GetGlobal(mid, sid, type_actuals) => {
let addr = srcs[0];
let dest = dests[0];
let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
let memory = mid.qualified(*sid);
let memory_name =
boogie_resource_memory_name(self.module_env.env, memory, &None);
emitln!(
self.writer,
"call {} := $GetGlobal({}, {}, {});",
str_local(dest),
memory_name,
str_local(addr),
type_args,
);
}
MoveTo(mid, sid, type_actuals) => {
let value = srcs[0];
let signer = srcs[1];
let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
let signer_name = str_local(signer);
let memory = mid.qualified(*sid);
let memory_name =
boogie_resource_memory_name(self.module_env.env, memory, &None);
emitln!(
self.writer,
"call {} := $MoveTo({}, {}, {}, {});",
memory_name,
memory_name,
type_args,
str_local(value),
signer_name,
);
}
MoveFrom(mid, sid, type_actuals) => {
let src = srcs[0];
let dest = dests[0];
let type_args = boogie_type_value_array(self.module_env.env, type_actuals);
let src_name = str_local(src);
let memory = mid.qualified(*sid);
let memory_name =
boogie_resource_memory_name(self.module_env.env, memory, &None);
emitln!(
self.writer,
"call {}, {} := $MoveFrom({}, {}, {});",
memory_name,
str_local(dest),
memory_name,
src_name,
type_args,
);
}
Havoc => {
let temp_str = str_local(srcs[0]);
let ty = fun_target.get_local_type(srcs[0]);
if ty.is_mutable_reference() {
emitln!(
self.writer,
"call {} := $HavocMutation({});",
temp_str,
temp_str
);
} else {
emitln!(self.writer, "havoc {};", temp_str);
}
let check = boogie_well_formed_check(self.module_env.env, &temp_str, ty);
if !check.is_empty() {
emitln!(self.writer, &check);
}
}
CastU8 => {
let src = srcs[0];
let dest = dests[0];
emitln!(
self.writer,
"call {} := $CastU8({});",
str_local(dest),
str_local(src)
);
}
CastU64 => {
let src = srcs[0];
let dest = dests[0];
emitln!(
self.writer,
"call {} := $CastU64({});",
str_local(dest),
str_local(src)
);
}
CastU128 => {
let src = srcs[0];
let dest = dests[0];
emitln!(
self.writer,
"call {} := $CastU128({});",
str_local(dest),
str_local(src)
);
}
Not => {
let src = srcs[0];
let dest = dests[0];
emitln!(
self.writer,
"call {} := $Not({});",
str_local(dest),
str_local(src)
);
}
Add => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
let unchecked = if fun_target
.is_pragma_true(ADDITION_OVERFLOW_UNCHECKED_PRAGMA, || false)
{
"_unchecked"
} else {
""
};
let add_type = match fun_target.get_local_type(dest) {
Type::Primitive(PrimitiveType::U8) => "U8".to_string(),
Type::Primitive(PrimitiveType::U64) => format!("U64{}", unchecked),
Type::Primitive(PrimitiveType::U128) => format!("U128{}", unchecked),
_ => unreachable!(),
};
emitln!(
self.writer,
"call {} := $Add{}({}, {});",
str_local(dest),
add_type,
str_local(op1),
str_local(op2)
);
}
Sub => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Sub({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Mul => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
let mul_type = match fun_target.get_local_type(dest) {
Type::Primitive(PrimitiveType::U8) => "U8",
Type::Primitive(PrimitiveType::U64) => "U64",
Type::Primitive(PrimitiveType::U128) => "U128",
_ => unreachable!(),
};
emitln!(
self.writer,
"call {} := $Mul{}({}, {});",
str_local(dest),
mul_type,
str_local(op1),
str_local(op2)
);
}
Div => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Div({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Mod => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Mod({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Shl => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Shl({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Shr => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Shr({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Lt => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Lt({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Gt => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Gt({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Le => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Le({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Ge => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Ge({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Or => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $Or({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
And => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"call {} := $And({}, {});",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Eq => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"{} := $Boolean($IsEqual({}, {}));",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
Neq => {
let dest = dests[0];
let op1 = srcs[0];
let op2 = srcs[1];
emitln!(
self.writer,
"{} := $Boolean(!$IsEqual({}, {}));",
str_local(dest),
str_local(op1),
str_local(op2)
);
}
BitOr | BitAnd | Xor => {
emitln!(
self.writer,
"// bit operation not supported: {:?}\nassert false;",
bytecode
);
}
Destroy => {}
TraceLocal(idx) => {
self.track_local(fun_target, *idx, srcs[0]);
}
TraceReturn(i) => {
self.track_return(fun_target, *i, srcs[0]);
}
TraceAbort => self.track_abort(fun_target, &str_local(srcs[0])),
EmitEvent => {
let msg = srcs[0];
let handle = srcs[1];
let translate_local = |idx: usize| {
if fun_target.get_local_type(idx).is_mutable_reference() {
format!("$Dereference({})", str_local(idx))
} else {
str_local(idx)
}
};
emit!(
self.writer,
"$es := ${}ExtendEventStore($es, ",
if srcs.len() > 2 { "Cond" } else { "" }
);
emit!(
self.writer,
"$SelectField({}, $Event_EventHandle_guid), {}",
translate_local(handle),
str_local(msg)
);
if srcs.len() > 2 {
emit!(self.writer, ", {}", str_local(srcs[2]));
}
emitln!(self.writer, ");");
}
EventStoreDiverge => {
emitln!(self.writer, "call $es := $EventStore__diverge($es);");
}
}
if let Some(AbortAction(target, code)) = aa {
emitln!(self.writer, "if ($abort_flag) {");
self.writer.indent();
let code_str = str_local(*code);
emitln!(self.writer, "{} := $Integer($abort_code);", code_str);
self.track_abort(fun_target, &code_str);
emitln!(self.writer, "goto L{};", target.as_usize());
self.writer.unindent();
emitln!(self.writer, "}");
}
}
Abort(_, src) => {
emitln!(
self.writer,
"$abort_code := i#$Integer({});",
str_local(*src)
);
emitln!(self.writer, "$abort_flag := true;");
for (i, ty) in fun_target.get_return_types().iter().enumerate() {
let ret_str = format!("$ret{}", i);
if ty.is_reference() {
emitln!(self.writer, "{} := $DefaultMutation;", &ret_str);
} else {
emitln!(self.writer, "{} := $DefaultValue();", &ret_str);
}
}
emitln!(self.writer, "return;")
}
Nop(..) => {}
}
emitln!(self.writer);
}
    /// Track location for execution trace, avoiding tracking the same line multiple times.
fn track_loc(
&self,
_fun_target: &FunctionTarget<'_>,
last_tracked_loc: &mut Option<(Loc, LineIndex)>,
loc: &Loc,
) {
if let Some(l) = self.module_env.env.get_location(loc) {
if let Some((last_loc, last_line)) = last_tracked_loc {
if *last_line == l.line {
// This line already tracked.
return;
}
*last_loc = loc.clone();
*last_line = l.line;
} else {
*last_tracked_loc = Some((loc.clone(), l.line));
}
emitln!(
self.writer,
"assume {{:print \"$at{}\"}} true;",
self.loc_str(&loc)
);
}
}
fn track_abort(&self, fun_target: &FunctionTarget<'_>, code_var: &str) {
emitln!(self.writer, &boogie_debug_track_abort(fun_target, code_var));
}
/// Generates an update of the debug information about temporary.
fn track_local(&self, fun_target: &FunctionTarget<'_>, origin_idx: TempIndex, idx: TempIndex) {
// In order to determine whether we need to dereference, use the type of the temporary
// which actually holds the value, not the original temp we are tracing.
let ty = fun_target.get_local_type(idx);
let mut value = format!("$t{}", idx);
if ty.is_reference() {
value = format!("$Dereference({})", value);
}
let track = boogie_debug_track_local(fun_target, origin_idx, &value);
emitln!(self.writer, &track);
}
/// Generates an update of the debug information about the return value at given location.
fn track_return(&self, fun_target: &FunctionTarget<'_>, return_idx: usize, idx: TempIndex) {
let ty = fun_target.get_local_type(idx);
let mut value = format!("$t{}", idx);
if ty.is_reference() {
value = format!("$Dereference({})", value);
}
emitln!(
self.writer,
&boogie_debug_track_return(fun_target, return_idx, &value)
);
}
fn loc_str(&self, loc: &Loc) -> String {
let file_idx = self.module_env.env.file_id_to_idx(loc.file_id());
format!("({},{},{})", file_idx, loc.span().start(), loc.span().end())
}
}
| 42.005801 | 109 | 0.393537 |
ddf6aa7caf3745a3b73a6c25e1c177e1e5907e71 | 1,339 | use std::collections::HashMap;
use std::env;
use std::io::{BufReader, Lines};
use std::io::prelude::*;
use crate::errors::*;
use crate::parse;
pub struct Iter<R> {
lines: Lines<BufReader<R>>,
substitution_data: HashMap<String, Option<String>>,
}
impl<R: Read> Iter<R> {
pub fn new(reader: R) -> Iter<R> {
Iter {
lines: BufReader::new(reader).lines(),
substitution_data: HashMap::new(),
}
}
pub fn load(self) -> Result<()> {
for item in self {
let (key, value) = item?;
if env::var(&key).is_err() {
env::set_var(&key, value);
}
}
Ok(())
}
}
impl<R: Read> Iterator for Iter<R> {
type Item = Result<(String, String)>;
fn next(&mut self) -> Option<Self::Item> {
loop {
let line = match self.lines.next() {
Some(Ok(line)) => line,
Some(Err(err)) => return Some(Err(Error::Io(err))),
None => return None,
};
match parse::parse_line(&line, &mut self.substitution_data) {
Ok(Some(result)) => return Some(Ok(result)),
Ok(None) => {}
Err(err) => return Some(Err(err)),
}
}
}
}
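// Illustrative usage sketch (added as an example; `load_dotenv_file` and the file
// path are assumptions, not part of the original module): parse a dotenv-style
// file and export only the variables that are not already present in the process
// environment.
pub fn load_dotenv_file(path: &str) -> Result<()> {
    let file = std::fs::File::open(path).map_err(Error::Io)?;
    Iter::new(file).load()
}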
| 25.264151 | 74 | 0.466019 |
eba5ded588f13b3f8c66c83b8bbc2b71d36e1af6 | 1,347 | use std::f32::{self, consts::PI};
pub type WindowingFunction = fn(i: usize, window_size: usize) -> f32;
/// Generates a lookup table for a windowing function with a set window size
pub fn generate_lut(window_fn: WindowingFunction, window_size: usize) -> Vec<f32> {
let window_norm = 1.0 / (0..window_size).map(|i| window_fn(i, window_size)).sum::<f32>();
(0..window_size).map(|i| window_fn(i, window_size) * window_norm).collect()
}
#[inline]
pub fn rectangular(_i: usize, _window_size: usize) -> f32 {
1.0
}
#[inline]
pub fn triangular(i: usize, window_size: usize) -> f32 {
let n = i as f32;
let size = window_size as f32;
1.0 - ((n - (size - 1.0) / 2.0) / (size / 2.0)).abs()
}
#[inline]
pub fn blackman_harris(i: usize, window_size: usize) -> f32 {
generic_blackman_window(i, window_size, (0.35875, 0.48829, 0.14128, 0.01168))
}
#[inline]
pub fn blackman_nuttal(i: usize, window_size: usize) -> f32 {
generic_blackman_window(i, window_size, (0.3635819, 0.4891775, 0.1365995, 0.0106411))
}
#[inline]
pub fn generic_blackman_window(i: usize, window_size: usize, a: (f32, f32, f32, f32)) -> f32 {
let n = i as f32;
let max = window_size as f32 - 1.0;
let (a0, a1, a2, a3) = a;
a0 - a1 * (2. * PI * n / max).cos() + a2 * (4. * PI * n / max).cos() -
a3 * (6. * PI * n / max).cos()
}
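#[cfg(test)]
mod tests {
    use super::*;
    // Illustrative check (added as an example; the window size of 1024 is an
    // arbitrary choice): `generate_lut` normalizes the window, so its samples
    // should sum to approximately 1.0.
    #[test]
    fn generated_lut_is_normalized() {
        let lut = generate_lut(blackman_harris, 1024);
        assert_eq!(lut.len(), 1024);
        let sum: f32 = lut.iter().sum();
        assert!((sum - 1.0).abs() < 1e-3);
    }
}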
| 31.325581 | 94 | 0.62732 |
3a691e2987a868571e2fefedb103043e923fb9a9 | 3,586 | #![allow(dead_code)]
use rschema::{
Schema,
Schematic,
};
#[derive(Debug, Schematic)]
#[rschema(unique_items)]
struct TupleStruct(u32, u32);
#[derive(Debug, Schematic)]
enum Enum {
UnitVariant,
EmptyTupleVariant(),
NewTypeVariant(i32),
TupleVariant(String, bool),
StructVariant {
#[rschema(title = "i32")]
value: i32,
}
}
#[derive(Debug, Schematic)]
struct ArrayProperties<'a> {
#[rschema(title = "[i32; 3]")]
prop_sized_array: [i32; 3],
#[rschema(title = "&[i32]")]
prop_array: &'a [i32],
#[rschema(title = "Vec<i32>")]
prop_vec: Vec<i32>,
#[rschema(title = "(i32, String, bool)")]
prop_tuple: (i32, String, bool),
#[rschema(
title = "TupleStruct",
unique_items,
)]
prop_unique_tuple_struct: TupleStruct,
#[rschema(title = "Vec<Enum>")]
prop_vec_enum: Vec<Enum>,
#[rschema(
title = "All keywords",
min_items = 1,
max_items = 5,
unique_items,
)]
prop_all_keywords: Vec<i32>,
}
#[test]
fn it_tests_array_properties() -> rschema::Result<()> {
let schema_str = Schema::new::<ArrayProperties>("Array Properties")
.to_string_pretty()?;
let schema_str2 = r#"{
"title": "Array Properties",
"type": "object",
"properties": {
"prop_sized_array": {
"title": "[i32; 3]",
"type": "array",
"items": {
"type": "number"
},
"minItems": 3,
"maxItems": 3
},
"prop_array": {
"title": "&[i32]",
"type": "array",
"items": {
"type": "number"
}
},
"prop_vec": {
"title": "Vec<i32>",
"type": "array",
"items": {
"type": "number"
}
},
"prop_tuple": {
"title": "(i32, String, bool)",
"type": "array",
"items": [
{
"type": "number"
},
{
"type": "string"
},
{
"type": "boolean"
}
],
"minItems": 3,
"maxItems": 3
},
"prop_unique_tuple_struct": {
"title": "TupleStruct",
"type": "array",
"items": [
{
"type": "number"
},
{
"type": "number"
}
],
"minItems": 2,
"maxItems": 2,
"uniqueItems": true
},
"prop_vec_enum": {
"title": "Vec<Enum>",
"type": "array",
"items": {
"anyOf": [
{
"type": "array",
"items": [],
"minItems": 0,
"maxItems": 0
},
{
"type": "number"
},
{
"type": "array",
"items": [
{
"type": "string"
},
{
"type": "boolean"
}
],
"minItems": 2,
"maxItems": 2
},
{
"type": "object",
"properties": {
"value": {
"title": "i32",
"type": "number"
}
},
"additionalProperties": false
},
{
"type": "string",
"enum": [
"UnitVariant"
]
}
]
}
},
"prop_all_keywords": {
"title": "All keywords",
"type": "array",
"items": {
"type": "number"
},
"minItems": 1,
"maxItems": 5,
"uniqueItems": true
}
},
"additionalProperties": false
}"#;
assert_eq!(schema_str, schema_str2);
Ok(())
} | 19.27957 | 71 | 0.418851 |
28f30152170f608ff59b319ddacde131fb490f42 | 2,427 | //! This crate contains the functionality to work with RTMP messages
//!
//! # Examples
//!
//! Deserialize a message payload:
//!
//! ```
//! extern crate rtmp_message; // This crate
//! extern crate rtmp_time; // Required for working with timestamps in rtmp messages
//!
//! use rtmp_time::RtmpTimestamp;
//!
//! use rtmp_message::{MessagePayload, RtmpMessageDetails, RtmpMessage};
//!
//! # fn main() {
//!
//! let payload = MessagePayload {
//! timestamp: RtmpTimestamp::new(5),
//! stream_id: 5,
//! type_id: 1,
//! data: vec![0, 0, 0, 128]
//! };
//!
//! let details = RtmpMessageDetails::from_payload(payload).unwrap();
//!
//! assert_eq!(details.rtmp_timestamp, RtmpTimestamp::new(5));
//! assert_eq!(details.stream_id, 5);
//! assert_eq!(details.message, RtmpMessage::SetChunkSize { size: 128 });
//!
//! # }
//! ```
//!
//! Serialize a RTMP message into a message payload:
//!
//! ```
//! extern crate rtmp_message; // This crate
//! extern crate rtmp_time; // Required for working with timestamps in rtmp messages
//!
//! use rtmp_time::RtmpTimestamp;
//!
//! use rtmp_message::{MessagePayload, RtmpMessage, RtmpMessageDetails};
//!
//! # fn main() {
//! let details = RtmpMessageDetails {
//! rtmp_timestamp: RtmpTimestamp::new(5),
//! stream_id: 5,
//! message: RtmpMessage::SetChunkSize { size: 128 }
//! };
//!
//! let payload = details.to_payload().unwrap();
//!
//! assert_eq!(payload.type_id, 1);
//! assert_eq!(payload.stream_id, 5);
//! assert_eq!(payload.timestamp, RtmpTimestamp::new(5));
//! assert_eq!(payload.data, vec![0, 0, 0, 128]);
//!
//! # }
//! ```
#[macro_use] extern crate quick_error;
extern crate byteorder;
extern crate rtmp_time;
extern crate amf0;
mod message_payload;
mod known_message_type;
mod errors;
mod rtmp_message;
mod rtmp_message_details;
pub use message_payload::MessagePayload;
pub use known_message_type::KnownMessageType;
pub use errors::MessageSerializationError;
pub use errors::MessageDeserializationError;
pub use rtmp_message::{RtmpMessage, UserControlEventType, PeerBandwidthLimitType};
pub use rtmp_message_details::RtmpMessageDetails;
mod messages {
pub mod abort;
pub mod acknowledgement;
pub mod amf0_command;
pub mod amf0_data;
pub mod audio_data;
pub mod set_chunk_size;
pub mod set_peer_bandwidth;
pub mod user_control;
pub mod video_data;
pub mod window_acknowledgement_size;
} | 26.966667 | 84 | 0.68562 |
48963b7f9f3f8791eb36f44c398a6e0845c312a4 | 7,322 | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use std::vec::IntoIter;
use engine_traits::CfName;
use kvproto::metapb::Region;
use kvproto::pdpb::CheckPolicy;
use kvproto::raft_cmdpb::{
AdminRequest, AdminResponse, RaftCmdRequest, RaftCmdResponse, Request, Response,
};
use raft::StateRole;
use txn_types::TxnExtra;
pub mod config;
pub mod dispatcher;
mod error;
mod metrics;
pub mod region_info_accessor;
mod split_check;
pub mod split_observer;
pub use self::config::{Config, ConsistencyCheckMethod};
pub use self::dispatcher::{
BoxAdminObserver, BoxApplySnapshotObserver, BoxCmdObserver, BoxQueryObserver,
BoxRegionChangeObserver, BoxRoleObserver, BoxSplitCheckObserver, CoprocessorHost, Registry,
};
pub use self::error::{Error, Result};
pub use self::region_info_accessor::{
Callback as RegionInfoCallback, RegionCollector, RegionInfo, RegionInfoAccessor,
RegionInfoProvider, SeekRegionCallback,
};
pub use self::split_check::{
get_region_approximate_keys, get_region_approximate_keys_cf, get_region_approximate_middle,
get_region_approximate_size, get_region_approximate_size_cf, HalfCheckObserver,
Host as SplitCheckerHost, KeysCheckObserver, SizeCheckObserver, TableCheckObserver,
};
use crate::store::fsm::ObserveID;
pub use crate::store::KeyEntry;
/// Coprocessor is used to provide a convenient way to inject code into
/// KV processing.
pub trait Coprocessor: Send {
fn start(&self) {}
fn stop(&self) {}
}
/// Context of observer.
pub struct ObserverContext<'a> {
region: &'a Region,
/// Whether to bypass following observer hook.
pub bypass: bool,
}
impl<'a> ObserverContext<'a> {
pub fn new(region: &Region) -> ObserverContext<'_> {
ObserverContext {
region,
bypass: false,
}
}
pub fn region(&self) -> &Region {
self.region
}
}
pub trait AdminObserver: Coprocessor {
/// Hook to call before proposing admin request.
fn pre_propose_admin(&self, _: &mut ObserverContext<'_>, _: &mut AdminRequest) -> Result<()> {
Ok(())
}
/// Hook to call before applying admin request.
fn pre_apply_admin(&self, _: &mut ObserverContext<'_>, _: &AdminRequest) {}
/// Hook to call after applying admin request.
fn post_apply_admin(&self, _: &mut ObserverContext<'_>, _: &mut AdminResponse) {}
}
pub trait QueryObserver: Coprocessor {
/// Hook to call before proposing write request.
///
    /// We don't propose read requests, hence there is no hook for them yet.
fn pre_propose_query(&self, _: &mut ObserverContext<'_>, _: &mut Vec<Request>) -> Result<()> {
Ok(())
}
/// Hook to call before applying write request.
fn pre_apply_query(&self, _: &mut ObserverContext<'_>, _: &[Request]) {}
/// Hook to call after applying write request.
fn post_apply_query(&self, _: &mut ObserverContext<'_>, _: &mut Vec<Response>) {}
}
pub trait ApplySnapshotObserver: Coprocessor {
/// Hook to call before applying key from plain file.
/// This may be invoked multiple times for each plain file, and each time a batch of key-value
/// pairs will be passed to the function.
fn pre_apply_plain_kvs(
&self,
_: &mut ObserverContext<'_>,
_: CfName,
_: &[(Vec<u8>, Vec<u8>)],
) {
}
/// Hook to call before applying sst file. Currently the content of the snapshot can't be
/// passed to the observer.
fn pre_apply_sst(&self, _: &mut ObserverContext<'_>, _: CfName, _path: &str) {}
}
/// SplitChecker is invoked during a split check scan, and decides which keys
/// should be used to split a region.
pub trait SplitChecker<E> {
/// Hook to call for every kv scanned during split.
///
/// Return true to abort scan early.
fn on_kv(&mut self, _: &mut ObserverContext<'_>, _: &KeyEntry) -> bool {
false
}
/// Get the desired split keys.
fn split_keys(&mut self) -> Vec<Vec<u8>>;
/// Get approximate split keys without scan.
fn approximate_split_keys(&mut self, _: &Region, _: &E) -> Result<Vec<Vec<u8>>> {
Ok(vec![])
}
/// Get split policy.
fn policy(&self) -> CheckPolicy;
}
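// Illustrative sketch (added as an example; `NoopSplitChecker` is hypothetical and
// not part of the original module): a checker that participates in the scan-based
// policy but never proposes any split keys, so the region is left as-is.
pub struct NoopSplitChecker;
impl<E> SplitChecker<E> for NoopSplitChecker {
    fn split_keys(&mut self) -> Vec<Vec<u8>> {
        // No split keys are suggested.
        Vec::new()
    }
    fn policy(&self) -> CheckPolicy {
        CheckPolicy::Scan
    }
}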
pub trait SplitCheckObserver<E>: Coprocessor {
/// Add a checker for a split scan.
fn add_checker(
&self,
_: &mut ObserverContext<'_>,
_: &mut SplitCheckerHost<'_, E>,
_: &E,
policy: CheckPolicy,
);
}
pub trait RoleObserver: Coprocessor {
/// Hook to call when role of a peer changes.
///
    /// Please note that this hook is not called in real time. There may be a
    /// situation where the hook has not been called yet, even though the role of
    /// some peers has already changed.
fn on_role_change(&self, _: &mut ObserverContext<'_>, _: StateRole) {}
}
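// Illustrative sketch (added as an example; `ExampleRoleObserver` is hypothetical
// and not part of the original module): observers implement `Coprocessor` and
// override only the hooks they care about; all hooks default to no-ops.
pub struct ExampleRoleObserver;
impl Coprocessor for ExampleRoleObserver {}
impl RoleObserver for ExampleRoleObserver {
    fn on_role_change(&self, ctx: &mut ObserverContext<'_>, _role: StateRole) {
        // A real observer would record or react to the new role; this sketch only
        // shows that the affected region is available through the context.
        let _region_id = ctx.region().get_id();
    }
}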
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RegionChangeEvent {
Create,
Update,
Destroy,
}
pub trait RegionChangeObserver: Coprocessor {
    /// Hook to call when a region changes on this TiKV node.
fn on_region_changed(&self, _: &mut ObserverContext<'_>, _: RegionChangeEvent, _: StateRole) {}
}
#[derive(Clone, Debug)]
pub struct Cmd {
pub index: u64,
pub request: RaftCmdRequest,
pub response: RaftCmdResponse,
}
impl Cmd {
pub fn new(index: u64, request: RaftCmdRequest, response: RaftCmdResponse) -> Cmd {
Cmd {
index,
request,
response,
}
}
}
#[derive(Clone, Debug)]
pub struct CmdBatch {
pub observe_id: ObserveID,
pub region_id: u64,
pub cmds: Vec<Cmd>,
}
impl CmdBatch {
pub fn new(observe_id: ObserveID, region_id: u64) -> CmdBatch {
CmdBatch {
observe_id,
region_id,
cmds: Vec::new(),
}
}
pub fn push(&mut self, observe_id: ObserveID, region_id: u64, cmd: Cmd) {
assert_eq!(region_id, self.region_id);
assert_eq!(observe_id, self.observe_id);
self.cmds.push(cmd)
}
pub fn into_iter(self, region_id: u64) -> IntoIter<Cmd> {
assert_eq!(self.region_id, region_id);
self.cmds.into_iter()
}
pub fn len(&self) -> usize {
self.cmds.len()
}
pub fn is_empty(&self) -> bool {
self.cmds.is_empty()
}
pub fn size(&self) -> usize {
let mut cmd_bytes = 0;
for cmd in self.cmds.iter() {
let Cmd {
ref request,
ref response,
..
} = cmd;
if !response.get_header().has_error() {
if !request.has_admin_request() {
for req in request.requests.iter() {
let put = req.get_put();
cmd_bytes += put.get_key().len();
cmd_bytes += put.get_value().len();
}
}
}
}
cmd_bytes
}
}
pub trait CmdObserver: Coprocessor {
/// Hook to call after preparing for applying write requests.
fn on_prepare_for_apply(&self, observe_id: ObserveID, region_id: u64);
/// Hook to call after applying a write request.
fn on_apply_cmd(&self, observe_id: ObserveID, region_id: u64, cmd: Cmd);
/// Hook to call after flushing writes to db.
fn on_flush_apply(&self, txn_extras: Vec<TxnExtra>);
}
| 29.405622 | 99 | 0.632477 |
4b4d13d93feab36f7da4f501b9964059f68aab15 | 380 | #[doc = "Reader of register BLK_IDX"]
pub type R = crate::R<u32, super::BLK_IDX>;
#[doc = "Writer for register BLK_IDX"]
pub type W = crate::W<u32, super::BLK_IDX>;
#[doc = "Register BLK_IDX `reset()`'s with value 0"]
impl crate::ResetValue for super::BLK_IDX {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
impl R {}
impl W {}
| 25.333333 | 52 | 0.615789 |
16f5df961f448314d3b4173727e4e7dcbffdc403 | 1,264 | use super::ParallelIterator;
use super::internal::*;
use super::noop::*;
pub fn for_each<I, F, T>(pi: I, op: &F)
where I: ParallelIterator<Item = T>,
F: Fn(T) + Sync,
T: Send
{
let consumer = ForEachConsumer { op: op };
pi.drive_unindexed(consumer)
}
struct ForEachConsumer<'f, F: 'f> {
op: &'f F,
}
impl<'f, F, T> Consumer<T> for ForEachConsumer<'f, F>
where F: Fn(T) + Sync
{
type Folder = ForEachConsumer<'f, F>;
type Reducer = NoopReducer;
type Result = ();
fn split_at(self, _index: usize) -> (Self, Self, NoopReducer) {
(self.split_off_left(), self, NoopReducer)
}
fn into_folder(self) -> Self {
self
}
fn full(&self) -> bool {
false
}
}
impl<'f, F, T> Folder<T> for ForEachConsumer<'f, F>
where F: Fn(T) + Sync
{
type Result = ();
fn consume(self, item: T) -> Self {
(self.op)(item);
self
}
fn complete(self) {}
fn full(&self) -> bool {
false
}
}
impl<'f, F, T> UnindexedConsumer<T> for ForEachConsumer<'f, F>
where F: Fn(T) + Sync
{
fn split_off_left(&self) -> Self {
ForEachConsumer { op: self.op }
}
fn to_reducer(&self) -> NoopReducer {
NoopReducer
}
}
| 19.151515 | 67 | 0.549842 |
f88ba65405b9a4aacb06260213c23bb1c96c4d4a | 857,599 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum IdentityType {
#[allow(missing_docs)] // documentation missing in model
Group,
#[allow(missing_docs)] // documentation missing in model
User,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for IdentityType {
fn from(s: &str) -> Self {
match s {
"GROUP" => IdentityType::Group,
"USER" => IdentityType::User,
other => IdentityType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for IdentityType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(IdentityType::from(s))
}
}
impl IdentityType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
IdentityType::Group => "GROUP",
IdentityType::User => "USER",
IdentityType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["GROUP", "USER"]
}
}
impl AsRef<str> for IdentityType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A key-value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html">Tag Clusters</a>. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Tag {
/// <p>A user-defined key, which is the minimum required information for a valid tag. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html">Tag</a>. </p>
pub key: std::option::Option<std::string::String>,
/// <p>A user-defined value, which is optional in a tag. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html">Tag Clusters</a>. </p>
pub value: std::option::Option<std::string::String>,
}
impl Tag {
/// <p>A user-defined key, which is the minimum required information for a valid tag. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html">Tag</a>. </p>
pub fn key(&self) -> std::option::Option<&str> {
self.key.as_deref()
}
/// <p>A user-defined value, which is optional in a tag. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html">Tag Clusters</a>. </p>
pub fn value(&self) -> std::option::Option<&str> {
self.value.as_deref()
}
}
impl std::fmt::Debug for Tag {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Tag");
formatter.field("key", &self.key);
formatter.field("value", &self.value);
formatter.finish()
}
}
/// See [`Tag`](crate::model::Tag)
pub mod tag {
/// A builder for [`Tag`](crate::model::Tag)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) key: std::option::Option<std::string::String>,
pub(crate) value: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A user-defined key, which is the minimum required information for a valid tag. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html">Tag</a>. </p>
pub fn key(mut self, input: impl Into<std::string::String>) -> Self {
self.key = Some(input.into());
self
}
/// <p>A user-defined key, which is the minimum required information for a valid tag. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html">Tag</a>. </p>
pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self {
self.key = input;
self
}
/// <p>A user-defined value, which is optional in a tag. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html">Tag Clusters</a>. </p>
pub fn value(mut self, input: impl Into<std::string::String>) -> Self {
self.value = Some(input.into());
self
}
/// <p>A user-defined value, which is optional in a tag. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-tags.html">Tag Clusters</a>. </p>
pub fn set_value(mut self, input: std::option::Option<std::string::String>) -> Self {
self.value = input;
self
}
/// Consumes the builder and constructs a [`Tag`](crate::model::Tag)
pub fn build(self) -> crate::model::Tag {
crate::model::Tag {
key: self.key,
value: self.value,
}
}
}
}
impl Tag {
/// Creates a new builder-style object to manufacture [`Tag`](crate::model::Tag)
pub fn builder() -> crate::model::tag::Builder {
crate::model::tag::Builder::default()
}
}
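// Illustrative usage sketch (not part of the generated model): constructs a
// `Tag` with the builder shown above. The key/value strings are placeholder
// values chosen for the example, and the function assumes it lives inside
// this `crate::model` module.
#[allow(dead_code)]
fn example_tag() -> crate::model::Tag {
    crate::model::Tag::builder()
        .key("team")
        .value("analytics")
        .build()
}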
/// <p>Specifies the execution engine (cluster) to run the notebook and perform the notebook execution, for example, an EMR cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ExecutionEngineConfig {
/// <p>The unique identifier of the execution engine. For an EMR cluster, this is the cluster ID.</p>
pub id: std::option::Option<std::string::String>,
/// <p>The type of execution engine. A value of <code>EMR</code> specifies an EMR cluster.</p>
pub r#type: std::option::Option<crate::model::ExecutionEngineType>,
    /// <p>An optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html">Specifying EC2 Security Groups for EMR Notebooks</a> in the <i>EMR Management Guide</i>.</p>
pub master_instance_security_group_id: std::option::Option<std::string::String>,
}
impl ExecutionEngineConfig {
/// <p>The unique identifier of the execution engine. For an EMR cluster, this is the cluster ID.</p>
pub fn id(&self) -> std::option::Option<&str> {
self.id.as_deref()
}
/// <p>The type of execution engine. A value of <code>EMR</code> specifies an EMR cluster.</p>
pub fn r#type(&self) -> std::option::Option<&crate::model::ExecutionEngineType> {
self.r#type.as_ref()
}
    /// <p>An optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html">Specifying EC2 Security Groups for EMR Notebooks</a> in the <i>EMR Management Guide</i>.</p>
pub fn master_instance_security_group_id(&self) -> std::option::Option<&str> {
self.master_instance_security_group_id.as_deref()
}
}
impl std::fmt::Debug for ExecutionEngineConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ExecutionEngineConfig");
formatter.field("id", &self.id);
formatter.field("r#type", &self.r#type);
formatter.field(
"master_instance_security_group_id",
&self.master_instance_security_group_id,
);
formatter.finish()
}
}
/// See [`ExecutionEngineConfig`](crate::model::ExecutionEngineConfig)
pub mod execution_engine_config {
/// A builder for [`ExecutionEngineConfig`](crate::model::ExecutionEngineConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) id: std::option::Option<std::string::String>,
pub(crate) r#type: std::option::Option<crate::model::ExecutionEngineType>,
pub(crate) master_instance_security_group_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The unique identifier of the execution engine. For an EMR cluster, this is the cluster ID.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.id = Some(input.into());
self
}
/// <p>The unique identifier of the execution engine. For an EMR cluster, this is the cluster ID.</p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.id = input;
self
}
/// <p>The type of execution engine. A value of <code>EMR</code> specifies an EMR cluster.</p>
pub fn r#type(mut self, input: crate::model::ExecutionEngineType) -> Self {
self.r#type = Some(input);
self
}
/// <p>The type of execution engine. A value of <code>EMR</code> specifies an EMR cluster.</p>
pub fn set_type(
mut self,
input: std::option::Option<crate::model::ExecutionEngineType>,
) -> Self {
self.r#type = input;
self
}
        /// <p>An optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html">Specifying EC2 Security Groups for EMR Notebooks</a> in the <i>EMR Management Guide</i>.</p>
pub fn master_instance_security_group_id(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.master_instance_security_group_id = Some(input.into());
self
}
        /// <p>An optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html">Specifying EC2 Security Groups for EMR Notebooks</a> in the <i>EMR Management Guide</i>.</p>
pub fn set_master_instance_security_group_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.master_instance_security_group_id = input;
self
}
        /// Consumes the builder and constructs an [`ExecutionEngineConfig`](crate::model::ExecutionEngineConfig)
pub fn build(self) -> crate::model::ExecutionEngineConfig {
crate::model::ExecutionEngineConfig {
id: self.id,
r#type: self.r#type,
master_instance_security_group_id: self.master_instance_security_group_id,
}
}
}
}
impl ExecutionEngineConfig {
/// Creates a new builder-style object to manufacture [`ExecutionEngineConfig`](crate::model::ExecutionEngineConfig)
pub fn builder() -> crate::model::execution_engine_config::Builder {
crate::model::execution_engine_config::Builder::default()
}
}
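// Illustrative usage sketch (not part of the generated model): builds an
// `ExecutionEngineConfig` that points at an EMR cluster. The cluster ID and
// security group ID are placeholders; the function assumes it lives inside
// this `crate::model` module.
#[allow(dead_code)]
fn example_execution_engine_config() -> crate::model::ExecutionEngineConfig {
    crate::model::ExecutionEngineConfig::builder()
        .id("j-EXAMPLECLUSTERID")
        .r#type(crate::model::ExecutionEngineType::Emr)
        .master_instance_security_group_id("sg-EXAMPLE")
        .build()
}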
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ExecutionEngineType {
#[allow(missing_docs)] // documentation missing in model
Emr,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ExecutionEngineType {
fn from(s: &str) -> Self {
match s {
"EMR" => ExecutionEngineType::Emr,
other => ExecutionEngineType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ExecutionEngineType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ExecutionEngineType::from(s))
}
}
impl ExecutionEngineType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ExecutionEngineType::Emr => "EMR",
ExecutionEngineType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["EMR"]
}
}
impl AsRef<str> for ExecutionEngineType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
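// Illustrative round-trip sketch (not part of the generated model): exercises
// the string conversions shown above. `ExecutionEngineType::from` maps "EMR"
// to the `Emr` variant, unrecognized strings land in `Unknown`, and `as_str`
// recovers the wire value.
#[allow(dead_code)]
fn example_execution_engine_type_round_trip() {
    let engine = crate::model::ExecutionEngineType::from("EMR");
    assert_eq!(engine, crate::model::ExecutionEngineType::Emr);
    assert_eq!(engine.as_str(), "EMR");

    let unknown = crate::model::ExecutionEngineType::from("FUTURE_ENGINE");
    assert!(matches!(
        unknown,
        crate::model::ExecutionEngineType::Unknown(_)
    ));
}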
/// <p>An auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. For alternative cluster termination options, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html">Control cluster termination</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AutoTerminationPolicy {
/// <p>Specifies the amount of idle time in seconds after which the cluster automatically terminates. You can specify a minimum of 60 seconds and a maximum of 604800 seconds (seven days).</p>
pub idle_timeout: i64,
}
impl AutoTerminationPolicy {
/// <p>Specifies the amount of idle time in seconds after which the cluster automatically terminates. You can specify a minimum of 60 seconds and a maximum of 604800 seconds (seven days).</p>
pub fn idle_timeout(&self) -> i64 {
self.idle_timeout
}
}
impl std::fmt::Debug for AutoTerminationPolicy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AutoTerminationPolicy");
formatter.field("idle_timeout", &self.idle_timeout);
formatter.finish()
}
}
/// See [`AutoTerminationPolicy`](crate::model::AutoTerminationPolicy)
pub mod auto_termination_policy {
/// A builder for [`AutoTerminationPolicy`](crate::model::AutoTerminationPolicy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) idle_timeout: std::option::Option<i64>,
}
impl Builder {
/// <p>Specifies the amount of idle time in seconds after which the cluster automatically terminates. You can specify a minimum of 60 seconds and a maximum of 604800 seconds (seven days).</p>
pub fn idle_timeout(mut self, input: i64) -> Self {
self.idle_timeout = Some(input);
self
}
/// <p>Specifies the amount of idle time in seconds after which the cluster automatically terminates. You can specify a minimum of 60 seconds and a maximum of 604800 seconds (seven days).</p>
pub fn set_idle_timeout(mut self, input: std::option::Option<i64>) -> Self {
self.idle_timeout = input;
self
}
        /// Consumes the builder and constructs an [`AutoTerminationPolicy`](crate::model::AutoTerminationPolicy)
pub fn build(self) -> crate::model::AutoTerminationPolicy {
crate::model::AutoTerminationPolicy {
idle_timeout: self.idle_timeout.unwrap_or_default(),
}
}
}
}
impl AutoTerminationPolicy {
/// Creates a new builder-style object to manufacture [`AutoTerminationPolicy`](crate::model::AutoTerminationPolicy)
pub fn builder() -> crate::model::auto_termination_policy::Builder {
crate::model::auto_termination_policy::Builder::default()
}
}
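// Illustrative usage sketch (not part of the generated model): sets a
// one-hour idle timeout, which falls inside the documented 60-604800 second
// range. The function assumes it lives inside this `crate::model` module.
#[allow(dead_code)]
fn example_auto_termination_policy() -> crate::model::AutoTerminationPolicy {
    crate::model::AutoTerminationPolicy::builder()
        .idle_timeout(3600)
        .build()
}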
/// <p>Placement group configuration for an Amazon EMR cluster. The configuration specifies the placement strategy that can be applied to instance roles during cluster creation.</p>
/// <p>To use this configuration, consider attaching the AmazonElasticMapReducePlacementGroupPolicy managed policy to the EMR role.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PlacementGroupConfig {
/// <p>Role of the instance in the cluster.</p>
/// <p>Starting with Amazon EMR version 5.23.0, the only supported instance role is <code>MASTER</code>.</p>
pub instance_role: std::option::Option<crate::model::InstanceRoleType>,
/// <p>EC2 Placement Group strategy associated with instance role.</p>
/// <p>Starting with Amazon EMR version 5.23.0, the only supported placement strategy is <code>SPREAD</code> for the <code>MASTER</code> instance role.</p>
pub placement_strategy: std::option::Option<crate::model::PlacementGroupStrategy>,
}
impl PlacementGroupConfig {
/// <p>Role of the instance in the cluster.</p>
/// <p>Starting with Amazon EMR version 5.23.0, the only supported instance role is <code>MASTER</code>.</p>
pub fn instance_role(&self) -> std::option::Option<&crate::model::InstanceRoleType> {
self.instance_role.as_ref()
}
/// <p>EC2 Placement Group strategy associated with instance role.</p>
/// <p>Starting with Amazon EMR version 5.23.0, the only supported placement strategy is <code>SPREAD</code> for the <code>MASTER</code> instance role.</p>
pub fn placement_strategy(&self) -> std::option::Option<&crate::model::PlacementGroupStrategy> {
self.placement_strategy.as_ref()
}
}
impl std::fmt::Debug for PlacementGroupConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PlacementGroupConfig");
formatter.field("instance_role", &self.instance_role);
formatter.field("placement_strategy", &self.placement_strategy);
formatter.finish()
}
}
/// See [`PlacementGroupConfig`](crate::model::PlacementGroupConfig)
pub mod placement_group_config {
/// A builder for [`PlacementGroupConfig`](crate::model::PlacementGroupConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instance_role: std::option::Option<crate::model::InstanceRoleType>,
pub(crate) placement_strategy: std::option::Option<crate::model::PlacementGroupStrategy>,
}
impl Builder {
/// <p>Role of the instance in the cluster.</p>
/// <p>Starting with Amazon EMR version 5.23.0, the only supported instance role is <code>MASTER</code>.</p>
pub fn instance_role(mut self, input: crate::model::InstanceRoleType) -> Self {
self.instance_role = Some(input);
self
}
/// <p>Role of the instance in the cluster.</p>
/// <p>Starting with Amazon EMR version 5.23.0, the only supported instance role is <code>MASTER</code>.</p>
pub fn set_instance_role(
mut self,
input: std::option::Option<crate::model::InstanceRoleType>,
) -> Self {
self.instance_role = input;
self
}
/// <p>EC2 Placement Group strategy associated with instance role.</p>
/// <p>Starting with Amazon EMR version 5.23.0, the only supported placement strategy is <code>SPREAD</code> for the <code>MASTER</code> instance role.</p>
pub fn placement_strategy(mut self, input: crate::model::PlacementGroupStrategy) -> Self {
self.placement_strategy = Some(input);
self
}
/// <p>EC2 Placement Group strategy associated with instance role.</p>
/// <p>Starting with Amazon EMR version 5.23.0, the only supported placement strategy is <code>SPREAD</code> for the <code>MASTER</code> instance role.</p>
pub fn set_placement_strategy(
mut self,
input: std::option::Option<crate::model::PlacementGroupStrategy>,
) -> Self {
self.placement_strategy = input;
self
}
/// Consumes the builder and constructs a [`PlacementGroupConfig`](crate::model::PlacementGroupConfig)
pub fn build(self) -> crate::model::PlacementGroupConfig {
crate::model::PlacementGroupConfig {
instance_role: self.instance_role,
placement_strategy: self.placement_strategy,
}
}
}
}
impl PlacementGroupConfig {
/// Creates a new builder-style object to manufacture [`PlacementGroupConfig`](crate::model::PlacementGroupConfig)
pub fn builder() -> crate::model::placement_group_config::Builder {
crate::model::placement_group_config::Builder::default()
}
}
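// Illustrative usage sketch (not part of the generated model): configures the
// only combination the docs above call out for Amazon EMR 5.23.0 and later,
// the MASTER instance role with the SPREAD placement strategy. The function
// assumes it lives inside this `crate::model` module.
#[allow(dead_code)]
fn example_placement_group_config() -> crate::model::PlacementGroupConfig {
    crate::model::PlacementGroupConfig::builder()
        .instance_role(crate::model::InstanceRoleType::Master)
        .placement_strategy(crate::model::PlacementGroupStrategy::Spread)
        .build()
}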
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum PlacementGroupStrategy {
#[allow(missing_docs)] // documentation missing in model
Cluster,
#[allow(missing_docs)] // documentation missing in model
None,
#[allow(missing_docs)] // documentation missing in model
Partition,
#[allow(missing_docs)] // documentation missing in model
Spread,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for PlacementGroupStrategy {
fn from(s: &str) -> Self {
match s {
"CLUSTER" => PlacementGroupStrategy::Cluster,
"NONE" => PlacementGroupStrategy::None,
"PARTITION" => PlacementGroupStrategy::Partition,
"SPREAD" => PlacementGroupStrategy::Spread,
other => PlacementGroupStrategy::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for PlacementGroupStrategy {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(PlacementGroupStrategy::from(s))
}
}
impl PlacementGroupStrategy {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
PlacementGroupStrategy::Cluster => "CLUSTER",
PlacementGroupStrategy::None => "NONE",
PlacementGroupStrategy::Partition => "PARTITION",
PlacementGroupStrategy::Spread => "SPREAD",
PlacementGroupStrategy::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["CLUSTER", "NONE", "PARTITION", "SPREAD"]
}
}
impl AsRef<str> for PlacementGroupStrategy {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceRoleType {
#[allow(missing_docs)] // documentation missing in model
Core,
#[allow(missing_docs)] // documentation missing in model
Master,
#[allow(missing_docs)] // documentation missing in model
Task,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceRoleType {
fn from(s: &str) -> Self {
match s {
"CORE" => InstanceRoleType::Core,
"MASTER" => InstanceRoleType::Master,
"TASK" => InstanceRoleType::Task,
other => InstanceRoleType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceRoleType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceRoleType::from(s))
}
}
impl InstanceRoleType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
InstanceRoleType::Core => "CORE",
InstanceRoleType::Master => "MASTER",
InstanceRoleType::Task => "TASK",
InstanceRoleType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["CORE", "MASTER", "TASK"]
}
}
impl AsRef<str> for InstanceRoleType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p> Managed scaling policy for an Amazon EMR cluster. The policy specifies the limits for resources that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ManagedScalingPolicy {
/// <p>The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster is not allowed to go above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.</p>
pub compute_limits: std::option::Option<crate::model::ComputeLimits>,
}
impl ManagedScalingPolicy {
/// <p>The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster is not allowed to go above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.</p>
pub fn compute_limits(&self) -> std::option::Option<&crate::model::ComputeLimits> {
self.compute_limits.as_ref()
}
}
impl std::fmt::Debug for ManagedScalingPolicy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ManagedScalingPolicy");
formatter.field("compute_limits", &self.compute_limits);
formatter.finish()
}
}
/// See [`ManagedScalingPolicy`](crate::model::ManagedScalingPolicy)
pub mod managed_scaling_policy {
/// A builder for [`ManagedScalingPolicy`](crate::model::ManagedScalingPolicy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) compute_limits: std::option::Option<crate::model::ComputeLimits>,
}
impl Builder {
/// <p>The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster is not allowed to go above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.</p>
pub fn compute_limits(mut self, input: crate::model::ComputeLimits) -> Self {
self.compute_limits = Some(input);
self
}
/// <p>The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster is not allowed to go above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.</p>
pub fn set_compute_limits(
mut self,
input: std::option::Option<crate::model::ComputeLimits>,
) -> Self {
self.compute_limits = input;
self
}
/// Consumes the builder and constructs a [`ManagedScalingPolicy`](crate::model::ManagedScalingPolicy)
pub fn build(self) -> crate::model::ManagedScalingPolicy {
crate::model::ManagedScalingPolicy {
compute_limits: self.compute_limits,
}
}
}
}
impl ManagedScalingPolicy {
/// Creates a new builder-style object to manufacture [`ManagedScalingPolicy`](crate::model::ManagedScalingPolicy)
pub fn builder() -> crate::model::managed_scaling_policy::Builder {
crate::model::managed_scaling_policy::Builder::default()
}
}
/// <p> The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster cannot go above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ComputeLimits {
/// <p> The unit type used for specifying a managed scaling policy. </p>
pub unit_type: std::option::Option<crate::model::ComputeLimitsUnitType>,
/// <p> The lower boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
pub minimum_capacity_units: std::option::Option<i32>,
/// <p> The upper boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
pub maximum_capacity_units: std::option::Option<i32>,
/// <p> The upper boundary of On-Demand EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot Instances. </p>
pub maximum_on_demand_capacity_units: std::option::Option<i32>,
    /// <p> The upper boundary of EC2 units for the core node type in a cluster. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes. </p>
pub maximum_core_capacity_units: std::option::Option<i32>,
}
impl ComputeLimits {
/// <p> The unit type used for specifying a managed scaling policy. </p>
pub fn unit_type(&self) -> std::option::Option<&crate::model::ComputeLimitsUnitType> {
self.unit_type.as_ref()
}
/// <p> The lower boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
pub fn minimum_capacity_units(&self) -> std::option::Option<i32> {
self.minimum_capacity_units
}
/// <p> The upper boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
pub fn maximum_capacity_units(&self) -> std::option::Option<i32> {
self.maximum_capacity_units
}
/// <p> The upper boundary of On-Demand EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot Instances. </p>
pub fn maximum_on_demand_capacity_units(&self) -> std::option::Option<i32> {
self.maximum_on_demand_capacity_units
}
    /// <p> The upper boundary of EC2 units for the core node type in a cluster. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes. </p>
pub fn maximum_core_capacity_units(&self) -> std::option::Option<i32> {
self.maximum_core_capacity_units
}
}
impl std::fmt::Debug for ComputeLimits {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ComputeLimits");
formatter.field("unit_type", &self.unit_type);
formatter.field("minimum_capacity_units", &self.minimum_capacity_units);
formatter.field("maximum_capacity_units", &self.maximum_capacity_units);
formatter.field(
"maximum_on_demand_capacity_units",
&self.maximum_on_demand_capacity_units,
);
formatter.field(
"maximum_core_capacity_units",
&self.maximum_core_capacity_units,
);
formatter.finish()
}
}
/// See [`ComputeLimits`](crate::model::ComputeLimits)
pub mod compute_limits {
/// A builder for [`ComputeLimits`](crate::model::ComputeLimits)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) unit_type: std::option::Option<crate::model::ComputeLimitsUnitType>,
pub(crate) minimum_capacity_units: std::option::Option<i32>,
pub(crate) maximum_capacity_units: std::option::Option<i32>,
pub(crate) maximum_on_demand_capacity_units: std::option::Option<i32>,
pub(crate) maximum_core_capacity_units: std::option::Option<i32>,
}
impl Builder {
/// <p> The unit type used for specifying a managed scaling policy. </p>
pub fn unit_type(mut self, input: crate::model::ComputeLimitsUnitType) -> Self {
self.unit_type = Some(input);
self
}
/// <p> The unit type used for specifying a managed scaling policy. </p>
pub fn set_unit_type(
mut self,
input: std::option::Option<crate::model::ComputeLimitsUnitType>,
) -> Self {
self.unit_type = input;
self
}
/// <p> The lower boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
pub fn minimum_capacity_units(mut self, input: i32) -> Self {
self.minimum_capacity_units = Some(input);
self
}
/// <p> The lower boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
pub fn set_minimum_capacity_units(mut self, input: std::option::Option<i32>) -> Self {
self.minimum_capacity_units = input;
self
}
/// <p> The upper boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
pub fn maximum_capacity_units(mut self, input: i32) -> Self {
self.maximum_capacity_units = Some(input);
self
}
/// <p> The upper boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. </p>
pub fn set_maximum_capacity_units(mut self, input: std::option::Option<i32>) -> Self {
self.maximum_capacity_units = input;
self
}
/// <p> The upper boundary of On-Demand EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot Instances. </p>
pub fn maximum_on_demand_capacity_units(mut self, input: i32) -> Self {
self.maximum_on_demand_capacity_units = Some(input);
self
}
/// <p> The upper boundary of On-Demand EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot Instances. </p>
pub fn set_maximum_on_demand_capacity_units(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.maximum_on_demand_capacity_units = input;
self
}
        /// <p> The upper boundary of EC2 units for the core node type in a cluster. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes. </p>
pub fn maximum_core_capacity_units(mut self, input: i32) -> Self {
self.maximum_core_capacity_units = Some(input);
self
}
        /// <p> The upper boundary of EC2 units for the core node type in a cluster. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes. </p>
pub fn set_maximum_core_capacity_units(mut self, input: std::option::Option<i32>) -> Self {
self.maximum_core_capacity_units = input;
self
}
/// Consumes the builder and constructs a [`ComputeLimits`](crate::model::ComputeLimits)
pub fn build(self) -> crate::model::ComputeLimits {
crate::model::ComputeLimits {
unit_type: self.unit_type,
minimum_capacity_units: self.minimum_capacity_units,
maximum_capacity_units: self.maximum_capacity_units,
maximum_on_demand_capacity_units: self.maximum_on_demand_capacity_units,
maximum_core_capacity_units: self.maximum_core_capacity_units,
}
}
}
}
impl ComputeLimits {
/// Creates a new builder-style object to manufacture [`ComputeLimits`](crate::model::ComputeLimits)
pub fn builder() -> crate::model::compute_limits::Builder {
crate::model::compute_limits::Builder::default()
}
}
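// Illustrative usage sketch (not part of the generated model): builds the
// `ComputeLimits` shown above and wraps them in a `ManagedScalingPolicy`. The
// numbers are placeholder instance counts; the function assumes it lives
// inside this `crate::model` module.
#[allow(dead_code)]
fn example_managed_scaling_policy() -> crate::model::ManagedScalingPolicy {
    let limits = crate::model::ComputeLimits::builder()
        .unit_type(crate::model::ComputeLimitsUnitType::Instances)
        .minimum_capacity_units(2)
        .maximum_capacity_units(10)
        .maximum_on_demand_capacity_units(5)
        .maximum_core_capacity_units(5)
        .build();
    crate::model::ManagedScalingPolicy::builder()
        .compute_limits(limits)
        .build()
}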
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ComputeLimitsUnitType {
#[allow(missing_docs)] // documentation missing in model
InstanceFleetUnits,
#[allow(missing_docs)] // documentation missing in model
Instances,
#[allow(missing_docs)] // documentation missing in model
Vcpu,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ComputeLimitsUnitType {
fn from(s: &str) -> Self {
match s {
"InstanceFleetUnits" => ComputeLimitsUnitType::InstanceFleetUnits,
"Instances" => ComputeLimitsUnitType::Instances,
"VCPU" => ComputeLimitsUnitType::Vcpu,
other => ComputeLimitsUnitType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ComputeLimitsUnitType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ComputeLimitsUnitType::from(s))
}
}
impl ComputeLimitsUnitType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ComputeLimitsUnitType::InstanceFleetUnits => "InstanceFleetUnits",
ComputeLimitsUnitType::Instances => "Instances",
ComputeLimitsUnitType::Vcpu => "VCPU",
ComputeLimitsUnitType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["InstanceFleetUnits", "Instances", "VCPU"]
}
}
impl AsRef<str> for ComputeLimitsUnitType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-kerberos.html">Use Kerberos Authentication</a> in the <i>Amazon EMR Management Guide</i>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct KerberosAttributes {
/// <p>The name of the Kerberos realm to which all nodes in a cluster belong. For example, <code>EC2.INTERNAL</code>. </p>
pub realm: std::option::Option<std::string::String>,
/// <p>The password used within the cluster for the kadmin service on the cluster-dedicated KDC, which maintains Kerberos principals, password policies, and keytabs for the cluster.</p>
pub kdc_admin_password: std::option::Option<std::string::String>,
/// <p>Required only when establishing a cross-realm trust with a KDC in a different realm. The cross-realm principal password, which must be identical across realms.</p>
pub cross_realm_trust_principal_password: std::option::Option<std::string::String>,
/// <p>Required only when establishing a cross-realm trust with an Active Directory domain. A user with sufficient privileges to join resources to the domain.</p>
pub ad_domain_join_user: std::option::Option<std::string::String>,
/// <p>The Active Directory password for <code>ADDomainJoinUser</code>.</p>
pub ad_domain_join_password: std::option::Option<std::string::String>,
}
impl KerberosAttributes {
/// <p>The name of the Kerberos realm to which all nodes in a cluster belong. For example, <code>EC2.INTERNAL</code>. </p>
pub fn realm(&self) -> std::option::Option<&str> {
self.realm.as_deref()
}
/// <p>The password used within the cluster for the kadmin service on the cluster-dedicated KDC, which maintains Kerberos principals, password policies, and keytabs for the cluster.</p>
pub fn kdc_admin_password(&self) -> std::option::Option<&str> {
self.kdc_admin_password.as_deref()
}
/// <p>Required only when establishing a cross-realm trust with a KDC in a different realm. The cross-realm principal password, which must be identical across realms.</p>
pub fn cross_realm_trust_principal_password(&self) -> std::option::Option<&str> {
self.cross_realm_trust_principal_password.as_deref()
}
/// <p>Required only when establishing a cross-realm trust with an Active Directory domain. A user with sufficient privileges to join resources to the domain.</p>
pub fn ad_domain_join_user(&self) -> std::option::Option<&str> {
self.ad_domain_join_user.as_deref()
}
/// <p>The Active Directory password for <code>ADDomainJoinUser</code>.</p>
pub fn ad_domain_join_password(&self) -> std::option::Option<&str> {
self.ad_domain_join_password.as_deref()
}
}
impl std::fmt::Debug for KerberosAttributes {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("KerberosAttributes");
formatter.field("realm", &self.realm);
formatter.field("kdc_admin_password", &self.kdc_admin_password);
formatter.field(
"cross_realm_trust_principal_password",
&self.cross_realm_trust_principal_password,
);
formatter.field("ad_domain_join_user", &self.ad_domain_join_user);
formatter.field("ad_domain_join_password", &self.ad_domain_join_password);
formatter.finish()
}
}
/// See [`KerberosAttributes`](crate::model::KerberosAttributes)
pub mod kerberos_attributes {
/// A builder for [`KerberosAttributes`](crate::model::KerberosAttributes)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) realm: std::option::Option<std::string::String>,
pub(crate) kdc_admin_password: std::option::Option<std::string::String>,
pub(crate) cross_realm_trust_principal_password: std::option::Option<std::string::String>,
pub(crate) ad_domain_join_user: std::option::Option<std::string::String>,
pub(crate) ad_domain_join_password: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the Kerberos realm to which all nodes in a cluster belong. For example, <code>EC2.INTERNAL</code>. </p>
pub fn realm(mut self, input: impl Into<std::string::String>) -> Self {
self.realm = Some(input.into());
self
}
/// <p>The name of the Kerberos realm to which all nodes in a cluster belong. For example, <code>EC2.INTERNAL</code>. </p>
pub fn set_realm(mut self, input: std::option::Option<std::string::String>) -> Self {
self.realm = input;
self
}
/// <p>The password used within the cluster for the kadmin service on the cluster-dedicated KDC, which maintains Kerberos principals, password policies, and keytabs for the cluster.</p>
pub fn kdc_admin_password(mut self, input: impl Into<std::string::String>) -> Self {
self.kdc_admin_password = Some(input.into());
self
}
/// <p>The password used within the cluster for the kadmin service on the cluster-dedicated KDC, which maintains Kerberos principals, password policies, and keytabs for the cluster.</p>
pub fn set_kdc_admin_password(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.kdc_admin_password = input;
self
}
/// <p>Required only when establishing a cross-realm trust with a KDC in a different realm. The cross-realm principal password, which must be identical across realms.</p>
pub fn cross_realm_trust_principal_password(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.cross_realm_trust_principal_password = Some(input.into());
self
}
/// <p>Required only when establishing a cross-realm trust with a KDC in a different realm. The cross-realm principal password, which must be identical across realms.</p>
pub fn set_cross_realm_trust_principal_password(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cross_realm_trust_principal_password = input;
self
}
/// <p>Required only when establishing a cross-realm trust with an Active Directory domain. A user with sufficient privileges to join resources to the domain.</p>
pub fn ad_domain_join_user(mut self, input: impl Into<std::string::String>) -> Self {
self.ad_domain_join_user = Some(input.into());
self
}
/// <p>Required only when establishing a cross-realm trust with an Active Directory domain. A user with sufficient privileges to join resources to the domain.</p>
pub fn set_ad_domain_join_user(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.ad_domain_join_user = input;
self
}
/// <p>The Active Directory password for <code>ADDomainJoinUser</code>.</p>
pub fn ad_domain_join_password(mut self, input: impl Into<std::string::String>) -> Self {
self.ad_domain_join_password = Some(input.into());
self
}
/// <p>The Active Directory password for <code>ADDomainJoinUser</code>.</p>
pub fn set_ad_domain_join_password(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.ad_domain_join_password = input;
self
}
/// Consumes the builder and constructs a [`KerberosAttributes`](crate::model::KerberosAttributes)
pub fn build(self) -> crate::model::KerberosAttributes {
crate::model::KerberosAttributes {
realm: self.realm,
kdc_admin_password: self.kdc_admin_password,
cross_realm_trust_principal_password: self.cross_realm_trust_principal_password,
ad_domain_join_user: self.ad_domain_join_user,
ad_domain_join_password: self.ad_domain_join_password,
}
}
}
}
impl KerberosAttributes {
/// Creates a new builder-style object to manufacture [`KerberosAttributes`](crate::model::KerberosAttributes)
pub fn builder() -> crate::model::kerberos_attributes::Builder {
crate::model::kerberos_attributes::Builder::default()
}
}
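// Illustrative usage sketch (not part of the generated model): the minimal
// cluster-dedicated KDC setup described above, a realm plus a kadmin
// password. The realm follows the documented `EC2.INTERNAL` example and the
// password is a placeholder; the function assumes it lives inside this
// `crate::model` module.
#[allow(dead_code)]
fn example_kerberos_attributes() -> crate::model::KerberosAttributes {
    crate::model::KerberosAttributes::builder()
        .realm("EC2.INTERNAL")
        .kdc_admin_password("example-placeholder-password")
        .build()
}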
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum RepoUpgradeOnBoot {
#[allow(missing_docs)] // documentation missing in model
None,
#[allow(missing_docs)] // documentation missing in model
Security,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for RepoUpgradeOnBoot {
fn from(s: &str) -> Self {
match s {
"NONE" => RepoUpgradeOnBoot::None,
"SECURITY" => RepoUpgradeOnBoot::Security,
other => RepoUpgradeOnBoot::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for RepoUpgradeOnBoot {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(RepoUpgradeOnBoot::from(s))
}
}
impl RepoUpgradeOnBoot {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
RepoUpgradeOnBoot::None => "NONE",
RepoUpgradeOnBoot::Security => "SECURITY",
RepoUpgradeOnBoot::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["NONE", "SECURITY"]
}
}
impl AsRef<str> for RepoUpgradeOnBoot {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ScaleDownBehavior {
#[allow(missing_docs)] // documentation missing in model
TerminateAtInstanceHour,
#[allow(missing_docs)] // documentation missing in model
TerminateAtTaskCompletion,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ScaleDownBehavior {
fn from(s: &str) -> Self {
match s {
"TERMINATE_AT_INSTANCE_HOUR" => ScaleDownBehavior::TerminateAtInstanceHour,
"TERMINATE_AT_TASK_COMPLETION" => ScaleDownBehavior::TerminateAtTaskCompletion,
other => ScaleDownBehavior::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ScaleDownBehavior {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ScaleDownBehavior::from(s))
}
}
impl ScaleDownBehavior {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ScaleDownBehavior::TerminateAtInstanceHour => "TERMINATE_AT_INSTANCE_HOUR",
ScaleDownBehavior::TerminateAtTaskCompletion => "TERMINATE_AT_TASK_COMPLETION",
ScaleDownBehavior::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["TERMINATE_AT_INSTANCE_HOUR", "TERMINATE_AT_TASK_COMPLETION"]
}
}
impl AsRef<str> for ScaleDownBehavior {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <note>
/// <p>Applies to Amazon EMR releases 4.x or later.</p>
/// </note>
/// <p>An optional configuration specification to be used when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html">Configuring Applications</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Configuration {
/// <p>The classification within a configuration.</p>
pub classification: std::option::Option<std::string::String>,
/// <p>A list of additional configurations to apply within a configuration object.</p>
pub configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
/// <p>A set of properties specified within a configuration classification.</p>
pub properties:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl Configuration {
/// <p>The classification within a configuration.</p>
pub fn classification(&self) -> std::option::Option<&str> {
self.classification.as_deref()
}
/// <p>A list of additional configurations to apply within a configuration object.</p>
pub fn configurations(&self) -> std::option::Option<&[crate::model::Configuration]> {
self.configurations.as_deref()
}
/// <p>A set of properties specified within a configuration classification.</p>
pub fn properties(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.properties.as_ref()
}
}
impl std::fmt::Debug for Configuration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Configuration");
formatter.field("classification", &self.classification);
formatter.field("configurations", &self.configurations);
formatter.field("properties", &self.properties);
formatter.finish()
}
}
/// See [`Configuration`](crate::model::Configuration)
pub mod configuration {
/// A builder for [`Configuration`](crate::model::Configuration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) classification: std::option::Option<std::string::String>,
pub(crate) configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
pub(crate) properties: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
}
impl Builder {
/// <p>The classification within a configuration.</p>
pub fn classification(mut self, input: impl Into<std::string::String>) -> Self {
self.classification = Some(input.into());
self
}
/// <p>The classification within a configuration.</p>
pub fn set_classification(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.classification = input;
self
}
/// Appends an item to `configurations`.
///
/// To override the contents of this collection use [`set_configurations`](Self::set_configurations).
///
/// <p>A list of additional configurations to apply within a configuration object.</p>
pub fn configurations(mut self, input: crate::model::Configuration) -> Self {
let mut v = self.configurations.unwrap_or_default();
v.push(input);
self.configurations = Some(v);
self
}
/// <p>A list of additional configurations to apply within a configuration object.</p>
pub fn set_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
) -> Self {
self.configurations = input;
self
}
/// Adds a key-value pair to `properties`.
///
/// To override the contents of this collection use [`set_properties`](Self::set_properties).
///
/// <p>A set of properties specified within a configuration classification.</p>
pub fn properties(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.properties.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.properties = Some(hash_map);
self
}
/// <p>A set of properties specified within a configuration classification.</p>
pub fn set_properties(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.properties = input;
self
}
/// Consumes the builder and constructs a [`Configuration`](crate::model::Configuration)
pub fn build(self) -> crate::model::Configuration {
crate::model::Configuration {
classification: self.classification,
configurations: self.configurations,
properties: self.properties,
}
}
}
}
impl Configuration {
/// Creates a new builder-style object to manufacture [`Configuration`](crate::model::Configuration)
pub fn builder() -> crate::model::configuration::Builder {
crate::model::configuration::Builder::default()
}
}
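// Illustrative usage sketch (not part of the generated model): a
// classification with one property, nested inside a parent classification,
// mirroring the classification/properties/nested-configurations shape
// described above. The classification names and the property are
// placeholders; the function assumes it lives inside this `crate::model`
// module.
#[allow(dead_code)]
fn example_configuration() -> crate::model::Configuration {
    let nested = crate::model::Configuration::builder()
        .classification("export")
        .properties("EXAMPLE_PROPERTY", "example-value")
        .build();
    crate::model::Configuration::builder()
        .classification("example-env")
        .configurations(nested)
        .build()
}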
/// <p>With Amazon EMR release version 4.0 and later, the only accepted parameter is the application name. To pass arguments to applications, you use configuration classifications specified using configuration JSON objects. For more information, see <a href="https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html">Configuring Applications</a>.</p>
/// <p>With earlier Amazon EMR releases, the application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Application {
/// <p>The name of the application.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The version of the application.</p>
pub version: std::option::Option<std::string::String>,
/// <p>Arguments for Amazon EMR to pass to the application.</p>
pub args: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.</p>
pub additional_info:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl Application {
/// <p>The name of the application.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The version of the application.</p>
pub fn version(&self) -> std::option::Option<&str> {
self.version.as_deref()
}
/// <p>Arguments for Amazon EMR to pass to the application.</p>
pub fn args(&self) -> std::option::Option<&[std::string::String]> {
self.args.as_deref()
}
/// <p>This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.</p>
pub fn additional_info(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.additional_info.as_ref()
}
}
impl std::fmt::Debug for Application {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Application");
formatter.field("name", &self.name);
formatter.field("version", &self.version);
formatter.field("args", &self.args);
formatter.field("additional_info", &self.additional_info);
formatter.finish()
}
}
/// See [`Application`](crate::model::Application)
pub mod application {
/// A builder for [`Application`](crate::model::Application)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) version: std::option::Option<std::string::String>,
pub(crate) args: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) additional_info: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
}
impl Builder {
/// <p>The name of the application.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the application.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The version of the application.</p>
pub fn version(mut self, input: impl Into<std::string::String>) -> Self {
self.version = Some(input.into());
self
}
/// <p>The version of the application.</p>
pub fn set_version(mut self, input: std::option::Option<std::string::String>) -> Self {
self.version = input;
self
}
/// Appends an item to `args`.
///
/// To override the contents of this collection use [`set_args`](Self::set_args).
///
/// <p>Arguments for Amazon EMR to pass to the application.</p>
pub fn args(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.args.unwrap_or_default();
v.push(input.into());
self.args = Some(v);
self
}
/// <p>Arguments for Amazon EMR to pass to the application.</p>
pub fn set_args(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.args = input;
self
}
/// Adds a key-value pair to `additional_info`.
///
/// To override the contents of this collection use [`set_additional_info`](Self::set_additional_info).
///
/// <p>This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.</p>
pub fn additional_info(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.additional_info.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.additional_info = Some(hash_map);
self
}
/// <p>This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.</p>
pub fn set_additional_info(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.additional_info = input;
self
}
        /// Consumes the builder and constructs an [`Application`](crate::model::Application)
pub fn build(self) -> crate::model::Application {
crate::model::Application {
name: self.name,
version: self.version,
args: self.args,
additional_info: self.additional_info,
}
}
}
}
impl Application {
/// Creates a new builder-style object to manufacture [`Application`](crate::model::Application)
pub fn builder() -> crate::model::application::Builder {
crate::model::application::Builder::default()
}
}
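// Illustrative usage sketch (not part of the generated model): per the
// release 4.0+ note above, only the application name is supplied. "Spark" is
// used as a representative application name; the function assumes it lives
// inside this `crate::model` module.
#[allow(dead_code)]
fn example_application() -> crate::model::Application {
    crate::model::Application::builder().name("Spark").build()
}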
/// <p>The list of supported product configurations that allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SupportedProductConfig {
/// <p>The name of the product configuration.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The list of user-supplied arguments.</p>
pub args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl SupportedProductConfig {
/// <p>The name of the product configuration.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The list of user-supplied arguments.</p>
pub fn args(&self) -> std::option::Option<&[std::string::String]> {
self.args.as_deref()
}
}
impl std::fmt::Debug for SupportedProductConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("SupportedProductConfig");
formatter.field("name", &self.name);
formatter.field("args", &self.args);
formatter.finish()
}
}
/// See [`SupportedProductConfig`](crate::model::SupportedProductConfig)
pub mod supported_product_config {
/// A builder for [`SupportedProductConfig`](crate::model::SupportedProductConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The name of the product configuration.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the product configuration.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// Appends an item to `args`.
///
/// To override the contents of this collection use [`set_args`](Self::set_args).
///
/// <p>The list of user-supplied arguments.</p>
pub fn args(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.args.unwrap_or_default();
v.push(input.into());
self.args = Some(v);
self
}
/// <p>The list of user-supplied arguments.</p>
pub fn set_args(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.args = input;
self
}
/// Consumes the builder and constructs a [`SupportedProductConfig`](crate::model::SupportedProductConfig)
pub fn build(self) -> crate::model::SupportedProductConfig {
crate::model::SupportedProductConfig {
name: self.name,
args: self.args,
}
}
}
}
impl SupportedProductConfig {
/// Creates a new builder-style object to manufacture [`SupportedProductConfig`](crate::model::SupportedProductConfig)
pub fn builder() -> crate::model::supported_product_config::Builder {
crate::model::supported_product_config::Builder::default()
}
}
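// Illustrative usage sketch (not part of the generated model): a supported
// product entry whose arguments are forwarded to its installation script.
// The product name and argument are placeholders; the function assumes it
// lives inside this `crate::model` module.
#[allow(dead_code)]
fn example_supported_product_config() -> crate::model::SupportedProductConfig {
    crate::model::SupportedProductConfig::builder()
        .name("example-product")
        .args("--example-flag")
        .build()
}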
/// <p>Configuration of a bootstrap action.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct BootstrapActionConfig {
/// <p>The name of the bootstrap action.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The script run by the bootstrap action.</p>
pub script_bootstrap_action: std::option::Option<crate::model::ScriptBootstrapActionConfig>,
}
impl BootstrapActionConfig {
/// <p>The name of the bootstrap action.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The script run by the bootstrap action.</p>
pub fn script_bootstrap_action(
&self,
) -> std::option::Option<&crate::model::ScriptBootstrapActionConfig> {
self.script_bootstrap_action.as_ref()
}
}
impl std::fmt::Debug for BootstrapActionConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("BootstrapActionConfig");
formatter.field("name", &self.name);
formatter.field("script_bootstrap_action", &self.script_bootstrap_action);
formatter.finish()
}
}
/// See [`BootstrapActionConfig`](crate::model::BootstrapActionConfig)
pub mod bootstrap_action_config {
/// A builder for [`BootstrapActionConfig`](crate::model::BootstrapActionConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) script_bootstrap_action:
std::option::Option<crate::model::ScriptBootstrapActionConfig>,
}
impl Builder {
/// <p>The name of the bootstrap action.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the bootstrap action.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The script run by the bootstrap action.</p>
pub fn script_bootstrap_action(
mut self,
input: crate::model::ScriptBootstrapActionConfig,
) -> Self {
self.script_bootstrap_action = Some(input);
self
}
/// <p>The script run by the bootstrap action.</p>
pub fn set_script_bootstrap_action(
mut self,
input: std::option::Option<crate::model::ScriptBootstrapActionConfig>,
) -> Self {
self.script_bootstrap_action = input;
self
}
/// Consumes the builder and constructs a [`BootstrapActionConfig`](crate::model::BootstrapActionConfig)
pub fn build(self) -> crate::model::BootstrapActionConfig {
crate::model::BootstrapActionConfig {
name: self.name,
script_bootstrap_action: self.script_bootstrap_action,
}
}
}
}
impl BootstrapActionConfig {
/// Creates a new builder-style object to manufacture [`BootstrapActionConfig`](crate::model::BootstrapActionConfig)
pub fn builder() -> crate::model::bootstrap_action_config::Builder {
crate::model::bootstrap_action_config::Builder::default()
}
}
/// <p>Configuration of the script to run during a bootstrap action.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ScriptBootstrapActionConfig {
/// <p>Location in Amazon S3 of the script to run during a bootstrap action.</p>
pub path: std::option::Option<std::string::String>,
/// <p>A list of command line arguments to pass to the bootstrap action script.</p>
pub args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl ScriptBootstrapActionConfig {
/// <p>Location in Amazon S3 of the script to run during a bootstrap action.</p>
pub fn path(&self) -> std::option::Option<&str> {
self.path.as_deref()
}
/// <p>A list of command line arguments to pass to the bootstrap action script.</p>
pub fn args(&self) -> std::option::Option<&[std::string::String]> {
self.args.as_deref()
}
}
impl std::fmt::Debug for ScriptBootstrapActionConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ScriptBootstrapActionConfig");
formatter.field("path", &self.path);
formatter.field("args", &self.args);
formatter.finish()
}
}
/// See [`ScriptBootstrapActionConfig`](crate::model::ScriptBootstrapActionConfig)
pub mod script_bootstrap_action_config {
/// A builder for [`ScriptBootstrapActionConfig`](crate::model::ScriptBootstrapActionConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) path: std::option::Option<std::string::String>,
pub(crate) args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>Location in Amazon S3 of the script to run during a bootstrap action.</p>
pub fn path(mut self, input: impl Into<std::string::String>) -> Self {
self.path = Some(input.into());
self
}
/// <p>Location in Amazon S3 of the script to run during a bootstrap action.</p>
pub fn set_path(mut self, input: std::option::Option<std::string::String>) -> Self {
self.path = input;
self
}
/// Appends an item to `args`.
///
/// To override the contents of this collection use [`set_args`](Self::set_args).
///
/// <p>A list of command line arguments to pass to the bootstrap action script.</p>
pub fn args(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.args.unwrap_or_default();
v.push(input.into());
self.args = Some(v);
self
}
/// <p>A list of command line arguments to pass to the bootstrap action script.</p>
pub fn set_args(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.args = input;
self
}
/// Consumes the builder and constructs a [`ScriptBootstrapActionConfig`](crate::model::ScriptBootstrapActionConfig)
pub fn build(self) -> crate::model::ScriptBootstrapActionConfig {
crate::model::ScriptBootstrapActionConfig {
path: self.path,
args: self.args,
}
}
}
}
impl ScriptBootstrapActionConfig {
/// Creates a new builder-style object to manufacture [`ScriptBootstrapActionConfig`](crate::model::ScriptBootstrapActionConfig)
pub fn builder() -> crate::model::script_bootstrap_action_config::Builder {
crate::model::script_bootstrap_action_config::Builder::default()
}
}
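// A minimal usage sketch showing how `ScriptBootstrapActionConfig` plugs into
// `BootstrapActionConfig` (hypothetical helper, not part of the generated
// model). The S3 path, script argument, and action name are illustrative
// assumptions.
#[allow(dead_code)]
fn bootstrap_action_sketch() -> crate::model::BootstrapActionConfig {
    // The script to run, with its location in Amazon S3 and its arguments.
    let script = crate::model::ScriptBootstrapActionConfig::builder()
        .path("s3://my-bucket/bootstrap.sh")
        .args("--install-extras")
        .build();
    // The bootstrap action wraps the script config under a friendly name.
    crate::model::BootstrapActionConfig::builder()
        .name("install-extras")
        .script_bootstrap_action(script)
        .build()
}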
/// <p>Specification for a cluster (job flow) step.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StepConfig {
/// <p>The name of the step.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The action to take when the step fails. Use one of the following values:</p>
/// <ul>
/// <li> <p> <code>TERMINATE_CLUSTER</code> - Shuts down the cluster.</p> </li>
/// <li> <p> <code>CANCEL_AND_WAIT</code> - Cancels any pending steps and returns the cluster to the <code>WAITING</code> state.</p> </li>
/// <li> <p> <code>CONTINUE</code> - Continues to the next step in the queue.</p> </li>
/// <li> <p> <code>TERMINATE_JOB_FLOW</code> - Shuts down the cluster. <code>TERMINATE_JOB_FLOW</code> is provided for backward compatibility. We recommend using <code>TERMINATE_CLUSTER</code> instead.</p> </li>
/// </ul>
/// <p>If a cluster's <code>StepConcurrencyLevel</code> is greater than <code>1</code>, do not use <code>AddJobFlowSteps</code> to submit a step with this parameter set to <code>CANCEL_AND_WAIT</code> or <code>TERMINATE_CLUSTER</code>. The step is not submitted and the action fails with a message that the <code>ActionOnFailure</code> setting is not valid.</p>
/// <p>If you change a cluster's <code>StepConcurrencyLevel</code> to be greater than 1 while a step is running, the <code>ActionOnFailure</code> parameter may not behave as you expect. In this case, for a step that fails with this parameter set to <code>CANCEL_AND_WAIT</code>, pending steps and the running step are not canceled; for a step that fails with this parameter set to <code>TERMINATE_CLUSTER</code>, the cluster does not terminate.</p>
pub action_on_failure: std::option::Option<crate::model::ActionOnFailure>,
/// <p>The JAR file used for the step.</p>
pub hadoop_jar_step: std::option::Option<crate::model::HadoopJarStepConfig>,
}
impl StepConfig {
/// <p>The name of the step.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The action to take when the step fails. Use one of the following values:</p>
/// <ul>
/// <li> <p> <code>TERMINATE_CLUSTER</code> - Shuts down the cluster.</p> </li>
/// <li> <p> <code>CANCEL_AND_WAIT</code> - Cancels any pending steps and returns the cluster to the <code>WAITING</code> state.</p> </li>
/// <li> <p> <code>CONTINUE</code> - Continues to the next step in the queue.</p> </li>
/// <li> <p> <code>TERMINATE_JOB_FLOW</code> - Shuts down the cluster. <code>TERMINATE_JOB_FLOW</code> is provided for backward compatibility. We recommend using <code>TERMINATE_CLUSTER</code> instead.</p> </li>
/// </ul>
/// <p>If a cluster's <code>StepConcurrencyLevel</code> is greater than <code>1</code>, do not use <code>AddJobFlowSteps</code> to submit a step with this parameter set to <code>CANCEL_AND_WAIT</code> or <code>TERMINATE_CLUSTER</code>. The step is not submitted and the action fails with a message that the <code>ActionOnFailure</code> setting is not valid.</p>
/// <p>If you change a cluster's <code>StepConcurrencyLevel</code> to be greater than 1 while a step is running, the <code>ActionOnFailure</code> parameter may not behave as you expect. In this case, for a step that fails with this parameter set to <code>CANCEL_AND_WAIT</code>, pending steps and the running step are not canceled; for a step that fails with this parameter set to <code>TERMINATE_CLUSTER</code>, the cluster does not terminate.</p>
pub fn action_on_failure(&self) -> std::option::Option<&crate::model::ActionOnFailure> {
self.action_on_failure.as_ref()
}
/// <p>The JAR file used for the step.</p>
pub fn hadoop_jar_step(&self) -> std::option::Option<&crate::model::HadoopJarStepConfig> {
self.hadoop_jar_step.as_ref()
}
}
impl std::fmt::Debug for StepConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StepConfig");
formatter.field("name", &self.name);
formatter.field("action_on_failure", &self.action_on_failure);
formatter.field("hadoop_jar_step", &self.hadoop_jar_step);
formatter.finish()
}
}
/// See [`StepConfig`](crate::model::StepConfig)
pub mod step_config {
/// A builder for [`StepConfig`](crate::model::StepConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) action_on_failure: std::option::Option<crate::model::ActionOnFailure>,
pub(crate) hadoop_jar_step: std::option::Option<crate::model::HadoopJarStepConfig>,
}
impl Builder {
/// <p>The name of the step.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the step.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The action to take when the step fails. Use one of the following values:</p>
/// <ul>
/// <li> <p> <code>TERMINATE_CLUSTER</code> - Shuts down the cluster.</p> </li>
/// <li> <p> <code>CANCEL_AND_WAIT</code> - Cancels any pending steps and returns the cluster to the <code>WAITING</code> state.</p> </li>
/// <li> <p> <code>CONTINUE</code> - Continues to the next step in the queue.</p> </li>
/// <li> <p> <code>TERMINATE_JOB_FLOW</code> - Shuts down the cluster. <code>TERMINATE_JOB_FLOW</code> is provided for backward compatibility. We recommend using <code>TERMINATE_CLUSTER</code> instead.</p> </li>
/// </ul>
/// <p>If a cluster's <code>StepConcurrencyLevel</code> is greater than <code>1</code>, do not use <code>AddJobFlowSteps</code> to submit a step with this parameter set to <code>CANCEL_AND_WAIT</code> or <code>TERMINATE_CLUSTER</code>. The step is not submitted and the action fails with a message that the <code>ActionOnFailure</code> setting is not valid.</p>
/// <p>If you change a cluster's <code>StepConcurrencyLevel</code> to be greater than 1 while a step is running, the <code>ActionOnFailure</code> parameter may not behave as you expect. In this case, for a step that fails with this parameter set to <code>CANCEL_AND_WAIT</code>, pending steps and the running step are not canceled; for a step that fails with this parameter set to <code>TERMINATE_CLUSTER</code>, the cluster does not terminate.</p>
pub fn action_on_failure(mut self, input: crate::model::ActionOnFailure) -> Self {
self.action_on_failure = Some(input);
self
}
/// <p>The action to take when the step fails. Use one of the following values:</p>
/// <ul>
/// <li> <p> <code>TERMINATE_CLUSTER</code> - Shuts down the cluster.</p> </li>
/// <li> <p> <code>CANCEL_AND_WAIT</code> - Cancels any pending steps and returns the cluster to the <code>WAITING</code> state.</p> </li>
/// <li> <p> <code>CONTINUE</code> - Continues to the next step in the queue.</p> </li>
/// <li> <p> <code>TERMINATE_JOB_FLOW</code> - Shuts down the cluster. <code>TERMINATE_JOB_FLOW</code> is provided for backward compatibility. We recommend using <code>TERMINATE_CLUSTER</code> instead.</p> </li>
/// </ul>
/// <p>If a cluster's <code>StepConcurrencyLevel</code> is greater than <code>1</code>, do not use <code>AddJobFlowSteps</code> to submit a step with this parameter set to <code>CANCEL_AND_WAIT</code> or <code>TERMINATE_CLUSTER</code>. The step is not submitted and the action fails with a message that the <code>ActionOnFailure</code> setting is not valid.</p>
/// <p>If you change a cluster's <code>StepConcurrencyLevel</code> to be greater than 1 while a step is running, the <code>ActionOnFailure</code> parameter may not behave as you expect. In this case, for a step that fails with this parameter set to <code>CANCEL_AND_WAIT</code>, pending steps and the running step are not canceled; for a step that fails with this parameter set to <code>TERMINATE_CLUSTER</code>, the cluster does not terminate.</p>
pub fn set_action_on_failure(
mut self,
input: std::option::Option<crate::model::ActionOnFailure>,
) -> Self {
self.action_on_failure = input;
self
}
/// <p>The JAR file used for the step.</p>
pub fn hadoop_jar_step(mut self, input: crate::model::HadoopJarStepConfig) -> Self {
self.hadoop_jar_step = Some(input);
self
}
/// <p>The JAR file used for the step.</p>
pub fn set_hadoop_jar_step(
mut self,
input: std::option::Option<crate::model::HadoopJarStepConfig>,
) -> Self {
self.hadoop_jar_step = input;
self
}
/// Consumes the builder and constructs a [`StepConfig`](crate::model::StepConfig)
pub fn build(self) -> crate::model::StepConfig {
crate::model::StepConfig {
name: self.name,
action_on_failure: self.action_on_failure,
hadoop_jar_step: self.hadoop_jar_step,
}
}
}
}
impl StepConfig {
/// Creates a new builder-style object to manufacture [`StepConfig`](crate::model::StepConfig)
pub fn builder() -> crate::model::step_config::Builder {
crate::model::step_config::Builder::default()
}
}
/// <p>A job flow step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct HadoopJarStepConfig {
/// <p>A list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.</p>
pub properties: std::option::Option<std::vec::Vec<crate::model::KeyValue>>,
/// <p>A path to a JAR file run during the step.</p>
pub jar: std::option::Option<std::string::String>,
/// <p>The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.</p>
pub main_class: std::option::Option<std::string::String>,
/// <p>A list of command line arguments passed to the JAR file's main function when executed.</p>
pub args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl HadoopJarStepConfig {
/// <p>A list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.</p>
pub fn properties(&self) -> std::option::Option<&[crate::model::KeyValue]> {
self.properties.as_deref()
}
/// <p>A path to a JAR file run during the step.</p>
pub fn jar(&self) -> std::option::Option<&str> {
self.jar.as_deref()
}
/// <p>The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.</p>
pub fn main_class(&self) -> std::option::Option<&str> {
self.main_class.as_deref()
}
/// <p>A list of command line arguments passed to the JAR file's main function when executed.</p>
pub fn args(&self) -> std::option::Option<&[std::string::String]> {
self.args.as_deref()
}
}
impl std::fmt::Debug for HadoopJarStepConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("HadoopJarStepConfig");
formatter.field("properties", &self.properties);
formatter.field("jar", &self.jar);
formatter.field("main_class", &self.main_class);
formatter.field("args", &self.args);
formatter.finish()
}
}
/// See [`HadoopJarStepConfig`](crate::model::HadoopJarStepConfig)
pub mod hadoop_jar_step_config {
/// A builder for [`HadoopJarStepConfig`](crate::model::HadoopJarStepConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) properties: std::option::Option<std::vec::Vec<crate::model::KeyValue>>,
pub(crate) jar: std::option::Option<std::string::String>,
pub(crate) main_class: std::option::Option<std::string::String>,
pub(crate) args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// Appends an item to `properties`.
///
/// To override the contents of this collection use [`set_properties`](Self::set_properties).
///
/// <p>A list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.</p>
pub fn properties(mut self, input: crate::model::KeyValue) -> Self {
let mut v = self.properties.unwrap_or_default();
v.push(input);
self.properties = Some(v);
self
}
/// <p>A list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.</p>
pub fn set_properties(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::KeyValue>>,
) -> Self {
self.properties = input;
self
}
/// <p>A path to a JAR file run during the step.</p>
pub fn jar(mut self, input: impl Into<std::string::String>) -> Self {
self.jar = Some(input.into());
self
}
/// <p>A path to a JAR file run during the step.</p>
pub fn set_jar(mut self, input: std::option::Option<std::string::String>) -> Self {
self.jar = input;
self
}
/// <p>The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.</p>
pub fn main_class(mut self, input: impl Into<std::string::String>) -> Self {
self.main_class = Some(input.into());
self
}
/// <p>The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.</p>
pub fn set_main_class(mut self, input: std::option::Option<std::string::String>) -> Self {
self.main_class = input;
self
}
/// Appends an item to `args`.
///
/// To override the contents of this collection use [`set_args`](Self::set_args).
///
/// <p>A list of command line arguments passed to the JAR file's main function when executed.</p>
pub fn args(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.args.unwrap_or_default();
v.push(input.into());
self.args = Some(v);
self
}
/// <p>A list of command line arguments passed to the JAR file's main function when executed.</p>
pub fn set_args(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.args = input;
self
}
/// Consumes the builder and constructs a [`HadoopJarStepConfig`](crate::model::HadoopJarStepConfig)
pub fn build(self) -> crate::model::HadoopJarStepConfig {
crate::model::HadoopJarStepConfig {
properties: self.properties,
jar: self.jar,
main_class: self.main_class,
args: self.args,
}
}
}
}
impl HadoopJarStepConfig {
/// Creates a new builder-style object to manufacture [`HadoopJarStepConfig`](crate::model::HadoopJarStepConfig)
pub fn builder() -> crate::model::hadoop_jar_step_config::Builder {
crate::model::hadoop_jar_step_config::Builder::default()
}
}
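// A minimal usage sketch for `HadoopJarStepConfig` (hypothetical helper, not
// part of the generated model). The JAR location, main class, arguments, and
// the Java property are illustrative assumptions.
#[allow(dead_code)]
fn hadoop_jar_step_sketch() -> crate::model::HadoopJarStepConfig {
    crate::model::HadoopJarStepConfig::builder()
        .jar("s3://my-bucket/word-count.jar")
        .main_class("com.example.WordCount")
        // `args` appends one command line argument per call.
        .args("s3://my-bucket/input/")
        .args("s3://my-bucket/output/")
        // `properties` appends one Java property as a `KeyValue` pair.
        .properties(
            crate::model::KeyValue::builder()
                .key("mapreduce.job.reduces")
                .value("2")
                .build(),
        )
        .build()
}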
/// <p>A key-value pair.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct KeyValue {
/// <p>The unique identifier of a key-value pair.</p>
pub key: std::option::Option<std::string::String>,
/// <p>The value part of the identified key.</p>
pub value: std::option::Option<std::string::String>,
}
impl KeyValue {
/// <p>The unique identifier of a key-value pair.</p>
pub fn key(&self) -> std::option::Option<&str> {
self.key.as_deref()
}
/// <p>The value part of the identified key.</p>
pub fn value(&self) -> std::option::Option<&str> {
self.value.as_deref()
}
}
impl std::fmt::Debug for KeyValue {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("KeyValue");
formatter.field("key", &self.key);
formatter.field("value", &self.value);
formatter.finish()
}
}
/// See [`KeyValue`](crate::model::KeyValue)
pub mod key_value {
/// A builder for [`KeyValue`](crate::model::KeyValue)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) key: std::option::Option<std::string::String>,
pub(crate) value: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The unique identifier of a key-value pair.</p>
pub fn key(mut self, input: impl Into<std::string::String>) -> Self {
self.key = Some(input.into());
self
}
/// <p>The unique identifier of a key-value pair.</p>
pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self {
self.key = input;
self
}
/// <p>The value part of the identified key.</p>
pub fn value(mut self, input: impl Into<std::string::String>) -> Self {
self.value = Some(input.into());
self
}
/// <p>The value part of the identified key.</p>
pub fn set_value(mut self, input: std::option::Option<std::string::String>) -> Self {
self.value = input;
self
}
/// Consumes the builder and constructs a [`KeyValue`](crate::model::KeyValue)
pub fn build(self) -> crate::model::KeyValue {
crate::model::KeyValue {
key: self.key,
value: self.value,
}
}
}
}
impl KeyValue {
/// Creates a new builder-style object to manufacture [`KeyValue`](crate::model::KeyValue)
pub fn builder() -> crate::model::key_value::Builder {
crate::model::key_value::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ActionOnFailure {
#[allow(missing_docs)] // documentation missing in model
CancelAndWait,
#[allow(missing_docs)] // documentation missing in model
Continue,
#[allow(missing_docs)] // documentation missing in model
TerminateCluster,
#[allow(missing_docs)] // documentation missing in model
TerminateJobFlow,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ActionOnFailure {
fn from(s: &str) -> Self {
match s {
"CANCEL_AND_WAIT" => ActionOnFailure::CancelAndWait,
"CONTINUE" => ActionOnFailure::Continue,
"TERMINATE_CLUSTER" => ActionOnFailure::TerminateCluster,
"TERMINATE_JOB_FLOW" => ActionOnFailure::TerminateJobFlow,
other => ActionOnFailure::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ActionOnFailure {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ActionOnFailure::from(s))
}
}
impl ActionOnFailure {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ActionOnFailure::CancelAndWait => "CANCEL_AND_WAIT",
ActionOnFailure::Continue => "CONTINUE",
ActionOnFailure::TerminateCluster => "TERMINATE_CLUSTER",
ActionOnFailure::TerminateJobFlow => "TERMINATE_JOB_FLOW",
ActionOnFailure::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"CANCEL_AND_WAIT",
"CONTINUE",
"TERMINATE_CLUSTER",
"TERMINATE_JOB_FLOW",
]
}
}
impl AsRef<str> for ActionOnFailure {
fn as_ref(&self) -> &str {
self.as_str()
}
}
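// A minimal usage sketch for `ActionOnFailure` and `StepConfig` (hypothetical
// helper, not part of the generated model). The step name and JAR details are
// illustrative assumptions; only the `ActionOnFailure` wire strings come from
// the enum above.
#[allow(dead_code)]
fn step_config_sketch() -> crate::model::StepConfig {
    // `ActionOnFailure` converts from its wire string; strings the enum does
    // not recognize become `ActionOnFailure::Unknown(..)` instead of failing.
    let on_failure = crate::model::ActionOnFailure::from("CONTINUE");
    assert_eq!(on_failure.as_str(), "CONTINUE");
    crate::model::StepConfig::builder()
        .name("example-step")
        .action_on_failure(on_failure)
        .hadoop_jar_step(
            crate::model::HadoopJarStepConfig::builder()
                .jar("s3://my-bucket/word-count.jar")
                .args("s3://my-bucket/input/")
                .build(),
        )
        .build()
}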
/// <p>A description of the Amazon EC2 instances on which the cluster (job flow) runs. A valid JobFlowInstancesConfig must contain either InstanceGroups or InstanceFleets, but not both. Alternatively, you can specify MasterInstanceType, SlaveInstanceType, and InstanceCount (all three must be present), but this configuration is not recommended.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobFlowInstancesConfig {
/// <p>The EC2 instance type of the master node.</p>
pub master_instance_type: std::option::Option<std::string::String>,
/// <p>The EC2 instance type of the core and task nodes.</p>
pub slave_instance_type: std::option::Option<std::string::String>,
/// <p>The number of EC2 instances in the cluster.</p>
pub instance_count: std::option::Option<i32>,
/// <p>Configuration for the instance groups in a cluster.</p>
pub instance_groups: std::option::Option<std::vec::Vec<crate::model::InstanceGroupConfig>>,
/// <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
/// <p>Describes the EC2 instances and instance configurations for clusters that use the instance fleet configuration.</p>
pub instance_fleets: std::option::Option<std::vec::Vec<crate::model::InstanceFleetConfig>>,
/// <p>The name of the EC2 key pair that can be used to connect to the master node using SSH as the user called "hadoop."</p>
pub ec2_key_name: std::option::Option<std::string::String>,
/// <p>The Availability Zone in which the cluster runs.</p>
pub placement: std::option::Option<crate::model::PlacementType>,
/// <p>Specifies whether the cluster should remain available after completing all steps. Defaults to <code>true</code>. For more information about configuring cluster termination, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html">Control Cluster Termination</a> in the <i>EMR Management Guide</i>.</p>
pub keep_job_flow_alive_when_no_steps: bool,
/// <p>Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.</p>
pub termination_protected: bool,
/// <p>Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are "0.18" (no longer maintained), "0.20" (no longer maintained), "0.20.205" (no longer maintained), "1.0.3", "2.2.0", or "2.4.0". If you do not set this value, the default of 0.18 is used, unless the <code>AmiVersion</code> parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.</p>
pub hadoop_version: std::option::Option<std::string::String>,
/// <p>Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.</p>
pub ec2_subnet_id: std::option::Option<std::string::String>,
/// <p>Applies to clusters that use the instance fleet configuration. When multiple EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances in the optimal subnet.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
pub ec2_subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The identifier of the Amazon EC2 security group for the master node. If you specify <code>EmrManagedMasterSecurityGroup</code>, you must also specify <code>EmrManagedSlaveSecurityGroup</code>.</p>
pub emr_managed_master_security_group: std::option::Option<std::string::String>,
/// <p>The identifier of the Amazon EC2 security group for the core and task nodes. If you specify <code>EmrManagedSlaveSecurityGroup</code>, you must also specify <code>EmrManagedMasterSecurityGroup</code>.</p>
pub emr_managed_slave_security_group: std::option::Option<std::string::String>,
/// <p>The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.</p>
pub service_access_security_group: std::option::Option<std::string::String>,
/// <p>A list of additional Amazon EC2 security group IDs for the master node.</p>
pub additional_master_security_groups: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>A list of additional Amazon EC2 security group IDs for the core and task nodes.</p>
pub additional_slave_security_groups: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl JobFlowInstancesConfig {
/// <p>The EC2 instance type of the master node.</p>
pub fn master_instance_type(&self) -> std::option::Option<&str> {
self.master_instance_type.as_deref()
}
/// <p>The EC2 instance type of the core and task nodes.</p>
pub fn slave_instance_type(&self) -> std::option::Option<&str> {
self.slave_instance_type.as_deref()
}
/// <p>The number of EC2 instances in the cluster.</p>
pub fn instance_count(&self) -> std::option::Option<i32> {
self.instance_count
}
/// <p>Configuration for the instance groups in a cluster.</p>
pub fn instance_groups(&self) -> std::option::Option<&[crate::model::InstanceGroupConfig]> {
self.instance_groups.as_deref()
}
/// <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
/// <p>Describes the EC2 instances and instance configurations for clusters that use the instance fleet configuration.</p>
pub fn instance_fleets(&self) -> std::option::Option<&[crate::model::InstanceFleetConfig]> {
self.instance_fleets.as_deref()
}
/// <p>The name of the EC2 key pair that can be used to connect to the master node using SSH as the user called "hadoop."</p>
pub fn ec2_key_name(&self) -> std::option::Option<&str> {
self.ec2_key_name.as_deref()
}
/// <p>The Availability Zone in which the cluster runs.</p>
pub fn placement(&self) -> std::option::Option<&crate::model::PlacementType> {
self.placement.as_ref()
}
/// <p>Specifies whether the cluster should remain available after completing all steps. Defaults to <code>true</code>. For more information about configuring cluster termination, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html">Control Cluster Termination</a> in the <i>EMR Management Guide</i>.</p>
pub fn keep_job_flow_alive_when_no_steps(&self) -> bool {
self.keep_job_flow_alive_when_no_steps
}
/// <p>Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.</p>
pub fn termination_protected(&self) -> bool {
self.termination_protected
}
/// <p>Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are "0.18" (no longer maintained), "0.20" (no longer maintained), "0.20.205" (no longer maintained), "1.0.3", "2.2.0", or "2.4.0". If you do not set this value, the default of 0.18 is used, unless the <code>AmiVersion</code> parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.</p>
pub fn hadoop_version(&self) -> std::option::Option<&str> {
self.hadoop_version.as_deref()
}
/// <p>Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.</p>
pub fn ec2_subnet_id(&self) -> std::option::Option<&str> {
self.ec2_subnet_id.as_deref()
}
/// <p>Applies to clusters that use the instance fleet configuration. When multiple EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances in the optimal subnet.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
pub fn ec2_subnet_ids(&self) -> std::option::Option<&[std::string::String]> {
self.ec2_subnet_ids.as_deref()
}
/// <p>The identifier of the Amazon EC2 security group for the master node. If you specify <code>EmrManagedMasterSecurityGroup</code>, you must also specify <code>EmrManagedSlaveSecurityGroup</code>.</p>
pub fn emr_managed_master_security_group(&self) -> std::option::Option<&str> {
self.emr_managed_master_security_group.as_deref()
}
/// <p>The identifier of the Amazon EC2 security group for the core and task nodes. If you specify <code>EmrManagedSlaveSecurityGroup</code>, you must also specify <code>EmrManagedMasterSecurityGroup</code>.</p>
pub fn emr_managed_slave_security_group(&self) -> std::option::Option<&str> {
self.emr_managed_slave_security_group.as_deref()
}
/// <p>The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.</p>
pub fn service_access_security_group(&self) -> std::option::Option<&str> {
self.service_access_security_group.as_deref()
}
/// <p>A list of additional Amazon EC2 security group IDs for the master node.</p>
pub fn additional_master_security_groups(&self) -> std::option::Option<&[std::string::String]> {
self.additional_master_security_groups.as_deref()
}
/// <p>A list of additional Amazon EC2 security group IDs for the core and task nodes.</p>
pub fn additional_slave_security_groups(&self) -> std::option::Option<&[std::string::String]> {
self.additional_slave_security_groups.as_deref()
}
}
impl std::fmt::Debug for JobFlowInstancesConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("JobFlowInstancesConfig");
formatter.field("master_instance_type", &self.master_instance_type);
formatter.field("slave_instance_type", &self.slave_instance_type);
formatter.field("instance_count", &self.instance_count);
formatter.field("instance_groups", &self.instance_groups);
formatter.field("instance_fleets", &self.instance_fleets);
formatter.field("ec2_key_name", &self.ec2_key_name);
formatter.field("placement", &self.placement);
formatter.field(
"keep_job_flow_alive_when_no_steps",
&self.keep_job_flow_alive_when_no_steps,
);
formatter.field("termination_protected", &self.termination_protected);
formatter.field("hadoop_version", &self.hadoop_version);
formatter.field("ec2_subnet_id", &self.ec2_subnet_id);
formatter.field("ec2_subnet_ids", &self.ec2_subnet_ids);
formatter.field(
"emr_managed_master_security_group",
&self.emr_managed_master_security_group,
);
formatter.field(
"emr_managed_slave_security_group",
&self.emr_managed_slave_security_group,
);
formatter.field(
"service_access_security_group",
&self.service_access_security_group,
);
formatter.field(
"additional_master_security_groups",
&self.additional_master_security_groups,
);
formatter.field(
"additional_slave_security_groups",
&self.additional_slave_security_groups,
);
formatter.finish()
}
}
/// See [`JobFlowInstancesConfig`](crate::model::JobFlowInstancesConfig)
pub mod job_flow_instances_config {
/// A builder for [`JobFlowInstancesConfig`](crate::model::JobFlowInstancesConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) master_instance_type: std::option::Option<std::string::String>,
pub(crate) slave_instance_type: std::option::Option<std::string::String>,
pub(crate) instance_count: std::option::Option<i32>,
pub(crate) instance_groups:
std::option::Option<std::vec::Vec<crate::model::InstanceGroupConfig>>,
pub(crate) instance_fleets:
std::option::Option<std::vec::Vec<crate::model::InstanceFleetConfig>>,
pub(crate) ec2_key_name: std::option::Option<std::string::String>,
pub(crate) placement: std::option::Option<crate::model::PlacementType>,
pub(crate) keep_job_flow_alive_when_no_steps: std::option::Option<bool>,
pub(crate) termination_protected: std::option::Option<bool>,
pub(crate) hadoop_version: std::option::Option<std::string::String>,
pub(crate) ec2_subnet_id: std::option::Option<std::string::String>,
pub(crate) ec2_subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) emr_managed_master_security_group: std::option::Option<std::string::String>,
pub(crate) emr_managed_slave_security_group: std::option::Option<std::string::String>,
pub(crate) service_access_security_group: std::option::Option<std::string::String>,
pub(crate) additional_master_security_groups:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) additional_slave_security_groups:
std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The EC2 instance type of the master node.</p>
pub fn master_instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.master_instance_type = Some(input.into());
self
}
/// <p>The EC2 instance type of the master node.</p>
pub fn set_master_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.master_instance_type = input;
self
}
/// <p>The EC2 instance type of the core and task nodes.</p>
pub fn slave_instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.slave_instance_type = Some(input.into());
self
}
/// <p>The EC2 instance type of the core and task nodes.</p>
pub fn set_slave_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.slave_instance_type = input;
self
}
/// <p>The number of EC2 instances in the cluster.</p>
pub fn instance_count(mut self, input: i32) -> Self {
self.instance_count = Some(input);
self
}
/// <p>The number of EC2 instances in the cluster.</p>
pub fn set_instance_count(mut self, input: std::option::Option<i32>) -> Self {
self.instance_count = input;
self
}
/// Appends an item to `instance_groups`.
///
/// To override the contents of this collection use [`set_instance_groups`](Self::set_instance_groups).
///
/// <p>Configuration for the instance groups in a cluster.</p>
pub fn instance_groups(mut self, input: crate::model::InstanceGroupConfig) -> Self {
let mut v = self.instance_groups.unwrap_or_default();
v.push(input);
self.instance_groups = Some(v);
self
}
/// <p>Configuration for the instance groups in a cluster.</p>
pub fn set_instance_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::InstanceGroupConfig>>,
) -> Self {
self.instance_groups = input;
self
}
/// Appends an item to `instance_fleets`.
///
/// To override the contents of this collection use [`set_instance_fleets`](Self::set_instance_fleets).
///
/// <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
/// <p>Describes the EC2 instances and instance configurations for clusters that use the instance fleet configuration.</p>
pub fn instance_fleets(mut self, input: crate::model::InstanceFleetConfig) -> Self {
let mut v = self.instance_fleets.unwrap_or_default();
v.push(input);
self.instance_fleets = Some(v);
self
}
/// <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
/// <p>Describes the EC2 instances and instance configurations for clusters that use the instance fleet configuration.</p>
pub fn set_instance_fleets(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::InstanceFleetConfig>>,
) -> Self {
self.instance_fleets = input;
self
}
/// <p>The name of the EC2 key pair that can be used to connect to the master node using SSH as the user called "hadoop."</p>
pub fn ec2_key_name(mut self, input: impl Into<std::string::String>) -> Self {
self.ec2_key_name = Some(input.into());
self
}
/// <p>The name of the EC2 key pair that can be used to connect to the master node using SSH as the user called "hadoop."</p>
pub fn set_ec2_key_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ec2_key_name = input;
self
}
/// <p>The Availability Zone in which the cluster runs.</p>
pub fn placement(mut self, input: crate::model::PlacementType) -> Self {
self.placement = Some(input);
self
}
/// <p>The Availability Zone in which the cluster runs.</p>
pub fn set_placement(
mut self,
input: std::option::Option<crate::model::PlacementType>,
) -> Self {
self.placement = input;
self
}
/// <p>Specifies whether the cluster should remain available after completing all steps. Defaults to <code>true</code>. For more information about configuring cluster termination, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html">Control Cluster Termination</a> in the <i>EMR Management Guide</i>.</p>
pub fn keep_job_flow_alive_when_no_steps(mut self, input: bool) -> Self {
self.keep_job_flow_alive_when_no_steps = Some(input);
self
}
/// <p>Specifies whether the cluster should remain available after completing all steps. Defaults to <code>true</code>. For more information about configuring cluster termination, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html">Control Cluster Termination</a> in the <i>EMR Management Guide</i>.</p>
pub fn set_keep_job_flow_alive_when_no_steps(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.keep_job_flow_alive_when_no_steps = input;
self
}
/// <p>Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.</p>
pub fn termination_protected(mut self, input: bool) -> Self {
self.termination_protected = Some(input);
self
}
/// <p>Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.</p>
pub fn set_termination_protected(mut self, input: std::option::Option<bool>) -> Self {
self.termination_protected = input;
self
}
/// <p>Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are "0.18" (no longer maintained), "0.20" (no longer maintained), "0.20.205" (no longer maintained), "1.0.3", "2.2.0", or "2.4.0". If you do not set this value, the default of 0.18 is used, unless the <code>AmiVersion</code> parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.</p>
pub fn hadoop_version(mut self, input: impl Into<std::string::String>) -> Self {
self.hadoop_version = Some(input.into());
self
}
/// <p>Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are "0.18" (no longer maintained), "0.20" (no longer maintained), "0.20.205" (no longer maintained), "1.0.3", "2.2.0", or "2.4.0". If you do not set this value, the default of 0.18 is used, unless the <code>AmiVersion</code> parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.</p>
pub fn set_hadoop_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.hadoop_version = input;
self
}
/// <p>Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.</p>
pub fn ec2_subnet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.ec2_subnet_id = Some(input.into());
self
}
/// <p>Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.</p>
pub fn set_ec2_subnet_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.ec2_subnet_id = input;
self
}
/// Appends an item to `ec2_subnet_ids`.
///
/// To override the contents of this collection use [`set_ec2_subnet_ids`](Self::set_ec2_subnet_ids).
///
/// <p>Applies to clusters that use the instance fleet configuration. When multiple EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances in the optimal subnet.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
pub fn ec2_subnet_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.ec2_subnet_ids.unwrap_or_default();
v.push(input.into());
self.ec2_subnet_ids = Some(v);
self
}
/// <p>Applies to clusters that use the instance fleet configuration. When multiple EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances in the optimal subnet.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
pub fn set_ec2_subnet_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.ec2_subnet_ids = input;
self
}
/// <p>The identifier of the Amazon EC2 security group for the master node. If you specify <code>EmrManagedMasterSecurityGroup</code>, you must also specify <code>EmrManagedSlaveSecurityGroup</code>.</p>
pub fn emr_managed_master_security_group(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.emr_managed_master_security_group = Some(input.into());
self
}
/// <p>The identifier of the Amazon EC2 security group for the master node. If you specify <code>EmrManagedMasterSecurityGroup</code>, you must also specify <code>EmrManagedSlaveSecurityGroup</code>.</p>
pub fn set_emr_managed_master_security_group(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.emr_managed_master_security_group = input;
self
}
/// <p>The identifier of the Amazon EC2 security group for the core and task nodes. If you specify <code>EmrManagedSlaveSecurityGroup</code>, you must also specify <code>EmrManagedMasterSecurityGroup</code>.</p>
pub fn emr_managed_slave_security_group(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.emr_managed_slave_security_group = Some(input.into());
self
}
/// <p>The identifier of the Amazon EC2 security group for the core and task nodes. If you specify <code>EmrManagedSlaveSecurityGroup</code>, you must also specify <code>EmrManagedMasterSecurityGroup</code>.</p>
pub fn set_emr_managed_slave_security_group(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.emr_managed_slave_security_group = input;
self
}
/// <p>The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.</p>
pub fn service_access_security_group(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.service_access_security_group = Some(input.into());
self
}
/// <p>The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.</p>
pub fn set_service_access_security_group(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.service_access_security_group = input;
self
}
/// Appends an item to `additional_master_security_groups`.
///
/// To override the contents of this collection use [`set_additional_master_security_groups`](Self::set_additional_master_security_groups).
///
/// <p>A list of additional Amazon EC2 security group IDs for the master node.</p>
pub fn additional_master_security_groups(
mut self,
input: impl Into<std::string::String>,
) -> Self {
let mut v = self.additional_master_security_groups.unwrap_or_default();
v.push(input.into());
self.additional_master_security_groups = Some(v);
self
}
/// <p>A list of additional Amazon EC2 security group IDs for the master node.</p>
pub fn set_additional_master_security_groups(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.additional_master_security_groups = input;
self
}
/// Appends an item to `additional_slave_security_groups`.
///
/// To override the contents of this collection use [`set_additional_slave_security_groups`](Self::set_additional_slave_security_groups).
///
/// <p>A list of additional Amazon EC2 security group IDs for the core and task nodes.</p>
pub fn additional_slave_security_groups(
mut self,
input: impl Into<std::string::String>,
) -> Self {
let mut v = self.additional_slave_security_groups.unwrap_or_default();
v.push(input.into());
self.additional_slave_security_groups = Some(v);
self
}
/// <p>A list of additional Amazon EC2 security group IDs for the core and task nodes.</p>
pub fn set_additional_slave_security_groups(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.additional_slave_security_groups = input;
self
}
/// Consumes the builder and constructs a [`JobFlowInstancesConfig`](crate::model::JobFlowInstancesConfig)
pub fn build(self) -> crate::model::JobFlowInstancesConfig {
crate::model::JobFlowInstancesConfig {
master_instance_type: self.master_instance_type,
slave_instance_type: self.slave_instance_type,
instance_count: self.instance_count,
instance_groups: self.instance_groups,
instance_fleets: self.instance_fleets,
ec2_key_name: self.ec2_key_name,
placement: self.placement,
keep_job_flow_alive_when_no_steps: self
.keep_job_flow_alive_when_no_steps
.unwrap_or_default(),
termination_protected: self.termination_protected.unwrap_or_default(),
hadoop_version: self.hadoop_version,
ec2_subnet_id: self.ec2_subnet_id,
ec2_subnet_ids: self.ec2_subnet_ids,
emr_managed_master_security_group: self.emr_managed_master_security_group,
emr_managed_slave_security_group: self.emr_managed_slave_security_group,
service_access_security_group: self.service_access_security_group,
additional_master_security_groups: self.additional_master_security_groups,
additional_slave_security_groups: self.additional_slave_security_groups,
}
}
}
}
impl JobFlowInstancesConfig {
/// Creates a new builder-style object to manufacture [`JobFlowInstancesConfig`](crate::model::JobFlowInstancesConfig)
pub fn builder() -> crate::model::job_flow_instances_config::Builder {
crate::model::job_flow_instances_config::Builder::default()
}
}
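// A minimal usage sketch for `JobFlowInstancesConfig` using the uniform
// instance group style fields (hypothetical helper, not part of the generated
// model). Instance types, count, key pair name, and subnet ID are illustrative
// assumptions.
#[allow(dead_code)]
fn job_flow_instances_sketch() -> crate::model::JobFlowInstancesConfig {
    crate::model::JobFlowInstancesConfig::builder()
        .master_instance_type("m5.xlarge")
        .slave_instance_type("m5.xlarge")
        .instance_count(3)
        .ec2_key_name("my-key-pair")
        .ec2_subnet_id("subnet-0123456789abcdef0")
        // Keep the cluster alive after the last step instead of auto-terminating.
        .keep_job_flow_alive_when_no_steps(true)
        .termination_protected(false)
        .build()
}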
/// <p>The Amazon EC2 Availability Zone configuration of the cluster (job flow).</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PlacementType {
/// <p>The Amazon EC2 Availability Zone for the cluster. <code>AvailabilityZone</code> is used for uniform instance groups, while <code>AvailabilityZones</code> (plural) is used for instance fleets.</p>
pub availability_zone: std::option::Option<std::string::String>,
/// <p>When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances in the optimal Availability Zone. <code>AvailabilityZones</code> is used for instance fleets, while <code>AvailabilityZone</code> (singular) is used for uniform instance groups.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
pub availability_zones: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl PlacementType {
/// <p>The Amazon EC2 Availability Zone for the cluster. <code>AvailabilityZone</code> is used for uniform instance groups, while <code>AvailabilityZones</code> (plural) is used for instance fleets.</p>
pub fn availability_zone(&self) -> std::option::Option<&str> {
self.availability_zone.as_deref()
}
/// <p>When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances in the optimal Availability Zone. <code>AvailabilityZones</code> is used for instance fleets, while <code>AvailabilityZone</code> (singular) is used for uniform instance groups.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
pub fn availability_zones(&self) -> std::option::Option<&[std::string::String]> {
self.availability_zones.as_deref()
}
}
impl std::fmt::Debug for PlacementType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PlacementType");
formatter.field("availability_zone", &self.availability_zone);
formatter.field("availability_zones", &self.availability_zones);
formatter.finish()
}
}
/// See [`PlacementType`](crate::model::PlacementType)
pub mod placement_type {
/// A builder for [`PlacementType`](crate::model::PlacementType)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) availability_zone: std::option::Option<std::string::String>,
pub(crate) availability_zones: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The Amazon EC2 Availability Zone for the cluster. <code>AvailabilityZone</code> is used for uniform instance groups, while <code>AvailabilityZones</code> (plural) is used for instance fleets.</p>
pub fn availability_zone(mut self, input: impl Into<std::string::String>) -> Self {
self.availability_zone = Some(input.into());
self
}
/// <p>The Amazon EC2 Availability Zone for the cluster. <code>AvailabilityZone</code> is used for uniform instance groups, while <code>AvailabilityZones</code> (plural) is used for instance fleets.</p>
pub fn set_availability_zone(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.availability_zone = input;
self
}
/// Appends an item to `availability_zones`.
///
/// To override the contents of this collection use [`set_availability_zones`](Self::set_availability_zones).
///
/// <p>When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances in the optimal Availability Zone. <code>AvailabilityZones</code> is used for instance fleets, while <code>AvailabilityZone</code> (singular) is used for uniform instance groups.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
pub fn availability_zones(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.availability_zones.unwrap_or_default();
v.push(input.into());
self.availability_zones = Some(v);
self
}
/// <p>When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances in the optimal Availability Zone. <code>AvailabilityZones</code> is used for instance fleets, while <code>AvailabilityZone</code> (singular) is used for uniform instance groups.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
pub fn set_availability_zones(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.availability_zones = input;
self
}
/// Consumes the builder and constructs a [`PlacementType`](crate::model::PlacementType)
pub fn build(self) -> crate::model::PlacementType {
crate::model::PlacementType {
availability_zone: self.availability_zone,
availability_zones: self.availability_zones,
}
}
}
}
impl PlacementType {
/// Creates a new builder-style object to manufacture [`PlacementType`](crate::model::PlacementType)
pub fn builder() -> crate::model::placement_type::Builder {
crate::model::placement_type::Builder::default()
}
}
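// A minimal usage sketch for `PlacementType` in the instance fleet style,
// where several Availability Zones are offered and Amazon EMR picks the
// optimal one (hypothetical helper, not part of the generated model; the zone
// names are illustrative assumptions).
#[allow(dead_code)]
fn placement_type_sketch() -> crate::model::PlacementType {
    crate::model::PlacementType::builder()
        // `availability_zones` appends one zone per call.
        .availability_zones("us-east-1a")
        .availability_zones("us-east-1b")
        .build()
}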
/// <p>The configuration that defines an instance fleet.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceFleetConfig {
/// <p>The friendly name of the instance fleet.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The node type that the instance fleet hosts. Valid values are MASTER, CORE, and TASK.</p>
pub instance_fleet_type: std::option::Option<crate::model::InstanceFleetType>,
/// <p>The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When an On-Demand Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.</p> <note>
/// <p>If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using <code>TargetSpotCapacity</code>. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub target_on_demand_capacity: std::option::Option<i32>,
/// <p>The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When a Spot Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.</p> <note>
/// <p>If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub target_spot_capacity: std::option::Option<i32>,
/// <p>The instance type configurations that define the EC2 instances in the instance fleet.</p>
pub instance_type_configs: std::option::Option<std::vec::Vec<crate::model::InstanceTypeConfig>>,
/// <p>The launch specification for the instance fleet.</p>
pub launch_specifications:
std::option::Option<crate::model::InstanceFleetProvisioningSpecifications>,
}
impl InstanceFleetConfig {
/// <p>The friendly name of the instance fleet.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The node type that the instance fleet hosts. Valid values are MASTER, CORE, and TASK.</p>
pub fn instance_fleet_type(&self) -> std::option::Option<&crate::model::InstanceFleetType> {
self.instance_fleet_type.as_ref()
}
/// <p>The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When an On-Demand Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.</p> <note>
/// <p>If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using <code>TargetSpotCapacity</code>. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn target_on_demand_capacity(&self) -> std::option::Option<i32> {
self.target_on_demand_capacity
}
/// <p>The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When a Spot Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.</p> <note>
/// <p>If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn target_spot_capacity(&self) -> std::option::Option<i32> {
self.target_spot_capacity
}
/// <p>The instance type configurations that define the EC2 instances in the instance fleet.</p>
pub fn instance_type_configs(
&self,
) -> std::option::Option<&[crate::model::InstanceTypeConfig]> {
self.instance_type_configs.as_deref()
}
/// <p>The launch specification for the instance fleet.</p>
pub fn launch_specifications(
&self,
) -> std::option::Option<&crate::model::InstanceFleetProvisioningSpecifications> {
self.launch_specifications.as_ref()
}
}
impl std::fmt::Debug for InstanceFleetConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceFleetConfig");
formatter.field("name", &self.name);
formatter.field("instance_fleet_type", &self.instance_fleet_type);
formatter.field("target_on_demand_capacity", &self.target_on_demand_capacity);
formatter.field("target_spot_capacity", &self.target_spot_capacity);
formatter.field("instance_type_configs", &self.instance_type_configs);
formatter.field("launch_specifications", &self.launch_specifications);
formatter.finish()
}
}
/// See [`InstanceFleetConfig`](crate::model::InstanceFleetConfig)
pub mod instance_fleet_config {
/// A builder for [`InstanceFleetConfig`](crate::model::InstanceFleetConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) instance_fleet_type: std::option::Option<crate::model::InstanceFleetType>,
pub(crate) target_on_demand_capacity: std::option::Option<i32>,
pub(crate) target_spot_capacity: std::option::Option<i32>,
pub(crate) instance_type_configs:
std::option::Option<std::vec::Vec<crate::model::InstanceTypeConfig>>,
pub(crate) launch_specifications:
std::option::Option<crate::model::InstanceFleetProvisioningSpecifications>,
}
impl Builder {
/// <p>The friendly name of the instance fleet.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The friendly name of the instance fleet.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The node type that the instance fleet hosts. Valid values are MASTER, CORE, and TASK.</p>
pub fn instance_fleet_type(mut self, input: crate::model::InstanceFleetType) -> Self {
self.instance_fleet_type = Some(input);
self
}
/// <p>The node type that the instance fleet hosts. Valid values are MASTER, CORE, and TASK.</p>
pub fn set_instance_fleet_type(
mut self,
input: std::option::Option<crate::model::InstanceFleetType>,
) -> Self {
self.instance_fleet_type = input;
self
}
/// <p>The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When an On-Demand Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.</p> <note>
/// <p>If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using <code>TargetSpotCapacity</code>. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn target_on_demand_capacity(mut self, input: i32) -> Self {
self.target_on_demand_capacity = Some(input);
self
}
/// <p>The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When an On-Demand Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.</p> <note>
/// <p>If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using <code>TargetSpotCapacity</code>. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn set_target_on_demand_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.target_on_demand_capacity = input;
self
}
/// <p>The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When a Spot Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.</p> <note>
/// <p>If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn target_spot_capacity(mut self, input: i32) -> Self {
self.target_spot_capacity = Some(input);
self
}
/// <p>The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When a Spot Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.</p> <note>
/// <p>If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn set_target_spot_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.target_spot_capacity = input;
self
}
/// Appends an item to `instance_type_configs`.
///
/// To override the contents of this collection use [`set_instance_type_configs`](Self::set_instance_type_configs).
///
/// <p>The instance type configurations that define the EC2 instances in the instance fleet.</p>
pub fn instance_type_configs(mut self, input: crate::model::InstanceTypeConfig) -> Self {
let mut v = self.instance_type_configs.unwrap_or_default();
v.push(input);
self.instance_type_configs = Some(v);
self
}
/// <p>The instance type configurations that define the EC2 instances in the instance fleet.</p>
pub fn set_instance_type_configs(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::InstanceTypeConfig>>,
) -> Self {
self.instance_type_configs = input;
self
}
/// <p>The launch specification for the instance fleet.</p>
pub fn launch_specifications(
mut self,
input: crate::model::InstanceFleetProvisioningSpecifications,
) -> Self {
self.launch_specifications = Some(input);
self
}
/// <p>The launch specification for the instance fleet.</p>
pub fn set_launch_specifications(
mut self,
input: std::option::Option<crate::model::InstanceFleetProvisioningSpecifications>,
) -> Self {
self.launch_specifications = input;
self
}
        /// Consumes the builder and constructs an [`InstanceFleetConfig`](crate::model::InstanceFleetConfig)
pub fn build(self) -> crate::model::InstanceFleetConfig {
crate::model::InstanceFleetConfig {
name: self.name,
instance_fleet_type: self.instance_fleet_type,
target_on_demand_capacity: self.target_on_demand_capacity,
target_spot_capacity: self.target_spot_capacity,
instance_type_configs: self.instance_type_configs,
launch_specifications: self.launch_specifications,
}
}
}
}
impl InstanceFleetConfig {
/// Creates a new builder-style object to manufacture [`InstanceFleetConfig`](crate::model::InstanceFleetConfig)
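    ///
    /// A minimal usage sketch (the crate name `aws_sdk_emr`, the
    /// `InstanceFleetType::Task` variant name, and the capacity values are
    /// assumptions for illustration):
    ///
    /// ```no_run
    /// let fleet = aws_sdk_emr::model::InstanceFleetConfig::builder()
    ///     .name("task-fleet")
    ///     // Variant name assumed from the documented MASTER/CORE/TASK values.
    ///     .instance_fleet_type(aws_sdk_emr::model::InstanceFleetType::Task)
    ///     // Targets are weighted-capacity units, not instance counts.
    ///     .target_on_demand_capacity(4)
    ///     .target_spot_capacity(8)
    ///     .build();
    /// ```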
pub fn builder() -> crate::model::instance_fleet_config::Builder {
crate::model::instance_fleet_config::Builder::default()
}
}
/// <p>The launch specification for Spot Instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand and Spot Instance allocation strategies are available in Amazon EMR version 5.12.1 and later.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceFleetProvisioningSpecifications {
/// <p>The launch specification for Spot Instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.</p>
pub spot_specification: std::option::Option<crate::model::SpotProvisioningSpecification>,
/// <p> The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy. </p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.</p>
/// </note>
pub on_demand_specification:
std::option::Option<crate::model::OnDemandProvisioningSpecification>,
}
impl InstanceFleetProvisioningSpecifications {
/// <p>The launch specification for Spot Instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.</p>
pub fn spot_specification(
&self,
) -> std::option::Option<&crate::model::SpotProvisioningSpecification> {
self.spot_specification.as_ref()
}
/// <p> The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy. </p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.</p>
/// </note>
pub fn on_demand_specification(
&self,
) -> std::option::Option<&crate::model::OnDemandProvisioningSpecification> {
self.on_demand_specification.as_ref()
}
}
impl std::fmt::Debug for InstanceFleetProvisioningSpecifications {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceFleetProvisioningSpecifications");
formatter.field("spot_specification", &self.spot_specification);
formatter.field("on_demand_specification", &self.on_demand_specification);
formatter.finish()
}
}
/// See [`InstanceFleetProvisioningSpecifications`](crate::model::InstanceFleetProvisioningSpecifications)
pub mod instance_fleet_provisioning_specifications {
/// A builder for [`InstanceFleetProvisioningSpecifications`](crate::model::InstanceFleetProvisioningSpecifications)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) spot_specification:
std::option::Option<crate::model::SpotProvisioningSpecification>,
pub(crate) on_demand_specification:
std::option::Option<crate::model::OnDemandProvisioningSpecification>,
}
impl Builder {
/// <p>The launch specification for Spot Instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.</p>
pub fn spot_specification(
mut self,
input: crate::model::SpotProvisioningSpecification,
) -> Self {
self.spot_specification = Some(input);
self
}
/// <p>The launch specification for Spot Instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.</p>
pub fn set_spot_specification(
mut self,
input: std::option::Option<crate::model::SpotProvisioningSpecification>,
) -> Self {
self.spot_specification = input;
self
}
/// <p> The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy. </p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.</p>
/// </note>
pub fn on_demand_specification(
mut self,
input: crate::model::OnDemandProvisioningSpecification,
) -> Self {
self.on_demand_specification = Some(input);
self
}
/// <p> The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy. </p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.</p>
/// </note>
pub fn set_on_demand_specification(
mut self,
input: std::option::Option<crate::model::OnDemandProvisioningSpecification>,
) -> Self {
self.on_demand_specification = input;
self
}
        /// Consumes the builder and constructs an [`InstanceFleetProvisioningSpecifications`](crate::model::InstanceFleetProvisioningSpecifications)
pub fn build(self) -> crate::model::InstanceFleetProvisioningSpecifications {
crate::model::InstanceFleetProvisioningSpecifications {
spot_specification: self.spot_specification,
on_demand_specification: self.on_demand_specification,
}
}
}
}
impl InstanceFleetProvisioningSpecifications {
/// Creates a new builder-style object to manufacture [`InstanceFleetProvisioningSpecifications`](crate::model::InstanceFleetProvisioningSpecifications)
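    ///
    /// A minimal usage sketch that nests a Spot and an On-Demand specification
    /// (the crate name `aws_sdk_emr` and the timeout value are assumptions for
    /// illustration):
    ///
    /// ```no_run
    /// use aws_sdk_emr::model::{
    ///     InstanceFleetProvisioningSpecifications, OnDemandProvisioningSpecification,
    ///     SpotProvisioningSpecification, SpotProvisioningTimeoutAction,
    /// };
    ///
    /// let launch_specs = InstanceFleetProvisioningSpecifications::builder()
    ///     .spot_specification(
    ///         SpotProvisioningSpecification::builder()
    ///             .timeout_duration_minutes(30)
    ///             .timeout_action(SpotProvisioningTimeoutAction::SwitchToOnDemand)
    ///             .build(),
    ///     )
    ///     .on_demand_specification(OnDemandProvisioningSpecification::builder().build())
    ///     .build();
    /// ```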
pub fn builder() -> crate::model::instance_fleet_provisioning_specifications::Builder {
crate::model::instance_fleet_provisioning_specifications::Builder::default()
}
}
/// <p> The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy. </p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct OnDemandProvisioningSpecification {
    /// <p>Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is <code>lowest-price</code> (the default), which launches the lowest-priced instances first.</p>
pub allocation_strategy:
std::option::Option<crate::model::OnDemandProvisioningAllocationStrategy>,
    /// <p>The Capacity Reservation options for On-Demand Instances in the instance fleet, which describe the strategy for using unused Capacity Reservations to fulfill On-Demand capacity.</p>
pub capacity_reservation_options:
std::option::Option<crate::model::OnDemandCapacityReservationOptions>,
}
impl OnDemandProvisioningSpecification {
    /// <p>Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is <code>lowest-price</code> (the default), which launches the lowest-priced instances first.</p>
pub fn allocation_strategy(
&self,
) -> std::option::Option<&crate::model::OnDemandProvisioningAllocationStrategy> {
self.allocation_strategy.as_ref()
}
    /// <p>The Capacity Reservation options for On-Demand Instances in the instance fleet, which describe the strategy for using unused Capacity Reservations to fulfill On-Demand capacity.</p>
pub fn capacity_reservation_options(
&self,
) -> std::option::Option<&crate::model::OnDemandCapacityReservationOptions> {
self.capacity_reservation_options.as_ref()
}
}
impl std::fmt::Debug for OnDemandProvisioningSpecification {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("OnDemandProvisioningSpecification");
formatter.field("allocation_strategy", &self.allocation_strategy);
formatter.field(
"capacity_reservation_options",
&self.capacity_reservation_options,
);
formatter.finish()
}
}
/// See [`OnDemandProvisioningSpecification`](crate::model::OnDemandProvisioningSpecification)
pub mod on_demand_provisioning_specification {
/// A builder for [`OnDemandProvisioningSpecification`](crate::model::OnDemandProvisioningSpecification)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) allocation_strategy:
std::option::Option<crate::model::OnDemandProvisioningAllocationStrategy>,
pub(crate) capacity_reservation_options:
std::option::Option<crate::model::OnDemandCapacityReservationOptions>,
}
impl Builder {
        /// <p>Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is <code>lowest-price</code> (the default), which launches the lowest-priced instances first.</p>
pub fn allocation_strategy(
mut self,
input: crate::model::OnDemandProvisioningAllocationStrategy,
) -> Self {
self.allocation_strategy = Some(input);
self
}
        /// <p>Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is <code>lowest-price</code> (the default), which launches the lowest-priced instances first.</p>
pub fn set_allocation_strategy(
mut self,
input: std::option::Option<crate::model::OnDemandProvisioningAllocationStrategy>,
) -> Self {
self.allocation_strategy = input;
self
}
        /// <p>The Capacity Reservation options for On-Demand Instances in the instance fleet, which describe the strategy for using unused Capacity Reservations to fulfill On-Demand capacity.</p>
pub fn capacity_reservation_options(
mut self,
input: crate::model::OnDemandCapacityReservationOptions,
) -> Self {
self.capacity_reservation_options = Some(input);
self
}
        /// <p>The Capacity Reservation options for On-Demand Instances in the instance fleet, which describe the strategy for using unused Capacity Reservations to fulfill On-Demand capacity.</p>
pub fn set_capacity_reservation_options(
mut self,
input: std::option::Option<crate::model::OnDemandCapacityReservationOptions>,
) -> Self {
self.capacity_reservation_options = input;
self
}
        /// Consumes the builder and constructs an [`OnDemandProvisioningSpecification`](crate::model::OnDemandProvisioningSpecification)
pub fn build(self) -> crate::model::OnDemandProvisioningSpecification {
crate::model::OnDemandProvisioningSpecification {
allocation_strategy: self.allocation_strategy,
capacity_reservation_options: self.capacity_reservation_options,
}
}
}
}
impl OnDemandProvisioningSpecification {
/// Creates a new builder-style object to manufacture [`OnDemandProvisioningSpecification`](crate::model::OnDemandProvisioningSpecification)
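    ///
    /// A minimal usage sketch selecting the only documented allocation strategy,
    /// `lowest-price` (the crate name `aws_sdk_emr` is an assumption):
    ///
    /// ```no_run
    /// use aws_sdk_emr::model::{
    ///     OnDemandProvisioningAllocationStrategy, OnDemandProvisioningSpecification,
    /// };
    ///
    /// let spec = OnDemandProvisioningSpecification::builder()
    ///     .allocation_strategy(OnDemandProvisioningAllocationStrategy::LowestPrice)
    ///     .build();
    /// ```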
pub fn builder() -> crate::model::on_demand_provisioning_specification::Builder {
crate::model::on_demand_provisioning_specification::Builder::default()
}
}
/// <p>Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct OnDemandCapacityReservationOptions {
/// <p>Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.</p>
/// <p>If you specify <code>use-capacity-reservations-first</code>, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (<code>lowest-price</code>) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (<code>lowest-price</code>).</p>
/// <p>If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.</p>
pub usage_strategy: std::option::Option<crate::model::OnDemandCapacityReservationUsageStrategy>,
/// <p>Indicates the instance's Capacity Reservation preferences. Possible preferences include:</p>
/// <ul>
/// <li> <p> <code>open</code> - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).</p> </li>
/// <li> <p> <code>none</code> - The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.</p> </li>
/// </ul>
pub capacity_reservation_preference:
std::option::Option<crate::model::OnDemandCapacityReservationPreference>,
/// <p>The ARN of the Capacity Reservation resource group in which to run the instance.</p>
pub capacity_reservation_resource_group_arn: std::option::Option<std::string::String>,
}
impl OnDemandCapacityReservationOptions {
/// <p>Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.</p>
/// <p>If you specify <code>use-capacity-reservations-first</code>, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (<code>lowest-price</code>) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (<code>lowest-price</code>).</p>
/// <p>If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.</p>
pub fn usage_strategy(
&self,
) -> std::option::Option<&crate::model::OnDemandCapacityReservationUsageStrategy> {
self.usage_strategy.as_ref()
}
/// <p>Indicates the instance's Capacity Reservation preferences. Possible preferences include:</p>
/// <ul>
/// <li> <p> <code>open</code> - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).</p> </li>
/// <li> <p> <code>none</code> - The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.</p> </li>
/// </ul>
pub fn capacity_reservation_preference(
&self,
) -> std::option::Option<&crate::model::OnDemandCapacityReservationPreference> {
self.capacity_reservation_preference.as_ref()
}
/// <p>The ARN of the Capacity Reservation resource group in which to run the instance.</p>
pub fn capacity_reservation_resource_group_arn(&self) -> std::option::Option<&str> {
self.capacity_reservation_resource_group_arn.as_deref()
}
}
impl std::fmt::Debug for OnDemandCapacityReservationOptions {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("OnDemandCapacityReservationOptions");
formatter.field("usage_strategy", &self.usage_strategy);
formatter.field(
"capacity_reservation_preference",
&self.capacity_reservation_preference,
);
formatter.field(
"capacity_reservation_resource_group_arn",
&self.capacity_reservation_resource_group_arn,
);
formatter.finish()
}
}
/// See [`OnDemandCapacityReservationOptions`](crate::model::OnDemandCapacityReservationOptions)
pub mod on_demand_capacity_reservation_options {
/// A builder for [`OnDemandCapacityReservationOptions`](crate::model::OnDemandCapacityReservationOptions)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) usage_strategy:
std::option::Option<crate::model::OnDemandCapacityReservationUsageStrategy>,
pub(crate) capacity_reservation_preference:
std::option::Option<crate::model::OnDemandCapacityReservationPreference>,
pub(crate) capacity_reservation_resource_group_arn:
std::option::Option<std::string::String>,
}
impl Builder {
/// <p>Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.</p>
/// <p>If you specify <code>use-capacity-reservations-first</code>, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (<code>lowest-price</code>) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (<code>lowest-price</code>).</p>
/// <p>If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.</p>
pub fn usage_strategy(
mut self,
input: crate::model::OnDemandCapacityReservationUsageStrategy,
) -> Self {
self.usage_strategy = Some(input);
self
}
/// <p>Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.</p>
/// <p>If you specify <code>use-capacity-reservations-first</code>, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (<code>lowest-price</code>) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (<code>lowest-price</code>).</p>
/// <p>If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.</p>
pub fn set_usage_strategy(
mut self,
input: std::option::Option<crate::model::OnDemandCapacityReservationUsageStrategy>,
) -> Self {
self.usage_strategy = input;
self
}
/// <p>Indicates the instance's Capacity Reservation preferences. Possible preferences include:</p>
/// <ul>
/// <li> <p> <code>open</code> - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).</p> </li>
/// <li> <p> <code>none</code> - The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.</p> </li>
/// </ul>
pub fn capacity_reservation_preference(
mut self,
input: crate::model::OnDemandCapacityReservationPreference,
) -> Self {
self.capacity_reservation_preference = Some(input);
self
}
/// <p>Indicates the instance's Capacity Reservation preferences. Possible preferences include:</p>
/// <ul>
/// <li> <p> <code>open</code> - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).</p> </li>
/// <li> <p> <code>none</code> - The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.</p> </li>
/// </ul>
pub fn set_capacity_reservation_preference(
mut self,
input: std::option::Option<crate::model::OnDemandCapacityReservationPreference>,
) -> Self {
self.capacity_reservation_preference = input;
self
}
/// <p>The ARN of the Capacity Reservation resource group in which to run the instance.</p>
pub fn capacity_reservation_resource_group_arn(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.capacity_reservation_resource_group_arn = Some(input.into());
self
}
/// <p>The ARN of the Capacity Reservation resource group in which to run the instance.</p>
pub fn set_capacity_reservation_resource_group_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.capacity_reservation_resource_group_arn = input;
self
}
        /// Consumes the builder and constructs an [`OnDemandCapacityReservationOptions`](crate::model::OnDemandCapacityReservationOptions)
pub fn build(self) -> crate::model::OnDemandCapacityReservationOptions {
crate::model::OnDemandCapacityReservationOptions {
usage_strategy: self.usage_strategy,
capacity_reservation_preference: self.capacity_reservation_preference,
capacity_reservation_resource_group_arn: self
.capacity_reservation_resource_group_arn,
}
}
}
}
impl OnDemandCapacityReservationOptions {
/// Creates a new builder-style object to manufacture [`OnDemandCapacityReservationOptions`](crate::model::OnDemandCapacityReservationOptions)
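    ///
    /// A minimal usage sketch (the crate name `aws_sdk_emr` is an assumption):
    /// use unused open Capacity Reservations first, falling back to the On-Demand
    /// allocation strategy for any remaining capacity.
    ///
    /// ```no_run
    /// use aws_sdk_emr::model::{
    ///     OnDemandCapacityReservationOptions, OnDemandCapacityReservationPreference,
    ///     OnDemandCapacityReservationUsageStrategy,
    /// };
    ///
    /// let options = OnDemandCapacityReservationOptions::builder()
    ///     .usage_strategy(OnDemandCapacityReservationUsageStrategy::UseCapacityReservationsFirst)
    ///     .capacity_reservation_preference(OnDemandCapacityReservationPreference::Open)
    ///     .build();
    /// ```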
pub fn builder() -> crate::model::on_demand_capacity_reservation_options::Builder {
crate::model::on_demand_capacity_reservation_options::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum OnDemandCapacityReservationPreference {
#[allow(missing_docs)] // documentation missing in model
None,
#[allow(missing_docs)] // documentation missing in model
Open,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for OnDemandCapacityReservationPreference {
fn from(s: &str) -> Self {
match s {
"none" => OnDemandCapacityReservationPreference::None,
"open" => OnDemandCapacityReservationPreference::Open,
other => OnDemandCapacityReservationPreference::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for OnDemandCapacityReservationPreference {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(OnDemandCapacityReservationPreference::from(s))
}
}
impl OnDemandCapacityReservationPreference {
/// Returns the `&str` value of the enum member.
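    ///
    /// String conversions round-trip through the documented values, and strings that
    /// this version of the enum does not recognize are preserved in the `Unknown`
    /// variant (a minimal sketch; the crate name `aws_sdk_emr` is an assumption):
    ///
    /// ```no_run
    /// use aws_sdk_emr::model::OnDemandCapacityReservationPreference;
    ///
    /// let open = OnDemandCapacityReservationPreference::from("open");
    /// assert_eq!(open.as_str(), "open");
    ///
    /// // A value this enum does not know about is kept as `Unknown`.
    /// let other = OnDemandCapacityReservationPreference::from("targeted");
    /// assert!(matches!(other, OnDemandCapacityReservationPreference::Unknown(_)));
    /// ```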
pub fn as_str(&self) -> &str {
match self {
OnDemandCapacityReservationPreference::None => "none",
OnDemandCapacityReservationPreference::Open => "open",
OnDemandCapacityReservationPreference::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["none", "open"]
}
}
impl AsRef<str> for OnDemandCapacityReservationPreference {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum OnDemandCapacityReservationUsageStrategy {
#[allow(missing_docs)] // documentation missing in model
UseCapacityReservationsFirst,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for OnDemandCapacityReservationUsageStrategy {
fn from(s: &str) -> Self {
match s {
"use-capacity-reservations-first" => {
OnDemandCapacityReservationUsageStrategy::UseCapacityReservationsFirst
}
other => OnDemandCapacityReservationUsageStrategy::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for OnDemandCapacityReservationUsageStrategy {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(OnDemandCapacityReservationUsageStrategy::from(s))
}
}
impl OnDemandCapacityReservationUsageStrategy {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
OnDemandCapacityReservationUsageStrategy::UseCapacityReservationsFirst => {
"use-capacity-reservations-first"
}
OnDemandCapacityReservationUsageStrategy::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["use-capacity-reservations-first"]
}
}
impl AsRef<str> for OnDemandCapacityReservationUsageStrategy {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum OnDemandProvisioningAllocationStrategy {
#[allow(missing_docs)] // documentation missing in model
LowestPrice,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for OnDemandProvisioningAllocationStrategy {
fn from(s: &str) -> Self {
match s {
"lowest-price" => OnDemandProvisioningAllocationStrategy::LowestPrice,
other => OnDemandProvisioningAllocationStrategy::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for OnDemandProvisioningAllocationStrategy {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(OnDemandProvisioningAllocationStrategy::from(s))
}
}
impl OnDemandProvisioningAllocationStrategy {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
OnDemandProvisioningAllocationStrategy::LowestPrice => "lowest-price",
OnDemandProvisioningAllocationStrategy::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["lowest-price"]
}
}
impl AsRef<str> for OnDemandProvisioningAllocationStrategy {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>The launch specification for Spot Instances in the instance fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. Spot Instance allocation strategy is available in Amazon EMR version 5.12.1 and later.</p>
/// </note> <note>
/// <p>Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022. </p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SpotProvisioningSpecification {
/// <p>The spot provisioning timeout period in minutes. If Spot Instances are not provisioned within this time period, the <code>TimeOutAction</code> is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.</p>
pub timeout_duration_minutes: std::option::Option<i32>,
/// <p>The action to take when <code>TargetSpotCapacity</code> has not been fulfilled when the <code>TimeoutDurationMinutes</code> has expired; that is, when all Spot Instances could not be provisioned within the Spot provisioning timeout. Valid values are <code>TERMINATE_CLUSTER</code> and <code>SWITCH_TO_ON_DEMAND</code>. SWITCH_TO_ON_DEMAND specifies that if no Spot Instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.</p>
pub timeout_action: std::option::Option<crate::model::SpotProvisioningTimeoutAction>,
/// <p>The defined duration for Spot Instances (also known as Spot blocks) in minutes. When specified, the Spot Instance does not terminate before the defined duration expires, and defined duration pricing for Spot Instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot Instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates. </p> <note>
/// <p>Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022. </p>
/// </note>
pub block_duration_minutes: std::option::Option<i32>,
/// <p> Specifies the strategy to use in launching Spot Instance fleets. Currently, the only option is capacity-optimized (the default), which launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching. </p>
pub allocation_strategy: std::option::Option<crate::model::SpotProvisioningAllocationStrategy>,
}
impl SpotProvisioningSpecification {
/// <p>The spot provisioning timeout period in minutes. If Spot Instances are not provisioned within this time period, the <code>TimeOutAction</code> is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.</p>
pub fn timeout_duration_minutes(&self) -> std::option::Option<i32> {
self.timeout_duration_minutes
}
/// <p>The action to take when <code>TargetSpotCapacity</code> has not been fulfilled when the <code>TimeoutDurationMinutes</code> has expired; that is, when all Spot Instances could not be provisioned within the Spot provisioning timeout. Valid values are <code>TERMINATE_CLUSTER</code> and <code>SWITCH_TO_ON_DEMAND</code>. SWITCH_TO_ON_DEMAND specifies that if no Spot Instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.</p>
pub fn timeout_action(
&self,
) -> std::option::Option<&crate::model::SpotProvisioningTimeoutAction> {
self.timeout_action.as_ref()
}
/// <p>The defined duration for Spot Instances (also known as Spot blocks) in minutes. When specified, the Spot Instance does not terminate before the defined duration expires, and defined duration pricing for Spot Instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot Instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates. </p> <note>
/// <p>Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022. </p>
/// </note>
pub fn block_duration_minutes(&self) -> std::option::Option<i32> {
self.block_duration_minutes
}
/// <p> Specifies the strategy to use in launching Spot Instance fleets. Currently, the only option is capacity-optimized (the default), which launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching. </p>
pub fn allocation_strategy(
&self,
) -> std::option::Option<&crate::model::SpotProvisioningAllocationStrategy> {
self.allocation_strategy.as_ref()
}
}
impl std::fmt::Debug for SpotProvisioningSpecification {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("SpotProvisioningSpecification");
formatter.field("timeout_duration_minutes", &self.timeout_duration_minutes);
formatter.field("timeout_action", &self.timeout_action);
formatter.field("block_duration_minutes", &self.block_duration_minutes);
formatter.field("allocation_strategy", &self.allocation_strategy);
formatter.finish()
}
}
/// See [`SpotProvisioningSpecification`](crate::model::SpotProvisioningSpecification)
pub mod spot_provisioning_specification {
/// A builder for [`SpotProvisioningSpecification`](crate::model::SpotProvisioningSpecification)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) timeout_duration_minutes: std::option::Option<i32>,
pub(crate) timeout_action: std::option::Option<crate::model::SpotProvisioningTimeoutAction>,
pub(crate) block_duration_minutes: std::option::Option<i32>,
pub(crate) allocation_strategy:
std::option::Option<crate::model::SpotProvisioningAllocationStrategy>,
}
impl Builder {
/// <p>The spot provisioning timeout period in minutes. If Spot Instances are not provisioned within this time period, the <code>TimeOutAction</code> is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.</p>
pub fn timeout_duration_minutes(mut self, input: i32) -> Self {
self.timeout_duration_minutes = Some(input);
self
}
/// <p>The spot provisioning timeout period in minutes. If Spot Instances are not provisioned within this time period, the <code>TimeOutAction</code> is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.</p>
pub fn set_timeout_duration_minutes(mut self, input: std::option::Option<i32>) -> Self {
self.timeout_duration_minutes = input;
self
}
/// <p>The action to take when <code>TargetSpotCapacity</code> has not been fulfilled when the <code>TimeoutDurationMinutes</code> has expired; that is, when all Spot Instances could not be provisioned within the Spot provisioning timeout. Valid values are <code>TERMINATE_CLUSTER</code> and <code>SWITCH_TO_ON_DEMAND</code>. SWITCH_TO_ON_DEMAND specifies that if no Spot Instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.</p>
pub fn timeout_action(
mut self,
input: crate::model::SpotProvisioningTimeoutAction,
) -> Self {
self.timeout_action = Some(input);
self
}
/// <p>The action to take when <code>TargetSpotCapacity</code> has not been fulfilled when the <code>TimeoutDurationMinutes</code> has expired; that is, when all Spot Instances could not be provisioned within the Spot provisioning timeout. Valid values are <code>TERMINATE_CLUSTER</code> and <code>SWITCH_TO_ON_DEMAND</code>. SWITCH_TO_ON_DEMAND specifies that if no Spot Instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.</p>
pub fn set_timeout_action(
mut self,
input: std::option::Option<crate::model::SpotProvisioningTimeoutAction>,
) -> Self {
self.timeout_action = input;
self
}
/// <p>The defined duration for Spot Instances (also known as Spot blocks) in minutes. When specified, the Spot Instance does not terminate before the defined duration expires, and defined duration pricing for Spot Instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot Instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates. </p> <note>
/// <p>Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022. </p>
/// </note>
pub fn block_duration_minutes(mut self, input: i32) -> Self {
self.block_duration_minutes = Some(input);
self
}
/// <p>The defined duration for Spot Instances (also known as Spot blocks) in minutes. When specified, the Spot Instance does not terminate before the defined duration expires, and defined duration pricing for Spot Instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot Instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates. </p> <note>
/// <p>Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022. </p>
/// </note>
pub fn set_block_duration_minutes(mut self, input: std::option::Option<i32>) -> Self {
self.block_duration_minutes = input;
self
}
/// <p> Specifies the strategy to use in launching Spot Instance fleets. Currently, the only option is capacity-optimized (the default), which launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching. </p>
pub fn allocation_strategy(
mut self,
input: crate::model::SpotProvisioningAllocationStrategy,
) -> Self {
self.allocation_strategy = Some(input);
self
}
/// <p> Specifies the strategy to use in launching Spot Instance fleets. Currently, the only option is capacity-optimized (the default), which launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching. </p>
pub fn set_allocation_strategy(
mut self,
input: std::option::Option<crate::model::SpotProvisioningAllocationStrategy>,
) -> Self {
self.allocation_strategy = input;
self
}
/// Consumes the builder and constructs a [`SpotProvisioningSpecification`](crate::model::SpotProvisioningSpecification)
pub fn build(self) -> crate::model::SpotProvisioningSpecification {
crate::model::SpotProvisioningSpecification {
timeout_duration_minutes: self.timeout_duration_minutes,
timeout_action: self.timeout_action,
block_duration_minutes: self.block_duration_minutes,
allocation_strategy: self.allocation_strategy,
}
}
}
}
impl SpotProvisioningSpecification {
/// Creates a new builder-style object to manufacture [`SpotProvisioningSpecification`](crate::model::SpotProvisioningSpecification)
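    ///
    /// A minimal usage sketch (the crate name `aws_sdk_emr` and the timeout value are
    /// assumptions for illustration): wait up to 20 minutes for Spot capacity, then
    /// fall back to On-Demand Instances.
    ///
    /// ```no_run
    /// use aws_sdk_emr::model::{
    ///     SpotProvisioningAllocationStrategy, SpotProvisioningSpecification,
    ///     SpotProvisioningTimeoutAction,
    /// };
    ///
    /// let spot_spec = SpotProvisioningSpecification::builder()
    ///     .timeout_duration_minutes(20)
    ///     .timeout_action(SpotProvisioningTimeoutAction::SwitchToOnDemand)
    ///     .allocation_strategy(SpotProvisioningAllocationStrategy::CapacityOptimized)
    ///     .build();
    /// ```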
pub fn builder() -> crate::model::spot_provisioning_specification::Builder {
crate::model::spot_provisioning_specification::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum SpotProvisioningAllocationStrategy {
#[allow(missing_docs)] // documentation missing in model
CapacityOptimized,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for SpotProvisioningAllocationStrategy {
fn from(s: &str) -> Self {
match s {
"capacity-optimized" => SpotProvisioningAllocationStrategy::CapacityOptimized,
other => SpotProvisioningAllocationStrategy::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for SpotProvisioningAllocationStrategy {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(SpotProvisioningAllocationStrategy::from(s))
}
}
impl SpotProvisioningAllocationStrategy {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
SpotProvisioningAllocationStrategy::CapacityOptimized => "capacity-optimized",
SpotProvisioningAllocationStrategy::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["capacity-optimized"]
}
}
impl AsRef<str> for SpotProvisioningAllocationStrategy {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum SpotProvisioningTimeoutAction {
#[allow(missing_docs)] // documentation missing in model
SwitchToOnDemand,
#[allow(missing_docs)] // documentation missing in model
TerminateCluster,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for SpotProvisioningTimeoutAction {
fn from(s: &str) -> Self {
match s {
"SWITCH_TO_ON_DEMAND" => SpotProvisioningTimeoutAction::SwitchToOnDemand,
"TERMINATE_CLUSTER" => SpotProvisioningTimeoutAction::TerminateCluster,
other => SpotProvisioningTimeoutAction::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for SpotProvisioningTimeoutAction {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(SpotProvisioningTimeoutAction::from(s))
}
}
impl SpotProvisioningTimeoutAction {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
SpotProvisioningTimeoutAction::SwitchToOnDemand => "SWITCH_TO_ON_DEMAND",
SpotProvisioningTimeoutAction::TerminateCluster => "TERMINATE_CLUSTER",
SpotProvisioningTimeoutAction::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["SWITCH_TO_ON_DEMAND", "TERMINATE_CLUSTER"]
}
}
impl AsRef<str> for SpotProvisioningTimeoutAction {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. When you use an allocation strategy, you can include a maximum of 30 instance type configurations for a fleet. For more information about how to use an allocation strategy, see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-fleet.html">Configure Instance Fleets</a>. Without an allocation strategy, you may specify a maximum of five instance type configurations for a fleet.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
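///
/// A minimal usage sketch (the crate name `aws_sdk_emr`, the instance type, and the
/// builder setter names are assumptions here, following the builder pattern the other
/// model types in this module use):
///
/// ```no_run
/// let config = aws_sdk_emr::model::InstanceTypeConfig::builder()
///     .instance_type("m5.xlarge")
///     // Counts as 4 units toward the fleet's On-Demand or Spot target capacity.
///     .weighted_capacity(4)
///     // Bid 50% of the On-Demand price for Spot Instances of this type.
///     .bid_price_as_percentage_of_on_demand_price(50.0)
///     .build();
/// ```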
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceTypeConfig {
/// <p>An EC2 instance type, such as <code>m3.xlarge</code>. </p>
pub instance_type: std::option::Option<std::string::String>,
/// <p>The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in <code>InstanceFleetConfig</code>. This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified. </p>
pub weighted_capacity: std::option::Option<i32>,
/// <p>The bid price for each EC2 Spot Instance type as defined by <code>InstanceType</code>. Expressed in USD. If neither <code>BidPrice</code> nor <code>BidPriceAsPercentageOfOnDemandPrice</code> is provided, <code>BidPriceAsPercentageOfOnDemandPrice</code> defaults to 100%. </p>
pub bid_price: std::option::Option<std::string::String>,
/// <p>The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by <code>InstanceType</code>. Expressed as a number (for example, 20 specifies 20%). If neither <code>BidPrice</code> nor <code>BidPriceAsPercentageOfOnDemandPrice</code> is provided, <code>BidPriceAsPercentageOfOnDemandPrice</code> defaults to 100%.</p>
pub bid_price_as_percentage_of_on_demand_price: std::option::Option<f64>,
/// <p>The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by <code>InstanceType</code>. </p>
pub ebs_configuration: std::option::Option<crate::model::EbsConfiguration>,
/// <p>A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.</p>
pub configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
/// <p>The custom AMI ID to use for the instance type.</p>
pub custom_ami_id: std::option::Option<std::string::String>,
}
impl InstanceTypeConfig {
/// <p>An EC2 instance type, such as <code>m3.xlarge</code>. </p>
pub fn instance_type(&self) -> std::option::Option<&str> {
self.instance_type.as_deref()
}
/// <p>The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in <code>InstanceFleetConfig</code>. This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified. </p>
pub fn weighted_capacity(&self) -> std::option::Option<i32> {
self.weighted_capacity
}
/// <p>The bid price for each EC2 Spot Instance type as defined by <code>InstanceType</code>. Expressed in USD. If neither <code>BidPrice</code> nor <code>BidPriceAsPercentageOfOnDemandPrice</code> is provided, <code>BidPriceAsPercentageOfOnDemandPrice</code> defaults to 100%. </p>
pub fn bid_price(&self) -> std::option::Option<&str> {
self.bid_price.as_deref()
}
/// <p>The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by <code>InstanceType</code>. Expressed as a number (for example, 20 specifies 20%). If neither <code>BidPrice</code> nor <code>BidPriceAsPercentageOfOnDemandPrice</code> is provided, <code>BidPriceAsPercentageOfOnDemandPrice</code> defaults to 100%.</p>
pub fn bid_price_as_percentage_of_on_demand_price(&self) -> std::option::Option<f64> {
self.bid_price_as_percentage_of_on_demand_price
}
/// <p>The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by <code>InstanceType</code>. </p>
pub fn ebs_configuration(&self) -> std::option::Option<&crate::model::EbsConfiguration> {
self.ebs_configuration.as_ref()
}
/// <p>A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.</p>
pub fn configurations(&self) -> std::option::Option<&[crate::model::Configuration]> {
self.configurations.as_deref()
}
/// <p>The custom AMI ID to use for the instance type.</p>
pub fn custom_ami_id(&self) -> std::option::Option<&str> {
self.custom_ami_id.as_deref()
}
}
impl std::fmt::Debug for InstanceTypeConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceTypeConfig");
formatter.field("instance_type", &self.instance_type);
formatter.field("weighted_capacity", &self.weighted_capacity);
formatter.field("bid_price", &self.bid_price);
formatter.field(
"bid_price_as_percentage_of_on_demand_price",
&self.bid_price_as_percentage_of_on_demand_price,
);
formatter.field("ebs_configuration", &self.ebs_configuration);
formatter.field("configurations", &self.configurations);
formatter.field("custom_ami_id", &self.custom_ami_id);
formatter.finish()
}
}
/// See [`InstanceTypeConfig`](crate::model::InstanceTypeConfig)
pub mod instance_type_config {
/// A builder for [`InstanceTypeConfig`](crate::model::InstanceTypeConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instance_type: std::option::Option<std::string::String>,
pub(crate) weighted_capacity: std::option::Option<i32>,
pub(crate) bid_price: std::option::Option<std::string::String>,
pub(crate) bid_price_as_percentage_of_on_demand_price: std::option::Option<f64>,
pub(crate) ebs_configuration: std::option::Option<crate::model::EbsConfiguration>,
pub(crate) configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
pub(crate) custom_ami_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>An EC2 instance type, such as <code>m3.xlarge</code>. </p>
pub fn instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_type = Some(input.into());
self
}
/// <p>An EC2 instance type, such as <code>m3.xlarge</code>. </p>
pub fn set_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_type = input;
self
}
/// <p>The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in <code>InstanceFleetConfig</code>. This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified. </p>
pub fn weighted_capacity(mut self, input: i32) -> Self {
self.weighted_capacity = Some(input);
self
}
/// <p>The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in <code>InstanceFleetConfig</code>. This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified. </p>
pub fn set_weighted_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.weighted_capacity = input;
self
}
/// <p>The bid price for each EC2 Spot Instance type as defined by <code>InstanceType</code>. Expressed in USD. If neither <code>BidPrice</code> nor <code>BidPriceAsPercentageOfOnDemandPrice</code> is provided, <code>BidPriceAsPercentageOfOnDemandPrice</code> defaults to 100%. </p>
pub fn bid_price(mut self, input: impl Into<std::string::String>) -> Self {
self.bid_price = Some(input.into());
self
}
/// <p>The bid price for each EC2 Spot Instance type as defined by <code>InstanceType</code>. Expressed in USD. If neither <code>BidPrice</code> nor <code>BidPriceAsPercentageOfOnDemandPrice</code> is provided, <code>BidPriceAsPercentageOfOnDemandPrice</code> defaults to 100%. </p>
pub fn set_bid_price(mut self, input: std::option::Option<std::string::String>) -> Self {
self.bid_price = input;
self
}
/// <p>The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by <code>InstanceType</code>. Expressed as a number (for example, 20 specifies 20%). If neither <code>BidPrice</code> nor <code>BidPriceAsPercentageOfOnDemandPrice</code> is provided, <code>BidPriceAsPercentageOfOnDemandPrice</code> defaults to 100%.</p>
pub fn bid_price_as_percentage_of_on_demand_price(mut self, input: f64) -> Self {
self.bid_price_as_percentage_of_on_demand_price = Some(input);
self
}
/// <p>The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by <code>InstanceType</code>. Expressed as a number (for example, 20 specifies 20%). If neither <code>BidPrice</code> nor <code>BidPriceAsPercentageOfOnDemandPrice</code> is provided, <code>BidPriceAsPercentageOfOnDemandPrice</code> defaults to 100%.</p>
pub fn set_bid_price_as_percentage_of_on_demand_price(
mut self,
input: std::option::Option<f64>,
) -> Self {
self.bid_price_as_percentage_of_on_demand_price = input;
self
}
/// <p>The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by <code>InstanceType</code>. </p>
pub fn ebs_configuration(mut self, input: crate::model::EbsConfiguration) -> Self {
self.ebs_configuration = Some(input);
self
}
/// <p>The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by <code>InstanceType</code>. </p>
pub fn set_ebs_configuration(
mut self,
input: std::option::Option<crate::model::EbsConfiguration>,
) -> Self {
self.ebs_configuration = input;
self
}
/// Appends an item to `configurations`.
///
/// To override the contents of this collection use [`set_configurations`](Self::set_configurations).
///
/// <p>A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.</p>
pub fn configurations(mut self, input: crate::model::Configuration) -> Self {
let mut v = self.configurations.unwrap_or_default();
v.push(input);
self.configurations = Some(v);
self
}
/// <p>A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.</p>
pub fn set_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
) -> Self {
self.configurations = input;
self
}
/// <p>The custom AMI ID to use for the instance type.</p>
pub fn custom_ami_id(mut self, input: impl Into<std::string::String>) -> Self {
self.custom_ami_id = Some(input.into());
self
}
/// <p>The custom AMI ID to use for the instance type.</p>
pub fn set_custom_ami_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.custom_ami_id = input;
self
}
        /// Consumes the builder and constructs an [`InstanceTypeConfig`](crate::model::InstanceTypeConfig)
pub fn build(self) -> crate::model::InstanceTypeConfig {
crate::model::InstanceTypeConfig {
instance_type: self.instance_type,
weighted_capacity: self.weighted_capacity,
bid_price: self.bid_price,
bid_price_as_percentage_of_on_demand_price: self
.bid_price_as_percentage_of_on_demand_price,
ebs_configuration: self.ebs_configuration,
configurations: self.configurations,
custom_ami_id: self.custom_ami_id,
}
}
}
}
impl InstanceTypeConfig {
/// Creates a new builder-style object to manufacture [`InstanceTypeConfig`](crate::model::InstanceTypeConfig)
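    ///
    /// A minimal, illustrative sketch of building a configuration with the fluent
    /// setters above (the crate name `aws_sdk_emr` and all field values are assumptions):
    ///
    /// ```no_run
    /// let config = aws_sdk_emr::model::InstanceTypeConfig::builder()
    ///     .instance_type("m5.xlarge")
    ///     .weighted_capacity(4)
    ///     .bid_price_as_percentage_of_on_demand_price(50.0)
    ///     .build();
    /// assert_eq!(config.instance_type(), Some("m5.xlarge"));
    /// ```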
pub fn builder() -> crate::model::instance_type_config::Builder {
crate::model::instance_type_config::Builder::default()
}
}
/// <p>The Amazon EBS configuration of a cluster instance.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EbsConfiguration {
/// <p>An array of Amazon EBS volume specifications attached to a cluster instance.</p>
pub ebs_block_device_configs:
std::option::Option<std::vec::Vec<crate::model::EbsBlockDeviceConfig>>,
/// <p>Indicates whether an Amazon EBS volume is EBS-optimized.</p>
pub ebs_optimized: std::option::Option<bool>,
}
impl EbsConfiguration {
/// <p>An array of Amazon EBS volume specifications attached to a cluster instance.</p>
pub fn ebs_block_device_configs(
&self,
) -> std::option::Option<&[crate::model::EbsBlockDeviceConfig]> {
self.ebs_block_device_configs.as_deref()
}
/// <p>Indicates whether an Amazon EBS volume is EBS-optimized.</p>
pub fn ebs_optimized(&self) -> std::option::Option<bool> {
self.ebs_optimized
}
}
impl std::fmt::Debug for EbsConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("EbsConfiguration");
formatter.field("ebs_block_device_configs", &self.ebs_block_device_configs);
formatter.field("ebs_optimized", &self.ebs_optimized);
formatter.finish()
}
}
/// See [`EbsConfiguration`](crate::model::EbsConfiguration)
pub mod ebs_configuration {
/// A builder for [`EbsConfiguration`](crate::model::EbsConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) ebs_block_device_configs:
std::option::Option<std::vec::Vec<crate::model::EbsBlockDeviceConfig>>,
pub(crate) ebs_optimized: std::option::Option<bool>,
}
impl Builder {
/// Appends an item to `ebs_block_device_configs`.
///
/// To override the contents of this collection use [`set_ebs_block_device_configs`](Self::set_ebs_block_device_configs).
///
/// <p>An array of Amazon EBS volume specifications attached to a cluster instance.</p>
pub fn ebs_block_device_configs(
mut self,
input: crate::model::EbsBlockDeviceConfig,
) -> Self {
let mut v = self.ebs_block_device_configs.unwrap_or_default();
v.push(input);
self.ebs_block_device_configs = Some(v);
self
}
/// <p>An array of Amazon EBS volume specifications attached to a cluster instance.</p>
pub fn set_ebs_block_device_configs(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EbsBlockDeviceConfig>>,
) -> Self {
self.ebs_block_device_configs = input;
self
}
/// <p>Indicates whether an Amazon EBS volume is EBS-optimized.</p>
pub fn ebs_optimized(mut self, input: bool) -> Self {
self.ebs_optimized = Some(input);
self
}
/// <p>Indicates whether an Amazon EBS volume is EBS-optimized.</p>
pub fn set_ebs_optimized(mut self, input: std::option::Option<bool>) -> Self {
self.ebs_optimized = input;
self
}
        /// Consumes the builder and constructs an [`EbsConfiguration`](crate::model::EbsConfiguration)
pub fn build(self) -> crate::model::EbsConfiguration {
crate::model::EbsConfiguration {
ebs_block_device_configs: self.ebs_block_device_configs,
ebs_optimized: self.ebs_optimized,
}
}
}
}
impl EbsConfiguration {
/// Creates a new builder-style object to manufacture [`EbsConfiguration`](crate::model::EbsConfiguration)
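    ///
    /// An illustrative sketch showing how `ebs_block_device_configs` appends to the
    /// collection (the crate name `aws_sdk_emr` and the values are assumptions):
    ///
    /// ```no_run
    /// let ebs = aws_sdk_emr::model::EbsConfiguration::builder()
    ///     .ebs_block_device_configs(
    ///         aws_sdk_emr::model::EbsBlockDeviceConfig::builder()
    ///             .volumes_per_instance(2)
    ///             .build(),
    ///     )
    ///     .ebs_optimized(true)
    ///     .build();
    /// assert_eq!(ebs.ebs_block_device_configs().map(|c| c.len()), Some(1));
    /// ```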
pub fn builder() -> crate::model::ebs_configuration::Builder {
crate::model::ebs_configuration::Builder::default()
}
}
/// <p>Configuration of requested EBS block device associated with the instance group with count of volumes that will be associated to every instance.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EbsBlockDeviceConfig {
/// <p>EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.</p>
pub volume_specification: std::option::Option<crate::model::VolumeSpecification>,
    /// <p>Number of EBS volumes with a specific volume configuration that will be associated with every instance in the instance group.</p>
pub volumes_per_instance: std::option::Option<i32>,
}
impl EbsBlockDeviceConfig {
/// <p>EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.</p>
pub fn volume_specification(&self) -> std::option::Option<&crate::model::VolumeSpecification> {
self.volume_specification.as_ref()
}
    /// <p>Number of EBS volumes with a specific volume configuration that will be associated with every instance in the instance group.</p>
pub fn volumes_per_instance(&self) -> std::option::Option<i32> {
self.volumes_per_instance
}
}
impl std::fmt::Debug for EbsBlockDeviceConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("EbsBlockDeviceConfig");
formatter.field("volume_specification", &self.volume_specification);
formatter.field("volumes_per_instance", &self.volumes_per_instance);
formatter.finish()
}
}
/// See [`EbsBlockDeviceConfig`](crate::model::EbsBlockDeviceConfig)
pub mod ebs_block_device_config {
/// A builder for [`EbsBlockDeviceConfig`](crate::model::EbsBlockDeviceConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) volume_specification: std::option::Option<crate::model::VolumeSpecification>,
pub(crate) volumes_per_instance: std::option::Option<i32>,
}
impl Builder {
/// <p>EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.</p>
pub fn volume_specification(mut self, input: crate::model::VolumeSpecification) -> Self {
self.volume_specification = Some(input);
self
}
/// <p>EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.</p>
pub fn set_volume_specification(
mut self,
input: std::option::Option<crate::model::VolumeSpecification>,
) -> Self {
self.volume_specification = input;
self
}
        /// <p>Number of EBS volumes with a specific volume configuration that will be associated with every instance in the instance group.</p>
pub fn volumes_per_instance(mut self, input: i32) -> Self {
self.volumes_per_instance = Some(input);
self
}
        /// <p>Number of EBS volumes with a specific volume configuration that will be associated with every instance in the instance group.</p>
pub fn set_volumes_per_instance(mut self, input: std::option::Option<i32>) -> Self {
self.volumes_per_instance = input;
self
}
        /// Consumes the builder and constructs an [`EbsBlockDeviceConfig`](crate::model::EbsBlockDeviceConfig)
pub fn build(self) -> crate::model::EbsBlockDeviceConfig {
crate::model::EbsBlockDeviceConfig {
volume_specification: self.volume_specification,
volumes_per_instance: self.volumes_per_instance,
}
}
}
}
impl EbsBlockDeviceConfig {
/// Creates a new builder-style object to manufacture [`EbsBlockDeviceConfig`](crate::model::EbsBlockDeviceConfig)
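    ///
    /// A hypothetical sketch pairing a volume specification with a per-instance
    /// volume count (the crate name `aws_sdk_emr` and the values are assumptions):
    ///
    /// ```no_run
    /// let block_device = aws_sdk_emr::model::EbsBlockDeviceConfig::builder()
    ///     .volume_specification(
    ///         aws_sdk_emr::model::VolumeSpecification::builder()
    ///             .volume_type("gp2")
    ///             .size_in_gb(64)
    ///             .build(),
    ///     )
    ///     .volumes_per_instance(2)
    ///     .build();
    /// assert_eq!(block_device.volumes_per_instance(), Some(2));
    /// ```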
pub fn builder() -> crate::model::ebs_block_device_config::Builder {
crate::model::ebs_block_device_config::Builder::default()
}
}
/// <p>EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct VolumeSpecification {
/// <p>The volume type. Volume types supported are gp2, io1, and standard.</p>
pub volume_type: std::option::Option<std::string::String>,
/// <p>The number of I/O operations per second (IOPS) that the volume supports.</p>
pub iops: std::option::Option<i32>,
/// <p>The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.</p>
pub size_in_gb: std::option::Option<i32>,
}
impl VolumeSpecification {
/// <p>The volume type. Volume types supported are gp2, io1, and standard.</p>
pub fn volume_type(&self) -> std::option::Option<&str> {
self.volume_type.as_deref()
}
/// <p>The number of I/O operations per second (IOPS) that the volume supports.</p>
pub fn iops(&self) -> std::option::Option<i32> {
self.iops
}
/// <p>The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.</p>
pub fn size_in_gb(&self) -> std::option::Option<i32> {
self.size_in_gb
}
}
impl std::fmt::Debug for VolumeSpecification {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("VolumeSpecification");
formatter.field("volume_type", &self.volume_type);
formatter.field("iops", &self.iops);
formatter.field("size_in_gb", &self.size_in_gb);
formatter.finish()
}
}
/// See [`VolumeSpecification`](crate::model::VolumeSpecification)
pub mod volume_specification {
/// A builder for [`VolumeSpecification`](crate::model::VolumeSpecification)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) volume_type: std::option::Option<std::string::String>,
pub(crate) iops: std::option::Option<i32>,
pub(crate) size_in_gb: std::option::Option<i32>,
}
impl Builder {
/// <p>The volume type. Volume types supported are gp2, io1, and standard.</p>
pub fn volume_type(mut self, input: impl Into<std::string::String>) -> Self {
self.volume_type = Some(input.into());
self
}
/// <p>The volume type. Volume types supported are gp2, io1, and standard.</p>
pub fn set_volume_type(mut self, input: std::option::Option<std::string::String>) -> Self {
self.volume_type = input;
self
}
/// <p>The number of I/O operations per second (IOPS) that the volume supports.</p>
pub fn iops(mut self, input: i32) -> Self {
self.iops = Some(input);
self
}
/// <p>The number of I/O operations per second (IOPS) that the volume supports.</p>
pub fn set_iops(mut self, input: std::option::Option<i32>) -> Self {
self.iops = input;
self
}
/// <p>The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.</p>
pub fn size_in_gb(mut self, input: i32) -> Self {
self.size_in_gb = Some(input);
self
}
/// <p>The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.</p>
pub fn set_size_in_gb(mut self, input: std::option::Option<i32>) -> Self {
self.size_in_gb = input;
self
}
/// Consumes the builder and constructs a [`VolumeSpecification`](crate::model::VolumeSpecification)
pub fn build(self) -> crate::model::VolumeSpecification {
crate::model::VolumeSpecification {
volume_type: self.volume_type,
iops: self.iops,
size_in_gb: self.size_in_gb,
}
}
}
}
impl VolumeSpecification {
/// Creates a new builder-style object to manufacture [`VolumeSpecification`](crate::model::VolumeSpecification)
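    ///
    /// An illustrative sketch of a provisioned-IOPS volume request
    /// (the crate name `aws_sdk_emr` and the values are assumptions):
    ///
    /// ```no_run
    /// let volume = aws_sdk_emr::model::VolumeSpecification::builder()
    ///     .volume_type("io1")
    ///     .iops(3000)
    ///     .size_in_gb(100)
    ///     .build();
    /// assert_eq!(volume.size_in_gb(), Some(100));
    /// ```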
pub fn builder() -> crate::model::volume_specification::Builder {
crate::model::volume_specification::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceFleetType {
#[allow(missing_docs)] // documentation missing in model
Core,
#[allow(missing_docs)] // documentation missing in model
Master,
#[allow(missing_docs)] // documentation missing in model
Task,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceFleetType {
fn from(s: &str) -> Self {
match s {
"CORE" => InstanceFleetType::Core,
"MASTER" => InstanceFleetType::Master,
"TASK" => InstanceFleetType::Task,
other => InstanceFleetType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceFleetType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceFleetType::from(s))
}
}
impl InstanceFleetType {
/// Returns the `&str` value of the enum member.
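    ///
    /// A small illustrative sketch of converting to and from the wire string
    /// (the crate name `aws_sdk_emr` is an assumption):
    ///
    /// ```no_run
    /// use aws_sdk_emr::model::InstanceFleetType;
    ///
    /// assert_eq!(InstanceFleetType::from("TASK"), InstanceFleetType::Task);
    /// assert_eq!(InstanceFleetType::Task.as_str(), "TASK");
    /// assert!(InstanceFleetType::values().contains(&"TASK"));
    /// ```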
pub fn as_str(&self) -> &str {
match self {
InstanceFleetType::Core => "CORE",
InstanceFleetType::Master => "MASTER",
InstanceFleetType::Task => "TASK",
InstanceFleetType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["CORE", "MASTER", "TASK"]
}
}
impl AsRef<str> for InstanceFleetType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Configuration defining a new instance group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceGroupConfig {
/// <p>Friendly name given to the instance group.</p>
pub name: std::option::Option<std::string::String>,
/// <p>Market type of the EC2 instances used to create a cluster node.</p>
pub market: std::option::Option<crate::model::MarketType>,
/// <p>The role of the instance group in the cluster.</p>
pub instance_role: std::option::Option<crate::model::InstanceRoleType>,
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub bid_price: std::option::Option<std::string::String>,
/// <p>The EC2 instance type for all instances in the instance group.</p>
pub instance_type: std::option::Option<std::string::String>,
/// <p>Target number of instances for the instance group.</p>
pub instance_count: std::option::Option<i32>,
/// <note>
/// <p>Amazon EMR releases 4.x or later.</p>
/// </note>
/// <p>The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).</p>
pub configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
/// <p>EBS configurations that will be attached to each EC2 instance in the instance group.</p>
pub ebs_configuration: std::option::Option<crate::model::EbsConfiguration>,
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See <code>PutAutoScalingPolicy</code>.</p>
pub auto_scaling_policy: std::option::Option<crate::model::AutoScalingPolicy>,
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub custom_ami_id: std::option::Option<std::string::String>,
}
impl InstanceGroupConfig {
/// <p>Friendly name given to the instance group.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>Market type of the EC2 instances used to create a cluster node.</p>
pub fn market(&self) -> std::option::Option<&crate::model::MarketType> {
self.market.as_ref()
}
/// <p>The role of the instance group in the cluster.</p>
pub fn instance_role(&self) -> std::option::Option<&crate::model::InstanceRoleType> {
self.instance_role.as_ref()
}
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub fn bid_price(&self) -> std::option::Option<&str> {
self.bid_price.as_deref()
}
/// <p>The EC2 instance type for all instances in the instance group.</p>
pub fn instance_type(&self) -> std::option::Option<&str> {
self.instance_type.as_deref()
}
/// <p>Target number of instances for the instance group.</p>
pub fn instance_count(&self) -> std::option::Option<i32> {
self.instance_count
}
/// <note>
/// <p>Amazon EMR releases 4.x or later.</p>
/// </note>
/// <p>The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).</p>
pub fn configurations(&self) -> std::option::Option<&[crate::model::Configuration]> {
self.configurations.as_deref()
}
/// <p>EBS configurations that will be attached to each EC2 instance in the instance group.</p>
pub fn ebs_configuration(&self) -> std::option::Option<&crate::model::EbsConfiguration> {
self.ebs_configuration.as_ref()
}
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See <code>PutAutoScalingPolicy</code>.</p>
pub fn auto_scaling_policy(&self) -> std::option::Option<&crate::model::AutoScalingPolicy> {
self.auto_scaling_policy.as_ref()
}
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub fn custom_ami_id(&self) -> std::option::Option<&str> {
self.custom_ami_id.as_deref()
}
}
impl std::fmt::Debug for InstanceGroupConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceGroupConfig");
formatter.field("name", &self.name);
formatter.field("market", &self.market);
formatter.field("instance_role", &self.instance_role);
formatter.field("bid_price", &self.bid_price);
formatter.field("instance_type", &self.instance_type);
formatter.field("instance_count", &self.instance_count);
formatter.field("configurations", &self.configurations);
formatter.field("ebs_configuration", &self.ebs_configuration);
formatter.field("auto_scaling_policy", &self.auto_scaling_policy);
formatter.field("custom_ami_id", &self.custom_ami_id);
formatter.finish()
}
}
/// See [`InstanceGroupConfig`](crate::model::InstanceGroupConfig)
pub mod instance_group_config {
/// A builder for [`InstanceGroupConfig`](crate::model::InstanceGroupConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) market: std::option::Option<crate::model::MarketType>,
pub(crate) instance_role: std::option::Option<crate::model::InstanceRoleType>,
pub(crate) bid_price: std::option::Option<std::string::String>,
pub(crate) instance_type: std::option::Option<std::string::String>,
pub(crate) instance_count: std::option::Option<i32>,
pub(crate) configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
pub(crate) ebs_configuration: std::option::Option<crate::model::EbsConfiguration>,
pub(crate) auto_scaling_policy: std::option::Option<crate::model::AutoScalingPolicy>,
pub(crate) custom_ami_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>Friendly name given to the instance group.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>Friendly name given to the instance group.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>Market type of the EC2 instances used to create a cluster node.</p>
pub fn market(mut self, input: crate::model::MarketType) -> Self {
self.market = Some(input);
self
}
/// <p>Market type of the EC2 instances used to create a cluster node.</p>
pub fn set_market(mut self, input: std::option::Option<crate::model::MarketType>) -> Self {
self.market = input;
self
}
/// <p>The role of the instance group in the cluster.</p>
pub fn instance_role(mut self, input: crate::model::InstanceRoleType) -> Self {
self.instance_role = Some(input);
self
}
/// <p>The role of the instance group in the cluster.</p>
pub fn set_instance_role(
mut self,
input: std::option::Option<crate::model::InstanceRoleType>,
) -> Self {
self.instance_role = input;
self
}
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub fn bid_price(mut self, input: impl Into<std::string::String>) -> Self {
self.bid_price = Some(input.into());
self
}
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub fn set_bid_price(mut self, input: std::option::Option<std::string::String>) -> Self {
self.bid_price = input;
self
}
/// <p>The EC2 instance type for all instances in the instance group.</p>
pub fn instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_type = Some(input.into());
self
}
/// <p>The EC2 instance type for all instances in the instance group.</p>
pub fn set_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_type = input;
self
}
/// <p>Target number of instances for the instance group.</p>
pub fn instance_count(mut self, input: i32) -> Self {
self.instance_count = Some(input);
self
}
/// <p>Target number of instances for the instance group.</p>
pub fn set_instance_count(mut self, input: std::option::Option<i32>) -> Self {
self.instance_count = input;
self
}
/// Appends an item to `configurations`.
///
/// To override the contents of this collection use [`set_configurations`](Self::set_configurations).
///
/// <note>
/// <p>Amazon EMR releases 4.x or later.</p>
/// </note>
/// <p>The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).</p>
pub fn configurations(mut self, input: crate::model::Configuration) -> Self {
let mut v = self.configurations.unwrap_or_default();
v.push(input);
self.configurations = Some(v);
self
}
/// <note>
/// <p>Amazon EMR releases 4.x or later.</p>
/// </note>
/// <p>The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).</p>
pub fn set_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
) -> Self {
self.configurations = input;
self
}
/// <p>EBS configurations that will be attached to each EC2 instance in the instance group.</p>
pub fn ebs_configuration(mut self, input: crate::model::EbsConfiguration) -> Self {
self.ebs_configuration = Some(input);
self
}
/// <p>EBS configurations that will be attached to each EC2 instance in the instance group.</p>
pub fn set_ebs_configuration(
mut self,
input: std::option::Option<crate::model::EbsConfiguration>,
) -> Self {
self.ebs_configuration = input;
self
}
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See <code>PutAutoScalingPolicy</code>.</p>
pub fn auto_scaling_policy(mut self, input: crate::model::AutoScalingPolicy) -> Self {
self.auto_scaling_policy = Some(input);
self
}
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See <code>PutAutoScalingPolicy</code>.</p>
pub fn set_auto_scaling_policy(
mut self,
input: std::option::Option<crate::model::AutoScalingPolicy>,
) -> Self {
self.auto_scaling_policy = input;
self
}
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub fn custom_ami_id(mut self, input: impl Into<std::string::String>) -> Self {
self.custom_ami_id = Some(input.into());
self
}
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub fn set_custom_ami_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.custom_ami_id = input;
self
}
        /// Consumes the builder and constructs an [`InstanceGroupConfig`](crate::model::InstanceGroupConfig)
pub fn build(self) -> crate::model::InstanceGroupConfig {
crate::model::InstanceGroupConfig {
name: self.name,
market: self.market,
instance_role: self.instance_role,
bid_price: self.bid_price,
instance_type: self.instance_type,
instance_count: self.instance_count,
configurations: self.configurations,
ebs_configuration: self.ebs_configuration,
auto_scaling_policy: self.auto_scaling_policy,
custom_ami_id: self.custom_ami_id,
}
}
}
}
impl InstanceGroupConfig {
/// Creates a new builder-style object to manufacture [`InstanceGroupConfig`](crate::model::InstanceGroupConfig)
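    ///
    /// An illustrative sketch of a core instance group. The `InstanceRoleType` string
    /// conversion is assumed to follow the same pattern as the other enums in this
    /// module, and the crate name `aws_sdk_emr` and all values are assumptions:
    ///
    /// ```no_run
    /// let group = aws_sdk_emr::model::InstanceGroupConfig::builder()
    ///     .name("core-nodes")
    ///     .instance_role(aws_sdk_emr::model::InstanceRoleType::from("CORE"))
    ///     .instance_type("m5.xlarge")
    ///     .instance_count(3)
    ///     .build();
    /// assert_eq!(group.instance_count(), Some(3));
    /// ```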
pub fn builder() -> crate::model::instance_group_config::Builder {
crate::model::instance_group_config::Builder::default()
}
}
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. An automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See <code>PutAutoScalingPolicy</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AutoScalingPolicy {
/// <p>The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.</p>
pub constraints: std::option::Option<crate::model::ScalingConstraints>,
/// <p>The scale-in and scale-out rules that comprise the automatic scaling policy.</p>
pub rules: std::option::Option<std::vec::Vec<crate::model::ScalingRule>>,
}
impl AutoScalingPolicy {
/// <p>The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.</p>
pub fn constraints(&self) -> std::option::Option<&crate::model::ScalingConstraints> {
self.constraints.as_ref()
}
/// <p>The scale-in and scale-out rules that comprise the automatic scaling policy.</p>
pub fn rules(&self) -> std::option::Option<&[crate::model::ScalingRule]> {
self.rules.as_deref()
}
}
impl std::fmt::Debug for AutoScalingPolicy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AutoScalingPolicy");
formatter.field("constraints", &self.constraints);
formatter.field("rules", &self.rules);
formatter.finish()
}
}
/// See [`AutoScalingPolicy`](crate::model::AutoScalingPolicy)
pub mod auto_scaling_policy {
/// A builder for [`AutoScalingPolicy`](crate::model::AutoScalingPolicy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) constraints: std::option::Option<crate::model::ScalingConstraints>,
pub(crate) rules: std::option::Option<std::vec::Vec<crate::model::ScalingRule>>,
}
impl Builder {
/// <p>The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.</p>
pub fn constraints(mut self, input: crate::model::ScalingConstraints) -> Self {
self.constraints = Some(input);
self
}
/// <p>The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.</p>
pub fn set_constraints(
mut self,
input: std::option::Option<crate::model::ScalingConstraints>,
) -> Self {
self.constraints = input;
self
}
/// Appends an item to `rules`.
///
/// To override the contents of this collection use [`set_rules`](Self::set_rules).
///
/// <p>The scale-in and scale-out rules that comprise the automatic scaling policy.</p>
pub fn rules(mut self, input: crate::model::ScalingRule) -> Self {
let mut v = self.rules.unwrap_or_default();
v.push(input);
self.rules = Some(v);
self
}
/// <p>The scale-in and scale-out rules that comprise the automatic scaling policy.</p>
pub fn set_rules(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ScalingRule>>,
) -> Self {
self.rules = input;
self
}
        /// Consumes the builder and constructs an [`AutoScalingPolicy`](crate::model::AutoScalingPolicy)
pub fn build(self) -> crate::model::AutoScalingPolicy {
crate::model::AutoScalingPolicy {
constraints: self.constraints,
rules: self.rules,
}
}
}
}
impl AutoScalingPolicy {
/// Creates a new builder-style object to manufacture [`AutoScalingPolicy`](crate::model::AutoScalingPolicy)
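    ///
    /// A minimal sketch showing how `rules` appends scaling rules to the policy
    /// (the crate name `aws_sdk_emr` and the values are assumptions):
    ///
    /// ```no_run
    /// let policy = aws_sdk_emr::model::AutoScalingPolicy::builder()
    ///     .rules(
    ///         aws_sdk_emr::model::ScalingRule::builder()
    ///             .name("scale-out-on-low-memory")
    ///             .build(),
    ///     )
    ///     .build();
    /// assert_eq!(policy.rules().map(|r| r.len()), Some(1));
    /// ```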
pub fn builder() -> crate::model::auto_scaling_policy::Builder {
crate::model::auto_scaling_policy::Builder::default()
}
}
/// <p>A scale-in or scale-out rule that defines scaling activity, including the CloudWatch metric alarm that triggers activity, how EC2 instances are added or removed, and the periodicity of adjustments. The automatic scaling policy for an instance group can comprise one or more automatic scaling rules.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ScalingRule {
/// <p>The name used to identify an automatic scaling rule. Rule names must be unique within a scaling policy.</p>
pub name: std::option::Option<std::string::String>,
/// <p>A friendly, more verbose description of the automatic scaling rule.</p>
pub description: std::option::Option<std::string::String>,
/// <p>The conditions that trigger an automatic scaling activity.</p>
pub action: std::option::Option<crate::model::ScalingAction>,
/// <p>The CloudWatch alarm definition that determines when automatic scaling activity is triggered.</p>
pub trigger: std::option::Option<crate::model::ScalingTrigger>,
}
impl ScalingRule {
/// <p>The name used to identify an automatic scaling rule. Rule names must be unique within a scaling policy.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>A friendly, more verbose description of the automatic scaling rule.</p>
pub fn description(&self) -> std::option::Option<&str> {
self.description.as_deref()
}
/// <p>The conditions that trigger an automatic scaling activity.</p>
pub fn action(&self) -> std::option::Option<&crate::model::ScalingAction> {
self.action.as_ref()
}
/// <p>The CloudWatch alarm definition that determines when automatic scaling activity is triggered.</p>
pub fn trigger(&self) -> std::option::Option<&crate::model::ScalingTrigger> {
self.trigger.as_ref()
}
}
impl std::fmt::Debug for ScalingRule {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ScalingRule");
formatter.field("name", &self.name);
formatter.field("description", &self.description);
formatter.field("action", &self.action);
formatter.field("trigger", &self.trigger);
formatter.finish()
}
}
/// See [`ScalingRule`](crate::model::ScalingRule)
pub mod scaling_rule {
/// A builder for [`ScalingRule`](crate::model::ScalingRule)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) action: std::option::Option<crate::model::ScalingAction>,
pub(crate) trigger: std::option::Option<crate::model::ScalingTrigger>,
}
impl Builder {
/// <p>The name used to identify an automatic scaling rule. Rule names must be unique within a scaling policy.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name used to identify an automatic scaling rule. Rule names must be unique within a scaling policy.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>A friendly, more verbose description of the automatic scaling rule.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
/// <p>A friendly, more verbose description of the automatic scaling rule.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
/// <p>The conditions that trigger an automatic scaling activity.</p>
pub fn action(mut self, input: crate::model::ScalingAction) -> Self {
self.action = Some(input);
self
}
/// <p>The conditions that trigger an automatic scaling activity.</p>
pub fn set_action(
mut self,
input: std::option::Option<crate::model::ScalingAction>,
) -> Self {
self.action = input;
self
}
/// <p>The CloudWatch alarm definition that determines when automatic scaling activity is triggered.</p>
pub fn trigger(mut self, input: crate::model::ScalingTrigger) -> Self {
self.trigger = Some(input);
self
}
/// <p>The CloudWatch alarm definition that determines when automatic scaling activity is triggered.</p>
pub fn set_trigger(
mut self,
input: std::option::Option<crate::model::ScalingTrigger>,
) -> Self {
self.trigger = input;
self
}
/// Consumes the builder and constructs a [`ScalingRule`](crate::model::ScalingRule)
pub fn build(self) -> crate::model::ScalingRule {
crate::model::ScalingRule {
name: self.name,
description: self.description,
action: self.action,
trigger: self.trigger,
}
}
}
}
impl ScalingRule {
/// Creates a new builder-style object to manufacture [`ScalingRule`](crate::model::ScalingRule)
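    ///
    /// An illustrative sketch of a named rule (the crate name `aws_sdk_emr` and the
    /// values are assumptions; a complete rule would also set `action` and `trigger`):
    ///
    /// ```no_run
    /// let rule = aws_sdk_emr::model::ScalingRule::builder()
    ///     .name("scale-out")
    ///     .description("Add capacity when available YARN memory is low")
    ///     .build();
    /// assert_eq!(rule.name(), Some("scale-out"));
    /// ```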
pub fn builder() -> crate::model::scaling_rule::Builder {
crate::model::scaling_rule::Builder::default()
}
}
/// <p>The conditions that trigger an automatic scaling activity.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ScalingTrigger {
/// <p>The definition of a CloudWatch metric alarm. When the defined alarm conditions are met along with other trigger parameters, scaling activity begins.</p>
pub cloud_watch_alarm_definition: std::option::Option<crate::model::CloudWatchAlarmDefinition>,
}
impl ScalingTrigger {
/// <p>The definition of a CloudWatch metric alarm. When the defined alarm conditions are met along with other trigger parameters, scaling activity begins.</p>
pub fn cloud_watch_alarm_definition(
&self,
) -> std::option::Option<&crate::model::CloudWatchAlarmDefinition> {
self.cloud_watch_alarm_definition.as_ref()
}
}
impl std::fmt::Debug for ScalingTrigger {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ScalingTrigger");
formatter.field(
"cloud_watch_alarm_definition",
&self.cloud_watch_alarm_definition,
);
formatter.finish()
}
}
/// See [`ScalingTrigger`](crate::model::ScalingTrigger)
pub mod scaling_trigger {
/// A builder for [`ScalingTrigger`](crate::model::ScalingTrigger)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cloud_watch_alarm_definition:
std::option::Option<crate::model::CloudWatchAlarmDefinition>,
}
impl Builder {
/// <p>The definition of a CloudWatch metric alarm. When the defined alarm conditions are met along with other trigger parameters, scaling activity begins.</p>
pub fn cloud_watch_alarm_definition(
mut self,
input: crate::model::CloudWatchAlarmDefinition,
) -> Self {
self.cloud_watch_alarm_definition = Some(input);
self
}
/// <p>The definition of a CloudWatch metric alarm. When the defined alarm conditions are met along with other trigger parameters, scaling activity begins.</p>
pub fn set_cloud_watch_alarm_definition(
mut self,
input: std::option::Option<crate::model::CloudWatchAlarmDefinition>,
) -> Self {
self.cloud_watch_alarm_definition = input;
self
}
/// Consumes the builder and constructs a [`ScalingTrigger`](crate::model::ScalingTrigger)
pub fn build(self) -> crate::model::ScalingTrigger {
crate::model::ScalingTrigger {
cloud_watch_alarm_definition: self.cloud_watch_alarm_definition,
}
}
}
}
impl ScalingTrigger {
/// Creates a new builder-style object to manufacture [`ScalingTrigger`](crate::model::ScalingTrigger)
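    ///
    /// A minimal sketch wrapping a CloudWatch alarm definition in a trigger
    /// (the crate name `aws_sdk_emr` and the metric values are assumptions):
    ///
    /// ```no_run
    /// let trigger = aws_sdk_emr::model::ScalingTrigger::builder()
    ///     .cloud_watch_alarm_definition(
    ///         aws_sdk_emr::model::CloudWatchAlarmDefinition::builder()
    ///             .metric_name("YARNMemoryAvailablePercentage")
    ///             .period(300)
    ///             .threshold(15.0)
    ///             .build(),
    ///     )
    ///     .build();
    /// assert!(trigger.cloud_watch_alarm_definition().is_some());
    /// ```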
pub fn builder() -> crate::model::scaling_trigger::Builder {
crate::model::scaling_trigger::Builder::default()
}
}
/// <p>The definition of a CloudWatch metric alarm, which determines when an automatic scaling activity is triggered. When the defined alarm conditions are satisfied, scaling activity begins.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CloudWatchAlarmDefinition {
/// <p>Determines how the metric specified by <code>MetricName</code> is compared to the value specified by <code>Threshold</code>.</p>
pub comparison_operator: std::option::Option<crate::model::ComparisonOperator>,
/// <p>The number of periods, in five-minute increments, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is <code>1</code>.</p>
pub evaluation_periods: std::option::Option<i32>,
/// <p>The name of the CloudWatch metric that is watched to determine an alarm condition.</p>
pub metric_name: std::option::Option<std::string::String>,
/// <p>The namespace for the CloudWatch metric. The default is <code>AWS/ElasticMapReduce</code>.</p>
pub namespace: std::option::Option<std::string::String>,
/// <p>The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify <code>300</code>.</p>
pub period: std::option::Option<i32>,
/// <p>The statistic to apply to the metric associated with the alarm. The default is <code>AVERAGE</code>.</p>
pub statistic: std::option::Option<crate::model::Statistic>,
/// <p>The value against which the specified statistic is compared.</p>
pub threshold: std::option::Option<f64>,
/// <p>The unit of measure associated with the CloudWatch metric being watched. The value specified for <code>Unit</code> must correspond to the units specified in the CloudWatch metric.</p>
pub unit: std::option::Option<crate::model::Unit>,
/// <p>A CloudWatch metric dimension.</p>
pub dimensions: std::option::Option<std::vec::Vec<crate::model::MetricDimension>>,
}
impl CloudWatchAlarmDefinition {
/// <p>Determines how the metric specified by <code>MetricName</code> is compared to the value specified by <code>Threshold</code>.</p>
pub fn comparison_operator(&self) -> std::option::Option<&crate::model::ComparisonOperator> {
self.comparison_operator.as_ref()
}
/// <p>The number of periods, in five-minute increments, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is <code>1</code>.</p>
pub fn evaluation_periods(&self) -> std::option::Option<i32> {
self.evaluation_periods
}
/// <p>The name of the CloudWatch metric that is watched to determine an alarm condition.</p>
pub fn metric_name(&self) -> std::option::Option<&str> {
self.metric_name.as_deref()
}
/// <p>The namespace for the CloudWatch metric. The default is <code>AWS/ElasticMapReduce</code>.</p>
pub fn namespace(&self) -> std::option::Option<&str> {
self.namespace.as_deref()
}
/// <p>The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify <code>300</code>.</p>
pub fn period(&self) -> std::option::Option<i32> {
self.period
}
/// <p>The statistic to apply to the metric associated with the alarm. The default is <code>AVERAGE</code>.</p>
pub fn statistic(&self) -> std::option::Option<&crate::model::Statistic> {
self.statistic.as_ref()
}
/// <p>The value against which the specified statistic is compared.</p>
pub fn threshold(&self) -> std::option::Option<f64> {
self.threshold
}
/// <p>The unit of measure associated with the CloudWatch metric being watched. The value specified for <code>Unit</code> must correspond to the units specified in the CloudWatch metric.</p>
pub fn unit(&self) -> std::option::Option<&crate::model::Unit> {
self.unit.as_ref()
}
/// <p>A CloudWatch metric dimension.</p>
pub fn dimensions(&self) -> std::option::Option<&[crate::model::MetricDimension]> {
self.dimensions.as_deref()
}
}
impl std::fmt::Debug for CloudWatchAlarmDefinition {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CloudWatchAlarmDefinition");
formatter.field("comparison_operator", &self.comparison_operator);
formatter.field("evaluation_periods", &self.evaluation_periods);
formatter.field("metric_name", &self.metric_name);
formatter.field("namespace", &self.namespace);
formatter.field("period", &self.period);
formatter.field("statistic", &self.statistic);
formatter.field("threshold", &self.threshold);
formatter.field("unit", &self.unit);
formatter.field("dimensions", &self.dimensions);
formatter.finish()
}
}
/// See [`CloudWatchAlarmDefinition`](crate::model::CloudWatchAlarmDefinition)
pub mod cloud_watch_alarm_definition {
/// A builder for [`CloudWatchAlarmDefinition`](crate::model::CloudWatchAlarmDefinition)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) comparison_operator: std::option::Option<crate::model::ComparisonOperator>,
pub(crate) evaluation_periods: std::option::Option<i32>,
pub(crate) metric_name: std::option::Option<std::string::String>,
pub(crate) namespace: std::option::Option<std::string::String>,
pub(crate) period: std::option::Option<i32>,
pub(crate) statistic: std::option::Option<crate::model::Statistic>,
pub(crate) threshold: std::option::Option<f64>,
pub(crate) unit: std::option::Option<crate::model::Unit>,
pub(crate) dimensions: std::option::Option<std::vec::Vec<crate::model::MetricDimension>>,
}
impl Builder {
/// <p>Determines how the metric specified by <code>MetricName</code> is compared to the value specified by <code>Threshold</code>.</p>
pub fn comparison_operator(mut self, input: crate::model::ComparisonOperator) -> Self {
self.comparison_operator = Some(input);
self
}
/// <p>Determines how the metric specified by <code>MetricName</code> is compared to the value specified by <code>Threshold</code>.</p>
pub fn set_comparison_operator(
mut self,
input: std::option::Option<crate::model::ComparisonOperator>,
) -> Self {
self.comparison_operator = input;
self
}
/// <p>The number of periods, in five-minute increments, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is <code>1</code>.</p>
pub fn evaluation_periods(mut self, input: i32) -> Self {
self.evaluation_periods = Some(input);
self
}
/// <p>The number of periods, in five-minute increments, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is <code>1</code>.</p>
pub fn set_evaluation_periods(mut self, input: std::option::Option<i32>) -> Self {
self.evaluation_periods = input;
self
}
/// <p>The name of the CloudWatch metric that is watched to determine an alarm condition.</p>
pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
self.metric_name = Some(input.into());
self
}
/// <p>The name of the CloudWatch metric that is watched to determine an alarm condition.</p>
pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.metric_name = input;
self
}
/// <p>The namespace for the CloudWatch metric. The default is <code>AWS/ElasticMapReduce</code>.</p>
pub fn namespace(mut self, input: impl Into<std::string::String>) -> Self {
self.namespace = Some(input.into());
self
}
/// <p>The namespace for the CloudWatch metric. The default is <code>AWS/ElasticMapReduce</code>.</p>
pub fn set_namespace(mut self, input: std::option::Option<std::string::String>) -> Self {
self.namespace = input;
self
}
/// <p>The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify <code>300</code>.</p>
pub fn period(mut self, input: i32) -> Self {
self.period = Some(input);
self
}
/// <p>The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify <code>300</code>.</p>
pub fn set_period(mut self, input: std::option::Option<i32>) -> Self {
self.period = input;
self
}
/// <p>The statistic to apply to the metric associated with the alarm. The default is <code>AVERAGE</code>.</p>
pub fn statistic(mut self, input: crate::model::Statistic) -> Self {
self.statistic = Some(input);
self
}
/// <p>The statistic to apply to the metric associated with the alarm. The default is <code>AVERAGE</code>.</p>
pub fn set_statistic(
mut self,
input: std::option::Option<crate::model::Statistic>,
) -> Self {
self.statistic = input;
self
}
/// <p>The value against which the specified statistic is compared.</p>
pub fn threshold(mut self, input: f64) -> Self {
self.threshold = Some(input);
self
}
/// <p>The value against which the specified statistic is compared.</p>
pub fn set_threshold(mut self, input: std::option::Option<f64>) -> Self {
self.threshold = input;
self
}
/// <p>The unit of measure associated with the CloudWatch metric being watched. The value specified for <code>Unit</code> must correspond to the units specified in the CloudWatch metric.</p>
pub fn unit(mut self, input: crate::model::Unit) -> Self {
self.unit = Some(input);
self
}
/// <p>The unit of measure associated with the CloudWatch metric being watched. The value specified for <code>Unit</code> must correspond to the units specified in the CloudWatch metric.</p>
pub fn set_unit(mut self, input: std::option::Option<crate::model::Unit>) -> Self {
self.unit = input;
self
}
/// Appends an item to `dimensions`.
///
/// To override the contents of this collection use [`set_dimensions`](Self::set_dimensions).
///
/// <p>A CloudWatch metric dimension.</p>
pub fn dimensions(mut self, input: crate::model::MetricDimension) -> Self {
let mut v = self.dimensions.unwrap_or_default();
v.push(input);
self.dimensions = Some(v);
self
}
/// <p>A CloudWatch metric dimension.</p>
pub fn set_dimensions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MetricDimension>>,
) -> Self {
self.dimensions = input;
self
}
/// Consumes the builder and constructs a [`CloudWatchAlarmDefinition`](crate::model::CloudWatchAlarmDefinition)
pub fn build(self) -> crate::model::CloudWatchAlarmDefinition {
crate::model::CloudWatchAlarmDefinition {
comparison_operator: self.comparison_operator,
evaluation_periods: self.evaluation_periods,
metric_name: self.metric_name,
namespace: self.namespace,
period: self.period,
statistic: self.statistic,
threshold: self.threshold,
unit: self.unit,
dimensions: self.dimensions,
}
}
}
}
impl CloudWatchAlarmDefinition {
/// Creates a new builder-style object to manufacture [`CloudWatchAlarmDefinition`](crate::model::CloudWatchAlarmDefinition)
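    ///
    /// An illustrative sketch of an alarm definition. The `ComparisonOperator` string
    /// conversion is assumed to follow the same pattern as the other enums in this
    /// module, and the crate name `aws_sdk_emr` and all values are assumptions:
    ///
    /// ```no_run
    /// let alarm = aws_sdk_emr::model::CloudWatchAlarmDefinition::builder()
    ///     .metric_name("YARNMemoryAvailablePercentage")
    ///     .comparison_operator(aws_sdk_emr::model::ComparisonOperator::from("LESS_THAN"))
    ///     .period(300)
    ///     .evaluation_periods(1)
    ///     .threshold(15.0)
    ///     .build();
    /// assert_eq!(alarm.period(), Some(300));
    /// ```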
pub fn builder() -> crate::model::cloud_watch_alarm_definition::Builder {
crate::model::cloud_watch_alarm_definition::Builder::default()
}
}
/// <p>A CloudWatch dimension, which is specified using a <code>Key</code> (known as a <code>Name</code> in CloudWatch) and <code>Value</code> pair. By default, Amazon EMR uses one dimension whose <code>Key</code> is <code>JobFlowID</code> and <code>Value</code> is a variable representing the cluster ID, which is <code>${emr.clusterId}</code>. This enables the rule to bootstrap when the cluster ID becomes available.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct MetricDimension {
/// <p>The dimension name.</p>
pub key: std::option::Option<std::string::String>,
/// <p>The dimension value.</p>
pub value: std::option::Option<std::string::String>,
}
impl MetricDimension {
/// <p>The dimension name.</p>
pub fn key(&self) -> std::option::Option<&str> {
self.key.as_deref()
}
/// <p>The dimension value.</p>
pub fn value(&self) -> std::option::Option<&str> {
self.value.as_deref()
}
}
impl std::fmt::Debug for MetricDimension {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("MetricDimension");
formatter.field("key", &self.key);
formatter.field("value", &self.value);
formatter.finish()
}
}
/// See [`MetricDimension`](crate::model::MetricDimension)
pub mod metric_dimension {
/// A builder for [`MetricDimension`](crate::model::MetricDimension)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) key: std::option::Option<std::string::String>,
pub(crate) value: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The dimension name.</p>
pub fn key(mut self, input: impl Into<std::string::String>) -> Self {
self.key = Some(input.into());
self
}
/// <p>The dimension name.</p>
pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self {
self.key = input;
self
}
/// <p>The dimension value.</p>
pub fn value(mut self, input: impl Into<std::string::String>) -> Self {
self.value = Some(input.into());
self
}
/// <p>The dimension value.</p>
pub fn set_value(mut self, input: std::option::Option<std::string::String>) -> Self {
self.value = input;
self
}
/// Consumes the builder and constructs a [`MetricDimension`](crate::model::MetricDimension)
pub fn build(self) -> crate::model::MetricDimension {
crate::model::MetricDimension {
key: self.key,
value: self.value,
}
}
}
}
impl MetricDimension {
/// Creates a new builder-style object to manufacture [`MetricDimension`](crate::model::MetricDimension)
pub fn builder() -> crate::model::metric_dimension::Builder {
crate::model::metric_dimension::Builder::default()
}
}
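// An illustrative sketch (not part of the generated service model) of the default
// dimension described in the `MetricDimension` docs: a key of `JobFlowID` (spelling as in
// the struct documentation) whose value is the `${emr.clusterId}` placeholder, appended to
// an alarm definition via `dimensions`.
#[cfg(test)]
mod metric_dimension_usage {
    #[test]
    fn builds_the_default_cluster_id_dimension() {
        let dimension = crate::model::MetricDimension::builder()
            .key("JobFlowID")
            .value("${emr.clusterId}")
            .build();
        assert_eq!(dimension.key(), Some("JobFlowID"));
        assert_eq!(dimension.value(), Some("${emr.clusterId}"));

        // Each call to `dimensions` appends one entry; `set_dimensions` replaces the whole list.
        let _alarm = crate::model::CloudWatchAlarmDefinition::builder()
            .dimensions(dimension)
            .build();
    }
}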
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum Unit {
#[allow(missing_docs)] // documentation missing in model
Bits,
#[allow(missing_docs)] // documentation missing in model
BitsPerSecond,
#[allow(missing_docs)] // documentation missing in model
Bytes,
#[allow(missing_docs)] // documentation missing in model
BytesPerSecond,
#[allow(missing_docs)] // documentation missing in model
Count,
#[allow(missing_docs)] // documentation missing in model
CountPerSecond,
#[allow(missing_docs)] // documentation missing in model
GigaBits,
#[allow(missing_docs)] // documentation missing in model
GigaBitsPerSecond,
#[allow(missing_docs)] // documentation missing in model
GigaBytes,
#[allow(missing_docs)] // documentation missing in model
GigaBytesPerSecond,
#[allow(missing_docs)] // documentation missing in model
KiloBits,
#[allow(missing_docs)] // documentation missing in model
KiloBitsPerSecond,
#[allow(missing_docs)] // documentation missing in model
KiloBytes,
#[allow(missing_docs)] // documentation missing in model
KiloBytesPerSecond,
#[allow(missing_docs)] // documentation missing in model
MegaBits,
#[allow(missing_docs)] // documentation missing in model
MegaBitsPerSecond,
#[allow(missing_docs)] // documentation missing in model
MegaBytes,
#[allow(missing_docs)] // documentation missing in model
MegaBytesPerSecond,
#[allow(missing_docs)] // documentation missing in model
MicroSeconds,
#[allow(missing_docs)] // documentation missing in model
MilliSeconds,
#[allow(missing_docs)] // documentation missing in model
None,
#[allow(missing_docs)] // documentation missing in model
Percent,
#[allow(missing_docs)] // documentation missing in model
Seconds,
#[allow(missing_docs)] // documentation missing in model
TeraBits,
#[allow(missing_docs)] // documentation missing in model
TeraBitsPerSecond,
#[allow(missing_docs)] // documentation missing in model
TeraBytes,
#[allow(missing_docs)] // documentation missing in model
TeraBytesPerSecond,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for Unit {
fn from(s: &str) -> Self {
match s {
"BITS" => Unit::Bits,
"BITS_PER_SECOND" => Unit::BitsPerSecond,
"BYTES" => Unit::Bytes,
"BYTES_PER_SECOND" => Unit::BytesPerSecond,
"COUNT" => Unit::Count,
"COUNT_PER_SECOND" => Unit::CountPerSecond,
"GIGA_BITS" => Unit::GigaBits,
"GIGA_BITS_PER_SECOND" => Unit::GigaBitsPerSecond,
"GIGA_BYTES" => Unit::GigaBytes,
"GIGA_BYTES_PER_SECOND" => Unit::GigaBytesPerSecond,
"KILO_BITS" => Unit::KiloBits,
"KILO_BITS_PER_SECOND" => Unit::KiloBitsPerSecond,
"KILO_BYTES" => Unit::KiloBytes,
"KILO_BYTES_PER_SECOND" => Unit::KiloBytesPerSecond,
"MEGA_BITS" => Unit::MegaBits,
"MEGA_BITS_PER_SECOND" => Unit::MegaBitsPerSecond,
"MEGA_BYTES" => Unit::MegaBytes,
"MEGA_BYTES_PER_SECOND" => Unit::MegaBytesPerSecond,
"MICRO_SECONDS" => Unit::MicroSeconds,
"MILLI_SECONDS" => Unit::MilliSeconds,
"NONE" => Unit::None,
"PERCENT" => Unit::Percent,
"SECONDS" => Unit::Seconds,
"TERA_BITS" => Unit::TeraBits,
"TERA_BITS_PER_SECOND" => Unit::TeraBitsPerSecond,
"TERA_BYTES" => Unit::TeraBytes,
"TERA_BYTES_PER_SECOND" => Unit::TeraBytesPerSecond,
other => Unit::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for Unit {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(Unit::from(s))
}
}
impl Unit {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
Unit::Bits => "BITS",
Unit::BitsPerSecond => "BITS_PER_SECOND",
Unit::Bytes => "BYTES",
Unit::BytesPerSecond => "BYTES_PER_SECOND",
Unit::Count => "COUNT",
Unit::CountPerSecond => "COUNT_PER_SECOND",
Unit::GigaBits => "GIGA_BITS",
Unit::GigaBitsPerSecond => "GIGA_BITS_PER_SECOND",
Unit::GigaBytes => "GIGA_BYTES",
Unit::GigaBytesPerSecond => "GIGA_BYTES_PER_SECOND",
Unit::KiloBits => "KILO_BITS",
Unit::KiloBitsPerSecond => "KILO_BITS_PER_SECOND",
Unit::KiloBytes => "KILO_BYTES",
Unit::KiloBytesPerSecond => "KILO_BYTES_PER_SECOND",
Unit::MegaBits => "MEGA_BITS",
Unit::MegaBitsPerSecond => "MEGA_BITS_PER_SECOND",
Unit::MegaBytes => "MEGA_BYTES",
Unit::MegaBytesPerSecond => "MEGA_BYTES_PER_SECOND",
Unit::MicroSeconds => "MICRO_SECONDS",
Unit::MilliSeconds => "MILLI_SECONDS",
Unit::None => "NONE",
Unit::Percent => "PERCENT",
Unit::Seconds => "SECONDS",
Unit::TeraBits => "TERA_BITS",
Unit::TeraBitsPerSecond => "TERA_BITS_PER_SECOND",
Unit::TeraBytes => "TERA_BYTES",
Unit::TeraBytesPerSecond => "TERA_BYTES_PER_SECOND",
Unit::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"BITS",
"BITS_PER_SECOND",
"BYTES",
"BYTES_PER_SECOND",
"COUNT",
"COUNT_PER_SECOND",
"GIGA_BITS",
"GIGA_BITS_PER_SECOND",
"GIGA_BYTES",
"GIGA_BYTES_PER_SECOND",
"KILO_BITS",
"KILO_BITS_PER_SECOND",
"KILO_BYTES",
"KILO_BYTES_PER_SECOND",
"MEGA_BITS",
"MEGA_BITS_PER_SECOND",
"MEGA_BYTES",
"MEGA_BYTES_PER_SECOND",
"MICRO_SECONDS",
"MILLI_SECONDS",
"NONE",
"PERCENT",
"SECONDS",
"TERA_BITS",
"TERA_BITS_PER_SECOND",
"TERA_BYTES",
"TERA_BYTES_PER_SECOND",
]
}
}
impl AsRef<str> for Unit {
fn as_ref(&self) -> &str {
self.as_str()
}
}
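// A brief sketch (not part of the generated service model) of the string round-tripping
// shown above: known strings map to concrete `Unit` variants, while unrecognised strings
// fall back to `Unit::Unknown`, which preserves the raw value for forward compatibility.
#[cfg(test)]
mod unit_string_conversion {
    use std::str::FromStr;

    #[test]
    fn round_trips_known_values_and_preserves_unknown_ones() {
        let percent = crate::model::Unit::from_str("PERCENT").unwrap();
        assert_eq!(percent, crate::model::Unit::Percent);
        assert_eq!(percent.as_str(), "PERCENT");

        // A value added to the service after this code was generated still parses.
        let future = crate::model::Unit::from("SOME_FUTURE_UNIT");
        assert!(matches!(future, crate::model::Unit::Unknown(_)));
        assert_eq!(future.as_str(), "SOME_FUTURE_UNIT");
    }
}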
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum Statistic {
#[allow(missing_docs)] // documentation missing in model
Average,
#[allow(missing_docs)] // documentation missing in model
Maximum,
#[allow(missing_docs)] // documentation missing in model
Minimum,
#[allow(missing_docs)] // documentation missing in model
SampleCount,
#[allow(missing_docs)] // documentation missing in model
Sum,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for Statistic {
fn from(s: &str) -> Self {
match s {
"AVERAGE" => Statistic::Average,
"MAXIMUM" => Statistic::Maximum,
"MINIMUM" => Statistic::Minimum,
"SAMPLE_COUNT" => Statistic::SampleCount,
"SUM" => Statistic::Sum,
other => Statistic::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for Statistic {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(Statistic::from(s))
}
}
impl Statistic {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
Statistic::Average => "AVERAGE",
Statistic::Maximum => "MAXIMUM",
Statistic::Minimum => "MINIMUM",
Statistic::SampleCount => "SAMPLE_COUNT",
Statistic::Sum => "SUM",
Statistic::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["AVERAGE", "MAXIMUM", "MINIMUM", "SAMPLE_COUNT", "SUM"]
}
}
impl AsRef<str> for Statistic {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ComparisonOperator {
#[allow(missing_docs)] // documentation missing in model
GreaterThan,
#[allow(missing_docs)] // documentation missing in model
GreaterThanOrEqual,
#[allow(missing_docs)] // documentation missing in model
LessThan,
#[allow(missing_docs)] // documentation missing in model
LessThanOrEqual,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ComparisonOperator {
fn from(s: &str) -> Self {
match s {
"GREATER_THAN" => ComparisonOperator::GreaterThan,
"GREATER_THAN_OR_EQUAL" => ComparisonOperator::GreaterThanOrEqual,
"LESS_THAN" => ComparisonOperator::LessThan,
"LESS_THAN_OR_EQUAL" => ComparisonOperator::LessThanOrEqual,
other => ComparisonOperator::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ComparisonOperator {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ComparisonOperator::from(s))
}
}
impl ComparisonOperator {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ComparisonOperator::GreaterThan => "GREATER_THAN",
ComparisonOperator::GreaterThanOrEqual => "GREATER_THAN_OR_EQUAL",
ComparisonOperator::LessThan => "LESS_THAN",
ComparisonOperator::LessThanOrEqual => "LESS_THAN_OR_EQUAL",
ComparisonOperator::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"GREATER_THAN",
"GREATER_THAN_OR_EQUAL",
"LESS_THAN",
"LESS_THAN_OR_EQUAL",
]
}
}
impl AsRef<str> for ComparisonOperator {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ScalingAction {
/// <p>Not available for instance groups. Instance groups use the market type specified for the group.</p>
pub market: std::option::Option<crate::model::MarketType>,
/// <p>The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.</p>
pub simple_scaling_policy_configuration:
std::option::Option<crate::model::SimpleScalingPolicyConfiguration>,
}
impl ScalingAction {
/// <p>Not available for instance groups. Instance groups use the market type specified for the group.</p>
pub fn market(&self) -> std::option::Option<&crate::model::MarketType> {
self.market.as_ref()
}
/// <p>The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.</p>
pub fn simple_scaling_policy_configuration(
&self,
) -> std::option::Option<&crate::model::SimpleScalingPolicyConfiguration> {
self.simple_scaling_policy_configuration.as_ref()
}
}
impl std::fmt::Debug for ScalingAction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ScalingAction");
formatter.field("market", &self.market);
formatter.field(
"simple_scaling_policy_configuration",
&self.simple_scaling_policy_configuration,
);
formatter.finish()
}
}
/// See [`ScalingAction`](crate::model::ScalingAction)
pub mod scaling_action {
/// A builder for [`ScalingAction`](crate::model::ScalingAction)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) market: std::option::Option<crate::model::MarketType>,
pub(crate) simple_scaling_policy_configuration:
std::option::Option<crate::model::SimpleScalingPolicyConfiguration>,
}
impl Builder {
/// <p>Not available for instance groups. Instance groups use the market type specified for the group.</p>
pub fn market(mut self, input: crate::model::MarketType) -> Self {
self.market = Some(input);
self
}
/// <p>Not available for instance groups. Instance groups use the market type specified for the group.</p>
pub fn set_market(mut self, input: std::option::Option<crate::model::MarketType>) -> Self {
self.market = input;
self
}
/// <p>The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.</p>
pub fn simple_scaling_policy_configuration(
mut self,
input: crate::model::SimpleScalingPolicyConfiguration,
) -> Self {
self.simple_scaling_policy_configuration = Some(input);
self
}
/// <p>The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.</p>
pub fn set_simple_scaling_policy_configuration(
mut self,
input: std::option::Option<crate::model::SimpleScalingPolicyConfiguration>,
) -> Self {
self.simple_scaling_policy_configuration = input;
self
}
/// Consumes the builder and constructs a [`ScalingAction`](crate::model::ScalingAction)
pub fn build(self) -> crate::model::ScalingAction {
crate::model::ScalingAction {
market: self.market,
simple_scaling_policy_configuration: self.simple_scaling_policy_configuration,
}
}
}
}
impl ScalingAction {
/// Creates a new builder-style object to manufacture [`ScalingAction`](crate::model::ScalingAction)
pub fn builder() -> crate::model::scaling_action::Builder {
crate::model::scaling_action::Builder::default()
}
}
/// <p>An automatic scaling configuration, which describes how the policy adds or removes instances, the cooldown period, and the number of EC2 instances that will be added each time the CloudWatch metric alarm condition is satisfied.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SimpleScalingPolicyConfiguration {
/// <p>The way in which EC2 instances are added (if <code>ScalingAdjustment</code> is a positive number) or terminated (if <code>ScalingAdjustment</code> is a negative number) each time the scaling activity is triggered. <code>CHANGE_IN_CAPACITY</code> is the default. <code>CHANGE_IN_CAPACITY</code> indicates that the EC2 instance count increments or decrements by <code>ScalingAdjustment</code>, which should be expressed as an integer. <code>PERCENT_CHANGE_IN_CAPACITY</code> indicates the instance count increments or decrements by the percentage specified by <code>ScalingAdjustment</code>, which should be expressed as an integer. For example, 20 indicates an increase in 20% increments of cluster capacity. <code>EXACT_CAPACITY</code> indicates the scaling activity results in an instance group with the number of EC2 instances specified by <code>ScalingAdjustment</code>, which should be expressed as a positive integer.</p>
pub adjustment_type: std::option::Option<crate::model::AdjustmentType>,
/// <p>The amount by which to scale in or scale out, based on the specified <code>AdjustmentType</code>. A positive value adds to the instance group's EC2 instance count while a negative number removes instances. If <code>AdjustmentType</code> is set to <code>EXACT_CAPACITY</code>, the number should only be a positive integer. If <code>AdjustmentType</code> is set to <code>PERCENT_CHANGE_IN_CAPACITY</code>, the value should express the percentage as an integer. For example, -20 indicates a decrease in 20% increments of cluster capacity.</p>
pub scaling_adjustment: std::option::Option<i32>,
/// <p>The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start. The default value is 0.</p>
pub cool_down: std::option::Option<i32>,
}
impl SimpleScalingPolicyConfiguration {
/// <p>The way in which EC2 instances are added (if <code>ScalingAdjustment</code> is a positive number) or terminated (if <code>ScalingAdjustment</code> is a negative number) each time the scaling activity is triggered. <code>CHANGE_IN_CAPACITY</code> is the default. <code>CHANGE_IN_CAPACITY</code> indicates that the EC2 instance count increments or decrements by <code>ScalingAdjustment</code>, which should be expressed as an integer. <code>PERCENT_CHANGE_IN_CAPACITY</code> indicates the instance count increments or decrements by the percentage specified by <code>ScalingAdjustment</code>, which should be expressed as an integer. For example, 20 indicates an increase in 20% increments of cluster capacity. <code>EXACT_CAPACITY</code> indicates the scaling activity results in an instance group with the number of EC2 instances specified by <code>ScalingAdjustment</code>, which should be expressed as a positive integer.</p>
pub fn adjustment_type(&self) -> std::option::Option<&crate::model::AdjustmentType> {
self.adjustment_type.as_ref()
}
/// <p>The amount by which to scale in or scale out, based on the specified <code>AdjustmentType</code>. A positive value adds to the instance group's EC2 instance count while a negative number removes instances. If <code>AdjustmentType</code> is set to <code>EXACT_CAPACITY</code>, the number should only be a positive integer. If <code>AdjustmentType</code> is set to <code>PERCENT_CHANGE_IN_CAPACITY</code>, the value should express the percentage as an integer. For example, -20 indicates a decrease in 20% increments of cluster capacity.</p>
pub fn scaling_adjustment(&self) -> std::option::Option<i32> {
self.scaling_adjustment
}
/// <p>The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start. The default value is 0.</p>
pub fn cool_down(&self) -> std::option::Option<i32> {
self.cool_down
}
}
impl std::fmt::Debug for SimpleScalingPolicyConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("SimpleScalingPolicyConfiguration");
formatter.field("adjustment_type", &self.adjustment_type);
formatter.field("scaling_adjustment", &self.scaling_adjustment);
formatter.field("cool_down", &self.cool_down);
formatter.finish()
}
}
/// See [`SimpleScalingPolicyConfiguration`](crate::model::SimpleScalingPolicyConfiguration)
pub mod simple_scaling_policy_configuration {
/// A builder for [`SimpleScalingPolicyConfiguration`](crate::model::SimpleScalingPolicyConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) adjustment_type: std::option::Option<crate::model::AdjustmentType>,
pub(crate) scaling_adjustment: std::option::Option<i32>,
pub(crate) cool_down: std::option::Option<i32>,
}
impl Builder {
/// <p>The way in which EC2 instances are added (if <code>ScalingAdjustment</code> is a positive number) or terminated (if <code>ScalingAdjustment</code> is a negative number) each time the scaling activity is triggered. <code>CHANGE_IN_CAPACITY</code> is the default. <code>CHANGE_IN_CAPACITY</code> indicates that the EC2 instance count increments or decrements by <code>ScalingAdjustment</code>, which should be expressed as an integer. <code>PERCENT_CHANGE_IN_CAPACITY</code> indicates the instance count increments or decrements by the percentage specified by <code>ScalingAdjustment</code>, which should be expressed as an integer. For example, 20 indicates an increase in 20% increments of cluster capacity. <code>EXACT_CAPACITY</code> indicates the scaling activity results in an instance group with the number of EC2 instances specified by <code>ScalingAdjustment</code>, which should be expressed as a positive integer.</p>
pub fn adjustment_type(mut self, input: crate::model::AdjustmentType) -> Self {
self.adjustment_type = Some(input);
self
}
/// <p>The way in which EC2 instances are added (if <code>ScalingAdjustment</code> is a positive number) or terminated (if <code>ScalingAdjustment</code> is a negative number) each time the scaling activity is triggered. <code>CHANGE_IN_CAPACITY</code> is the default. <code>CHANGE_IN_CAPACITY</code> indicates that the EC2 instance count increments or decrements by <code>ScalingAdjustment</code>, which should be expressed as an integer. <code>PERCENT_CHANGE_IN_CAPACITY</code> indicates the instance count increments or decrements by the percentage specified by <code>ScalingAdjustment</code>, which should be expressed as an integer. For example, 20 indicates an increase in 20% increments of cluster capacity. <code>EXACT_CAPACITY</code> indicates the scaling activity results in an instance group with the number of EC2 instances specified by <code>ScalingAdjustment</code>, which should be expressed as a positive integer.</p>
pub fn set_adjustment_type(
mut self,
input: std::option::Option<crate::model::AdjustmentType>,
) -> Self {
self.adjustment_type = input;
self
}
/// <p>The amount by which to scale in or scale out, based on the specified <code>AdjustmentType</code>. A positive value adds to the instance group's EC2 instance count while a negative number removes instances. If <code>AdjustmentType</code> is set to <code>EXACT_CAPACITY</code>, the number should only be a positive integer. If <code>AdjustmentType</code> is set to <code>PERCENT_CHANGE_IN_CAPACITY</code>, the value should express the percentage as an integer. For example, -20 indicates a decrease in 20% increments of cluster capacity.</p>
pub fn scaling_adjustment(mut self, input: i32) -> Self {
self.scaling_adjustment = Some(input);
self
}
/// <p>The amount by which to scale in or scale out, based on the specified <code>AdjustmentType</code>. A positive value adds to the instance group's EC2 instance count while a negative number removes instances. If <code>AdjustmentType</code> is set to <code>EXACT_CAPACITY</code>, the number should only be a positive integer. If <code>AdjustmentType</code> is set to <code>PERCENT_CHANGE_IN_CAPACITY</code>, the value should express the percentage as an integer. For example, -20 indicates a decrease in 20% increments of cluster capacity.</p>
pub fn set_scaling_adjustment(mut self, input: std::option::Option<i32>) -> Self {
self.scaling_adjustment = input;
self
}
/// <p>The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start. The default value is 0.</p>
pub fn cool_down(mut self, input: i32) -> Self {
self.cool_down = Some(input);
self
}
/// <p>The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start. The default value is 0.</p>
pub fn set_cool_down(mut self, input: std::option::Option<i32>) -> Self {
self.cool_down = input;
self
}
/// Consumes the builder and constructs a [`SimpleScalingPolicyConfiguration`](crate::model::SimpleScalingPolicyConfiguration)
pub fn build(self) -> crate::model::SimpleScalingPolicyConfiguration {
crate::model::SimpleScalingPolicyConfiguration {
adjustment_type: self.adjustment_type,
scaling_adjustment: self.scaling_adjustment,
cool_down: self.cool_down,
}
}
}
}
impl SimpleScalingPolicyConfiguration {
/// Creates a new builder-style object to manufacture [`SimpleScalingPolicyConfiguration`](crate::model::SimpleScalingPolicyConfiguration)
pub fn builder() -> crate::model::simple_scaling_policy_configuration::Builder {
crate::model::simple_scaling_policy_configuration::Builder::default()
}
}
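// An illustrative sketch (not part of the generated service model) of a scale-out action:
// CHANGE_IN_CAPACITY with a ScalingAdjustment of +1 instance and a 300-second cooldown,
// wrapped in a `ScalingAction`. The specific numbers are assumptions chosen for the
// example, not defaults taken from the service model.
#[cfg(test)]
mod simple_scaling_policy_configuration_usage {
    #[test]
    fn builds_a_change_in_capacity_scale_out_action() {
        let config = crate::model::SimpleScalingPolicyConfiguration::builder()
            .adjustment_type(crate::model::AdjustmentType::ChangeInCapacity)
            // Positive values add instances; negative values would remove them.
            .scaling_adjustment(1)
            .cool_down(300)
            .build();

        let action = crate::model::ScalingAction::builder()
            .simple_scaling_policy_configuration(config)
            .build();
        assert!(action.simple_scaling_policy_configuration().is_some());
    }
}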
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum AdjustmentType {
#[allow(missing_docs)] // documentation missing in model
ChangeInCapacity,
#[allow(missing_docs)] // documentation missing in model
ExactCapacity,
#[allow(missing_docs)] // documentation missing in model
PercentChangeInCapacity,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for AdjustmentType {
fn from(s: &str) -> Self {
match s {
"CHANGE_IN_CAPACITY" => AdjustmentType::ChangeInCapacity,
"EXACT_CAPACITY" => AdjustmentType::ExactCapacity,
"PERCENT_CHANGE_IN_CAPACITY" => AdjustmentType::PercentChangeInCapacity,
other => AdjustmentType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for AdjustmentType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(AdjustmentType::from(s))
}
}
impl AdjustmentType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
AdjustmentType::ChangeInCapacity => "CHANGE_IN_CAPACITY",
AdjustmentType::ExactCapacity => "EXACT_CAPACITY",
AdjustmentType::PercentChangeInCapacity => "PERCENT_CHANGE_IN_CAPACITY",
AdjustmentType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"CHANGE_IN_CAPACITY",
"EXACT_CAPACITY",
"PERCENT_CHANGE_IN_CAPACITY",
]
}
}
impl AsRef<str> for AdjustmentType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum MarketType {
#[allow(missing_docs)] // documentation missing in model
OnDemand,
#[allow(missing_docs)] // documentation missing in model
Spot,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for MarketType {
fn from(s: &str) -> Self {
match s {
"ON_DEMAND" => MarketType::OnDemand,
"SPOT" => MarketType::Spot,
other => MarketType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for MarketType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(MarketType::from(s))
}
}
impl MarketType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
MarketType::OnDemand => "ON_DEMAND",
MarketType::Spot => "SPOT",
MarketType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["ON_DEMAND", "SPOT"]
}
}
impl AsRef<str> for MarketType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activities triggered by automatic scaling rules will not cause an instance group to grow above or below these limits.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ScalingConstraints {
/// <p>The lower boundary of EC2 instances in an instance group below which scaling activities are not allowed to shrink. Scale-in activities will not terminate instances below this boundary.</p>
pub min_capacity: std::option::Option<i32>,
/// <p>The upper boundary of EC2 instances in an instance group beyond which scaling activities are not allowed to grow. Scale-out activities will not add instances beyond this boundary.</p>
pub max_capacity: std::option::Option<i32>,
}
impl ScalingConstraints {
/// <p>The lower boundary of EC2 instances in an instance group below which scaling activities are not allowed to shrink. Scale-in activities will not terminate instances below this boundary.</p>
pub fn min_capacity(&self) -> std::option::Option<i32> {
self.min_capacity
}
/// <p>The upper boundary of EC2 instances in an instance group beyond which scaling activities are not allowed to grow. Scale-out activities will not add instances beyond this boundary.</p>
pub fn max_capacity(&self) -> std::option::Option<i32> {
self.max_capacity
}
}
impl std::fmt::Debug for ScalingConstraints {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ScalingConstraints");
formatter.field("min_capacity", &self.min_capacity);
formatter.field("max_capacity", &self.max_capacity);
formatter.finish()
}
}
/// See [`ScalingConstraints`](crate::model::ScalingConstraints)
pub mod scaling_constraints {
/// A builder for [`ScalingConstraints`](crate::model::ScalingConstraints)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) min_capacity: std::option::Option<i32>,
pub(crate) max_capacity: std::option::Option<i32>,
}
impl Builder {
/// <p>The lower boundary of EC2 instances in an instance group below which scaling activities are not allowed to shrink. Scale-in activities will not terminate instances below this boundary.</p>
pub fn min_capacity(mut self, input: i32) -> Self {
self.min_capacity = Some(input);
self
}
/// <p>The lower boundary of EC2 instances in an instance group below which scaling activities are not allowed to shrink. Scale-in activities will not terminate instances below this boundary.</p>
pub fn set_min_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.min_capacity = input;
self
}
/// <p>The upper boundary of EC2 instances in an instance group beyond which scaling activities are not allowed to grow. Scale-out activities will not add instances beyond this boundary.</p>
pub fn max_capacity(mut self, input: i32) -> Self {
self.max_capacity = Some(input);
self
}
/// <p>The upper boundary of EC2 instances in an instance group beyond which scaling activities are not allowed to grow. Scale-out activities will not add instances beyond this boundary.</p>
pub fn set_max_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.max_capacity = input;
self
}
/// Consumes the builder and constructs a [`ScalingConstraints`](crate::model::ScalingConstraints)
pub fn build(self) -> crate::model::ScalingConstraints {
crate::model::ScalingConstraints {
min_capacity: self.min_capacity,
max_capacity: self.max_capacity,
}
}
}
}
impl ScalingConstraints {
/// Creates a new builder-style object to manufacture [`ScalingConstraints`](crate::model::ScalingConstraints)
pub fn builder() -> crate::model::scaling_constraints::Builder {
crate::model::scaling_constraints::Builder::default()
}
}
/// <p>A configuration for Amazon EMR block public access. When <code>BlockPublicSecurityGroupRules</code> is set to <code>true</code>, Amazon EMR prevents cluster creation if one of the cluster's security groups has a rule that allows inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using <code>PermittedPublicSecurityGroupRuleRanges</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct BlockPublicAccessConfiguration {
/// <p>Indicates whether Amazon EMR block public access is enabled (<code>true</code>) or disabled (<code>false</code>). By default, the value is <code>false</code> for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is <code>true</code>.</p>
pub block_public_security_group_rules: bool,
    /// <p>Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for <code>PermittedPublicSecurityGroupRuleRanges</code>, Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 ::/0 as the source.</p>
/// <p>By default, Port 22, which is used for SSH access to the cluster EC2 instances, is in the list of <code>PermittedPublicSecurityGroupRuleRanges</code>.</p>
pub permitted_public_security_group_rule_ranges:
std::option::Option<std::vec::Vec<crate::model::PortRange>>,
/// <p>The classification within a configuration.</p>
pub classification: std::option::Option<std::string::String>,
/// <p>A list of additional configurations to apply within a configuration object.</p>
pub configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
/// <p>A set of properties specified within a configuration classification.</p>
pub properties:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl BlockPublicAccessConfiguration {
/// <p>Indicates whether Amazon EMR block public access is enabled (<code>true</code>) or disabled (<code>false</code>). By default, the value is <code>false</code> for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is <code>true</code>.</p>
pub fn block_public_security_group_rules(&self) -> bool {
self.block_public_security_group_rules
}
    /// <p>Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for <code>PermittedPublicSecurityGroupRuleRanges</code>, Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 ::/0 as the source.</p>
/// <p>By default, Port 22, which is used for SSH access to the cluster EC2 instances, is in the list of <code>PermittedPublicSecurityGroupRuleRanges</code>.</p>
pub fn permitted_public_security_group_rule_ranges(
&self,
) -> std::option::Option<&[crate::model::PortRange]> {
self.permitted_public_security_group_rule_ranges.as_deref()
}
/// <p>The classification within a configuration.</p>
pub fn classification(&self) -> std::option::Option<&str> {
self.classification.as_deref()
}
/// <p>A list of additional configurations to apply within a configuration object.</p>
pub fn configurations(&self) -> std::option::Option<&[crate::model::Configuration]> {
self.configurations.as_deref()
}
/// <p>A set of properties specified within a configuration classification.</p>
pub fn properties(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.properties.as_ref()
}
}
impl std::fmt::Debug for BlockPublicAccessConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("BlockPublicAccessConfiguration");
formatter.field(
"block_public_security_group_rules",
&self.block_public_security_group_rules,
);
formatter.field(
"permitted_public_security_group_rule_ranges",
&self.permitted_public_security_group_rule_ranges,
);
formatter.field("classification", &self.classification);
formatter.field("configurations", &self.configurations);
formatter.field("properties", &self.properties);
formatter.finish()
}
}
/// See [`BlockPublicAccessConfiguration`](crate::model::BlockPublicAccessConfiguration)
pub mod block_public_access_configuration {
/// A builder for [`BlockPublicAccessConfiguration`](crate::model::BlockPublicAccessConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) block_public_security_group_rules: std::option::Option<bool>,
pub(crate) permitted_public_security_group_rule_ranges:
std::option::Option<std::vec::Vec<crate::model::PortRange>>,
pub(crate) classification: std::option::Option<std::string::String>,
pub(crate) configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
pub(crate) properties: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
}
impl Builder {
/// <p>Indicates whether Amazon EMR block public access is enabled (<code>true</code>) or disabled (<code>false</code>). By default, the value is <code>false</code> for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is <code>true</code>.</p>
pub fn block_public_security_group_rules(mut self, input: bool) -> Self {
self.block_public_security_group_rules = Some(input);
self
}
/// <p>Indicates whether Amazon EMR block public access is enabled (<code>true</code>) or disabled (<code>false</code>). By default, the value is <code>false</code> for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is <code>true</code>.</p>
pub fn set_block_public_security_group_rules(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.block_public_security_group_rules = input;
self
}
/// Appends an item to `permitted_public_security_group_rule_ranges`.
///
/// To override the contents of this collection use [`set_permitted_public_security_group_rule_ranges`](Self::set_permitted_public_security_group_rule_ranges).
///
        /// <p>Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for <code>PermittedPublicSecurityGroupRuleRanges</code>, Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 ::/0 as the source.</p>
/// <p>By default, Port 22, which is used for SSH access to the cluster EC2 instances, is in the list of <code>PermittedPublicSecurityGroupRuleRanges</code>.</p>
pub fn permitted_public_security_group_rule_ranges(
mut self,
input: crate::model::PortRange,
) -> Self {
let mut v = self
.permitted_public_security_group_rule_ranges
.unwrap_or_default();
v.push(input);
self.permitted_public_security_group_rule_ranges = Some(v);
self
}
        /// <p>Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for <code>PermittedPublicSecurityGroupRuleRanges</code>, Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 ::/0 as the source.</p>
/// <p>By default, Port 22, which is used for SSH access to the cluster EC2 instances, is in the list of <code>PermittedPublicSecurityGroupRuleRanges</code>.</p>
pub fn set_permitted_public_security_group_rule_ranges(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::PortRange>>,
) -> Self {
self.permitted_public_security_group_rule_ranges = input;
self
}
/// <p>The classification within a configuration.</p>
pub fn classification(mut self, input: impl Into<std::string::String>) -> Self {
self.classification = Some(input.into());
self
}
/// <p>The classification within a configuration.</p>
pub fn set_classification(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.classification = input;
self
}
/// Appends an item to `configurations`.
///
/// To override the contents of this collection use [`set_configurations`](Self::set_configurations).
///
/// <p>A list of additional configurations to apply within a configuration object.</p>
pub fn configurations(mut self, input: crate::model::Configuration) -> Self {
let mut v = self.configurations.unwrap_or_default();
v.push(input);
self.configurations = Some(v);
self
}
/// <p>A list of additional configurations to apply within a configuration object.</p>
pub fn set_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
) -> Self {
self.configurations = input;
self
}
/// Adds a key-value pair to `properties`.
///
/// To override the contents of this collection use [`set_properties`](Self::set_properties).
///
/// <p>A set of properties specified within a configuration classification.</p>
pub fn properties(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.properties.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.properties = Some(hash_map);
self
}
/// <p>A set of properties specified within a configuration classification.</p>
pub fn set_properties(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.properties = input;
self
}
/// Consumes the builder and constructs a [`BlockPublicAccessConfiguration`](crate::model::BlockPublicAccessConfiguration)
pub fn build(self) -> crate::model::BlockPublicAccessConfiguration {
crate::model::BlockPublicAccessConfiguration {
block_public_security_group_rules: self
.block_public_security_group_rules
.unwrap_or_default(),
permitted_public_security_group_rule_ranges: self
.permitted_public_security_group_rule_ranges,
classification: self.classification,
configurations: self.configurations,
properties: self.properties,
}
}
}
}
impl BlockPublicAccessConfiguration {
/// Creates a new builder-style object to manufacture [`BlockPublicAccessConfiguration`](crate::model::BlockPublicAccessConfiguration)
pub fn builder() -> crate::model::block_public_access_configuration::Builder {
crate::model::block_public_access_configuration::Builder::default()
}
}
/// <p>A port range that is permitted to allow inbound traffic from all public IP addresses. To specify a single port, use the same value for <code>MinRange</code> and <code>MaxRange</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PortRange {
/// <p>The smallest port number in a specified range of port numbers.</p>
pub min_range: std::option::Option<i32>,
    /// <p>The largest port number in a specified range of port numbers.</p>
pub max_range: std::option::Option<i32>,
}
impl PortRange {
/// <p>The smallest port number in a specified range of port numbers.</p>
pub fn min_range(&self) -> std::option::Option<i32> {
self.min_range
}
    /// <p>The largest port number in a specified range of port numbers.</p>
pub fn max_range(&self) -> std::option::Option<i32> {
self.max_range
}
}
impl std::fmt::Debug for PortRange {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PortRange");
formatter.field("min_range", &self.min_range);
formatter.field("max_range", &self.max_range);
formatter.finish()
}
}
/// See [`PortRange`](crate::model::PortRange)
pub mod port_range {
/// A builder for [`PortRange`](crate::model::PortRange)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) min_range: std::option::Option<i32>,
pub(crate) max_range: std::option::Option<i32>,
}
impl Builder {
/// <p>The smallest port number in a specified range of port numbers.</p>
pub fn min_range(mut self, input: i32) -> Self {
self.min_range = Some(input);
self
}
/// <p>The smallest port number in a specified range of port numbers.</p>
pub fn set_min_range(mut self, input: std::option::Option<i32>) -> Self {
self.min_range = input;
self
}
        /// <p>The largest port number in a specified range of port numbers.</p>
pub fn max_range(mut self, input: i32) -> Self {
self.max_range = Some(input);
self
}
        /// <p>The largest port number in a specified range of port numbers.</p>
pub fn set_max_range(mut self, input: std::option::Option<i32>) -> Self {
self.max_range = input;
self
}
/// Consumes the builder and constructs a [`PortRange`](crate::model::PortRange)
pub fn build(self) -> crate::model::PortRange {
crate::model::PortRange {
min_range: self.min_range,
max_range: self.max_range,
}
}
}
}
impl PortRange {
/// Creates a new builder-style object to manufacture [`PortRange`](crate::model::PortRange)
pub fn builder() -> crate::model::port_range::Builder {
crate::model::port_range::Builder::default()
}
}
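// A minimal sketch (not part of the generated service model) combining the two types above:
// a single-port `PortRange` (the same value for MinRange and MaxRange, as the docs describe)
// added to the permitted exceptions of a block-public-access configuration. Port 22 mirrors
// the SSH default mentioned in the field documentation.
#[cfg(test)]
mod block_public_access_configuration_usage {
    #[test]
    fn permits_ssh_while_blocking_other_public_rules() {
        let ssh = crate::model::PortRange::builder()
            .min_range(22)
            .max_range(22)
            .build();

        let config = crate::model::BlockPublicAccessConfiguration::builder()
            .block_public_security_group_rules(true)
            .permitted_public_security_group_rule_ranges(ssh)
            .build();

        assert!(config.block_public_security_group_rules());
        assert_eq!(
            config
                .permitted_public_security_group_rule_ranges()
                .map(|ranges| ranges.len()),
            Some(1)
        );
    }
}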
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See <code>PutAutoScalingPolicy</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AutoScalingPolicyDescription {
/// <p>The status of an automatic scaling policy. </p>
pub status: std::option::Option<crate::model::AutoScalingPolicyStatus>,
/// <p>The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.</p>
pub constraints: std::option::Option<crate::model::ScalingConstraints>,
/// <p>The scale-in and scale-out rules that comprise the automatic scaling policy.</p>
pub rules: std::option::Option<std::vec::Vec<crate::model::ScalingRule>>,
}
impl AutoScalingPolicyDescription {
/// <p>The status of an automatic scaling policy. </p>
pub fn status(&self) -> std::option::Option<&crate::model::AutoScalingPolicyStatus> {
self.status.as_ref()
}
/// <p>The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.</p>
pub fn constraints(&self) -> std::option::Option<&crate::model::ScalingConstraints> {
self.constraints.as_ref()
}
/// <p>The scale-in and scale-out rules that comprise the automatic scaling policy.</p>
pub fn rules(&self) -> std::option::Option<&[crate::model::ScalingRule]> {
self.rules.as_deref()
}
}
impl std::fmt::Debug for AutoScalingPolicyDescription {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AutoScalingPolicyDescription");
formatter.field("status", &self.status);
formatter.field("constraints", &self.constraints);
formatter.field("rules", &self.rules);
formatter.finish()
}
}
/// See [`AutoScalingPolicyDescription`](crate::model::AutoScalingPolicyDescription)
pub mod auto_scaling_policy_description {
/// A builder for [`AutoScalingPolicyDescription`](crate::model::AutoScalingPolicyDescription)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) status: std::option::Option<crate::model::AutoScalingPolicyStatus>,
pub(crate) constraints: std::option::Option<crate::model::ScalingConstraints>,
pub(crate) rules: std::option::Option<std::vec::Vec<crate::model::ScalingRule>>,
}
impl Builder {
/// <p>The status of an automatic scaling policy. </p>
pub fn status(mut self, input: crate::model::AutoScalingPolicyStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The status of an automatic scaling policy. </p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::AutoScalingPolicyStatus>,
) -> Self {
self.status = input;
self
}
/// <p>The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.</p>
pub fn constraints(mut self, input: crate::model::ScalingConstraints) -> Self {
self.constraints = Some(input);
self
}
/// <p>The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.</p>
pub fn set_constraints(
mut self,
input: std::option::Option<crate::model::ScalingConstraints>,
) -> Self {
self.constraints = input;
self
}
/// Appends an item to `rules`.
///
/// To override the contents of this collection use [`set_rules`](Self::set_rules).
///
/// <p>The scale-in and scale-out rules that comprise the automatic scaling policy.</p>
pub fn rules(mut self, input: crate::model::ScalingRule) -> Self {
let mut v = self.rules.unwrap_or_default();
v.push(input);
self.rules = Some(v);
self
}
/// <p>The scale-in and scale-out rules that comprise the automatic scaling policy.</p>
pub fn set_rules(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ScalingRule>>,
) -> Self {
self.rules = input;
self
}
/// Consumes the builder and constructs a [`AutoScalingPolicyDescription`](crate::model::AutoScalingPolicyDescription)
pub fn build(self) -> crate::model::AutoScalingPolicyDescription {
crate::model::AutoScalingPolicyDescription {
status: self.status,
constraints: self.constraints,
rules: self.rules,
}
}
}
}
impl AutoScalingPolicyDescription {
/// Creates a new builder-style object to manufacture [`AutoScalingPolicyDescription`](crate::model::AutoScalingPolicyDescription)
pub fn builder() -> crate::model::auto_scaling_policy_description::Builder {
crate::model::auto_scaling_policy_description::Builder::default()
}
}
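// A short sketch (not part of the generated service model) showing scaling constraints
// attached to an auto-scaling policy description. The 2..=10 instance bounds are
// illustrative assumptions; rules and status are omitted for brevity.
#[cfg(test)]
mod auto_scaling_policy_description_usage {
    #[test]
    fn bounds_the_instance_group_between_two_and_ten_instances() {
        let constraints = crate::model::ScalingConstraints::builder()
            .min_capacity(2)
            .max_capacity(10)
            .build();

        let description = crate::model::AutoScalingPolicyDescription::builder()
            .constraints(constraints)
            .build();

        let bounds = description.constraints().expect("constraints were set");
        assert_eq!(bounds.min_capacity(), Some(2));
        assert_eq!(bounds.max_capacity(), Some(10));
    }
}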
/// <p>The status of an automatic scaling policy. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AutoScalingPolicyStatus {
/// <p>Indicates the status of the automatic scaling policy.</p>
pub state: std::option::Option<crate::model::AutoScalingPolicyState>,
/// <p>The reason for a change in status.</p>
pub state_change_reason: std::option::Option<crate::model::AutoScalingPolicyStateChangeReason>,
}
impl AutoScalingPolicyStatus {
/// <p>Indicates the status of the automatic scaling policy.</p>
pub fn state(&self) -> std::option::Option<&crate::model::AutoScalingPolicyState> {
self.state.as_ref()
}
/// <p>The reason for a change in status.</p>
pub fn state_change_reason(
&self,
) -> std::option::Option<&crate::model::AutoScalingPolicyStateChangeReason> {
self.state_change_reason.as_ref()
}
}
impl std::fmt::Debug for AutoScalingPolicyStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AutoScalingPolicyStatus");
formatter.field("state", &self.state);
formatter.field("state_change_reason", &self.state_change_reason);
formatter.finish()
}
}
/// See [`AutoScalingPolicyStatus`](crate::model::AutoScalingPolicyStatus)
pub mod auto_scaling_policy_status {
/// A builder for [`AutoScalingPolicyStatus`](crate::model::AutoScalingPolicyStatus)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) state: std::option::Option<crate::model::AutoScalingPolicyState>,
pub(crate) state_change_reason:
std::option::Option<crate::model::AutoScalingPolicyStateChangeReason>,
}
impl Builder {
/// <p>Indicates the status of the automatic scaling policy.</p>
pub fn state(mut self, input: crate::model::AutoScalingPolicyState) -> Self {
self.state = Some(input);
self
}
/// <p>Indicates the status of the automatic scaling policy.</p>
pub fn set_state(
mut self,
input: std::option::Option<crate::model::AutoScalingPolicyState>,
) -> Self {
self.state = input;
self
}
/// <p>The reason for a change in status.</p>
pub fn state_change_reason(
mut self,
input: crate::model::AutoScalingPolicyStateChangeReason,
) -> Self {
self.state_change_reason = Some(input);
self
}
/// <p>The reason for a change in status.</p>
pub fn set_state_change_reason(
mut self,
input: std::option::Option<crate::model::AutoScalingPolicyStateChangeReason>,
) -> Self {
self.state_change_reason = input;
self
}
/// Consumes the builder and constructs a [`AutoScalingPolicyStatus`](crate::model::AutoScalingPolicyStatus)
pub fn build(self) -> crate::model::AutoScalingPolicyStatus {
crate::model::AutoScalingPolicyStatus {
state: self.state,
state_change_reason: self.state_change_reason,
}
}
}
}
impl AutoScalingPolicyStatus {
/// Creates a new builder-style object to manufacture [`AutoScalingPolicyStatus`](crate::model::AutoScalingPolicyStatus)
pub fn builder() -> crate::model::auto_scaling_policy_status::Builder {
crate::model::auto_scaling_policy_status::Builder::default()
}
}
/// <p>The reason for an <code>AutoScalingPolicyStatus</code> change.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AutoScalingPolicyStateChangeReason {
    /// <p>The code indicating the reason for the change in status. <code>USER_REQUEST</code> indicates that the scaling policy status was changed by a user. <code>PROVISION_FAILURE</code> indicates that the status change was because the policy failed to provision. <code>CLEANUP_FAILURE</code> indicates an error.</p>
pub code: std::option::Option<crate::model::AutoScalingPolicyStateChangeReasonCode>,
/// <p>A friendly, more verbose message that accompanies an automatic scaling policy state change.</p>
pub message: std::option::Option<std::string::String>,
}
impl AutoScalingPolicyStateChangeReason {
    /// <p>The code indicating the reason for the change in status. <code>USER_REQUEST</code> indicates that the scaling policy status was changed by a user. <code>PROVISION_FAILURE</code> indicates that the status change was because the policy failed to provision. <code>CLEANUP_FAILURE</code> indicates an error.</p>
pub fn code(
&self,
) -> std::option::Option<&crate::model::AutoScalingPolicyStateChangeReasonCode> {
self.code.as_ref()
}
/// <p>A friendly, more verbose message that accompanies an automatic scaling policy state change.</p>
pub fn message(&self) -> std::option::Option<&str> {
self.message.as_deref()
}
}
impl std::fmt::Debug for AutoScalingPolicyStateChangeReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AutoScalingPolicyStateChangeReason");
formatter.field("code", &self.code);
formatter.field("message", &self.message);
formatter.finish()
}
}
/// See [`AutoScalingPolicyStateChangeReason`](crate::model::AutoScalingPolicyStateChangeReason)
pub mod auto_scaling_policy_state_change_reason {
/// A builder for [`AutoScalingPolicyStateChangeReason`](crate::model::AutoScalingPolicyStateChangeReason)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) code: std::option::Option<crate::model::AutoScalingPolicyStateChangeReasonCode>,
pub(crate) message: std::option::Option<std::string::String>,
}
impl Builder {
        /// <p>The code indicating the reason for the change in status. <code>USER_REQUEST</code> indicates that the scaling policy status was changed by a user. <code>PROVISION_FAILURE</code> indicates that the status change was because the policy failed to provision. <code>CLEANUP_FAILURE</code> indicates an error.</p>
pub fn code(mut self, input: crate::model::AutoScalingPolicyStateChangeReasonCode) -> Self {
self.code = Some(input);
self
}
        /// <p>The code indicating the reason for the change in status. <code>USER_REQUEST</code> indicates that the scaling policy status was changed by a user. <code>PROVISION_FAILURE</code> indicates that the status change was because the policy failed to provision. <code>CLEANUP_FAILURE</code> indicates an error.</p>
pub fn set_code(
mut self,
input: std::option::Option<crate::model::AutoScalingPolicyStateChangeReasonCode>,
) -> Self {
self.code = input;
self
}
/// <p>A friendly, more verbose message that accompanies an automatic scaling policy state change.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
/// <p>A friendly, more verbose message that accompanies an automatic scaling policy state change.</p>
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// Consumes the builder and constructs a [`AutoScalingPolicyStateChangeReason`](crate::model::AutoScalingPolicyStateChangeReason)
pub fn build(self) -> crate::model::AutoScalingPolicyStateChangeReason {
crate::model::AutoScalingPolicyStateChangeReason {
code: self.code,
message: self.message,
}
}
}
}
impl AutoScalingPolicyStateChangeReason {
/// Creates a new builder-style object to manufacture [`AutoScalingPolicyStateChangeReason`](crate::model::AutoScalingPolicyStateChangeReason)
pub fn builder() -> crate::model::auto_scaling_policy_state_change_reason::Builder {
crate::model::auto_scaling_policy_state_change_reason::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum AutoScalingPolicyStateChangeReasonCode {
#[allow(missing_docs)] // documentation missing in model
CleanupFailure,
#[allow(missing_docs)] // documentation missing in model
ProvisionFailure,
#[allow(missing_docs)] // documentation missing in model
UserRequest,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for AutoScalingPolicyStateChangeReasonCode {
fn from(s: &str) -> Self {
match s {
"CLEANUP_FAILURE" => AutoScalingPolicyStateChangeReasonCode::CleanupFailure,
"PROVISION_FAILURE" => AutoScalingPolicyStateChangeReasonCode::ProvisionFailure,
"USER_REQUEST" => AutoScalingPolicyStateChangeReasonCode::UserRequest,
other => AutoScalingPolicyStateChangeReasonCode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for AutoScalingPolicyStateChangeReasonCode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(AutoScalingPolicyStateChangeReasonCode::from(s))
}
}
impl AutoScalingPolicyStateChangeReasonCode {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
AutoScalingPolicyStateChangeReasonCode::CleanupFailure => "CLEANUP_FAILURE",
AutoScalingPolicyStateChangeReasonCode::ProvisionFailure => "PROVISION_FAILURE",
AutoScalingPolicyStateChangeReasonCode::UserRequest => "USER_REQUEST",
AutoScalingPolicyStateChangeReasonCode::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["CLEANUP_FAILURE", "PROVISION_FAILURE", "USER_REQUEST"]
}
}
impl AsRef<str> for AutoScalingPolicyStateChangeReasonCode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
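// Illustration only (not generated code): how the reason-code enum round-trips
// between its wire string and its variants, including the forward-compatible
// `Unknown` case for values this crate version does not know about.
#[cfg(test)]
mod auto_scaling_policy_state_change_reason_code_sketch {
    use super::AutoScalingPolicyStateChangeReasonCode;

    #[test]
    fn round_trips_known_and_unknown_values() {
        let known = AutoScalingPolicyStateChangeReasonCode::from("PROVISION_FAILURE");
        assert_eq!(known, AutoScalingPolicyStateChangeReasonCode::ProvisionFailure);
        assert_eq!(known.as_str(), "PROVISION_FAILURE");

        // Unrecognized strings are preserved rather than rejected.
        let unknown = AutoScalingPolicyStateChangeReasonCode::from("SOME_FUTURE_CODE");
        assert_eq!(unknown.as_str(), "SOME_FUTURE_CODE");
    }
}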
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum AutoScalingPolicyState {
#[allow(missing_docs)] // documentation missing in model
Attached,
#[allow(missing_docs)] // documentation missing in model
Attaching,
#[allow(missing_docs)] // documentation missing in model
Detached,
#[allow(missing_docs)] // documentation missing in model
Detaching,
#[allow(missing_docs)] // documentation missing in model
Failed,
#[allow(missing_docs)] // documentation missing in model
Pending,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for AutoScalingPolicyState {
fn from(s: &str) -> Self {
match s {
"ATTACHED" => AutoScalingPolicyState::Attached,
"ATTACHING" => AutoScalingPolicyState::Attaching,
"DETACHED" => AutoScalingPolicyState::Detached,
"DETACHING" => AutoScalingPolicyState::Detaching,
"FAILED" => AutoScalingPolicyState::Failed,
"PENDING" => AutoScalingPolicyState::Pending,
other => AutoScalingPolicyState::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for AutoScalingPolicyState {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(AutoScalingPolicyState::from(s))
}
}
impl AutoScalingPolicyState {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
AutoScalingPolicyState::Attached => "ATTACHED",
AutoScalingPolicyState::Attaching => "ATTACHING",
AutoScalingPolicyState::Detached => "DETACHED",
AutoScalingPolicyState::Detaching => "DETACHING",
AutoScalingPolicyState::Failed => "FAILED",
AutoScalingPolicyState::Pending => "PENDING",
AutoScalingPolicyState::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"ATTACHED",
"ATTACHING",
"DETACHED",
"DETACHING",
"FAILED",
"PENDING",
]
}
}
impl AsRef<str> for AutoScalingPolicyState {
fn as_ref(&self) -> &str {
self.as_str()
}
}
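// Illustration only (not generated code): parsing an `AutoScalingPolicyState`
// from its wire string via `FromStr`. Parsing is infallible, so `unwrap` cannot
// panic here.
#[cfg(test)]
mod auto_scaling_policy_state_sketch {
    use super::AutoScalingPolicyState;

    #[test]
    fn parses_from_wire_string() {
        let state: AutoScalingPolicyState = "ATTACHED".parse().unwrap();
        assert_eq!(state, AutoScalingPolicyState::Attached);
        // `values` lists only the variants known at code-generation time.
        assert!(AutoScalingPolicyState::values().contains(&"PENDING"));
    }
}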
/// <p>Modify the size or configurations of an instance group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceGroupModifyConfig {
/// <p>Unique ID of the instance group to modify.</p>
pub instance_group_id: std::option::Option<std::string::String>,
/// <p>Target size for the instance group.</p>
pub instance_count: std::option::Option<i32>,
/// <p>The EC2 InstanceIds to terminate. After you terminate the instances, the instance group will not return to its original requested size.</p>
pub ec2_instance_ids_to_terminate: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>Policy for customizing shrink operations.</p>
pub shrink_policy: std::option::Option<crate::model::ShrinkPolicy>,
/// <p>Type of reconfiguration requested. Valid values are MERGE and OVERWRITE.</p>
pub reconfiguration_type: std::option::Option<crate::model::ReconfigurationType>,
/// <p>A list of new or modified configurations to apply for an instance group.</p>
pub configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
}
impl InstanceGroupModifyConfig {
/// <p>Unique ID of the instance group to modify.</p>
pub fn instance_group_id(&self) -> std::option::Option<&str> {
self.instance_group_id.as_deref()
}
/// <p>Target size for the instance group.</p>
pub fn instance_count(&self) -> std::option::Option<i32> {
self.instance_count
}
/// <p>The EC2 InstanceIds to terminate. After you terminate the instances, the instance group will not return to its original requested size.</p>
pub fn ec2_instance_ids_to_terminate(&self) -> std::option::Option<&[std::string::String]> {
self.ec2_instance_ids_to_terminate.as_deref()
}
/// <p>Policy for customizing shrink operations.</p>
pub fn shrink_policy(&self) -> std::option::Option<&crate::model::ShrinkPolicy> {
self.shrink_policy.as_ref()
}
/// <p>Type of reconfiguration requested. Valid values are MERGE and OVERWRITE.</p>
pub fn reconfiguration_type(&self) -> std::option::Option<&crate::model::ReconfigurationType> {
self.reconfiguration_type.as_ref()
}
/// <p>A list of new or modified configurations to apply for an instance group.</p>
pub fn configurations(&self) -> std::option::Option<&[crate::model::Configuration]> {
self.configurations.as_deref()
}
}
impl std::fmt::Debug for InstanceGroupModifyConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceGroupModifyConfig");
formatter.field("instance_group_id", &self.instance_group_id);
formatter.field("instance_count", &self.instance_count);
formatter.field(
"ec2_instance_ids_to_terminate",
&self.ec2_instance_ids_to_terminate,
);
formatter.field("shrink_policy", &self.shrink_policy);
formatter.field("reconfiguration_type", &self.reconfiguration_type);
formatter.field("configurations", &self.configurations);
formatter.finish()
}
}
/// See [`InstanceGroupModifyConfig`](crate::model::InstanceGroupModifyConfig)
pub mod instance_group_modify_config {
/// A builder for [`InstanceGroupModifyConfig`](crate::model::InstanceGroupModifyConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instance_group_id: std::option::Option<std::string::String>,
pub(crate) instance_count: std::option::Option<i32>,
pub(crate) ec2_instance_ids_to_terminate:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) shrink_policy: std::option::Option<crate::model::ShrinkPolicy>,
pub(crate) reconfiguration_type: std::option::Option<crate::model::ReconfigurationType>,
pub(crate) configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
}
impl Builder {
/// <p>Unique ID of the instance group to modify.</p>
pub fn instance_group_id(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_group_id = Some(input.into());
self
}
/// <p>Unique ID of the instance group to modify.</p>
pub fn set_instance_group_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_group_id = input;
self
}
/// <p>Target size for the instance group.</p>
pub fn instance_count(mut self, input: i32) -> Self {
self.instance_count = Some(input);
self
}
/// <p>Target size for the instance group.</p>
pub fn set_instance_count(mut self, input: std::option::Option<i32>) -> Self {
self.instance_count = input;
self
}
/// Appends an item to `ec2_instance_ids_to_terminate`.
///
/// To override the contents of this collection use [`set_ec2_instance_ids_to_terminate`](Self::set_ec2_instance_ids_to_terminate).
///
/// <p>The EC2 InstanceIds to terminate. After you terminate the instances, the instance group will not return to its original requested size.</p>
pub fn ec2_instance_ids_to_terminate(
mut self,
input: impl Into<std::string::String>,
) -> Self {
let mut v = self.ec2_instance_ids_to_terminate.unwrap_or_default();
v.push(input.into());
self.ec2_instance_ids_to_terminate = Some(v);
self
}
/// <p>The EC2 InstanceIds to terminate. After you terminate the instances, the instance group will not return to its original requested size.</p>
pub fn set_ec2_instance_ids_to_terminate(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.ec2_instance_ids_to_terminate = input;
self
}
/// <p>Policy for customizing shrink operations.</p>
pub fn shrink_policy(mut self, input: crate::model::ShrinkPolicy) -> Self {
self.shrink_policy = Some(input);
self
}
/// <p>Policy for customizing shrink operations.</p>
pub fn set_shrink_policy(
mut self,
input: std::option::Option<crate::model::ShrinkPolicy>,
) -> Self {
self.shrink_policy = input;
self
}
/// <p>Type of reconfiguration requested. Valid values are MERGE and OVERWRITE.</p>
pub fn reconfiguration_type(mut self, input: crate::model::ReconfigurationType) -> Self {
self.reconfiguration_type = Some(input);
self
}
/// <p>Type of reconfiguration requested. Valid values are MERGE and OVERWRITE.</p>
pub fn set_reconfiguration_type(
mut self,
input: std::option::Option<crate::model::ReconfigurationType>,
) -> Self {
self.reconfiguration_type = input;
self
}
/// Appends an item to `configurations`.
///
/// To override the contents of this collection use [`set_configurations`](Self::set_configurations).
///
/// <p>A list of new or modified configurations to apply for an instance group.</p>
pub fn configurations(mut self, input: crate::model::Configuration) -> Self {
let mut v = self.configurations.unwrap_or_default();
v.push(input);
self.configurations = Some(v);
self
}
/// <p>A list of new or modified configurations to apply for an instance group.</p>
pub fn set_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
) -> Self {
self.configurations = input;
self
}
/// Consumes the builder and constructs a [`InstanceGroupModifyConfig`](crate::model::InstanceGroupModifyConfig)
pub fn build(self) -> crate::model::InstanceGroupModifyConfig {
crate::model::InstanceGroupModifyConfig {
instance_group_id: self.instance_group_id,
instance_count: self.instance_count,
ec2_instance_ids_to_terminate: self.ec2_instance_ids_to_terminate,
shrink_policy: self.shrink_policy,
reconfiguration_type: self.reconfiguration_type,
configurations: self.configurations,
}
}
}
}
impl InstanceGroupModifyConfig {
/// Creates a new builder-style object to manufacture [`InstanceGroupModifyConfig`](crate::model::InstanceGroupModifyConfig)
pub fn builder() -> crate::model::instance_group_modify_config::Builder {
crate::model::instance_group_modify_config::Builder::default()
}
}
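// Illustration only: a minimal sketch of building an `InstanceGroupModifyConfig`
// with the builder above. The instance group and EC2 instance IDs are
// hypothetical placeholder values.
#[cfg(test)]
mod instance_group_modify_config_usage_sketch {
    #[test]
    fn builds_resize_request() {
        let modify = crate::model::InstanceGroupModifyConfig::builder()
            .instance_group_id("ig-EXAMPLE")
            .instance_count(4)
            // Repeated calls append to the list; `set_*` would replace it wholesale.
            .ec2_instance_ids_to_terminate("i-0example1")
            .ec2_instance_ids_to_terminate("i-0example2")
            .build();
        assert_eq!(modify.instance_group_id(), Some("ig-EXAMPLE"));
        assert_eq!(modify.instance_count(), Some(4));
        assert_eq!(
            modify.ec2_instance_ids_to_terminate().map(|ids| ids.len()),
            Some(2)
        );
    }
}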
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ReconfigurationType {
#[allow(missing_docs)] // documentation missing in model
Merge,
#[allow(missing_docs)] // documentation missing in model
Overwrite,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ReconfigurationType {
fn from(s: &str) -> Self {
match s {
"MERGE" => ReconfigurationType::Merge,
"OVERWRITE" => ReconfigurationType::Overwrite,
other => ReconfigurationType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ReconfigurationType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ReconfigurationType::from(s))
}
}
impl ReconfigurationType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ReconfigurationType::Merge => "MERGE",
ReconfigurationType::Overwrite => "OVERWRITE",
ReconfigurationType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["MERGE", "OVERWRITE"]
}
}
impl AsRef<str> for ReconfigurationType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
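// Illustration only (not generated code): the reconfiguration type follows the
// same string-backed pattern as the other enums in this module.
#[cfg(test)]
mod reconfiguration_type_sketch {
    use super::ReconfigurationType;

    #[test]
    fn exposes_known_wire_values() {
        assert_eq!(ReconfigurationType::from("MERGE"), ReconfigurationType::Merge);
        assert_eq!(ReconfigurationType::Overwrite.as_str(), "OVERWRITE");
        assert_eq!(ReconfigurationType::values(), &["MERGE", "OVERWRITE"]);
    }
}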
/// <p>Policy for customizing shrink operations. Allows configuration of decommissioning timeout and targeted instance shrinking.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ShrinkPolicy {
/// <p>The desired timeout for decommissioning an instance. Overrides the default YARN decommissioning timeout.</p>
pub decommission_timeout: std::option::Option<i32>,
/// <p>Custom policy for requesting termination protection or termination of specific instances when shrinking an instance group.</p>
pub instance_resize_policy: std::option::Option<crate::model::InstanceResizePolicy>,
}
impl ShrinkPolicy {
/// <p>The desired timeout for decommissioning an instance. Overrides the default YARN decommissioning timeout.</p>
pub fn decommission_timeout(&self) -> std::option::Option<i32> {
self.decommission_timeout
}
/// <p>Custom policy for requesting termination protection or termination of specific instances when shrinking an instance group.</p>
pub fn instance_resize_policy(
&self,
) -> std::option::Option<&crate::model::InstanceResizePolicy> {
self.instance_resize_policy.as_ref()
}
}
impl std::fmt::Debug for ShrinkPolicy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ShrinkPolicy");
formatter.field("decommission_timeout", &self.decommission_timeout);
formatter.field("instance_resize_policy", &self.instance_resize_policy);
formatter.finish()
}
}
/// See [`ShrinkPolicy`](crate::model::ShrinkPolicy)
pub mod shrink_policy {
/// A builder for [`ShrinkPolicy`](crate::model::ShrinkPolicy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) decommission_timeout: std::option::Option<i32>,
pub(crate) instance_resize_policy: std::option::Option<crate::model::InstanceResizePolicy>,
}
impl Builder {
/// <p>The desired timeout for decommissioning an instance. Overrides the default YARN decommissioning timeout.</p>
pub fn decommission_timeout(mut self, input: i32) -> Self {
self.decommission_timeout = Some(input);
self
}
/// <p>The desired timeout for decommissioning an instance. Overrides the default YARN decommissioning timeout.</p>
pub fn set_decommission_timeout(mut self, input: std::option::Option<i32>) -> Self {
self.decommission_timeout = input;
self
}
/// <p>Custom policy for requesting termination protection or termination of specific instances when shrinking an instance group.</p>
pub fn instance_resize_policy(mut self, input: crate::model::InstanceResizePolicy) -> Self {
self.instance_resize_policy = Some(input);
self
}
/// <p>Custom policy for requesting termination protection or termination of specific instances when shrinking an instance group.</p>
pub fn set_instance_resize_policy(
mut self,
input: std::option::Option<crate::model::InstanceResizePolicy>,
) -> Self {
self.instance_resize_policy = input;
self
}
/// Consumes the builder and constructs a [`ShrinkPolicy`](crate::model::ShrinkPolicy)
pub fn build(self) -> crate::model::ShrinkPolicy {
crate::model::ShrinkPolicy {
decommission_timeout: self.decommission_timeout,
instance_resize_policy: self.instance_resize_policy,
}
}
}
}
impl ShrinkPolicy {
/// Creates a new builder-style object to manufacture [`ShrinkPolicy`](crate::model::ShrinkPolicy)
pub fn builder() -> crate::model::shrink_policy::Builder {
crate::model::shrink_policy::Builder::default()
}
}
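// Illustration only: a minimal sketch of composing a `ShrinkPolicy` with a
// nested `InstanceResizePolicy`. The instance ID and timeout value are
// hypothetical.
#[cfg(test)]
mod shrink_policy_usage_sketch {
    #[test]
    fn builds_policy_with_nested_resize_policy() {
        let policy = crate::model::ShrinkPolicy::builder()
            .decommission_timeout(300)
            .instance_resize_policy(
                crate::model::InstanceResizePolicy::builder()
                    .instances_to_protect("i-0protectme")
                    .build(),
            )
            .build();
        assert_eq!(policy.decommission_timeout(), Some(300));
        assert!(policy.instance_resize_policy().is_some());
    }
}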
/// <p>Custom policy for requesting termination protection or termination of specific instances when shrinking an instance group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceResizePolicy {
/// <p>Specific list of instances to be terminated when shrinking an instance group.</p>
pub instances_to_terminate: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>Specific list of instances to be protected when shrinking an instance group.</p>
pub instances_to_protect: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>Decommissioning timeout override for the specific list of instances to be terminated.</p>
pub instance_termination_timeout: std::option::Option<i32>,
}
impl InstanceResizePolicy {
/// <p>Specific list of instances to be terminated when shrinking an instance group.</p>
pub fn instances_to_terminate(&self) -> std::option::Option<&[std::string::String]> {
self.instances_to_terminate.as_deref()
}
/// <p>Specific list of instances to be protected when shrinking an instance group.</p>
pub fn instances_to_protect(&self) -> std::option::Option<&[std::string::String]> {
self.instances_to_protect.as_deref()
}
/// <p>Decommissioning timeout override for the specific list of instances to be terminated.</p>
pub fn instance_termination_timeout(&self) -> std::option::Option<i32> {
self.instance_termination_timeout
}
}
impl std::fmt::Debug for InstanceResizePolicy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceResizePolicy");
formatter.field("instances_to_terminate", &self.instances_to_terminate);
formatter.field("instances_to_protect", &self.instances_to_protect);
formatter.field(
"instance_termination_timeout",
&self.instance_termination_timeout,
);
formatter.finish()
}
}
/// See [`InstanceResizePolicy`](crate::model::InstanceResizePolicy)
pub mod instance_resize_policy {
/// A builder for [`InstanceResizePolicy`](crate::model::InstanceResizePolicy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instances_to_terminate: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) instances_to_protect: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) instance_termination_timeout: std::option::Option<i32>,
}
impl Builder {
/// Appends an item to `instances_to_terminate`.
///
/// To override the contents of this collection use [`set_instances_to_terminate`](Self::set_instances_to_terminate).
///
/// <p>Specific list of instances to be terminated when shrinking an instance group.</p>
pub fn instances_to_terminate(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.instances_to_terminate.unwrap_or_default();
v.push(input.into());
self.instances_to_terminate = Some(v);
self
}
/// <p>Specific list of instances to be terminated when shrinking an instance group.</p>
pub fn set_instances_to_terminate(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.instances_to_terminate = input;
self
}
/// Appends an item to `instances_to_protect`.
///
/// To override the contents of this collection use [`set_instances_to_protect`](Self::set_instances_to_protect).
///
/// <p>Specific list of instances to be protected when shrinking an instance group.</p>
pub fn instances_to_protect(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.instances_to_protect.unwrap_or_default();
v.push(input.into());
self.instances_to_protect = Some(v);
self
}
/// <p>Specific list of instances to be protected when shrinking an instance group.</p>
pub fn set_instances_to_protect(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.instances_to_protect = input;
self
}
/// <p>Decommissioning timeout override for the specific list of instances to be terminated.</p>
pub fn instance_termination_timeout(mut self, input: i32) -> Self {
self.instance_termination_timeout = Some(input);
self
}
/// <p>Decommissioning timeout override for the specific list of instances to be terminated.</p>
pub fn set_instance_termination_timeout(mut self, input: std::option::Option<i32>) -> Self {
self.instance_termination_timeout = input;
self
}
/// Consumes the builder and constructs a [`InstanceResizePolicy`](crate::model::InstanceResizePolicy)
pub fn build(self) -> crate::model::InstanceResizePolicy {
crate::model::InstanceResizePolicy {
instances_to_terminate: self.instances_to_terminate,
instances_to_protect: self.instances_to_protect,
instance_termination_timeout: self.instance_termination_timeout,
}
}
}
}
impl InstanceResizePolicy {
/// Creates a new builder-style object to manufacture [`InstanceResizePolicy`](crate::model::InstanceResizePolicy)
pub fn builder() -> crate::model::instance_resize_policy::Builder {
crate::model::instance_resize_policy::Builder::default()
}
}
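// Illustration only: the list-valued builder methods above append one item per
// call, while the corresponding `set_*` methods replace the whole collection.
// Instance IDs are hypothetical.
#[cfg(test)]
mod instance_resize_policy_usage_sketch {
    #[test]
    fn append_then_override() {
        let appended = crate::model::InstanceResizePolicy::builder()
            .instances_to_terminate("i-0aaa")
            .instances_to_terminate("i-0bbb")
            .build();
        assert_eq!(appended.instances_to_terminate().map(|v| v.len()), Some(2));

        let overridden = crate::model::InstanceResizePolicy::builder()
            .instances_to_terminate("i-0aaa")
            .set_instances_to_terminate(Some(vec!["i-0ccc".to_string()]))
            .build();
        assert_eq!(overridden.instances_to_terminate().map(|v| v.len()), Some(1));
    }
}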
/// <p>Configuration parameters for an instance fleet modification request.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceFleetModifyConfig {
/// <p>A unique identifier for the instance fleet.</p>
pub instance_fleet_id: std::option::Option<std::string::String>,
    /// <p>The target capacity of On-Demand units for the instance fleet. For more information, see <code>InstanceFleetConfig$TargetOnDemandCapacity</code>.</p>
pub target_on_demand_capacity: std::option::Option<i32>,
/// <p>The target capacity of Spot units for the instance fleet. For more information, see <code>InstanceFleetConfig$TargetSpotCapacity</code>.</p>
pub target_spot_capacity: std::option::Option<i32>,
}
impl InstanceFleetModifyConfig {
/// <p>A unique identifier for the instance fleet.</p>
pub fn instance_fleet_id(&self) -> std::option::Option<&str> {
self.instance_fleet_id.as_deref()
}
    /// <p>The target capacity of On-Demand units for the instance fleet. For more information, see <code>InstanceFleetConfig$TargetOnDemandCapacity</code>.</p>
pub fn target_on_demand_capacity(&self) -> std::option::Option<i32> {
self.target_on_demand_capacity
}
/// <p>The target capacity of Spot units for the instance fleet. For more information, see <code>InstanceFleetConfig$TargetSpotCapacity</code>.</p>
pub fn target_spot_capacity(&self) -> std::option::Option<i32> {
self.target_spot_capacity
}
}
impl std::fmt::Debug for InstanceFleetModifyConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceFleetModifyConfig");
formatter.field("instance_fleet_id", &self.instance_fleet_id);
formatter.field("target_on_demand_capacity", &self.target_on_demand_capacity);
formatter.field("target_spot_capacity", &self.target_spot_capacity);
formatter.finish()
}
}
/// See [`InstanceFleetModifyConfig`](crate::model::InstanceFleetModifyConfig)
pub mod instance_fleet_modify_config {
/// A builder for [`InstanceFleetModifyConfig`](crate::model::InstanceFleetModifyConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instance_fleet_id: std::option::Option<std::string::String>,
pub(crate) target_on_demand_capacity: std::option::Option<i32>,
pub(crate) target_spot_capacity: std::option::Option<i32>,
}
impl Builder {
/// <p>A unique identifier for the instance fleet.</p>
pub fn instance_fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_fleet_id = Some(input.into());
self
}
/// <p>A unique identifier for the instance fleet.</p>
pub fn set_instance_fleet_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_fleet_id = input;
self
}
        /// <p>The target capacity of On-Demand units for the instance fleet. For more information, see <code>InstanceFleetConfig$TargetOnDemandCapacity</code>.</p>
pub fn target_on_demand_capacity(mut self, input: i32) -> Self {
self.target_on_demand_capacity = Some(input);
self
}
        /// <p>The target capacity of On-Demand units for the instance fleet. For more information, see <code>InstanceFleetConfig$TargetOnDemandCapacity</code>.</p>
pub fn set_target_on_demand_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.target_on_demand_capacity = input;
self
}
/// <p>The target capacity of Spot units for the instance fleet. For more information, see <code>InstanceFleetConfig$TargetSpotCapacity</code>.</p>
pub fn target_spot_capacity(mut self, input: i32) -> Self {
self.target_spot_capacity = Some(input);
self
}
/// <p>The target capacity of Spot units for the instance fleet. For more information, see <code>InstanceFleetConfig$TargetSpotCapacity</code>.</p>
pub fn set_target_spot_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.target_spot_capacity = input;
self
}
/// Consumes the builder and constructs a [`InstanceFleetModifyConfig`](crate::model::InstanceFleetModifyConfig)
pub fn build(self) -> crate::model::InstanceFleetModifyConfig {
crate::model::InstanceFleetModifyConfig {
instance_fleet_id: self.instance_fleet_id,
target_on_demand_capacity: self.target_on_demand_capacity,
target_spot_capacity: self.target_spot_capacity,
}
}
}
}
impl InstanceFleetModifyConfig {
/// Creates a new builder-style object to manufacture [`InstanceFleetModifyConfig`](crate::model::InstanceFleetModifyConfig)
pub fn builder() -> crate::model::instance_fleet_modify_config::Builder {
crate::model::instance_fleet_modify_config::Builder::default()
}
}
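// Illustration only: resizing an instance fleet by adjusting its target
// capacities. The fleet ID and capacity numbers below are hypothetical.
#[cfg(test)]
mod instance_fleet_modify_config_usage_sketch {
    #[test]
    fn builds_capacity_change() {
        let modify = crate::model::InstanceFleetModifyConfig::builder()
            .instance_fleet_id("if-EXAMPLE")
            .target_on_demand_capacity(2)
            .target_spot_capacity(6)
            .build();
        assert_eq!(modify.instance_fleet_id(), Some("if-EXAMPLE"));
        assert_eq!(modify.target_on_demand_capacity(), Some(2));
        assert_eq!(modify.target_spot_capacity(), Some(6));
    }
}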
/// <p>Details for an Amazon EMR Studio session mapping. The details do not include the time the session mapping was last modified.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SessionMappingSummary {
/// <p>The ID of the Amazon EMR Studio.</p>
pub studio_id: std::option::Option<std::string::String>,
/// <p>The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store.</p>
pub identity_id: std::option::Option<std::string::String>,
/// <p>The name of the user or group. For more information, see <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName">UserName</a> and <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName">DisplayName</a> in the <i>Amazon Web Services SSO Identity Store API Reference</i>.</p>
pub identity_name: std::option::Option<std::string::String>,
/// <p>Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.</p>
pub identity_type: std::option::Option<crate::model::IdentityType>,
/// <p>The Amazon Resource Name (ARN) of the session policy associated with the user or group.</p>
pub session_policy_arn: std::option::Option<std::string::String>,
/// <p>The time the session mapping was created.</p>
pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl SessionMappingSummary {
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn studio_id(&self) -> std::option::Option<&str> {
self.studio_id.as_deref()
}
/// <p>The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store.</p>
pub fn identity_id(&self) -> std::option::Option<&str> {
self.identity_id.as_deref()
}
/// <p>The name of the user or group. For more information, see <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName">UserName</a> and <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName">DisplayName</a> in the <i>Amazon Web Services SSO Identity Store API Reference</i>.</p>
pub fn identity_name(&self) -> std::option::Option<&str> {
self.identity_name.as_deref()
}
/// <p>Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.</p>
pub fn identity_type(&self) -> std::option::Option<&crate::model::IdentityType> {
self.identity_type.as_ref()
}
/// <p>The Amazon Resource Name (ARN) of the session policy associated with the user or group.</p>
pub fn session_policy_arn(&self) -> std::option::Option<&str> {
self.session_policy_arn.as_deref()
}
/// <p>The time the session mapping was created.</p>
pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_time.as_ref()
}
}
impl std::fmt::Debug for SessionMappingSummary {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("SessionMappingSummary");
formatter.field("studio_id", &self.studio_id);
formatter.field("identity_id", &self.identity_id);
formatter.field("identity_name", &self.identity_name);
formatter.field("identity_type", &self.identity_type);
formatter.field("session_policy_arn", &self.session_policy_arn);
formatter.field("creation_time", &self.creation_time);
formatter.finish()
}
}
/// See [`SessionMappingSummary`](crate::model::SessionMappingSummary)
pub mod session_mapping_summary {
/// A builder for [`SessionMappingSummary`](crate::model::SessionMappingSummary)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) studio_id: std::option::Option<std::string::String>,
pub(crate) identity_id: std::option::Option<std::string::String>,
pub(crate) identity_name: std::option::Option<std::string::String>,
pub(crate) identity_type: std::option::Option<crate::model::IdentityType>,
pub(crate) session_policy_arn: std::option::Option<std::string::String>,
pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn studio_id(mut self, input: impl Into<std::string::String>) -> Self {
self.studio_id = Some(input.into());
self
}
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn set_studio_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.studio_id = input;
self
}
/// <p>The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store.</p>
pub fn identity_id(mut self, input: impl Into<std::string::String>) -> Self {
self.identity_id = Some(input.into());
self
}
/// <p>The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store.</p>
pub fn set_identity_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.identity_id = input;
self
}
/// <p>The name of the user or group. For more information, see <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName">UserName</a> and <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName">DisplayName</a> in the <i>Amazon Web Services SSO Identity Store API Reference</i>.</p>
pub fn identity_name(mut self, input: impl Into<std::string::String>) -> Self {
self.identity_name = Some(input.into());
self
}
/// <p>The name of the user or group. For more information, see <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName">UserName</a> and <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName">DisplayName</a> in the <i>Amazon Web Services SSO Identity Store API Reference</i>.</p>
pub fn set_identity_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.identity_name = input;
self
}
/// <p>Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.</p>
pub fn identity_type(mut self, input: crate::model::IdentityType) -> Self {
self.identity_type = Some(input);
self
}
/// <p>Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.</p>
pub fn set_identity_type(
mut self,
input: std::option::Option<crate::model::IdentityType>,
) -> Self {
self.identity_type = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the session policy associated with the user or group.</p>
pub fn session_policy_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.session_policy_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the session policy associated with the user or group.</p>
pub fn set_session_policy_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.session_policy_arn = input;
self
}
/// <p>The time the session mapping was created.</p>
pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_time = Some(input);
self
}
/// <p>The time the session mapping was created.</p>
pub fn set_creation_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_time = input;
self
}
/// Consumes the builder and constructs a [`SessionMappingSummary`](crate::model::SessionMappingSummary)
pub fn build(self) -> crate::model::SessionMappingSummary {
crate::model::SessionMappingSummary {
studio_id: self.studio_id,
identity_id: self.identity_id,
identity_name: self.identity_name,
identity_type: self.identity_type,
session_policy_arn: self.session_policy_arn,
creation_time: self.creation_time,
}
}
}
}
impl SessionMappingSummary {
/// Creates a new builder-style object to manufacture [`SessionMappingSummary`](crate::model::SessionMappingSummary)
pub fn builder() -> crate::model::session_mapping_summary::Builder {
crate::model::session_mapping_summary::Builder::default()
}
}
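// Illustration only: populating the string-valued fields of a
// `SessionMappingSummary`. The Studio ID and user name are hypothetical; any
// field that is never set stays `None`.
#[cfg(test)]
mod session_mapping_summary_usage_sketch {
    #[test]
    fn builds_partial_summary() {
        let summary = crate::model::SessionMappingSummary::builder()
            .studio_id("es-EXAMPLE")
            .identity_name("example-studio-user")
            .build();
        assert_eq!(summary.studio_id(), Some("es-EXAMPLE"));
        assert_eq!(summary.identity_name(), Some("example-studio-user"));
        assert!(summary.identity_type().is_none());
        assert!(summary.creation_time().is_none());
    }
}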
/// <p>Details for an Amazon EMR Studio, including ID, Name, VPC, and Description. The details do not include subnets, IAM roles, security groups, or tags associated with the Studio.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StudioSummary {
/// <p>The ID of the Amazon EMR Studio.</p>
pub studio_id: std::option::Option<std::string::String>,
/// <p>The name of the Amazon EMR Studio.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The ID of the Virtual Private Cloud (Amazon VPC) associated with the Amazon EMR Studio.</p>
pub vpc_id: std::option::Option<std::string::String>,
/// <p>The detailed description of the Amazon EMR Studio.</p>
pub description: std::option::Option<std::string::String>,
/// <p>The unique access URL of the Amazon EMR Studio.</p>
pub url: std::option::Option<std::string::String>,
/// <p>Specifies whether the Studio authenticates users using IAM or Amazon Web Services SSO.</p>
pub auth_mode: std::option::Option<crate::model::AuthMode>,
/// <p>The time when the Amazon EMR Studio was created.</p>
pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl StudioSummary {
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn studio_id(&self) -> std::option::Option<&str> {
self.studio_id.as_deref()
}
/// <p>The name of the Amazon EMR Studio.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The ID of the Virtual Private Cloud (Amazon VPC) associated with the Amazon EMR Studio.</p>
pub fn vpc_id(&self) -> std::option::Option<&str> {
self.vpc_id.as_deref()
}
/// <p>The detailed description of the Amazon EMR Studio.</p>
pub fn description(&self) -> std::option::Option<&str> {
self.description.as_deref()
}
/// <p>The unique access URL of the Amazon EMR Studio.</p>
pub fn url(&self) -> std::option::Option<&str> {
self.url.as_deref()
}
/// <p>Specifies whether the Studio authenticates users using IAM or Amazon Web Services SSO.</p>
pub fn auth_mode(&self) -> std::option::Option<&crate::model::AuthMode> {
self.auth_mode.as_ref()
}
/// <p>The time when the Amazon EMR Studio was created.</p>
pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_time.as_ref()
}
}
impl std::fmt::Debug for StudioSummary {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StudioSummary");
formatter.field("studio_id", &self.studio_id);
formatter.field("name", &self.name);
formatter.field("vpc_id", &self.vpc_id);
formatter.field("description", &self.description);
formatter.field("url", &self.url);
formatter.field("auth_mode", &self.auth_mode);
formatter.field("creation_time", &self.creation_time);
formatter.finish()
}
}
/// See [`StudioSummary`](crate::model::StudioSummary)
pub mod studio_summary {
/// A builder for [`StudioSummary`](crate::model::StudioSummary)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) studio_id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) vpc_id: std::option::Option<std::string::String>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) url: std::option::Option<std::string::String>,
pub(crate) auth_mode: std::option::Option<crate::model::AuthMode>,
pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn studio_id(mut self, input: impl Into<std::string::String>) -> Self {
self.studio_id = Some(input.into());
self
}
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn set_studio_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.studio_id = input;
self
}
/// <p>The name of the Amazon EMR Studio.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the Amazon EMR Studio.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The ID of the Virtual Private Cloud (Amazon VPC) associated with the Amazon EMR Studio.</p>
pub fn vpc_id(mut self, input: impl Into<std::string::String>) -> Self {
self.vpc_id = Some(input.into());
self
}
/// <p>The ID of the Virtual Private Cloud (Amazon VPC) associated with the Amazon EMR Studio.</p>
pub fn set_vpc_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.vpc_id = input;
self
}
/// <p>The detailed description of the Amazon EMR Studio.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
/// <p>The detailed description of the Amazon EMR Studio.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
/// <p>The unique access URL of the Amazon EMR Studio.</p>
pub fn url(mut self, input: impl Into<std::string::String>) -> Self {
self.url = Some(input.into());
self
}
/// <p>The unique access URL of the Amazon EMR Studio.</p>
pub fn set_url(mut self, input: std::option::Option<std::string::String>) -> Self {
self.url = input;
self
}
/// <p>Specifies whether the Studio authenticates users using IAM or Amazon Web Services SSO.</p>
pub fn auth_mode(mut self, input: crate::model::AuthMode) -> Self {
self.auth_mode = Some(input);
self
}
/// <p>Specifies whether the Studio authenticates users using IAM or Amazon Web Services SSO.</p>
pub fn set_auth_mode(mut self, input: std::option::Option<crate::model::AuthMode>) -> Self {
self.auth_mode = input;
self
}
/// <p>The time when the Amazon EMR Studio was created.</p>
pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_time = Some(input);
self
}
/// <p>The time when the Amazon EMR Studio was created.</p>
pub fn set_creation_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_time = input;
self
}
/// Consumes the builder and constructs a [`StudioSummary`](crate::model::StudioSummary)
pub fn build(self) -> crate::model::StudioSummary {
crate::model::StudioSummary {
studio_id: self.studio_id,
name: self.name,
vpc_id: self.vpc_id,
description: self.description,
url: self.url,
auth_mode: self.auth_mode,
creation_time: self.creation_time,
}
}
}
}
impl StudioSummary {
/// Creates a new builder-style object to manufacture [`StudioSummary`](crate::model::StudioSummary)
pub fn builder() -> crate::model::studio_summary::Builder {
crate::model::studio_summary::Builder::default()
}
}
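// Illustration only: a `StudioSummary` built by hand, combining string fields
// with the `AuthMode` enum defined below. All values are hypothetical.
#[cfg(test)]
mod studio_summary_usage_sketch {
    #[test]
    fn builds_summary_with_auth_mode() {
        let summary = crate::model::StudioSummary::builder()
            .studio_id("es-EXAMPLE")
            .name("example-studio")
            .auth_mode(crate::model::AuthMode::Sso)
            .build();
        assert_eq!(summary.studio_id(), Some("es-EXAMPLE"));
        assert_eq!(summary.auth_mode(), Some(&crate::model::AuthMode::Sso));
    }
}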
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum AuthMode {
#[allow(missing_docs)] // documentation missing in model
Iam,
#[allow(missing_docs)] // documentation missing in model
Sso,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for AuthMode {
fn from(s: &str) -> Self {
match s {
"IAM" => AuthMode::Iam,
"SSO" => AuthMode::Sso,
other => AuthMode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for AuthMode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(AuthMode::from(s))
}
}
impl AuthMode {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
AuthMode::Iam => "IAM",
AuthMode::Sso => "SSO",
AuthMode::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["IAM", "SSO"]
}
}
impl AsRef<str> for AuthMode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
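// Illustration only (not generated code): `AuthMode` parses from its wire
// string and exposes it back via `as_str`/`AsRef<str>`.
#[cfg(test)]
mod auth_mode_sketch {
    use super::AuthMode;

    #[test]
    fn parses_and_prints_wire_values() {
        let mode: AuthMode = "SSO".parse().unwrap();
        assert_eq!(mode, AuthMode::Sso);
        assert_eq!(AuthMode::Iam.as_ref(), "IAM");
    }
}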
/// <p>The summary of the cluster step.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StepSummary {
/// <p>The identifier of the cluster step.</p>
pub id: std::option::Option<std::string::String>,
/// <p>The name of the cluster step.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The Hadoop job configuration of the cluster step.</p>
pub config: std::option::Option<crate::model::HadoopStepConfig>,
/// <p>The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility.</p>
pub action_on_failure: std::option::Option<crate::model::ActionOnFailure>,
/// <p>The current execution status details of the cluster step.</p>
pub status: std::option::Option<crate::model::StepStatus>,
}
impl StepSummary {
/// <p>The identifier of the cluster step.</p>
pub fn id(&self) -> std::option::Option<&str> {
self.id.as_deref()
}
/// <p>The name of the cluster step.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The Hadoop job configuration of the cluster step.</p>
pub fn config(&self) -> std::option::Option<&crate::model::HadoopStepConfig> {
self.config.as_ref()
}
/// <p>The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility.</p>
pub fn action_on_failure(&self) -> std::option::Option<&crate::model::ActionOnFailure> {
self.action_on_failure.as_ref()
}
/// <p>The current execution status details of the cluster step.</p>
pub fn status(&self) -> std::option::Option<&crate::model::StepStatus> {
self.status.as_ref()
}
}
impl std::fmt::Debug for StepSummary {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StepSummary");
formatter.field("id", &self.id);
formatter.field("name", &self.name);
formatter.field("config", &self.config);
formatter.field("action_on_failure", &self.action_on_failure);
formatter.field("status", &self.status);
formatter.finish()
}
}
/// See [`StepSummary`](crate::model::StepSummary)
pub mod step_summary {
/// A builder for [`StepSummary`](crate::model::StepSummary)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) config: std::option::Option<crate::model::HadoopStepConfig>,
pub(crate) action_on_failure: std::option::Option<crate::model::ActionOnFailure>,
pub(crate) status: std::option::Option<crate::model::StepStatus>,
}
impl Builder {
/// <p>The identifier of the cluster step.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.id = Some(input.into());
self
}
/// <p>The identifier of the cluster step.</p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.id = input;
self
}
/// <p>The name of the cluster step.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the cluster step.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The Hadoop job configuration of the cluster step.</p>
pub fn config(mut self, input: crate::model::HadoopStepConfig) -> Self {
self.config = Some(input);
self
}
/// <p>The Hadoop job configuration of the cluster step.</p>
pub fn set_config(
mut self,
input: std::option::Option<crate::model::HadoopStepConfig>,
) -> Self {
self.config = input;
self
}
/// <p>The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility.</p>
pub fn action_on_failure(mut self, input: crate::model::ActionOnFailure) -> Self {
self.action_on_failure = Some(input);
self
}
/// <p>The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility.</p>
pub fn set_action_on_failure(
mut self,
input: std::option::Option<crate::model::ActionOnFailure>,
) -> Self {
self.action_on_failure = input;
self
}
/// <p>The current execution status details of the cluster step.</p>
pub fn status(mut self, input: crate::model::StepStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The current execution status details of the cluster step.</p>
pub fn set_status(mut self, input: std::option::Option<crate::model::StepStatus>) -> Self {
self.status = input;
self
}
/// Consumes the builder and constructs a [`StepSummary`](crate::model::StepSummary)
pub fn build(self) -> crate::model::StepSummary {
crate::model::StepSummary {
id: self.id,
name: self.name,
config: self.config,
action_on_failure: self.action_on_failure,
status: self.status,
}
}
}
}
impl StepSummary {
/// Creates a new builder-style object to manufacture [`StepSummary`](crate::model::StepSummary)
pub fn builder() -> crate::model::step_summary::Builder {
crate::model::step_summary::Builder::default()
}
}
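// Illustration only: a minimal `StepSummary` with a nested (empty) `StepStatus`.
// The step ID and name are hypothetical.
#[cfg(test)]
mod step_summary_usage_sketch {
    #[test]
    fn builds_summary_with_status() {
        let summary = crate::model::StepSummary::builder()
            .id("s-EXAMPLE")
            .name("example step")
            .status(crate::model::StepStatus::builder().build())
            .build();
        assert_eq!(summary.id(), Some("s-EXAMPLE"));
        assert!(summary.status().is_some());
        assert!(summary.action_on_failure().is_none());
    }
}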
/// <p>The execution status details of the cluster step.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StepStatus {
/// <p>The execution state of the cluster step.</p>
pub state: std::option::Option<crate::model::StepState>,
/// <p>The reason for the step execution status change.</p>
pub state_change_reason: std::option::Option<crate::model::StepStateChangeReason>,
/// <p>The details for the step failure including reason, message, and log file path where the root cause was identified.</p>
pub failure_details: std::option::Option<crate::model::FailureDetails>,
/// <p>The timeline of the cluster step status over time.</p>
pub timeline: std::option::Option<crate::model::StepTimeline>,
}
impl StepStatus {
/// <p>The execution state of the cluster step.</p>
pub fn state(&self) -> std::option::Option<&crate::model::StepState> {
self.state.as_ref()
}
/// <p>The reason for the step execution status change.</p>
pub fn state_change_reason(&self) -> std::option::Option<&crate::model::StepStateChangeReason> {
self.state_change_reason.as_ref()
}
/// <p>The details for the step failure including reason, message, and log file path where the root cause was identified.</p>
pub fn failure_details(&self) -> std::option::Option<&crate::model::FailureDetails> {
self.failure_details.as_ref()
}
/// <p>The timeline of the cluster step status over time.</p>
pub fn timeline(&self) -> std::option::Option<&crate::model::StepTimeline> {
self.timeline.as_ref()
}
}
impl std::fmt::Debug for StepStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StepStatus");
formatter.field("state", &self.state);
formatter.field("state_change_reason", &self.state_change_reason);
formatter.field("failure_details", &self.failure_details);
formatter.field("timeline", &self.timeline);
formatter.finish()
}
}
/// See [`StepStatus`](crate::model::StepStatus)
pub mod step_status {
/// A builder for [`StepStatus`](crate::model::StepStatus)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) state: std::option::Option<crate::model::StepState>,
pub(crate) state_change_reason: std::option::Option<crate::model::StepStateChangeReason>,
pub(crate) failure_details: std::option::Option<crate::model::FailureDetails>,
pub(crate) timeline: std::option::Option<crate::model::StepTimeline>,
}
impl Builder {
/// <p>The execution state of the cluster step.</p>
pub fn state(mut self, input: crate::model::StepState) -> Self {
self.state = Some(input);
self
}
/// <p>The execution state of the cluster step.</p>
pub fn set_state(mut self, input: std::option::Option<crate::model::StepState>) -> Self {
self.state = input;
self
}
/// <p>The reason for the step execution status change.</p>
pub fn state_change_reason(mut self, input: crate::model::StepStateChangeReason) -> Self {
self.state_change_reason = Some(input);
self
}
/// <p>The reason for the step execution status change.</p>
pub fn set_state_change_reason(
mut self,
input: std::option::Option<crate::model::StepStateChangeReason>,
) -> Self {
self.state_change_reason = input;
self
}
/// <p>The details for the step failure including reason, message, and log file path where the root cause was identified.</p>
pub fn failure_details(mut self, input: crate::model::FailureDetails) -> Self {
self.failure_details = Some(input);
self
}
/// <p>The details for the step failure including reason, message, and log file path where the root cause was identified.</p>
pub fn set_failure_details(
mut self,
input: std::option::Option<crate::model::FailureDetails>,
) -> Self {
self.failure_details = input;
self
}
/// <p>The timeline of the cluster step status over time.</p>
pub fn timeline(mut self, input: crate::model::StepTimeline) -> Self {
self.timeline = Some(input);
self
}
/// <p>The timeline of the cluster step status over time.</p>
pub fn set_timeline(
mut self,
input: std::option::Option<crate::model::StepTimeline>,
) -> Self {
self.timeline = input;
self
}
/// Consumes the builder and constructs a [`StepStatus`](crate::model::StepStatus)
pub fn build(self) -> crate::model::StepStatus {
crate::model::StepStatus {
state: self.state,
state_change_reason: self.state_change_reason,
failure_details: self.failure_details,
timeline: self.timeline,
}
}
}
}
impl StepStatus {
/// Creates a new builder-style object to manufacture [`StepStatus`](crate::model::StepStatus)
pub fn builder() -> crate::model::step_status::Builder {
crate::model::step_status::Builder::default()
}
}
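// Illustration only: composing a `StepStatus` from the nested types defined
// later in this module; the failure reason text is a hypothetical example.
#[cfg(test)]
mod step_status_usage_sketch {
    #[test]
    fn builds_status_with_failure_details() {
        let status = crate::model::StepStatus::builder()
            .failure_details(
                crate::model::FailureDetails::builder()
                    .reason("Unknown Error")
                    .build(),
            )
            .timeline(crate::model::StepTimeline::builder().build())
            .build();
        assert!(status.state().is_none());
        assert_eq!(
            status.failure_details().and_then(|d| d.reason()),
            Some("Unknown Error")
        );
    }
}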
/// <p>The timeline of the cluster step lifecycle.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StepTimeline {
/// <p>The date and time when the cluster step was created.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date and time when the cluster step execution started.</p>
pub start_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date and time when the cluster step execution completed or failed.</p>
pub end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl StepTimeline {
/// <p>The date and time when the cluster step was created.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
/// <p>The date and time when the cluster step execution started.</p>
pub fn start_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.start_date_time.as_ref()
}
/// <p>The date and time when the cluster step execution completed or failed.</p>
pub fn end_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_date_time.as_ref()
}
}
impl std::fmt::Debug for StepTimeline {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StepTimeline");
formatter.field("creation_date_time", &self.creation_date_time);
formatter.field("start_date_time", &self.start_date_time);
formatter.field("end_date_time", &self.end_date_time);
formatter.finish()
}
}
/// See [`StepTimeline`](crate::model::StepTimeline)
pub mod step_timeline {
/// A builder for [`StepTimeline`](crate::model::StepTimeline)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) start_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The date and time when the cluster step was created.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The date and time when the cluster step was created.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
/// <p>The date and time when the cluster step execution started.</p>
pub fn start_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.start_date_time = Some(input);
self
}
/// <p>The date and time when the cluster step execution started.</p>
pub fn set_start_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.start_date_time = input;
self
}
/// <p>The date and time when the cluster step execution completed or failed.</p>
pub fn end_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_date_time = Some(input);
self
}
/// <p>The date and time when the cluster step execution completed or failed.</p>
pub fn set_end_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_date_time = input;
self
}
/// Consumes the builder and constructs a [`StepTimeline`](crate::model::StepTimeline)
pub fn build(self) -> crate::model::StepTimeline {
crate::model::StepTimeline {
creation_date_time: self.creation_date_time,
start_date_time: self.start_date_time,
end_date_time: self.end_date_time,
}
}
}
}
impl StepTimeline {
/// Creates a new builder-style object to manufacture [`StepTimeline`](crate::model::StepTimeline)
pub fn builder() -> crate::model::step_timeline::Builder {
crate::model::step_timeline::Builder::default()
}
}
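// Illustration only: a `StepTimeline` built from epoch seconds. This assumes
// `aws_smithy_types::DateTime::from_secs` is available in the pinned
// `aws-smithy-types` version; the timestamps themselves are arbitrary.
#[cfg(test)]
mod step_timeline_usage_sketch {
    #[test]
    fn builds_timeline_from_epoch_seconds() {
        let timeline = crate::model::StepTimeline::builder()
            .creation_date_time(aws_smithy_types::DateTime::from_secs(1_600_000_000))
            .start_date_time(aws_smithy_types::DateTime::from_secs(1_600_000_060))
            .build();
        assert!(timeline.creation_date_time().is_some());
        assert!(timeline.start_date_time().is_some());
        assert!(timeline.end_date_time().is_none());
    }
}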
/// <p>The details of the step failure. The service attempts to detect the root cause for many common failures.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct FailureDetails {
/// <p>The reason for the step failure. In the case where the service cannot successfully determine the root cause of the failure, it returns "Unknown Error" as a reason.</p>
pub reason: std::option::Option<std::string::String>,
/// <p>The descriptive message including the error the Amazon EMR service has identified as the cause of step failure. This is text from an error log that describes the root cause of the failure.</p>
pub message: std::option::Option<std::string::String>,
/// <p>The path to the log file where the step failure root cause was originally recorded.</p>
pub log_file: std::option::Option<std::string::String>,
}
impl FailureDetails {
/// <p>The reason for the step failure. In the case where the service cannot successfully determine the root cause of the failure, it returns "Unknown Error" as a reason.</p>
pub fn reason(&self) -> std::option::Option<&str> {
self.reason.as_deref()
}
/// <p>The descriptive message including the error the Amazon EMR service has identified as the cause of step failure. This is text from an error log that describes the root cause of the failure.</p>
pub fn message(&self) -> std::option::Option<&str> {
self.message.as_deref()
}
/// <p>The path to the log file where the step failure root cause was originally recorded.</p>
pub fn log_file(&self) -> std::option::Option<&str> {
self.log_file.as_deref()
}
}
impl std::fmt::Debug for FailureDetails {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("FailureDetails");
formatter.field("reason", &self.reason);
formatter.field("message", &self.message);
formatter.field("log_file", &self.log_file);
formatter.finish()
}
}
/// See [`FailureDetails`](crate::model::FailureDetails)
pub mod failure_details {
/// A builder for [`FailureDetails`](crate::model::FailureDetails)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) reason: std::option::Option<std::string::String>,
pub(crate) message: std::option::Option<std::string::String>,
pub(crate) log_file: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The reason for the step failure. In the case where the service cannot successfully determine the root cause of the failure, it returns "Unknown Error" as a reason.</p>
pub fn reason(mut self, input: impl Into<std::string::String>) -> Self {
self.reason = Some(input.into());
self
}
/// <p>The reason for the step failure. In the case where the service cannot successfully determine the root cause of the failure, it returns "Unknown Error" as a reason.</p>
pub fn set_reason(mut self, input: std::option::Option<std::string::String>) -> Self {
self.reason = input;
self
}
/// <p>The descriptive message including the error the Amazon EMR service has identified as the cause of step failure. This is text from an error log that describes the root cause of the failure.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
/// <p>The descriptive message including the error the Amazon EMR service has identified as the cause of step failure. This is text from an error log that describes the root cause of the failure.</p>
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// <p>The path to the log file where the step failure root cause was originally recorded.</p>
pub fn log_file(mut self, input: impl Into<std::string::String>) -> Self {
self.log_file = Some(input.into());
self
}
/// <p>The path to the log file where the step failure root cause was originally recorded.</p>
pub fn set_log_file(mut self, input: std::option::Option<std::string::String>) -> Self {
self.log_file = input;
self
}
/// Consumes the builder and constructs a [`FailureDetails`](crate::model::FailureDetails)
pub fn build(self) -> crate::model::FailureDetails {
crate::model::FailureDetails {
reason: self.reason,
message: self.message,
log_file: self.log_file,
}
}
}
}
impl FailureDetails {
/// Creates a new builder-style object to manufacture [`FailureDetails`](crate::model::FailureDetails)
pub fn builder() -> crate::model::failure_details::Builder {
crate::model::failure_details::Builder::default()
}
}
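// A minimal usage sketch (not emitted by the code generator) of the `FailureDetails` builder
// above: the setters accept anything `Into<String>` and the getters hand back `Option<&str>`.
// The log file path is a made-up example value.
#[cfg(test)]
mod failure_details_usage_sketch {
    #[test]
    fn builder_and_getters_round_trip() {
        let details = crate::model::FailureDetails::builder()
            .reason("Unknown Error")
            .message("The step failed; see the log file for the root cause.")
            .log_file("s3://example-bucket/logs/step.log")
            .build();
        assert_eq!(details.reason(), Some("Unknown Error"));
        assert_eq!(details.log_file(), Some("s3://example-bucket/logs/step.log"));
        assert!(details.message().is_some());
    }
}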
/// <p>The details of the step state change reason.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StepStateChangeReason {
/// <p>The programmable code for the state change reason. Note: Currently, the service provides no code for the state change.</p>
pub code: std::option::Option<crate::model::StepStateChangeReasonCode>,
/// <p>The descriptive message for the state change reason.</p>
pub message: std::option::Option<std::string::String>,
}
impl StepStateChangeReason {
/// <p>The programmable code for the state change reason. Note: Currently, the service provides no code for the state change.</p>
pub fn code(&self) -> std::option::Option<&crate::model::StepStateChangeReasonCode> {
self.code.as_ref()
}
/// <p>The descriptive message for the state change reason.</p>
pub fn message(&self) -> std::option::Option<&str> {
self.message.as_deref()
}
}
impl std::fmt::Debug for StepStateChangeReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StepStateChangeReason");
formatter.field("code", &self.code);
formatter.field("message", &self.message);
formatter.finish()
}
}
/// See [`StepStateChangeReason`](crate::model::StepStateChangeReason)
pub mod step_state_change_reason {
/// A builder for [`StepStateChangeReason`](crate::model::StepStateChangeReason)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) code: std::option::Option<crate::model::StepStateChangeReasonCode>,
pub(crate) message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The programmable code for the state change reason. Note: Currently, the service provides no code for the state change.</p>
pub fn code(mut self, input: crate::model::StepStateChangeReasonCode) -> Self {
self.code = Some(input);
self
}
/// <p>The programmable code for the state change reason. Note: Currently, the service provides no code for the state change.</p>
pub fn set_code(
mut self,
input: std::option::Option<crate::model::StepStateChangeReasonCode>,
) -> Self {
self.code = input;
self
}
/// <p>The descriptive message for the state change reason.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
/// <p>The descriptive message for the state change reason.</p>
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// Consumes the builder and constructs a [`StepStateChangeReason`](crate::model::StepStateChangeReason)
pub fn build(self) -> crate::model::StepStateChangeReason {
crate::model::StepStateChangeReason {
code: self.code,
message: self.message,
}
}
}
}
impl StepStateChangeReason {
/// Creates a new builder-style object to manufacture [`StepStateChangeReason`](crate::model::StepStateChangeReason)
pub fn builder() -> crate::model::step_state_change_reason::Builder {
crate::model::step_state_change_reason::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum StepStateChangeReasonCode {
#[allow(missing_docs)] // documentation missing in model
None,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for StepStateChangeReasonCode {
fn from(s: &str) -> Self {
match s {
"NONE" => StepStateChangeReasonCode::None,
other => StepStateChangeReasonCode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for StepStateChangeReasonCode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(StepStateChangeReasonCode::from(s))
}
}
impl StepStateChangeReasonCode {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
StepStateChangeReasonCode::None => "NONE",
StepStateChangeReasonCode::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["NONE"]
}
}
impl AsRef<str> for StepStateChangeReasonCode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
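// A minimal usage sketch (not emitted by the code generator) of `StepStateChangeReasonCode`:
// parsing never fails, and strings this version of the enum does not recognize are preserved
// in the `Unknown` variant.
#[cfg(test)]
mod step_state_change_reason_code_usage_sketch {
    use std::str::FromStr;

    #[test]
    fn known_and_unknown_values_parse() {
        let known = crate::model::StepStateChangeReasonCode::from_str("NONE").unwrap();
        assert_eq!(known, crate::model::StepStateChangeReasonCode::None);
        assert_eq!(known.as_str(), "NONE");

        // "NEW_CODE" is a made-up value standing in for a variant added after codegen.
        let unknown = crate::model::StepStateChangeReasonCode::from("NEW_CODE");
        assert_eq!(unknown.as_ref(), "NEW_CODE");
        assert!(crate::model::StepStateChangeReasonCode::values().contains(&"NONE"));
    }
}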
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum StepState {
#[allow(missing_docs)] // documentation missing in model
Cancelled,
#[allow(missing_docs)] // documentation missing in model
CancelPending,
#[allow(missing_docs)] // documentation missing in model
Completed,
#[allow(missing_docs)] // documentation missing in model
Failed,
#[allow(missing_docs)] // documentation missing in model
Interrupted,
#[allow(missing_docs)] // documentation missing in model
Pending,
#[allow(missing_docs)] // documentation missing in model
Running,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for StepState {
fn from(s: &str) -> Self {
match s {
"CANCELLED" => StepState::Cancelled,
"CANCEL_PENDING" => StepState::CancelPending,
"COMPLETED" => StepState::Completed,
"FAILED" => StepState::Failed,
"INTERRUPTED" => StepState::Interrupted,
"PENDING" => StepState::Pending,
"RUNNING" => StepState::Running,
other => StepState::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for StepState {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(StepState::from(s))
}
}
impl StepState {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
StepState::Cancelled => "CANCELLED",
StepState::CancelPending => "CANCEL_PENDING",
StepState::Completed => "COMPLETED",
StepState::Failed => "FAILED",
StepState::Interrupted => "INTERRUPTED",
StepState::Pending => "PENDING",
StepState::Running => "RUNNING",
StepState::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"CANCELLED",
"CANCEL_PENDING",
"COMPLETED",
"FAILED",
"INTERRUPTED",
"PENDING",
"RUNNING",
]
}
}
impl AsRef<str> for StepState {
fn as_ref(&self) -> &str {
self.as_str()
}
}
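// A minimal usage sketch (not emitted by the code generator) of `StepState`: every string
// reported by `values()` should round-trip through `From<&str>` and `as_str` without falling
// into the `Unknown` variant.
#[cfg(test)]
mod step_state_usage_sketch {
    #[test]
    fn listed_values_round_trip() {
        for value in crate::model::StepState::values() {
            let state = crate::model::StepState::from(*value);
            assert_eq!(state.as_str(), *value);
            assert!(!matches!(state, crate::model::StepState::Unknown(_)));
        }
    }
}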
/// <p>A cluster step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct HadoopStepConfig {
/// <p>The path to the JAR file that runs during the step.</p>
pub jar: std::option::Option<std::string::String>,
/// <p>The list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.</p>
pub properties:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
/// <p>The name of the main class in the specified Java file. If not specified, the JAR file should specify a main class in its manifest file.</p>
pub main_class: std::option::Option<std::string::String>,
/// <p>The list of command line arguments to pass to the JAR file's main function for execution.</p>
pub args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl HadoopStepConfig {
/// <p>The path to the JAR file that runs during the step.</p>
pub fn jar(&self) -> std::option::Option<&str> {
self.jar.as_deref()
}
/// <p>The list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.</p>
pub fn properties(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.properties.as_ref()
}
/// <p>The name of the main class in the specified Java file. If not specified, the JAR file should specify a main class in its manifest file.</p>
pub fn main_class(&self) -> std::option::Option<&str> {
self.main_class.as_deref()
}
/// <p>The list of command line arguments to pass to the JAR file's main function for execution.</p>
pub fn args(&self) -> std::option::Option<&[std::string::String]> {
self.args.as_deref()
}
}
impl std::fmt::Debug for HadoopStepConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("HadoopStepConfig");
formatter.field("jar", &self.jar);
formatter.field("properties", &self.properties);
formatter.field("main_class", &self.main_class);
formatter.field("args", &self.args);
formatter.finish()
}
}
/// See [`HadoopStepConfig`](crate::model::HadoopStepConfig)
pub mod hadoop_step_config {
/// A builder for [`HadoopStepConfig`](crate::model::HadoopStepConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) jar: std::option::Option<std::string::String>,
pub(crate) properties: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
pub(crate) main_class: std::option::Option<std::string::String>,
pub(crate) args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The path to the JAR file that runs during the step.</p>
pub fn jar(mut self, input: impl Into<std::string::String>) -> Self {
self.jar = Some(input.into());
self
}
/// <p>The path to the JAR file that runs during the step.</p>
pub fn set_jar(mut self, input: std::option::Option<std::string::String>) -> Self {
self.jar = input;
self
}
/// Adds a key-value pair to `properties`.
///
/// To override the contents of this collection use [`set_properties`](Self::set_properties).
///
/// <p>The list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.</p>
pub fn properties(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
let mut hash_map = self.properties.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.properties = Some(hash_map);
self
}
/// <p>The list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.</p>
pub fn set_properties(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.properties = input;
self
}
/// <p>The name of the main class in the specified Java file. If not specified, the JAR file should specify a main class in its manifest file.</p>
pub fn main_class(mut self, input: impl Into<std::string::String>) -> Self {
self.main_class = Some(input.into());
self
}
/// <p>The name of the main class in the specified Java file. If not specified, the JAR file should specify a main class in its manifest file.</p>
pub fn set_main_class(mut self, input: std::option::Option<std::string::String>) -> Self {
self.main_class = input;
self
}
/// Appends an item to `args`.
///
/// To override the contents of this collection use [`set_args`](Self::set_args).
///
/// <p>The list of command line arguments to pass to the JAR file's main function for execution.</p>
pub fn args(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.args.unwrap_or_default();
v.push(input.into());
self.args = Some(v);
self
}
/// <p>The list of command line arguments to pass to the JAR file's main function for execution.</p>
pub fn set_args(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.args = input;
self
}
/// Consumes the builder and constructs a [`HadoopStepConfig`](crate::model::HadoopStepConfig)
pub fn build(self) -> crate::model::HadoopStepConfig {
crate::model::HadoopStepConfig {
jar: self.jar,
properties: self.properties,
main_class: self.main_class,
args: self.args,
}
}
}
}
impl HadoopStepConfig {
/// Creates a new builder-style object to manufacture [`HadoopStepConfig`](crate::model::HadoopStepConfig)
pub fn builder() -> crate::model::hadoop_step_config::Builder {
crate::model::hadoop_step_config::Builder::default()
}
}
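// A minimal usage sketch (not emitted by the code generator) of the `HadoopStepConfig`
// builder above: repeated calls to `properties` and `args` accumulate entries rather than
// replacing the collection. The JAR path, class name, and arguments are made-up example
// values.
#[cfg(test)]
mod hadoop_step_config_usage_sketch {
    #[test]
    fn collection_setters_accumulate() {
        let config = crate::model::HadoopStepConfig::builder()
            .jar("s3://example-bucket/jars/step.jar")
            .main_class("com.example.Main")
            .properties("mapreduce.job.reduces", "2")
            .properties("mapreduce.map.memory.mb", "2048")
            .args("--input")
            .args("s3://example-bucket/input/")
            .build();
        assert_eq!(config.properties().map(|p| p.len()), Some(2));
        assert_eq!(config.args().map(|a| a.len()), Some(2));
    }
}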
/// <p>The creation date and time, and name, of a security configuration.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SecurityConfigurationSummary {
/// <p>The name of the security configuration.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The date and time the security configuration was created.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl SecurityConfigurationSummary {
/// <p>The name of the security configuration.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The date and time the security configuration was created.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
}
impl std::fmt::Debug for SecurityConfigurationSummary {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("SecurityConfigurationSummary");
formatter.field("name", &self.name);
formatter.field("creation_date_time", &self.creation_date_time);
formatter.finish()
}
}
/// See [`SecurityConfigurationSummary`](crate::model::SecurityConfigurationSummary)
pub mod security_configuration_summary {
/// A builder for [`SecurityConfigurationSummary`](crate::model::SecurityConfigurationSummary)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The name of the security configuration.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the security configuration.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The date and time the security configuration was created.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The date and time the security configuration was created.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
/// Consumes the builder and constructs a [`SecurityConfigurationSummary`](crate::model::SecurityConfigurationSummary)
pub fn build(self) -> crate::model::SecurityConfigurationSummary {
crate::model::SecurityConfigurationSummary {
name: self.name,
creation_date_time: self.creation_date_time,
}
}
}
}
impl SecurityConfigurationSummary {
/// Creates a new builder-style object to manufacture [`SecurityConfigurationSummary`](crate::model::SecurityConfigurationSummary)
pub fn builder() -> crate::model::security_configuration_summary::Builder {
crate::model::security_configuration_summary::Builder::default()
}
}
/// <p>The release label filters by application or version prefix.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ReleaseLabelFilter {
/// <p>Optional release label version prefix filter. For example, <code>emr-5</code>.</p>
pub prefix: std::option::Option<std::string::String>,
    /// <p>Optional release label application filter. For example, <code>spark@2.1.0</code>.</p>
pub application: std::option::Option<std::string::String>,
}
impl ReleaseLabelFilter {
/// <p>Optional release label version prefix filter. For example, <code>emr-5</code>.</p>
pub fn prefix(&self) -> std::option::Option<&str> {
self.prefix.as_deref()
}
    /// <p>Optional release label application filter. For example, <code>spark@2.1.0</code>.</p>
pub fn application(&self) -> std::option::Option<&str> {
self.application.as_deref()
}
}
impl std::fmt::Debug for ReleaseLabelFilter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ReleaseLabelFilter");
formatter.field("prefix", &self.prefix);
formatter.field("application", &self.application);
formatter.finish()
}
}
/// See [`ReleaseLabelFilter`](crate::model::ReleaseLabelFilter)
pub mod release_label_filter {
/// A builder for [`ReleaseLabelFilter`](crate::model::ReleaseLabelFilter)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) prefix: std::option::Option<std::string::String>,
pub(crate) application: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>Optional release label version prefix filter. For example, <code>emr-5</code>.</p>
pub fn prefix(mut self, input: impl Into<std::string::String>) -> Self {
self.prefix = Some(input.into());
self
}
/// <p>Optional release label version prefix filter. For example, <code>emr-5</code>.</p>
pub fn set_prefix(mut self, input: std::option::Option<std::string::String>) -> Self {
self.prefix = input;
self
}
        /// <p>Optional release label application filter. For example, <code>spark@2.1.0</code>.</p>
pub fn application(mut self, input: impl Into<std::string::String>) -> Self {
self.application = Some(input.into());
self
}
        /// <p>Optional release label application filter. For example, <code>spark@2.1.0</code>.</p>
pub fn set_application(mut self, input: std::option::Option<std::string::String>) -> Self {
self.application = input;
self
}
/// Consumes the builder and constructs a [`ReleaseLabelFilter`](crate::model::ReleaseLabelFilter)
pub fn build(self) -> crate::model::ReleaseLabelFilter {
crate::model::ReleaseLabelFilter {
prefix: self.prefix,
application: self.application,
}
}
}
}
impl ReleaseLabelFilter {
/// Creates a new builder-style object to manufacture [`ReleaseLabelFilter`](crate::model::ReleaseLabelFilter)
pub fn builder() -> crate::model::release_label_filter::Builder {
crate::model::release_label_filter::Builder::default()
}
}
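// A minimal usage sketch (not emitted by the code generator) of `ReleaseLabelFilter`: either
// field can be supplied on its own; the other simply stays `None`.
#[cfg(test)]
mod release_label_filter_usage_sketch {
    #[test]
    fn prefix_only_filter() {
        let filter = crate::model::ReleaseLabelFilter::builder()
            .prefix("emr-5")
            .build();
        assert_eq!(filter.prefix(), Some("emr-5"));
        assert_eq!(filter.application(), None);
    }
}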
/// <p>Details for a notebook execution. The details include information such as the unique ID and status of the notebook execution.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NotebookExecutionSummary {
/// <p>The unique identifier of the notebook execution.</p>
pub notebook_execution_id: std::option::Option<std::string::String>,
/// <p>The unique identifier of the editor associated with the notebook execution.</p>
pub editor_id: std::option::Option<std::string::String>,
/// <p>The name of the notebook execution.</p>
pub notebook_execution_name: std::option::Option<std::string::String>,
/// <p>The status of the notebook execution.</p>
/// <ul>
/// <li> <p> <code>START_PENDING</code> indicates that the cluster has received the execution request but execution has not begun.</p> </li>
/// <li> <p> <code>STARTING</code> indicates that the execution is starting on the cluster.</p> </li>
/// <li> <p> <code>RUNNING</code> indicates that the execution is being processed by the cluster.</p> </li>
/// <li> <p> <code>FINISHING</code> indicates that execution processing is in the final stages.</p> </li>
/// <li> <p> <code>FINISHED</code> indicates that the execution has completed without error.</p> </li>
/// <li> <p> <code>FAILING</code> indicates that the execution is failing and will not finish successfully.</p> </li>
/// <li> <p> <code>FAILED</code> indicates that the execution failed.</p> </li>
/// <li> <p> <code>STOP_PENDING</code> indicates that the cluster has received a <code>StopNotebookExecution</code> request and the stop is pending.</p> </li>
/// <li> <p> <code>STOPPING</code> indicates that the cluster is in the process of stopping the execution as a result of a <code>StopNotebookExecution</code> request.</p> </li>
/// <li> <p> <code>STOPPED</code> indicates that the execution stopped because of a <code>StopNotebookExecution</code> request.</p> </li>
/// </ul>
pub status: std::option::Option<crate::model::NotebookExecutionStatus>,
/// <p>The timestamp when notebook execution started.</p>
pub start_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The timestamp when notebook execution ended.</p>
pub end_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl NotebookExecutionSummary {
/// <p>The unique identifier of the notebook execution.</p>
pub fn notebook_execution_id(&self) -> std::option::Option<&str> {
self.notebook_execution_id.as_deref()
}
/// <p>The unique identifier of the editor associated with the notebook execution.</p>
pub fn editor_id(&self) -> std::option::Option<&str> {
self.editor_id.as_deref()
}
/// <p>The name of the notebook execution.</p>
pub fn notebook_execution_name(&self) -> std::option::Option<&str> {
self.notebook_execution_name.as_deref()
}
/// <p>The status of the notebook execution.</p>
/// <ul>
/// <li> <p> <code>START_PENDING</code> indicates that the cluster has received the execution request but execution has not begun.</p> </li>
/// <li> <p> <code>STARTING</code> indicates that the execution is starting on the cluster.</p> </li>
/// <li> <p> <code>RUNNING</code> indicates that the execution is being processed by the cluster.</p> </li>
/// <li> <p> <code>FINISHING</code> indicates that execution processing is in the final stages.</p> </li>
/// <li> <p> <code>FINISHED</code> indicates that the execution has completed without error.</p> </li>
/// <li> <p> <code>FAILING</code> indicates that the execution is failing and will not finish successfully.</p> </li>
/// <li> <p> <code>FAILED</code> indicates that the execution failed.</p> </li>
/// <li> <p> <code>STOP_PENDING</code> indicates that the cluster has received a <code>StopNotebookExecution</code> request and the stop is pending.</p> </li>
/// <li> <p> <code>STOPPING</code> indicates that the cluster is in the process of stopping the execution as a result of a <code>StopNotebookExecution</code> request.</p> </li>
/// <li> <p> <code>STOPPED</code> indicates that the execution stopped because of a <code>StopNotebookExecution</code> request.</p> </li>
/// </ul>
pub fn status(&self) -> std::option::Option<&crate::model::NotebookExecutionStatus> {
self.status.as_ref()
}
/// <p>The timestamp when notebook execution started.</p>
pub fn start_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.start_time.as_ref()
}
    /// <p>The timestamp when notebook execution ended.</p>
pub fn end_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_time.as_ref()
}
}
impl std::fmt::Debug for NotebookExecutionSummary {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("NotebookExecutionSummary");
formatter.field("notebook_execution_id", &self.notebook_execution_id);
formatter.field("editor_id", &self.editor_id);
formatter.field("notebook_execution_name", &self.notebook_execution_name);
formatter.field("status", &self.status);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.finish()
}
}
/// See [`NotebookExecutionSummary`](crate::model::NotebookExecutionSummary)
pub mod notebook_execution_summary {
/// A builder for [`NotebookExecutionSummary`](crate::model::NotebookExecutionSummary)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) notebook_execution_id: std::option::Option<std::string::String>,
pub(crate) editor_id: std::option::Option<std::string::String>,
pub(crate) notebook_execution_name: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::NotebookExecutionStatus>,
pub(crate) start_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The unique identifier of the notebook execution.</p>
pub fn notebook_execution_id(mut self, input: impl Into<std::string::String>) -> Self {
self.notebook_execution_id = Some(input.into());
self
}
/// <p>The unique identifier of the notebook execution.</p>
pub fn set_notebook_execution_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.notebook_execution_id = input;
self
}
/// <p>The unique identifier of the editor associated with the notebook execution.</p>
pub fn editor_id(mut self, input: impl Into<std::string::String>) -> Self {
self.editor_id = Some(input.into());
self
}
/// <p>The unique identifier of the editor associated with the notebook execution.</p>
pub fn set_editor_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.editor_id = input;
self
}
/// <p>The name of the notebook execution.</p>
pub fn notebook_execution_name(mut self, input: impl Into<std::string::String>) -> Self {
self.notebook_execution_name = Some(input.into());
self
}
/// <p>The name of the notebook execution.</p>
pub fn set_notebook_execution_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.notebook_execution_name = input;
self
}
/// <p>The status of the notebook execution.</p>
/// <ul>
/// <li> <p> <code>START_PENDING</code> indicates that the cluster has received the execution request but execution has not begun.</p> </li>
/// <li> <p> <code>STARTING</code> indicates that the execution is starting on the cluster.</p> </li>
/// <li> <p> <code>RUNNING</code> indicates that the execution is being processed by the cluster.</p> </li>
/// <li> <p> <code>FINISHING</code> indicates that execution processing is in the final stages.</p> </li>
/// <li> <p> <code>FINISHED</code> indicates that the execution has completed without error.</p> </li>
/// <li> <p> <code>FAILING</code> indicates that the execution is failing and will not finish successfully.</p> </li>
/// <li> <p> <code>FAILED</code> indicates that the execution failed.</p> </li>
/// <li> <p> <code>STOP_PENDING</code> indicates that the cluster has received a <code>StopNotebookExecution</code> request and the stop is pending.</p> </li>
/// <li> <p> <code>STOPPING</code> indicates that the cluster is in the process of stopping the execution as a result of a <code>StopNotebookExecution</code> request.</p> </li>
/// <li> <p> <code>STOPPED</code> indicates that the execution stopped because of a <code>StopNotebookExecution</code> request.</p> </li>
/// </ul>
pub fn status(mut self, input: crate::model::NotebookExecutionStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The status of the notebook execution.</p>
/// <ul>
/// <li> <p> <code>START_PENDING</code> indicates that the cluster has received the execution request but execution has not begun.</p> </li>
/// <li> <p> <code>STARTING</code> indicates that the execution is starting on the cluster.</p> </li>
/// <li> <p> <code>RUNNING</code> indicates that the execution is being processed by the cluster.</p> </li>
/// <li> <p> <code>FINISHING</code> indicates that execution processing is in the final stages.</p> </li>
/// <li> <p> <code>FINISHED</code> indicates that the execution has completed without error.</p> </li>
/// <li> <p> <code>FAILING</code> indicates that the execution is failing and will not finish successfully.</p> </li>
/// <li> <p> <code>FAILED</code> indicates that the execution failed.</p> </li>
/// <li> <p> <code>STOP_PENDING</code> indicates that the cluster has received a <code>StopNotebookExecution</code> request and the stop is pending.</p> </li>
/// <li> <p> <code>STOPPING</code> indicates that the cluster is in the process of stopping the execution as a result of a <code>StopNotebookExecution</code> request.</p> </li>
/// <li> <p> <code>STOPPED</code> indicates that the execution stopped because of a <code>StopNotebookExecution</code> request.</p> </li>
/// </ul>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::NotebookExecutionStatus>,
) -> Self {
self.status = input;
self
}
/// <p>The timestamp when notebook execution started.</p>
pub fn start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.start_time = Some(input);
self
}
/// <p>The timestamp when notebook execution started.</p>
pub fn set_start_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.start_time = input;
self
}
        /// <p>The timestamp when notebook execution ended.</p>
pub fn end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_time = Some(input);
self
}
        /// <p>The timestamp when notebook execution ended.</p>
pub fn set_end_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_time = input;
self
}
/// Consumes the builder and constructs a [`NotebookExecutionSummary`](crate::model::NotebookExecutionSummary)
pub fn build(self) -> crate::model::NotebookExecutionSummary {
crate::model::NotebookExecutionSummary {
notebook_execution_id: self.notebook_execution_id,
editor_id: self.editor_id,
notebook_execution_name: self.notebook_execution_name,
status: self.status,
start_time: self.start_time,
end_time: self.end_time,
}
}
}
}
impl NotebookExecutionSummary {
/// Creates a new builder-style object to manufacture [`NotebookExecutionSummary`](crate::model::NotebookExecutionSummary)
pub fn builder() -> crate::model::notebook_execution_summary::Builder {
crate::model::notebook_execution_summary::Builder::default()
}
}
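// A minimal usage sketch (not emitted by the code generator) of the
// `NotebookExecutionSummary` builder above, combining string, enum, and timestamp setters.
// The identifiers are made-up values, and `DateTime::from_secs` is assumed to be available
// from `aws_smithy_types`.
#[cfg(test)]
mod notebook_execution_summary_usage_sketch {
    #[test]
    fn builder_populates_status_and_ids() {
        let summary = crate::model::NotebookExecutionSummary::builder()
            .notebook_execution_id("ex-EXAMPLE")
            .editor_id("e-EXAMPLE")
            .status(crate::model::NotebookExecutionStatus::Running)
            .start_time(aws_smithy_types::DateTime::from_secs(1_577_836_800))
            .build();
        assert_eq!(summary.notebook_execution_id(), Some("ex-EXAMPLE"));
        assert_eq!(
            summary.status(),
            Some(&crate::model::NotebookExecutionStatus::Running)
        );
        assert!(summary.end_time().is_none());
    }
}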
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum NotebookExecutionStatus {
#[allow(missing_docs)] // documentation missing in model
Failed,
#[allow(missing_docs)] // documentation missing in model
Failing,
#[allow(missing_docs)] // documentation missing in model
Finished,
#[allow(missing_docs)] // documentation missing in model
Finishing,
#[allow(missing_docs)] // documentation missing in model
Running,
#[allow(missing_docs)] // documentation missing in model
Starting,
#[allow(missing_docs)] // documentation missing in model
StartPending,
#[allow(missing_docs)] // documentation missing in model
Stopped,
#[allow(missing_docs)] // documentation missing in model
Stopping,
#[allow(missing_docs)] // documentation missing in model
StopPending,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for NotebookExecutionStatus {
fn from(s: &str) -> Self {
match s {
"FAILED" => NotebookExecutionStatus::Failed,
"FAILING" => NotebookExecutionStatus::Failing,
"FINISHED" => NotebookExecutionStatus::Finished,
"FINISHING" => NotebookExecutionStatus::Finishing,
"RUNNING" => NotebookExecutionStatus::Running,
"STARTING" => NotebookExecutionStatus::Starting,
"START_PENDING" => NotebookExecutionStatus::StartPending,
"STOPPED" => NotebookExecutionStatus::Stopped,
"STOPPING" => NotebookExecutionStatus::Stopping,
"STOP_PENDING" => NotebookExecutionStatus::StopPending,
other => NotebookExecutionStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for NotebookExecutionStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(NotebookExecutionStatus::from(s))
}
}
impl NotebookExecutionStatus {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
NotebookExecutionStatus::Failed => "FAILED",
NotebookExecutionStatus::Failing => "FAILING",
NotebookExecutionStatus::Finished => "FINISHED",
NotebookExecutionStatus::Finishing => "FINISHING",
NotebookExecutionStatus::Running => "RUNNING",
NotebookExecutionStatus::Starting => "STARTING",
NotebookExecutionStatus::StartPending => "START_PENDING",
NotebookExecutionStatus::Stopped => "STOPPED",
NotebookExecutionStatus::Stopping => "STOPPING",
NotebookExecutionStatus::StopPending => "STOP_PENDING",
NotebookExecutionStatus::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"FAILED",
"FAILING",
"FINISHED",
"FINISHING",
"RUNNING",
"STARTING",
"START_PENDING",
"STOPPED",
"STOPPING",
"STOP_PENDING",
]
}
}
impl AsRef<str> for NotebookExecutionStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
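// A minimal usage sketch (not emitted by the code generator) of `NotebookExecutionStatus`:
// service responses carry the status as a string, which maps onto the enum through
// `From<&str>`; unrecognized strings are kept in `Unknown`.
#[cfg(test)]
mod notebook_execution_status_usage_sketch {
    #[test]
    fn strings_map_to_variants() {
        assert_eq!(
            crate::model::NotebookExecutionStatus::from("STOP_PENDING"),
            crate::model::NotebookExecutionStatus::StopPending
        );
        // "PAUSED" is a made-up value standing in for a status added after codegen.
        let future = crate::model::NotebookExecutionStatus::from("PAUSED");
        assert_eq!(future.as_str(), "PAUSED");
    }
}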
/// <p>Represents an EC2 instance provisioned as part of a cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Instance {
/// <p>The unique identifier for the instance in Amazon EMR.</p>
pub id: std::option::Option<std::string::String>,
/// <p>The unique identifier of the instance in Amazon EC2.</p>
pub ec2_instance_id: std::option::Option<std::string::String>,
/// <p>The public DNS name of the instance.</p>
pub public_dns_name: std::option::Option<std::string::String>,
/// <p>The public IP address of the instance.</p>
pub public_ip_address: std::option::Option<std::string::String>,
/// <p>The private DNS name of the instance.</p>
pub private_dns_name: std::option::Option<std::string::String>,
/// <p>The private IP address of the instance.</p>
pub private_ip_address: std::option::Option<std::string::String>,
/// <p>The current status of the instance.</p>
pub status: std::option::Option<crate::model::InstanceStatus>,
/// <p>The identifier of the instance group to which this instance belongs.</p>
pub instance_group_id: std::option::Option<std::string::String>,
/// <p>The unique identifier of the instance fleet to which an EC2 instance belongs.</p>
pub instance_fleet_id: std::option::Option<std::string::String>,
/// <p>The instance purchasing option. Valid values are <code>ON_DEMAND</code> or <code>SPOT</code>. </p>
pub market: std::option::Option<crate::model::MarketType>,
/// <p>The EC2 instance type, for example <code>m3.xlarge</code>.</p>
pub instance_type: std::option::Option<std::string::String>,
/// <p>The list of Amazon EBS volumes that are attached to this instance.</p>
pub ebs_volumes: std::option::Option<std::vec::Vec<crate::model::EbsVolume>>,
}
impl Instance {
/// <p>The unique identifier for the instance in Amazon EMR.</p>
pub fn id(&self) -> std::option::Option<&str> {
self.id.as_deref()
}
/// <p>The unique identifier of the instance in Amazon EC2.</p>
pub fn ec2_instance_id(&self) -> std::option::Option<&str> {
self.ec2_instance_id.as_deref()
}
/// <p>The public DNS name of the instance.</p>
pub fn public_dns_name(&self) -> std::option::Option<&str> {
self.public_dns_name.as_deref()
}
/// <p>The public IP address of the instance.</p>
pub fn public_ip_address(&self) -> std::option::Option<&str> {
self.public_ip_address.as_deref()
}
/// <p>The private DNS name of the instance.</p>
pub fn private_dns_name(&self) -> std::option::Option<&str> {
self.private_dns_name.as_deref()
}
/// <p>The private IP address of the instance.</p>
pub fn private_ip_address(&self) -> std::option::Option<&str> {
self.private_ip_address.as_deref()
}
/// <p>The current status of the instance.</p>
pub fn status(&self) -> std::option::Option<&crate::model::InstanceStatus> {
self.status.as_ref()
}
/// <p>The identifier of the instance group to which this instance belongs.</p>
pub fn instance_group_id(&self) -> std::option::Option<&str> {
self.instance_group_id.as_deref()
}
/// <p>The unique identifier of the instance fleet to which an EC2 instance belongs.</p>
pub fn instance_fleet_id(&self) -> std::option::Option<&str> {
self.instance_fleet_id.as_deref()
}
/// <p>The instance purchasing option. Valid values are <code>ON_DEMAND</code> or <code>SPOT</code>. </p>
pub fn market(&self) -> std::option::Option<&crate::model::MarketType> {
self.market.as_ref()
}
/// <p>The EC2 instance type, for example <code>m3.xlarge</code>.</p>
pub fn instance_type(&self) -> std::option::Option<&str> {
self.instance_type.as_deref()
}
/// <p>The list of Amazon EBS volumes that are attached to this instance.</p>
pub fn ebs_volumes(&self) -> std::option::Option<&[crate::model::EbsVolume]> {
self.ebs_volumes.as_deref()
}
}
impl std::fmt::Debug for Instance {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Instance");
formatter.field("id", &self.id);
formatter.field("ec2_instance_id", &self.ec2_instance_id);
formatter.field("public_dns_name", &self.public_dns_name);
formatter.field("public_ip_address", &self.public_ip_address);
formatter.field("private_dns_name", &self.private_dns_name);
formatter.field("private_ip_address", &self.private_ip_address);
formatter.field("status", &self.status);
formatter.field("instance_group_id", &self.instance_group_id);
formatter.field("instance_fleet_id", &self.instance_fleet_id);
formatter.field("market", &self.market);
formatter.field("instance_type", &self.instance_type);
formatter.field("ebs_volumes", &self.ebs_volumes);
formatter.finish()
}
}
/// See [`Instance`](crate::model::Instance)
pub mod instance {
/// A builder for [`Instance`](crate::model::Instance)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) id: std::option::Option<std::string::String>,
pub(crate) ec2_instance_id: std::option::Option<std::string::String>,
pub(crate) public_dns_name: std::option::Option<std::string::String>,
pub(crate) public_ip_address: std::option::Option<std::string::String>,
pub(crate) private_dns_name: std::option::Option<std::string::String>,
pub(crate) private_ip_address: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::InstanceStatus>,
pub(crate) instance_group_id: std::option::Option<std::string::String>,
pub(crate) instance_fleet_id: std::option::Option<std::string::String>,
pub(crate) market: std::option::Option<crate::model::MarketType>,
pub(crate) instance_type: std::option::Option<std::string::String>,
pub(crate) ebs_volumes: std::option::Option<std::vec::Vec<crate::model::EbsVolume>>,
}
impl Builder {
/// <p>The unique identifier for the instance in Amazon EMR.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.id = Some(input.into());
self
}
/// <p>The unique identifier for the instance in Amazon EMR.</p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.id = input;
self
}
/// <p>The unique identifier of the instance in Amazon EC2.</p>
pub fn ec2_instance_id(mut self, input: impl Into<std::string::String>) -> Self {
self.ec2_instance_id = Some(input.into());
self
}
/// <p>The unique identifier of the instance in Amazon EC2.</p>
pub fn set_ec2_instance_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.ec2_instance_id = input;
self
}
/// <p>The public DNS name of the instance.</p>
pub fn public_dns_name(mut self, input: impl Into<std::string::String>) -> Self {
self.public_dns_name = Some(input.into());
self
}
/// <p>The public DNS name of the instance.</p>
pub fn set_public_dns_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.public_dns_name = input;
self
}
/// <p>The public IP address of the instance.</p>
pub fn public_ip_address(mut self, input: impl Into<std::string::String>) -> Self {
self.public_ip_address = Some(input.into());
self
}
/// <p>The public IP address of the instance.</p>
pub fn set_public_ip_address(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.public_ip_address = input;
self
}
/// <p>The private DNS name of the instance.</p>
pub fn private_dns_name(mut self, input: impl Into<std::string::String>) -> Self {
self.private_dns_name = Some(input.into());
self
}
/// <p>The private DNS name of the instance.</p>
pub fn set_private_dns_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.private_dns_name = input;
self
}
/// <p>The private IP address of the instance.</p>
pub fn private_ip_address(mut self, input: impl Into<std::string::String>) -> Self {
self.private_ip_address = Some(input.into());
self
}
/// <p>The private IP address of the instance.</p>
pub fn set_private_ip_address(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.private_ip_address = input;
self
}
/// <p>The current status of the instance.</p>
pub fn status(mut self, input: crate::model::InstanceStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The current status of the instance.</p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::InstanceStatus>,
) -> Self {
self.status = input;
self
}
/// <p>The identifier of the instance group to which this instance belongs.</p>
pub fn instance_group_id(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_group_id = Some(input.into());
self
}
/// <p>The identifier of the instance group to which this instance belongs.</p>
pub fn set_instance_group_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_group_id = input;
self
}
/// <p>The unique identifier of the instance fleet to which an EC2 instance belongs.</p>
pub fn instance_fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_fleet_id = Some(input.into());
self
}
/// <p>The unique identifier of the instance fleet to which an EC2 instance belongs.</p>
pub fn set_instance_fleet_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_fleet_id = input;
self
}
/// <p>The instance purchasing option. Valid values are <code>ON_DEMAND</code> or <code>SPOT</code>. </p>
pub fn market(mut self, input: crate::model::MarketType) -> Self {
self.market = Some(input);
self
}
/// <p>The instance purchasing option. Valid values are <code>ON_DEMAND</code> or <code>SPOT</code>. </p>
pub fn set_market(mut self, input: std::option::Option<crate::model::MarketType>) -> Self {
self.market = input;
self
}
/// <p>The EC2 instance type, for example <code>m3.xlarge</code>.</p>
pub fn instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_type = Some(input.into());
self
}
/// <p>The EC2 instance type, for example <code>m3.xlarge</code>.</p>
pub fn set_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_type = input;
self
}
/// Appends an item to `ebs_volumes`.
///
/// To override the contents of this collection use [`set_ebs_volumes`](Self::set_ebs_volumes).
///
/// <p>The list of Amazon EBS volumes that are attached to this instance.</p>
pub fn ebs_volumes(mut self, input: crate::model::EbsVolume) -> Self {
let mut v = self.ebs_volumes.unwrap_or_default();
v.push(input);
self.ebs_volumes = Some(v);
self
}
/// <p>The list of Amazon EBS volumes that are attached to this instance.</p>
pub fn set_ebs_volumes(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EbsVolume>>,
) -> Self {
self.ebs_volumes = input;
self
}
        /// Consumes the builder and constructs an [`Instance`](crate::model::Instance)
pub fn build(self) -> crate::model::Instance {
crate::model::Instance {
id: self.id,
ec2_instance_id: self.ec2_instance_id,
public_dns_name: self.public_dns_name,
public_ip_address: self.public_ip_address,
private_dns_name: self.private_dns_name,
private_ip_address: self.private_ip_address,
status: self.status,
instance_group_id: self.instance_group_id,
instance_fleet_id: self.instance_fleet_id,
market: self.market,
instance_type: self.instance_type,
ebs_volumes: self.ebs_volumes,
}
}
}
}
impl Instance {
/// Creates a new builder-style object to manufacture [`Instance`](crate::model::Instance)
pub fn builder() -> crate::model::instance::Builder {
crate::model::instance::Builder::default()
}
}
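// A minimal usage sketch (not emitted by the code generator) of the `Instance` builder
// above: nested model types such as `EbsVolume` are built first and then appended through
// the repeated `ebs_volumes` setter. All identifiers are made-up example values.
#[cfg(test)]
mod instance_usage_sketch {
    #[test]
    fn builder_collects_ebs_volumes() {
        let volume = crate::model::EbsVolume::builder()
            .device("/dev/sdh")
            .volume_id("vol-0123456789abcdef0")
            .build();
        let instance = crate::model::Instance::builder()
            .id("ci-EXAMPLE")
            .ec2_instance_id("i-0123456789abcdef0")
            .instance_type("m3.xlarge")
            .ebs_volumes(volume)
            .build();
        assert_eq!(instance.instance_type(), Some("m3.xlarge"));
        assert_eq!(instance.ebs_volumes().map(|v| v.len()), Some(1));
    }
}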
/// <p>EBS block device that's attached to an EC2 instance.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EbsVolume {
/// <p>The device name that is exposed to the instance, such as /dev/sdh.</p>
pub device: std::option::Option<std::string::String>,
/// <p>The volume identifier of the EBS volume.</p>
pub volume_id: std::option::Option<std::string::String>,
}
impl EbsVolume {
/// <p>The device name that is exposed to the instance, such as /dev/sdh.</p>
pub fn device(&self) -> std::option::Option<&str> {
self.device.as_deref()
}
/// <p>The volume identifier of the EBS volume.</p>
pub fn volume_id(&self) -> std::option::Option<&str> {
self.volume_id.as_deref()
}
}
impl std::fmt::Debug for EbsVolume {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("EbsVolume");
formatter.field("device", &self.device);
formatter.field("volume_id", &self.volume_id);
formatter.finish()
}
}
/// See [`EbsVolume`](crate::model::EbsVolume)
pub mod ebs_volume {
/// A builder for [`EbsVolume`](crate::model::EbsVolume)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) device: std::option::Option<std::string::String>,
pub(crate) volume_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The device name that is exposed to the instance, such as /dev/sdh.</p>
pub fn device(mut self, input: impl Into<std::string::String>) -> Self {
self.device = Some(input.into());
self
}
/// <p>The device name that is exposed to the instance, such as /dev/sdh.</p>
pub fn set_device(mut self, input: std::option::Option<std::string::String>) -> Self {
self.device = input;
self
}
/// <p>The volume identifier of the EBS volume.</p>
pub fn volume_id(mut self, input: impl Into<std::string::String>) -> Self {
self.volume_id = Some(input.into());
self
}
/// <p>The volume identifier of the EBS volume.</p>
pub fn set_volume_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.volume_id = input;
self
}
        /// Consumes the builder and constructs an [`EbsVolume`](crate::model::EbsVolume)
pub fn build(self) -> crate::model::EbsVolume {
crate::model::EbsVolume {
device: self.device,
volume_id: self.volume_id,
}
}
}
}
impl EbsVolume {
/// Creates a new builder-style object to manufacture [`EbsVolume`](crate::model::EbsVolume)
pub fn builder() -> crate::model::ebs_volume::Builder {
crate::model::ebs_volume::Builder::default()
}
}
/// <p>The instance status details.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceStatus {
/// <p>The current state of the instance.</p>
pub state: std::option::Option<crate::model::InstanceState>,
/// <p>The details of the status change reason for the instance.</p>
pub state_change_reason: std::option::Option<crate::model::InstanceStateChangeReason>,
/// <p>The timeline of the instance status over time.</p>
pub timeline: std::option::Option<crate::model::InstanceTimeline>,
}
impl InstanceStatus {
/// <p>The current state of the instance.</p>
pub fn state(&self) -> std::option::Option<&crate::model::InstanceState> {
self.state.as_ref()
}
/// <p>The details of the status change reason for the instance.</p>
pub fn state_change_reason(
&self,
) -> std::option::Option<&crate::model::InstanceStateChangeReason> {
self.state_change_reason.as_ref()
}
/// <p>The timeline of the instance status over time.</p>
pub fn timeline(&self) -> std::option::Option<&crate::model::InstanceTimeline> {
self.timeline.as_ref()
}
}
impl std::fmt::Debug for InstanceStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceStatus");
formatter.field("state", &self.state);
formatter.field("state_change_reason", &self.state_change_reason);
formatter.field("timeline", &self.timeline);
formatter.finish()
}
}
/// See [`InstanceStatus`](crate::model::InstanceStatus)
pub mod instance_status {
/// A builder for [`InstanceStatus`](crate::model::InstanceStatus)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) state: std::option::Option<crate::model::InstanceState>,
pub(crate) state_change_reason:
std::option::Option<crate::model::InstanceStateChangeReason>,
pub(crate) timeline: std::option::Option<crate::model::InstanceTimeline>,
}
impl Builder {
/// <p>The current state of the instance.</p>
pub fn state(mut self, input: crate::model::InstanceState) -> Self {
self.state = Some(input);
self
}
/// <p>The current state of the instance.</p>
pub fn set_state(
mut self,
input: std::option::Option<crate::model::InstanceState>,
) -> Self {
self.state = input;
self
}
/// <p>The details of the status change reason for the instance.</p>
pub fn state_change_reason(
mut self,
input: crate::model::InstanceStateChangeReason,
) -> Self {
self.state_change_reason = Some(input);
self
}
/// <p>The details of the status change reason for the instance.</p>
pub fn set_state_change_reason(
mut self,
input: std::option::Option<crate::model::InstanceStateChangeReason>,
) -> Self {
self.state_change_reason = input;
self
}
/// <p>The timeline of the instance status over time.</p>
pub fn timeline(mut self, input: crate::model::InstanceTimeline) -> Self {
self.timeline = Some(input);
self
}
/// <p>The timeline of the instance status over time.</p>
pub fn set_timeline(
mut self,
input: std::option::Option<crate::model::InstanceTimeline>,
) -> Self {
self.timeline = input;
self
}
        /// Consumes the builder and constructs an [`InstanceStatus`](crate::model::InstanceStatus)
pub fn build(self) -> crate::model::InstanceStatus {
crate::model::InstanceStatus {
state: self.state,
state_change_reason: self.state_change_reason,
timeline: self.timeline,
}
}
}
}
impl InstanceStatus {
/// Creates a new builder-style object to manufacture [`InstanceStatus`](crate::model::InstanceStatus)
pub fn builder() -> crate::model::instance_status::Builder {
crate::model::instance_status::Builder::default()
}
}
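// A minimal usage sketch (not emitted by the code generator) of `InstanceStatus`: it is
// composed from the `InstanceStateChangeReason` and `InstanceTimeline` types defined later
// in this module. The timestamp is a made-up value, and `DateTime::from_secs` is assumed to
// be available from `aws_smithy_types`.
#[cfg(test)]
mod instance_status_usage_sketch {
    #[test]
    fn builder_composes_nested_types() {
        let reason = crate::model::InstanceStateChangeReason::builder()
            .code(crate::model::InstanceStateChangeReasonCode::ClusterTerminated)
            .message("The cluster was terminated.")
            .build();
        let timeline = crate::model::InstanceTimeline::builder()
            .creation_date_time(aws_smithy_types::DateTime::from_secs(1_577_836_800))
            .build();
        let status = crate::model::InstanceStatus::builder()
            .state_change_reason(reason)
            .timeline(timeline)
            .build();
        assert!(status.state().is_none());
        assert!(status.state_change_reason().is_some());
        assert!(status.timeline().is_some());
    }
}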
/// <p>The timeline of the instance lifecycle.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceTimeline {
/// <p>The creation date and time of the instance.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date and time when the instance was ready to perform tasks.</p>
pub ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date and time when the instance was terminated.</p>
pub end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl InstanceTimeline {
/// <p>The creation date and time of the instance.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
/// <p>The date and time when the instance was ready to perform tasks.</p>
pub fn ready_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.ready_date_time.as_ref()
}
/// <p>The date and time when the instance was terminated.</p>
pub fn end_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_date_time.as_ref()
}
}
impl std::fmt::Debug for InstanceTimeline {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceTimeline");
formatter.field("creation_date_time", &self.creation_date_time);
formatter.field("ready_date_time", &self.ready_date_time);
formatter.field("end_date_time", &self.end_date_time);
formatter.finish()
}
}
/// See [`InstanceTimeline`](crate::model::InstanceTimeline)
pub mod instance_timeline {
/// A builder for [`InstanceTimeline`](crate::model::InstanceTimeline)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The creation date and time of the instance.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The creation date and time of the instance.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
/// <p>The date and time when the instance was ready to perform tasks.</p>
pub fn ready_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.ready_date_time = Some(input);
self
}
/// <p>The date and time when the instance was ready to perform tasks.</p>
pub fn set_ready_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.ready_date_time = input;
self
}
/// <p>The date and time when the instance was terminated.</p>
pub fn end_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_date_time = Some(input);
self
}
/// <p>The date and time when the instance was terminated.</p>
pub fn set_end_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_date_time = input;
self
}
        /// Consumes the builder and constructs an [`InstanceTimeline`](crate::model::InstanceTimeline)
pub fn build(self) -> crate::model::InstanceTimeline {
crate::model::InstanceTimeline {
creation_date_time: self.creation_date_time,
ready_date_time: self.ready_date_time,
end_date_time: self.end_date_time,
}
}
}
}
impl InstanceTimeline {
/// Creates a new builder-style object to manufacture [`InstanceTimeline`](crate::model::InstanceTimeline)
pub fn builder() -> crate::model::instance_timeline::Builder {
crate::model::instance_timeline::Builder::default()
}
}
/// <p>The details of the status change reason for the instance.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceStateChangeReason {
/// <p>The programmable code for the state change reason.</p>
pub code: std::option::Option<crate::model::InstanceStateChangeReasonCode>,
/// <p>The status change reason description.</p>
pub message: std::option::Option<std::string::String>,
}
impl InstanceStateChangeReason {
/// <p>The programmable code for the state change reason.</p>
pub fn code(&self) -> std::option::Option<&crate::model::InstanceStateChangeReasonCode> {
self.code.as_ref()
}
/// <p>The status change reason description.</p>
pub fn message(&self) -> std::option::Option<&str> {
self.message.as_deref()
}
}
impl std::fmt::Debug for InstanceStateChangeReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceStateChangeReason");
formatter.field("code", &self.code);
formatter.field("message", &self.message);
formatter.finish()
}
}
/// See [`InstanceStateChangeReason`](crate::model::InstanceStateChangeReason)
pub mod instance_state_change_reason {
/// A builder for [`InstanceStateChangeReason`](crate::model::InstanceStateChangeReason)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) code: std::option::Option<crate::model::InstanceStateChangeReasonCode>,
pub(crate) message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The programmable code for the state change reason.</p>
pub fn code(mut self, input: crate::model::InstanceStateChangeReasonCode) -> Self {
self.code = Some(input);
self
}
/// <p>The programmable code for the state change reason.</p>
pub fn set_code(
mut self,
input: std::option::Option<crate::model::InstanceStateChangeReasonCode>,
) -> Self {
self.code = input;
self
}
/// <p>The status change reason description.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
/// <p>The status change reason description.</p>
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
        /// Consumes the builder and constructs an [`InstanceStateChangeReason`](crate::model::InstanceStateChangeReason)
pub fn build(self) -> crate::model::InstanceStateChangeReason {
crate::model::InstanceStateChangeReason {
code: self.code,
message: self.message,
}
}
}
}
impl InstanceStateChangeReason {
/// Creates a new builder-style object to manufacture [`InstanceStateChangeReason`](crate::model::InstanceStateChangeReason)
pub fn builder() -> crate::model::instance_state_change_reason::Builder {
crate::model::instance_state_change_reason::Builder::default()
}
}
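// A minimal, hand-written usage sketch (not part of the generated code): it
// builds an `InstanceStateChangeReason` from the enum code plus a free-form
// message and reads both back through the accessors. The message text is
// illustrative only.
#[cfg(test)]
mod instance_state_change_reason_usage {
    use super::*;

    #[test]
    fn build_and_read_back() {
        let reason = InstanceStateChangeReason::builder()
            .code(InstanceStateChangeReasonCode::InternalError)
            .message("Instance was replaced after an internal error.")
            .build();
        assert_eq!(
            reason.code(),
            Some(&InstanceStateChangeReasonCode::InternalError)
        );
        assert_eq!(
            reason.message(),
            Some("Instance was replaced after an internal error.")
        );
    }
}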
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceStateChangeReasonCode {
#[allow(missing_docs)] // documentation missing in model
BootstrapFailure,
#[allow(missing_docs)] // documentation missing in model
ClusterTerminated,
#[allow(missing_docs)] // documentation missing in model
InstanceFailure,
#[allow(missing_docs)] // documentation missing in model
InternalError,
#[allow(missing_docs)] // documentation missing in model
ValidationError,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceStateChangeReasonCode {
fn from(s: &str) -> Self {
match s {
"BOOTSTRAP_FAILURE" => InstanceStateChangeReasonCode::BootstrapFailure,
"CLUSTER_TERMINATED" => InstanceStateChangeReasonCode::ClusterTerminated,
"INSTANCE_FAILURE" => InstanceStateChangeReasonCode::InstanceFailure,
"INTERNAL_ERROR" => InstanceStateChangeReasonCode::InternalError,
"VALIDATION_ERROR" => InstanceStateChangeReasonCode::ValidationError,
other => InstanceStateChangeReasonCode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceStateChangeReasonCode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceStateChangeReasonCode::from(s))
}
}
impl InstanceStateChangeReasonCode {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
InstanceStateChangeReasonCode::BootstrapFailure => "BOOTSTRAP_FAILURE",
InstanceStateChangeReasonCode::ClusterTerminated => "CLUSTER_TERMINATED",
InstanceStateChangeReasonCode::InstanceFailure => "INSTANCE_FAILURE",
InstanceStateChangeReasonCode::InternalError => "INTERNAL_ERROR",
InstanceStateChangeReasonCode::ValidationError => "VALIDATION_ERROR",
InstanceStateChangeReasonCode::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"BOOTSTRAP_FAILURE",
"CLUSTER_TERMINATED",
"INSTANCE_FAILURE",
"INTERNAL_ERROR",
"VALIDATION_ERROR",
]
}
}
impl AsRef<str> for InstanceStateChangeReasonCode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
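// A minimal, hand-written sketch (not part of the generated code) of the
// string conversions: known wire values map to named variants, and anything
// the client has not seen is preserved in `Unknown` so newer service values
// do not break parsing. "SOME_FUTURE_CODE" is a made-up value.
#[cfg(test)]
mod instance_state_change_reason_code_usage {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn known_and_unknown_values() {
        let known = InstanceStateChangeReasonCode::from_str("BOOTSTRAP_FAILURE").unwrap();
        assert_eq!(known, InstanceStateChangeReasonCode::BootstrapFailure);
        assert_eq!(known.as_str(), "BOOTSTRAP_FAILURE");

        // An unrecognized value round-trips through `Unknown` unchanged.
        let unknown = InstanceStateChangeReasonCode::from("SOME_FUTURE_CODE");
        assert_eq!(
            unknown,
            InstanceStateChangeReasonCode::Unknown("SOME_FUTURE_CODE".to_owned())
        );
        assert_eq!(unknown.as_str(), "SOME_FUTURE_CODE");
    }
}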
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceState {
#[allow(missing_docs)] // documentation missing in model
AwaitingFulfillment,
#[allow(missing_docs)] // documentation missing in model
Bootstrapping,
#[allow(missing_docs)] // documentation missing in model
Provisioning,
#[allow(missing_docs)] // documentation missing in model
Running,
#[allow(missing_docs)] // documentation missing in model
Terminated,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceState {
fn from(s: &str) -> Self {
match s {
"AWAITING_FULFILLMENT" => InstanceState::AwaitingFulfillment,
"BOOTSTRAPPING" => InstanceState::Bootstrapping,
"PROVISIONING" => InstanceState::Provisioning,
"RUNNING" => InstanceState::Running,
"TERMINATED" => InstanceState::Terminated,
other => InstanceState::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceState {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceState::from(s))
}
}
impl InstanceState {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
InstanceState::AwaitingFulfillment => "AWAITING_FULFILLMENT",
InstanceState::Bootstrapping => "BOOTSTRAPPING",
InstanceState::Provisioning => "PROVISIONING",
InstanceState::Running => "RUNNING",
InstanceState::Terminated => "TERMINATED",
InstanceState::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"AWAITING_FULFILLMENT",
"BOOTSTRAPPING",
"PROVISIONING",
"RUNNING",
"TERMINATED",
]
}
}
impl AsRef<str> for InstanceState {
fn as_ref(&self) -> &str {
self.as_str()
}
}
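// A minimal, hand-written sketch (not part of the generated code): because
// the enum is `#[non_exhaustive]` and carries an `Unknown` variant, callers
// should keep a wildcard arm when matching so states added after code
// generation are still handled. "SOME_FUTURE_STATE" is a made-up value.
#[cfg(test)]
mod instance_state_usage {
    use super::*;

    fn is_terminal(state: &InstanceState) -> bool {
        match state {
            InstanceState::Terminated => true,
            // Covers the other named states, future variants, and `Unknown(_)`.
            _ => false,
        }
    }

    #[test]
    fn matching_with_a_wildcard_arm() {
        assert!(is_terminal(&InstanceState::from("TERMINATED")));
        assert!(!is_terminal(&InstanceState::from("RUNNING")));
        assert!(!is_terminal(&InstanceState::from("SOME_FUTURE_STATE")));
        // `values()` lists only the variants known at code-generation time.
        assert!(InstanceState::values().contains(&"AWAITING_FULFILLMENT"));
    }
}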
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceGroupType {
#[allow(missing_docs)] // documentation missing in model
Core,
#[allow(missing_docs)] // documentation missing in model
Master,
#[allow(missing_docs)] // documentation missing in model
Task,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceGroupType {
fn from(s: &str) -> Self {
match s {
"CORE" => InstanceGroupType::Core,
"MASTER" => InstanceGroupType::Master,
"TASK" => InstanceGroupType::Task,
other => InstanceGroupType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceGroupType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceGroupType::from(s))
}
}
impl InstanceGroupType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
InstanceGroupType::Core => "CORE",
InstanceGroupType::Master => "MASTER",
InstanceGroupType::Task => "TASK",
InstanceGroupType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["CORE", "MASTER", "TASK"]
}
}
impl AsRef<str> for InstanceGroupType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>This entity represents an instance group, which is a group of instances that have a common purpose. For example, the CORE instance group is used for HDFS.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceGroup {
/// <p>The identifier of the instance group.</p>
pub id: std::option::Option<std::string::String>,
/// <p>The name of the instance group.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The marketplace to provision instances for this group. Valid values are ON_DEMAND or SPOT.</p>
pub market: std::option::Option<crate::model::MarketType>,
/// <p>The type of the instance group. Valid values are MASTER, CORE or TASK.</p>
pub instance_group_type: std::option::Option<crate::model::InstanceGroupType>,
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub bid_price: std::option::Option<std::string::String>,
/// <p>The EC2 instance type for all instances in the instance group.</p>
pub instance_type: std::option::Option<std::string::String>,
/// <p>The target number of instances for the instance group.</p>
pub requested_instance_count: std::option::Option<i32>,
/// <p>The number of instances currently running in this instance group.</p>
pub running_instance_count: std::option::Option<i32>,
/// <p>The current status of the instance group.</p>
pub status: std::option::Option<crate::model::InstanceGroupStatus>,
/// <note>
/// <p>Amazon EMR releases 4.x or later.</p>
/// </note>
/// <p>The list of configurations supplied for an Amazon EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).</p>
pub configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
/// <p>The version number of the requested configuration specification for this instance group.</p>
pub configurations_version: i64,
/// <p>A list of configurations that were successfully applied for an instance group last time.</p>
pub last_successfully_applied_configurations:
std::option::Option<std::vec::Vec<crate::model::Configuration>>,
/// <p>The version number of a configuration specification that was successfully applied for an instance group last time. </p>
pub last_successfully_applied_configurations_version: i64,
/// <p>The EBS block devices that are mapped to this instance group.</p>
pub ebs_block_devices: std::option::Option<std::vec::Vec<crate::model::EbsBlockDevice>>,
    /// <p>Indicates whether the instance group is EBS-optimized. An Amazon EBS-optimized instance uses an optimized configuration stack and provides additional, dedicated capacity for Amazon EBS I/O.</p>
pub ebs_optimized: std::option::Option<bool>,
/// <p>Policy for customizing shrink operations.</p>
pub shrink_policy: std::option::Option<crate::model::ShrinkPolicy>,
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.</p>
pub auto_scaling_policy: std::option::Option<crate::model::AutoScalingPolicyDescription>,
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub custom_ami_id: std::option::Option<std::string::String>,
}
impl InstanceGroup {
/// <p>The identifier of the instance group.</p>
pub fn id(&self) -> std::option::Option<&str> {
self.id.as_deref()
}
/// <p>The name of the instance group.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The marketplace to provision instances for this group. Valid values are ON_DEMAND or SPOT.</p>
pub fn market(&self) -> std::option::Option<&crate::model::MarketType> {
self.market.as_ref()
}
/// <p>The type of the instance group. Valid values are MASTER, CORE or TASK.</p>
pub fn instance_group_type(&self) -> std::option::Option<&crate::model::InstanceGroupType> {
self.instance_group_type.as_ref()
}
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub fn bid_price(&self) -> std::option::Option<&str> {
self.bid_price.as_deref()
}
/// <p>The EC2 instance type for all instances in the instance group.</p>
pub fn instance_type(&self) -> std::option::Option<&str> {
self.instance_type.as_deref()
}
/// <p>The target number of instances for the instance group.</p>
pub fn requested_instance_count(&self) -> std::option::Option<i32> {
self.requested_instance_count
}
/// <p>The number of instances currently running in this instance group.</p>
pub fn running_instance_count(&self) -> std::option::Option<i32> {
self.running_instance_count
}
/// <p>The current status of the instance group.</p>
pub fn status(&self) -> std::option::Option<&crate::model::InstanceGroupStatus> {
self.status.as_ref()
}
/// <note>
/// <p>Amazon EMR releases 4.x or later.</p>
/// </note>
/// <p>The list of configurations supplied for an Amazon EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).</p>
pub fn configurations(&self) -> std::option::Option<&[crate::model::Configuration]> {
self.configurations.as_deref()
}
/// <p>The version number of the requested configuration specification for this instance group.</p>
pub fn configurations_version(&self) -> i64 {
self.configurations_version
}
/// <p>A list of configurations that were successfully applied for an instance group last time.</p>
pub fn last_successfully_applied_configurations(
&self,
) -> std::option::Option<&[crate::model::Configuration]> {
self.last_successfully_applied_configurations.as_deref()
}
/// <p>The version number of a configuration specification that was successfully applied for an instance group last time. </p>
pub fn last_successfully_applied_configurations_version(&self) -> i64 {
self.last_successfully_applied_configurations_version
}
/// <p>The EBS block devices that are mapped to this instance group.</p>
pub fn ebs_block_devices(&self) -> std::option::Option<&[crate::model::EbsBlockDevice]> {
self.ebs_block_devices.as_deref()
}
    /// <p>Indicates whether the instance group is EBS-optimized. An Amazon EBS-optimized instance uses an optimized configuration stack and provides additional, dedicated capacity for Amazon EBS I/O.</p>
pub fn ebs_optimized(&self) -> std::option::Option<bool> {
self.ebs_optimized
}
/// <p>Policy for customizing shrink operations.</p>
pub fn shrink_policy(&self) -> std::option::Option<&crate::model::ShrinkPolicy> {
self.shrink_policy.as_ref()
}
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.</p>
pub fn auto_scaling_policy(
&self,
) -> std::option::Option<&crate::model::AutoScalingPolicyDescription> {
self.auto_scaling_policy.as_ref()
}
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub fn custom_ami_id(&self) -> std::option::Option<&str> {
self.custom_ami_id.as_deref()
}
}
impl std::fmt::Debug for InstanceGroup {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceGroup");
formatter.field("id", &self.id);
formatter.field("name", &self.name);
formatter.field("market", &self.market);
formatter.field("instance_group_type", &self.instance_group_type);
formatter.field("bid_price", &self.bid_price);
formatter.field("instance_type", &self.instance_type);
formatter.field("requested_instance_count", &self.requested_instance_count);
formatter.field("running_instance_count", &self.running_instance_count);
formatter.field("status", &self.status);
formatter.field("configurations", &self.configurations);
formatter.field("configurations_version", &self.configurations_version);
formatter.field(
"last_successfully_applied_configurations",
&self.last_successfully_applied_configurations,
);
formatter.field(
"last_successfully_applied_configurations_version",
&self.last_successfully_applied_configurations_version,
);
formatter.field("ebs_block_devices", &self.ebs_block_devices);
formatter.field("ebs_optimized", &self.ebs_optimized);
formatter.field("shrink_policy", &self.shrink_policy);
formatter.field("auto_scaling_policy", &self.auto_scaling_policy);
formatter.field("custom_ami_id", &self.custom_ami_id);
formatter.finish()
}
}
/// See [`InstanceGroup`](crate::model::InstanceGroup)
pub mod instance_group {
/// A builder for [`InstanceGroup`](crate::model::InstanceGroup)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) market: std::option::Option<crate::model::MarketType>,
pub(crate) instance_group_type: std::option::Option<crate::model::InstanceGroupType>,
pub(crate) bid_price: std::option::Option<std::string::String>,
pub(crate) instance_type: std::option::Option<std::string::String>,
pub(crate) requested_instance_count: std::option::Option<i32>,
pub(crate) running_instance_count: std::option::Option<i32>,
pub(crate) status: std::option::Option<crate::model::InstanceGroupStatus>,
pub(crate) configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
pub(crate) configurations_version: std::option::Option<i64>,
pub(crate) last_successfully_applied_configurations:
std::option::Option<std::vec::Vec<crate::model::Configuration>>,
pub(crate) last_successfully_applied_configurations_version: std::option::Option<i64>,
pub(crate) ebs_block_devices:
std::option::Option<std::vec::Vec<crate::model::EbsBlockDevice>>,
pub(crate) ebs_optimized: std::option::Option<bool>,
pub(crate) shrink_policy: std::option::Option<crate::model::ShrinkPolicy>,
pub(crate) auto_scaling_policy:
std::option::Option<crate::model::AutoScalingPolicyDescription>,
pub(crate) custom_ami_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The identifier of the instance group.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.id = Some(input.into());
self
}
/// <p>The identifier of the instance group.</p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.id = input;
self
}
/// <p>The name of the instance group.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the instance group.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The marketplace to provision instances for this group. Valid values are ON_DEMAND or SPOT.</p>
pub fn market(mut self, input: crate::model::MarketType) -> Self {
self.market = Some(input);
self
}
/// <p>The marketplace to provision instances for this group. Valid values are ON_DEMAND or SPOT.</p>
pub fn set_market(mut self, input: std::option::Option<crate::model::MarketType>) -> Self {
self.market = input;
self
}
/// <p>The type of the instance group. Valid values are MASTER, CORE or TASK.</p>
pub fn instance_group_type(mut self, input: crate::model::InstanceGroupType) -> Self {
self.instance_group_type = Some(input);
self
}
/// <p>The type of the instance group. Valid values are MASTER, CORE or TASK.</p>
pub fn set_instance_group_type(
mut self,
input: std::option::Option<crate::model::InstanceGroupType>,
) -> Self {
self.instance_group_type = input;
self
}
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub fn bid_price(mut self, input: impl Into<std::string::String>) -> Self {
self.bid_price = Some(input.into());
self
}
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub fn set_bid_price(mut self, input: std::option::Option<std::string::String>) -> Self {
self.bid_price = input;
self
}
/// <p>The EC2 instance type for all instances in the instance group.</p>
pub fn instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_type = Some(input.into());
self
}
/// <p>The EC2 instance type for all instances in the instance group.</p>
pub fn set_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_type = input;
self
}
/// <p>The target number of instances for the instance group.</p>
pub fn requested_instance_count(mut self, input: i32) -> Self {
self.requested_instance_count = Some(input);
self
}
/// <p>The target number of instances for the instance group.</p>
pub fn set_requested_instance_count(mut self, input: std::option::Option<i32>) -> Self {
self.requested_instance_count = input;
self
}
/// <p>The number of instances currently running in this instance group.</p>
pub fn running_instance_count(mut self, input: i32) -> Self {
self.running_instance_count = Some(input);
self
}
/// <p>The number of instances currently running in this instance group.</p>
pub fn set_running_instance_count(mut self, input: std::option::Option<i32>) -> Self {
self.running_instance_count = input;
self
}
/// <p>The current status of the instance group.</p>
pub fn status(mut self, input: crate::model::InstanceGroupStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The current status of the instance group.</p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::InstanceGroupStatus>,
) -> Self {
self.status = input;
self
}
/// Appends an item to `configurations`.
///
/// To override the contents of this collection use [`set_configurations`](Self::set_configurations).
///
/// <note>
/// <p>Amazon EMR releases 4.x or later.</p>
/// </note>
/// <p>The list of configurations supplied for an Amazon EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).</p>
pub fn configurations(mut self, input: crate::model::Configuration) -> Self {
let mut v = self.configurations.unwrap_or_default();
v.push(input);
self.configurations = Some(v);
self
}
/// <note>
/// <p>Amazon EMR releases 4.x or later.</p>
/// </note>
/// <p>The list of configurations supplied for an Amazon EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).</p>
pub fn set_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
) -> Self {
self.configurations = input;
self
}
/// <p>The version number of the requested configuration specification for this instance group.</p>
pub fn configurations_version(mut self, input: i64) -> Self {
self.configurations_version = Some(input);
self
}
/// <p>The version number of the requested configuration specification for this instance group.</p>
pub fn set_configurations_version(mut self, input: std::option::Option<i64>) -> Self {
self.configurations_version = input;
self
}
/// Appends an item to `last_successfully_applied_configurations`.
///
/// To override the contents of this collection use [`set_last_successfully_applied_configurations`](Self::set_last_successfully_applied_configurations).
///
/// <p>A list of configurations that were successfully applied for an instance group last time.</p>
pub fn last_successfully_applied_configurations(
mut self,
input: crate::model::Configuration,
) -> Self {
let mut v = self
.last_successfully_applied_configurations
.unwrap_or_default();
v.push(input);
self.last_successfully_applied_configurations = Some(v);
self
}
/// <p>A list of configurations that were successfully applied for an instance group last time.</p>
pub fn set_last_successfully_applied_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
) -> Self {
self.last_successfully_applied_configurations = input;
self
}
/// <p>The version number of a configuration specification that was successfully applied for an instance group last time. </p>
pub fn last_successfully_applied_configurations_version(mut self, input: i64) -> Self {
self.last_successfully_applied_configurations_version = Some(input);
self
}
/// <p>The version number of a configuration specification that was successfully applied for an instance group last time. </p>
pub fn set_last_successfully_applied_configurations_version(
mut self,
input: std::option::Option<i64>,
) -> Self {
self.last_successfully_applied_configurations_version = input;
self
}
/// Appends an item to `ebs_block_devices`.
///
/// To override the contents of this collection use [`set_ebs_block_devices`](Self::set_ebs_block_devices).
///
/// <p>The EBS block devices that are mapped to this instance group.</p>
pub fn ebs_block_devices(mut self, input: crate::model::EbsBlockDevice) -> Self {
let mut v = self.ebs_block_devices.unwrap_or_default();
v.push(input);
self.ebs_block_devices = Some(v);
self
}
/// <p>The EBS block devices that are mapped to this instance group.</p>
pub fn set_ebs_block_devices(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EbsBlockDevice>>,
) -> Self {
self.ebs_block_devices = input;
self
}
        /// <p>Indicates whether the instance group is EBS-optimized. An Amazon EBS-optimized instance uses an optimized configuration stack and provides additional, dedicated capacity for Amazon EBS I/O.</p>
pub fn ebs_optimized(mut self, input: bool) -> Self {
self.ebs_optimized = Some(input);
self
}
        /// <p>Indicates whether the instance group is EBS-optimized. An Amazon EBS-optimized instance uses an optimized configuration stack and provides additional, dedicated capacity for Amazon EBS I/O.</p>
pub fn set_ebs_optimized(mut self, input: std::option::Option<bool>) -> Self {
self.ebs_optimized = input;
self
}
/// <p>Policy for customizing shrink operations.</p>
pub fn shrink_policy(mut self, input: crate::model::ShrinkPolicy) -> Self {
self.shrink_policy = Some(input);
self
}
/// <p>Policy for customizing shrink operations.</p>
pub fn set_shrink_policy(
mut self,
input: std::option::Option<crate::model::ShrinkPolicy>,
) -> Self {
self.shrink_policy = input;
self
}
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.</p>
pub fn auto_scaling_policy(
mut self,
input: crate::model::AutoScalingPolicyDescription,
) -> Self {
self.auto_scaling_policy = Some(input);
self
}
/// <p>An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.</p>
pub fn set_auto_scaling_policy(
mut self,
input: std::option::Option<crate::model::AutoScalingPolicyDescription>,
) -> Self {
self.auto_scaling_policy = input;
self
}
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub fn custom_ami_id(mut self, input: impl Into<std::string::String>) -> Self {
self.custom_ami_id = Some(input.into());
self
}
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub fn set_custom_ami_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.custom_ami_id = input;
self
}
        /// Consumes the builder and constructs an [`InstanceGroup`](crate::model::InstanceGroup)
pub fn build(self) -> crate::model::InstanceGroup {
crate::model::InstanceGroup {
id: self.id,
name: self.name,
market: self.market,
instance_group_type: self.instance_group_type,
bid_price: self.bid_price,
instance_type: self.instance_type,
requested_instance_count: self.requested_instance_count,
running_instance_count: self.running_instance_count,
status: self.status,
configurations: self.configurations,
configurations_version: self.configurations_version.unwrap_or_default(),
last_successfully_applied_configurations: self
.last_successfully_applied_configurations,
last_successfully_applied_configurations_version: self
.last_successfully_applied_configurations_version
.unwrap_or_default(),
ebs_block_devices: self.ebs_block_devices,
ebs_optimized: self.ebs_optimized,
shrink_policy: self.shrink_policy,
auto_scaling_policy: self.auto_scaling_policy,
custom_ami_id: self.custom_ami_id,
}
}
}
}
impl InstanceGroup {
/// Creates a new builder-style object to manufacture [`InstanceGroup`](crate::model::InstanceGroup)
pub fn builder() -> crate::model::instance_group::Builder {
crate::model::instance_group::Builder::default()
}
}
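// A minimal, hand-written usage sketch (not part of the generated code): it
// assembles an `InstanceGroup` the way a test fixture might, including one
// nested `EbsBlockDevice`. The identifier, instance type, and counts are
// illustrative values, not real resources.
#[cfg(test)]
mod instance_group_usage {
    use super::*;

    #[test]
    fn build_a_core_group() {
        let group = InstanceGroup::builder()
            .id("ig-EXAMPLE")
            .name("Core group")
            .instance_group_type(InstanceGroupType::Core)
            .instance_type("m5.xlarge")
            .requested_instance_count(2)
            .running_instance_count(2)
            // Each call to `ebs_block_devices` appends one entry; use
            // `set_ebs_block_devices` to replace the whole list instead.
            .ebs_block_devices(EbsBlockDevice::builder().device("/dev/sdh").build())
            .configurations_version(1)
            .build();
        assert_eq!(group.instance_group_type(), Some(&InstanceGroupType::Core));
        assert_eq!(group.requested_instance_count(), Some(2));
        assert_eq!(group.ebs_block_devices().map(|devices| devices.len()), Some(1));
        // Unset optional fields stay `None`; the plain i64 versions default to 0.
        assert!(group.status().is_none());
        assert_eq!(group.configurations_version(), 1);
        assert_eq!(group.last_successfully_applied_configurations_version(), 0);
    }
}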
/// <p>Configuration of requested EBS block device associated with the instance group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EbsBlockDevice {
/// <p>EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.</p>
pub volume_specification: std::option::Option<crate::model::VolumeSpecification>,
/// <p>The device name that is exposed to the instance, such as /dev/sdh.</p>
pub device: std::option::Option<std::string::String>,
}
impl EbsBlockDevice {
/// <p>EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.</p>
pub fn volume_specification(&self) -> std::option::Option<&crate::model::VolumeSpecification> {
self.volume_specification.as_ref()
}
/// <p>The device name that is exposed to the instance, such as /dev/sdh.</p>
pub fn device(&self) -> std::option::Option<&str> {
self.device.as_deref()
}
}
impl std::fmt::Debug for EbsBlockDevice {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("EbsBlockDevice");
formatter.field("volume_specification", &self.volume_specification);
formatter.field("device", &self.device);
formatter.finish()
}
}
/// See [`EbsBlockDevice`](crate::model::EbsBlockDevice)
pub mod ebs_block_device {
/// A builder for [`EbsBlockDevice`](crate::model::EbsBlockDevice)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) volume_specification: std::option::Option<crate::model::VolumeSpecification>,
pub(crate) device: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.</p>
pub fn volume_specification(mut self, input: crate::model::VolumeSpecification) -> Self {
self.volume_specification = Some(input);
self
}
/// <p>EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.</p>
pub fn set_volume_specification(
mut self,
input: std::option::Option<crate::model::VolumeSpecification>,
) -> Self {
self.volume_specification = input;
self
}
/// <p>The device name that is exposed to the instance, such as /dev/sdh.</p>
pub fn device(mut self, input: impl Into<std::string::String>) -> Self {
self.device = Some(input.into());
self
}
/// <p>The device name that is exposed to the instance, such as /dev/sdh.</p>
pub fn set_device(mut self, input: std::option::Option<std::string::String>) -> Self {
self.device = input;
self
}
        /// Consumes the builder and constructs an [`EbsBlockDevice`](crate::model::EbsBlockDevice)
pub fn build(self) -> crate::model::EbsBlockDevice {
crate::model::EbsBlockDevice {
volume_specification: self.volume_specification,
device: self.device,
}
}
}
}
impl EbsBlockDevice {
/// Creates a new builder-style object to manufacture [`EbsBlockDevice`](crate::model::EbsBlockDevice)
pub fn builder() -> crate::model::ebs_block_device::Builder {
crate::model::ebs_block_device::Builder::default()
}
}
/// <p>The details of the instance group status.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceGroupStatus {
/// <p>The current state of the instance group.</p>
pub state: std::option::Option<crate::model::InstanceGroupState>,
/// <p>The status change reason details for the instance group.</p>
pub state_change_reason: std::option::Option<crate::model::InstanceGroupStateChangeReason>,
/// <p>The timeline of the instance group status over time.</p>
pub timeline: std::option::Option<crate::model::InstanceGroupTimeline>,
}
impl InstanceGroupStatus {
/// <p>The current state of the instance group.</p>
pub fn state(&self) -> std::option::Option<&crate::model::InstanceGroupState> {
self.state.as_ref()
}
/// <p>The status change reason details for the instance group.</p>
pub fn state_change_reason(
&self,
) -> std::option::Option<&crate::model::InstanceGroupStateChangeReason> {
self.state_change_reason.as_ref()
}
/// <p>The timeline of the instance group status over time.</p>
pub fn timeline(&self) -> std::option::Option<&crate::model::InstanceGroupTimeline> {
self.timeline.as_ref()
}
}
impl std::fmt::Debug for InstanceGroupStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceGroupStatus");
formatter.field("state", &self.state);
formatter.field("state_change_reason", &self.state_change_reason);
formatter.field("timeline", &self.timeline);
formatter.finish()
}
}
/// See [`InstanceGroupStatus`](crate::model::InstanceGroupStatus)
pub mod instance_group_status {
/// A builder for [`InstanceGroupStatus`](crate::model::InstanceGroupStatus)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) state: std::option::Option<crate::model::InstanceGroupState>,
pub(crate) state_change_reason:
std::option::Option<crate::model::InstanceGroupStateChangeReason>,
pub(crate) timeline: std::option::Option<crate::model::InstanceGroupTimeline>,
}
impl Builder {
/// <p>The current state of the instance group.</p>
pub fn state(mut self, input: crate::model::InstanceGroupState) -> Self {
self.state = Some(input);
self
}
/// <p>The current state of the instance group.</p>
pub fn set_state(
mut self,
input: std::option::Option<crate::model::InstanceGroupState>,
) -> Self {
self.state = input;
self
}
/// <p>The status change reason details for the instance group.</p>
pub fn state_change_reason(
mut self,
input: crate::model::InstanceGroupStateChangeReason,
) -> Self {
self.state_change_reason = Some(input);
self
}
/// <p>The status change reason details for the instance group.</p>
pub fn set_state_change_reason(
mut self,
input: std::option::Option<crate::model::InstanceGroupStateChangeReason>,
) -> Self {
self.state_change_reason = input;
self
}
/// <p>The timeline of the instance group status over time.</p>
pub fn timeline(mut self, input: crate::model::InstanceGroupTimeline) -> Self {
self.timeline = Some(input);
self
}
/// <p>The timeline of the instance group status over time.</p>
pub fn set_timeline(
mut self,
input: std::option::Option<crate::model::InstanceGroupTimeline>,
) -> Self {
self.timeline = input;
self
}
        /// Consumes the builder and constructs an [`InstanceGroupStatus`](crate::model::InstanceGroupStatus)
pub fn build(self) -> crate::model::InstanceGroupStatus {
crate::model::InstanceGroupStatus {
state: self.state,
state_change_reason: self.state_change_reason,
timeline: self.timeline,
}
}
}
}
impl InstanceGroupStatus {
/// Creates a new builder-style object to manufacture [`InstanceGroupStatus`](crate::model::InstanceGroupStatus)
pub fn builder() -> crate::model::instance_group_status::Builder {
crate::model::instance_group_status::Builder::default()
}
}
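// A minimal, hand-written usage sketch (not part of the generated code): a
// status is usually assembled from its parts, so this nests an
// `InstanceGroupStateChangeReason` and an `InstanceGroupTimeline` inside the
// `InstanceGroupStatus` builder. The timestamp and message are example values.
#[cfg(test)]
mod instance_group_status_usage {
    use super::*;

    #[test]
    fn build_a_nested_status() {
        let status = InstanceGroupStatus::builder()
            .state(InstanceGroupState::Running)
            .state_change_reason(
                InstanceGroupStateChangeReason::builder()
                    .message("Instance group is running.")
                    .build(),
            )
            .timeline(
                InstanceGroupTimeline::builder()
                    .creation_date_time(aws_smithy_types::DateTime::from_secs(1_600_000_000))
                    .build(),
            )
            .build();
        assert_eq!(status.state(), Some(&InstanceGroupState::Running));
        assert_eq!(
            status.state_change_reason().and_then(|reason| reason.message()),
            Some("Instance group is running.")
        );
        assert!(status
            .timeline()
            .and_then(|timeline| timeline.ready_date_time())
            .is_none());
    }
}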
/// <p>The timeline of the instance group lifecycle.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceGroupTimeline {
/// <p>The creation date and time of the instance group.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date and time when the instance group became ready to perform tasks.</p>
pub ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date and time when the instance group terminated.</p>
pub end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl InstanceGroupTimeline {
/// <p>The creation date and time of the instance group.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
/// <p>The date and time when the instance group became ready to perform tasks.</p>
pub fn ready_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.ready_date_time.as_ref()
}
/// <p>The date and time when the instance group terminated.</p>
pub fn end_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_date_time.as_ref()
}
}
impl std::fmt::Debug for InstanceGroupTimeline {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceGroupTimeline");
formatter.field("creation_date_time", &self.creation_date_time);
formatter.field("ready_date_time", &self.ready_date_time);
formatter.field("end_date_time", &self.end_date_time);
formatter.finish()
}
}
/// See [`InstanceGroupTimeline`](crate::model::InstanceGroupTimeline)
pub mod instance_group_timeline {
/// A builder for [`InstanceGroupTimeline`](crate::model::InstanceGroupTimeline)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The creation date and time of the instance group.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The creation date and time of the instance group.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
/// <p>The date and time when the instance group became ready to perform tasks.</p>
pub fn ready_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.ready_date_time = Some(input);
self
}
/// <p>The date and time when the instance group became ready to perform tasks.</p>
pub fn set_ready_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.ready_date_time = input;
self
}
/// <p>The date and time when the instance group terminated.</p>
pub fn end_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_date_time = Some(input);
self
}
/// <p>The date and time when the instance group terminated.</p>
pub fn set_end_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_date_time = input;
self
}
        /// Consumes the builder and constructs an [`InstanceGroupTimeline`](crate::model::InstanceGroupTimeline)
pub fn build(self) -> crate::model::InstanceGroupTimeline {
crate::model::InstanceGroupTimeline {
creation_date_time: self.creation_date_time,
ready_date_time: self.ready_date_time,
end_date_time: self.end_date_time,
}
}
}
}
impl InstanceGroupTimeline {
/// Creates a new builder-style object to manufacture [`InstanceGroupTimeline`](crate::model::InstanceGroupTimeline)
pub fn builder() -> crate::model::instance_group_timeline::Builder {
crate::model::instance_group_timeline::Builder::default()
}
}
/// <p>The status change reason details for the instance group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceGroupStateChangeReason {
/// <p>The programmable code for the state change reason.</p>
pub code: std::option::Option<crate::model::InstanceGroupStateChangeReasonCode>,
/// <p>The status change reason description.</p>
pub message: std::option::Option<std::string::String>,
}
impl InstanceGroupStateChangeReason {
/// <p>The programmable code for the state change reason.</p>
pub fn code(&self) -> std::option::Option<&crate::model::InstanceGroupStateChangeReasonCode> {
self.code.as_ref()
}
/// <p>The status change reason description.</p>
pub fn message(&self) -> std::option::Option<&str> {
self.message.as_deref()
}
}
impl std::fmt::Debug for InstanceGroupStateChangeReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceGroupStateChangeReason");
formatter.field("code", &self.code);
formatter.field("message", &self.message);
formatter.finish()
}
}
/// See [`InstanceGroupStateChangeReason`](crate::model::InstanceGroupStateChangeReason)
pub mod instance_group_state_change_reason {
/// A builder for [`InstanceGroupStateChangeReason`](crate::model::InstanceGroupStateChangeReason)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) code: std::option::Option<crate::model::InstanceGroupStateChangeReasonCode>,
pub(crate) message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The programmable code for the state change reason.</p>
pub fn code(mut self, input: crate::model::InstanceGroupStateChangeReasonCode) -> Self {
self.code = Some(input);
self
}
/// <p>The programmable code for the state change reason.</p>
pub fn set_code(
mut self,
input: std::option::Option<crate::model::InstanceGroupStateChangeReasonCode>,
) -> Self {
self.code = input;
self
}
/// <p>The status change reason description.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
/// <p>The status change reason description.</p>
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
        /// Consumes the builder and constructs an [`InstanceGroupStateChangeReason`](crate::model::InstanceGroupStateChangeReason)
pub fn build(self) -> crate::model::InstanceGroupStateChangeReason {
crate::model::InstanceGroupStateChangeReason {
code: self.code,
message: self.message,
}
}
}
}
impl InstanceGroupStateChangeReason {
/// Creates a new builder-style object to manufacture [`InstanceGroupStateChangeReason`](crate::model::InstanceGroupStateChangeReason)
pub fn builder() -> crate::model::instance_group_state_change_reason::Builder {
crate::model::instance_group_state_change_reason::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceGroupStateChangeReasonCode {
#[allow(missing_docs)] // documentation missing in model
ClusterTerminated,
#[allow(missing_docs)] // documentation missing in model
InstanceFailure,
#[allow(missing_docs)] // documentation missing in model
InternalError,
#[allow(missing_docs)] // documentation missing in model
ValidationError,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceGroupStateChangeReasonCode {
fn from(s: &str) -> Self {
match s {
"CLUSTER_TERMINATED" => InstanceGroupStateChangeReasonCode::ClusterTerminated,
"INSTANCE_FAILURE" => InstanceGroupStateChangeReasonCode::InstanceFailure,
"INTERNAL_ERROR" => InstanceGroupStateChangeReasonCode::InternalError,
"VALIDATION_ERROR" => InstanceGroupStateChangeReasonCode::ValidationError,
other => InstanceGroupStateChangeReasonCode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceGroupStateChangeReasonCode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceGroupStateChangeReasonCode::from(s))
}
}
impl InstanceGroupStateChangeReasonCode {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
InstanceGroupStateChangeReasonCode::ClusterTerminated => "CLUSTER_TERMINATED",
InstanceGroupStateChangeReasonCode::InstanceFailure => "INSTANCE_FAILURE",
InstanceGroupStateChangeReasonCode::InternalError => "INTERNAL_ERROR",
InstanceGroupStateChangeReasonCode::ValidationError => "VALIDATION_ERROR",
InstanceGroupStateChangeReasonCode::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"CLUSTER_TERMINATED",
"INSTANCE_FAILURE",
"INTERNAL_ERROR",
"VALIDATION_ERROR",
]
}
}
impl AsRef<str> for InstanceGroupStateChangeReasonCode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceGroupState {
#[allow(missing_docs)] // documentation missing in model
Arrested,
#[allow(missing_docs)] // documentation missing in model
Bootstrapping,
#[allow(missing_docs)] // documentation missing in model
Ended,
#[allow(missing_docs)] // documentation missing in model
Provisioning,
#[allow(missing_docs)] // documentation missing in model
Reconfiguring,
#[allow(missing_docs)] // documentation missing in model
Resizing,
#[allow(missing_docs)] // documentation missing in model
Running,
#[allow(missing_docs)] // documentation missing in model
ShuttingDown,
#[allow(missing_docs)] // documentation missing in model
Suspended,
#[allow(missing_docs)] // documentation missing in model
Terminated,
#[allow(missing_docs)] // documentation missing in model
Terminating,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceGroupState {
fn from(s: &str) -> Self {
match s {
"ARRESTED" => InstanceGroupState::Arrested,
"BOOTSTRAPPING" => InstanceGroupState::Bootstrapping,
"ENDED" => InstanceGroupState::Ended,
"PROVISIONING" => InstanceGroupState::Provisioning,
"RECONFIGURING" => InstanceGroupState::Reconfiguring,
"RESIZING" => InstanceGroupState::Resizing,
"RUNNING" => InstanceGroupState::Running,
"SHUTTING_DOWN" => InstanceGroupState::ShuttingDown,
"SUSPENDED" => InstanceGroupState::Suspended,
"TERMINATED" => InstanceGroupState::Terminated,
"TERMINATING" => InstanceGroupState::Terminating,
other => InstanceGroupState::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceGroupState {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceGroupState::from(s))
}
}
impl InstanceGroupState {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
InstanceGroupState::Arrested => "ARRESTED",
InstanceGroupState::Bootstrapping => "BOOTSTRAPPING",
InstanceGroupState::Ended => "ENDED",
InstanceGroupState::Provisioning => "PROVISIONING",
InstanceGroupState::Reconfiguring => "RECONFIGURING",
InstanceGroupState::Resizing => "RESIZING",
InstanceGroupState::Running => "RUNNING",
InstanceGroupState::ShuttingDown => "SHUTTING_DOWN",
InstanceGroupState::Suspended => "SUSPENDED",
InstanceGroupState::Terminated => "TERMINATED",
InstanceGroupState::Terminating => "TERMINATING",
InstanceGroupState::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"ARRESTED",
"BOOTSTRAPPING",
"ENDED",
"PROVISIONING",
"RECONFIGURING",
"RESIZING",
"RUNNING",
"SHUTTING_DOWN",
"SUSPENDED",
"TERMINATED",
"TERMINATING",
]
}
}
impl AsRef<str> for InstanceGroupState {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Describes an instance fleet, which is a group of EC2 instances that host a particular node type (master, core, or task) in an Amazon EMR cluster. Instance fleets can consist of a mix of instance types and On-Demand and Spot Instances, which are provisioned to meet a defined target capacity. </p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceFleet {
/// <p>The unique identifier of the instance fleet.</p>
pub id: std::option::Option<std::string::String>,
/// <p>A friendly name for the instance fleet.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The current status of the instance fleet. </p>
pub status: std::option::Option<crate::model::InstanceFleetStatus>,
/// <p>The node type that the instance fleet hosts. Valid values are MASTER, CORE, or TASK. </p>
pub instance_fleet_type: std::option::Option<crate::model::InstanceFleetType>,
    /// <p>The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When an On-Demand Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use <code>InstanceFleet$ProvisionedOnDemandCapacity</code> to determine the On-Demand capacity units that have been provisioned for the instance fleet.</p> <note>
/// <p>If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using <code>TargetSpotCapacity</code>. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub target_on_demand_capacity: std::option::Option<i32>,
/// <p>The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When a Spot instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use <code>InstanceFleet$ProvisionedSpotCapacity</code> to determine the Spot capacity units that have been provisioned for the instance fleet.</p> <note>
/// <p>If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub target_spot_capacity: std::option::Option<i32>,
/// <p>The number of On-Demand units that have been provisioned for the instance fleet to fulfill <code>TargetOnDemandCapacity</code>. This provisioned capacity might be less than or greater than <code>TargetOnDemandCapacity</code>.</p>
pub provisioned_on_demand_capacity: std::option::Option<i32>,
/// <p>The number of Spot units that have been provisioned for this instance fleet to fulfill <code>TargetSpotCapacity</code>. This provisioned capacity might be less than or greater than <code>TargetSpotCapacity</code>.</p>
pub provisioned_spot_capacity: std::option::Option<i32>,
/// <p>An array of specifications for the instance types that comprise an instance fleet.</p>
pub instance_type_specifications:
std::option::Option<std::vec::Vec<crate::model::InstanceTypeSpecification>>,
/// <p>Describes the launch specification for an instance fleet. </p>
pub launch_specifications:
std::option::Option<crate::model::InstanceFleetProvisioningSpecifications>,
}
impl InstanceFleet {
/// <p>The unique identifier of the instance fleet.</p>
pub fn id(&self) -> std::option::Option<&str> {
self.id.as_deref()
}
/// <p>A friendly name for the instance fleet.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The current status of the instance fleet. </p>
pub fn status(&self) -> std::option::Option<&crate::model::InstanceFleetStatus> {
self.status.as_ref()
}
/// <p>The node type that the instance fleet hosts. Valid values are MASTER, CORE, or TASK. </p>
pub fn instance_fleet_type(&self) -> std::option::Option<&crate::model::InstanceFleetType> {
self.instance_fleet_type.as_ref()
}
    /// <p>The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When an On-Demand Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use <code>InstanceFleet$ProvisionedOnDemandCapacity</code> to determine the On-Demand capacity units that have been provisioned for the instance fleet.</p> <note>
/// <p>If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using <code>TargetSpotCapacity</code>. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn target_on_demand_capacity(&self) -> std::option::Option<i32> {
self.target_on_demand_capacity
}
/// <p>The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When a Spot instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use <code>InstanceFleet$ProvisionedSpotCapacity</code> to determine the Spot capacity units that have been provisioned for the instance fleet.</p> <note>
/// <p>If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn target_spot_capacity(&self) -> std::option::Option<i32> {
self.target_spot_capacity
}
/// <p>The number of On-Demand units that have been provisioned for the instance fleet to fulfill <code>TargetOnDemandCapacity</code>. This provisioned capacity might be less than or greater than <code>TargetOnDemandCapacity</code>.</p>
pub fn provisioned_on_demand_capacity(&self) -> std::option::Option<i32> {
self.provisioned_on_demand_capacity
}
/// <p>The number of Spot units that have been provisioned for this instance fleet to fulfill <code>TargetSpotCapacity</code>. This provisioned capacity might be less than or greater than <code>TargetSpotCapacity</code>.</p>
pub fn provisioned_spot_capacity(&self) -> std::option::Option<i32> {
self.provisioned_spot_capacity
}
/// <p>An array of specifications for the instance types that comprise an instance fleet.</p>
pub fn instance_type_specifications(
&self,
) -> std::option::Option<&[crate::model::InstanceTypeSpecification]> {
self.instance_type_specifications.as_deref()
}
/// <p>Describes the launch specification for an instance fleet. </p>
pub fn launch_specifications(
&self,
) -> std::option::Option<&crate::model::InstanceFleetProvisioningSpecifications> {
self.launch_specifications.as_ref()
}
}
impl std::fmt::Debug for InstanceFleet {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceFleet");
formatter.field("id", &self.id);
formatter.field("name", &self.name);
formatter.field("status", &self.status);
formatter.field("instance_fleet_type", &self.instance_fleet_type);
formatter.field("target_on_demand_capacity", &self.target_on_demand_capacity);
formatter.field("target_spot_capacity", &self.target_spot_capacity);
formatter.field(
"provisioned_on_demand_capacity",
&self.provisioned_on_demand_capacity,
);
formatter.field("provisioned_spot_capacity", &self.provisioned_spot_capacity);
formatter.field(
"instance_type_specifications",
&self.instance_type_specifications,
);
formatter.field("launch_specifications", &self.launch_specifications);
formatter.finish()
}
}
/// See [`InstanceFleet`](crate::model::InstanceFleet)
pub mod instance_fleet {
/// A builder for [`InstanceFleet`](crate::model::InstanceFleet)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::InstanceFleetStatus>,
pub(crate) instance_fleet_type: std::option::Option<crate::model::InstanceFleetType>,
pub(crate) target_on_demand_capacity: std::option::Option<i32>,
pub(crate) target_spot_capacity: std::option::Option<i32>,
pub(crate) provisioned_on_demand_capacity: std::option::Option<i32>,
pub(crate) provisioned_spot_capacity: std::option::Option<i32>,
pub(crate) instance_type_specifications:
std::option::Option<std::vec::Vec<crate::model::InstanceTypeSpecification>>,
pub(crate) launch_specifications:
std::option::Option<crate::model::InstanceFleetProvisioningSpecifications>,
}
impl Builder {
/// <p>The unique identifier of the instance fleet.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.id = Some(input.into());
self
}
/// <p>The unique identifier of the instance fleet.</p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.id = input;
self
}
/// <p>A friendly name for the instance fleet.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>A friendly name for the instance fleet.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The current status of the instance fleet. </p>
pub fn status(mut self, input: crate::model::InstanceFleetStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The current status of the instance fleet. </p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::InstanceFleetStatus>,
) -> Self {
self.status = input;
self
}
/// <p>The node type that the instance fleet hosts. Valid values are MASTER, CORE, or TASK. </p>
pub fn instance_fleet_type(mut self, input: crate::model::InstanceFleetType) -> Self {
self.instance_fleet_type = Some(input);
self
}
/// <p>The node type that the instance fleet hosts. Valid values are MASTER, CORE, or TASK. </p>
pub fn set_instance_fleet_type(
mut self,
input: std::option::Option<crate::model::InstanceFleetType>,
) -> Self {
self.instance_fleet_type = input;
self
}
        /// <p>The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When an On-Demand Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use <code>InstanceFleet$ProvisionedOnDemandCapacity</code> to determine the On-Demand capacity units that have been provisioned for the instance fleet.</p> <note>
/// <p>If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using <code>TargetSpotCapacity</code>. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn target_on_demand_capacity(mut self, input: i32) -> Self {
self.target_on_demand_capacity = Some(input);
self
}
        /// <p>The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When an On-Demand Instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use <code>InstanceFleet$ProvisionedOnDemandCapacity</code> to determine the On-Demand capacity units that have been provisioned for the instance fleet.</p> <note>
/// <p>If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using <code>TargetSpotCapacity</code>. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn set_target_on_demand_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.target_on_demand_capacity = input;
self
}
/// <p>The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When a Spot instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use <code>InstanceFleet$ProvisionedSpotCapacity</code> to determine the Spot capacity units that have been provisioned for the instance fleet.</p> <note>
/// <p>If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn target_spot_capacity(mut self, input: i32) -> Self {
self.target_spot_capacity = Some(input);
self
}
/// <p>The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by <code>InstanceTypeConfig</code>. Each instance configuration has a specified <code>WeightedCapacity</code>. When a Spot instance is provisioned, the <code>WeightedCapacity</code> units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a <code>WeightedCapacity</code> of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use <code>InstanceFleet$ProvisionedSpotCapacity</code> to determine the Spot capacity units that have been provisioned for the instance fleet.</p> <note>
/// <p>If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> should be greater than 0. For a master instance fleet, only one of <code>TargetSpotCapacity</code> and <code>TargetOnDemandCapacity</code> can be specified, and its value must be 1.</p>
/// </note>
pub fn set_target_spot_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.target_spot_capacity = input;
self
}
/// <p>The number of On-Demand units that have been provisioned for the instance fleet to fulfill <code>TargetOnDemandCapacity</code>. This provisioned capacity might be less than or greater than <code>TargetOnDemandCapacity</code>.</p>
pub fn provisioned_on_demand_capacity(mut self, input: i32) -> Self {
self.provisioned_on_demand_capacity = Some(input);
self
}
/// <p>The number of On-Demand units that have been provisioned for the instance fleet to fulfill <code>TargetOnDemandCapacity</code>. This provisioned capacity might be less than or greater than <code>TargetOnDemandCapacity</code>.</p>
pub fn set_provisioned_on_demand_capacity(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.provisioned_on_demand_capacity = input;
self
}
/// <p>The number of Spot units that have been provisioned for this instance fleet to fulfill <code>TargetSpotCapacity</code>. This provisioned capacity might be less than or greater than <code>TargetSpotCapacity</code>.</p>
pub fn provisioned_spot_capacity(mut self, input: i32) -> Self {
self.provisioned_spot_capacity = Some(input);
self
}
/// <p>The number of Spot units that have been provisioned for this instance fleet to fulfill <code>TargetSpotCapacity</code>. This provisioned capacity might be less than or greater than <code>TargetSpotCapacity</code>.</p>
pub fn set_provisioned_spot_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.provisioned_spot_capacity = input;
self
}
/// Appends an item to `instance_type_specifications`.
///
/// To override the contents of this collection use [`set_instance_type_specifications`](Self::set_instance_type_specifications).
///
/// <p>An array of specifications for the instance types that comprise an instance fleet.</p>
pub fn instance_type_specifications(
mut self,
input: crate::model::InstanceTypeSpecification,
) -> Self {
let mut v = self.instance_type_specifications.unwrap_or_default();
v.push(input);
self.instance_type_specifications = Some(v);
self
}
/// <p>An array of specifications for the instance types that comprise an instance fleet.</p>
pub fn set_instance_type_specifications(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::InstanceTypeSpecification>>,
) -> Self {
self.instance_type_specifications = input;
self
}
/// <p>Describes the launch specification for an instance fleet. </p>
pub fn launch_specifications(
mut self,
input: crate::model::InstanceFleetProvisioningSpecifications,
) -> Self {
self.launch_specifications = Some(input);
self
}
/// <p>Describes the launch specification for an instance fleet. </p>
pub fn set_launch_specifications(
mut self,
input: std::option::Option<crate::model::InstanceFleetProvisioningSpecifications>,
) -> Self {
self.launch_specifications = input;
self
}
        /// Consumes the builder and constructs an [`InstanceFleet`](crate::model::InstanceFleet)
pub fn build(self) -> crate::model::InstanceFleet {
crate::model::InstanceFleet {
id: self.id,
name: self.name,
status: self.status,
instance_fleet_type: self.instance_fleet_type,
target_on_demand_capacity: self.target_on_demand_capacity,
target_spot_capacity: self.target_spot_capacity,
provisioned_on_demand_capacity: self.provisioned_on_demand_capacity,
provisioned_spot_capacity: self.provisioned_spot_capacity,
instance_type_specifications: self.instance_type_specifications,
launch_specifications: self.launch_specifications,
}
}
}
}
impl InstanceFleet {
/// Creates a new builder-style object to manufacture [`InstanceFleet`](crate::model::InstanceFleet)
pub fn builder() -> crate::model::instance_fleet::Builder {
crate::model::instance_fleet::Builder::default()
}
}
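// A minimal, hedged usage sketch of the builder above. The fleet id, name, and
// capacity numbers are placeholders, and the assertions only exercise the
// accessors defined in this module. The overage arithmetic described in the doc
// comment (2 units remaining plus a `WeightedCapacity` of 5 yields 3 units over
// target) happens on the service side and is not modeled here.
#[cfg(test)]
mod instance_fleet_usage_sketch {
    #[test]
    fn builds_an_instance_fleet() {
        let fleet = crate::model::InstanceFleet::builder()
            .id("if-EXAMPLE") // hypothetical fleet identifier
            .name("task-fleet") // hypothetical friendly name
            .target_on_demand_capacity(2)
            .target_spot_capacity(5)
            .build();
        assert_eq!(fleet.id(), Some("if-EXAMPLE"));
        assert_eq!(fleet.target_on_demand_capacity(), Some(2));
        assert_eq!(fleet.target_spot_capacity(), Some(5));
    }
}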
/// <p>The configuration specification for each instance type in an instance fleet.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceTypeSpecification {
/// <p>The EC2 instance type, for example <code>m3.xlarge</code>.</p>
pub instance_type: std::option::Option<std::string::String>,
/// <p>The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in <code>InstanceFleetConfig</code>. Capacity values represent performance characteristics such as vCPUs, memory, or I/O. If not specified, the default value is 1.</p>
pub weighted_capacity: std::option::Option<i32>,
/// <p>The bid price for each EC2 Spot Instance type as defined by <code>InstanceType</code>. Expressed in USD.</p>
pub bid_price: std::option::Option<std::string::String>,
/// <p>The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by <code>InstanceType</code>. Expressed as a number (for example, 20 specifies 20%).</p>
pub bid_price_as_percentage_of_on_demand_price: std::option::Option<f64>,
/// <p>A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR.</p>
pub configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
/// <p>The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by <code>InstanceType</code>.</p>
pub ebs_block_devices: std::option::Option<std::vec::Vec<crate::model::EbsBlockDevice>>,
/// <p>Evaluates to <code>TRUE</code> when the specified <code>InstanceType</code> is EBS-optimized.</p>
pub ebs_optimized: std::option::Option<bool>,
/// <p>The custom AMI ID to use for the instance type.</p>
pub custom_ami_id: std::option::Option<std::string::String>,
}
impl InstanceTypeSpecification {
/// <p>The EC2 instance type, for example <code>m3.xlarge</code>.</p>
pub fn instance_type(&self) -> std::option::Option<&str> {
self.instance_type.as_deref()
}
/// <p>The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in <code>InstanceFleetConfig</code>. Capacity values represent performance characteristics such as vCPUs, memory, or I/O. If not specified, the default value is 1.</p>
pub fn weighted_capacity(&self) -> std::option::Option<i32> {
self.weighted_capacity
}
/// <p>The bid price for each EC2 Spot Instance type as defined by <code>InstanceType</code>. Expressed in USD.</p>
pub fn bid_price(&self) -> std::option::Option<&str> {
self.bid_price.as_deref()
}
/// <p>The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by <code>InstanceType</code>. Expressed as a number (for example, 20 specifies 20%).</p>
pub fn bid_price_as_percentage_of_on_demand_price(&self) -> std::option::Option<f64> {
self.bid_price_as_percentage_of_on_demand_price
}
/// <p>A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR.</p>
pub fn configurations(&self) -> std::option::Option<&[crate::model::Configuration]> {
self.configurations.as_deref()
}
/// <p>The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by <code>InstanceType</code>.</p>
pub fn ebs_block_devices(&self) -> std::option::Option<&[crate::model::EbsBlockDevice]> {
self.ebs_block_devices.as_deref()
}
/// <p>Evaluates to <code>TRUE</code> when the specified <code>InstanceType</code> is EBS-optimized.</p>
pub fn ebs_optimized(&self) -> std::option::Option<bool> {
self.ebs_optimized
}
/// <p>The custom AMI ID to use for the instance type.</p>
pub fn custom_ami_id(&self) -> std::option::Option<&str> {
self.custom_ami_id.as_deref()
}
}
impl std::fmt::Debug for InstanceTypeSpecification {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceTypeSpecification");
formatter.field("instance_type", &self.instance_type);
formatter.field("weighted_capacity", &self.weighted_capacity);
formatter.field("bid_price", &self.bid_price);
formatter.field(
"bid_price_as_percentage_of_on_demand_price",
&self.bid_price_as_percentage_of_on_demand_price,
);
formatter.field("configurations", &self.configurations);
formatter.field("ebs_block_devices", &self.ebs_block_devices);
formatter.field("ebs_optimized", &self.ebs_optimized);
formatter.field("custom_ami_id", &self.custom_ami_id);
formatter.finish()
}
}
/// See [`InstanceTypeSpecification`](crate::model::InstanceTypeSpecification)
pub mod instance_type_specification {
/// A builder for [`InstanceTypeSpecification`](crate::model::InstanceTypeSpecification)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instance_type: std::option::Option<std::string::String>,
pub(crate) weighted_capacity: std::option::Option<i32>,
pub(crate) bid_price: std::option::Option<std::string::String>,
pub(crate) bid_price_as_percentage_of_on_demand_price: std::option::Option<f64>,
pub(crate) configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
pub(crate) ebs_block_devices:
std::option::Option<std::vec::Vec<crate::model::EbsBlockDevice>>,
pub(crate) ebs_optimized: std::option::Option<bool>,
pub(crate) custom_ami_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The EC2 instance type, for example <code>m3.xlarge</code>.</p>
pub fn instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_type = Some(input.into());
self
}
/// <p>The EC2 instance type, for example <code>m3.xlarge</code>.</p>
pub fn set_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_type = input;
self
}
/// <p>The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in <code>InstanceFleetConfig</code>. Capacity values represent performance characteristics such as vCPUs, memory, or I/O. If not specified, the default value is 1.</p>
pub fn weighted_capacity(mut self, input: i32) -> Self {
self.weighted_capacity = Some(input);
self
}
/// <p>The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in <code>InstanceFleetConfig</code>. Capacity values represent performance characteristics such as vCPUs, memory, or I/O. If not specified, the default value is 1.</p>
pub fn set_weighted_capacity(mut self, input: std::option::Option<i32>) -> Self {
self.weighted_capacity = input;
self
}
/// <p>The bid price for each EC2 Spot Instance type as defined by <code>InstanceType</code>. Expressed in USD.</p>
pub fn bid_price(mut self, input: impl Into<std::string::String>) -> Self {
self.bid_price = Some(input.into());
self
}
/// <p>The bid price for each EC2 Spot Instance type as defined by <code>InstanceType</code>. Expressed in USD.</p>
pub fn set_bid_price(mut self, input: std::option::Option<std::string::String>) -> Self {
self.bid_price = input;
self
}
/// <p>The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by <code>InstanceType</code>. Expressed as a number (for example, 20 specifies 20%).</p>
pub fn bid_price_as_percentage_of_on_demand_price(mut self, input: f64) -> Self {
self.bid_price_as_percentage_of_on_demand_price = Some(input);
self
}
/// <p>The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by <code>InstanceType</code>. Expressed as a number (for example, 20 specifies 20%).</p>
pub fn set_bid_price_as_percentage_of_on_demand_price(
mut self,
input: std::option::Option<f64>,
) -> Self {
self.bid_price_as_percentage_of_on_demand_price = input;
self
}
/// Appends an item to `configurations`.
///
/// To override the contents of this collection use [`set_configurations`](Self::set_configurations).
///
/// <p>A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR.</p>
pub fn configurations(mut self, input: crate::model::Configuration) -> Self {
let mut v = self.configurations.unwrap_or_default();
v.push(input);
self.configurations = Some(v);
self
}
/// <p>A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR.</p>
pub fn set_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
) -> Self {
self.configurations = input;
self
}
/// Appends an item to `ebs_block_devices`.
///
/// To override the contents of this collection use [`set_ebs_block_devices`](Self::set_ebs_block_devices).
///
/// <p>The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by <code>InstanceType</code>.</p>
pub fn ebs_block_devices(mut self, input: crate::model::EbsBlockDevice) -> Self {
let mut v = self.ebs_block_devices.unwrap_or_default();
v.push(input);
self.ebs_block_devices = Some(v);
self
}
/// <p>The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by <code>InstanceType</code>.</p>
pub fn set_ebs_block_devices(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EbsBlockDevice>>,
) -> Self {
self.ebs_block_devices = input;
self
}
/// <p>Evaluates to <code>TRUE</code> when the specified <code>InstanceType</code> is EBS-optimized.</p>
pub fn ebs_optimized(mut self, input: bool) -> Self {
self.ebs_optimized = Some(input);
self
}
/// <p>Evaluates to <code>TRUE</code> when the specified <code>InstanceType</code> is EBS-optimized.</p>
pub fn set_ebs_optimized(mut self, input: std::option::Option<bool>) -> Self {
self.ebs_optimized = input;
self
}
/// <p>The custom AMI ID to use for the instance type.</p>
pub fn custom_ami_id(mut self, input: impl Into<std::string::String>) -> Self {
self.custom_ami_id = Some(input.into());
self
}
/// <p>The custom AMI ID to use for the instance type.</p>
pub fn set_custom_ami_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.custom_ami_id = input;
self
}
        /// Consumes the builder and constructs an [`InstanceTypeSpecification`](crate::model::InstanceTypeSpecification)
pub fn build(self) -> crate::model::InstanceTypeSpecification {
crate::model::InstanceTypeSpecification {
instance_type: self.instance_type,
weighted_capacity: self.weighted_capacity,
bid_price: self.bid_price,
bid_price_as_percentage_of_on_demand_price: self
.bid_price_as_percentage_of_on_demand_price,
configurations: self.configurations,
ebs_block_devices: self.ebs_block_devices,
ebs_optimized: self.ebs_optimized,
custom_ami_id: self.custom_ami_id,
}
}
}
}
impl InstanceTypeSpecification {
/// Creates a new builder-style object to manufacture [`InstanceTypeSpecification`](crate::model::InstanceTypeSpecification)
pub fn builder() -> crate::model::instance_type_specification::Builder {
crate::model::instance_type_specification::Builder::default()
}
}
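// A minimal, hedged sketch of building an `InstanceTypeSpecification`. The
// instance type, weighting, and bid percentage are placeholder values chosen
// only to show which builder setters and accessors exist in this module.
#[cfg(test)]
mod instance_type_specification_usage_sketch {
    #[test]
    fn builds_an_instance_type_specification() {
        let spec = crate::model::InstanceTypeSpecification::builder()
            .instance_type("m5.xlarge") // placeholder EC2 instance type
            .weighted_capacity(4) // this type counts as 4 units toward the target
            .bid_price_as_percentage_of_on_demand_price(20.0) // 20% of the On-Demand price
            .ebs_optimized(true)
            .build();
        assert_eq!(spec.instance_type(), Some("m5.xlarge"));
        assert_eq!(spec.weighted_capacity(), Some(4));
        assert_eq!(spec.ebs_optimized(), Some(true));
    }
}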
/// <p>The status of the instance fleet.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceFleetStatus {
/// <p>A code representing the instance fleet status.</p>
/// <ul>
/// <li> <p> <code>PROVISIONING</code>—The instance fleet is provisioning EC2 resources and is not yet ready to run jobs.</p> </li>
/// <li> <p> <code>BOOTSTRAPPING</code>—EC2 instances and other resources have been provisioned and the bootstrap actions specified for the instances are underway.</p> </li>
/// <li> <p> <code>RUNNING</code>—EC2 instances and other resources are running. They are either executing jobs or waiting to execute jobs.</p> </li>
/// <li> <p> <code>RESIZING</code>—A resize operation is underway. EC2 instances are either being added or removed.</p> </li>
/// <li> <p> <code>SUSPENDED</code>—A resize operation could not complete. Existing EC2 instances are running, but instances can't be added or removed.</p> </li>
/// <li> <p> <code>TERMINATING</code>—The instance fleet is terminating EC2 instances.</p> </li>
/// <li> <p> <code>TERMINATED</code>—The instance fleet is no longer active, and all EC2 instances have been terminated.</p> </li>
/// </ul>
pub state: std::option::Option<crate::model::InstanceFleetState>,
/// <p>Provides status change reason details for the instance fleet.</p>
pub state_change_reason: std::option::Option<crate::model::InstanceFleetStateChangeReason>,
/// <p>Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.</p>
pub timeline: std::option::Option<crate::model::InstanceFleetTimeline>,
}
impl InstanceFleetStatus {
/// <p>A code representing the instance fleet status.</p>
/// <ul>
/// <li> <p> <code>PROVISIONING</code>—The instance fleet is provisioning EC2 resources and is not yet ready to run jobs.</p> </li>
/// <li> <p> <code>BOOTSTRAPPING</code>—EC2 instances and other resources have been provisioned and the bootstrap actions specified for the instances are underway.</p> </li>
/// <li> <p> <code>RUNNING</code>—EC2 instances and other resources are running. They are either executing jobs or waiting to execute jobs.</p> </li>
/// <li> <p> <code>RESIZING</code>—A resize operation is underway. EC2 instances are either being added or removed.</p> </li>
/// <li> <p> <code>SUSPENDED</code>—A resize operation could not complete. Existing EC2 instances are running, but instances can't be added or removed.</p> </li>
/// <li> <p> <code>TERMINATING</code>—The instance fleet is terminating EC2 instances.</p> </li>
/// <li> <p> <code>TERMINATED</code>—The instance fleet is no longer active, and all EC2 instances have been terminated.</p> </li>
/// </ul>
pub fn state(&self) -> std::option::Option<&crate::model::InstanceFleetState> {
self.state.as_ref()
}
/// <p>Provides status change reason details for the instance fleet.</p>
pub fn state_change_reason(
&self,
) -> std::option::Option<&crate::model::InstanceFleetStateChangeReason> {
self.state_change_reason.as_ref()
}
/// <p>Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.</p>
pub fn timeline(&self) -> std::option::Option<&crate::model::InstanceFleetTimeline> {
self.timeline.as_ref()
}
}
impl std::fmt::Debug for InstanceFleetStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceFleetStatus");
formatter.field("state", &self.state);
formatter.field("state_change_reason", &self.state_change_reason);
formatter.field("timeline", &self.timeline);
formatter.finish()
}
}
/// See [`InstanceFleetStatus`](crate::model::InstanceFleetStatus)
pub mod instance_fleet_status {
/// A builder for [`InstanceFleetStatus`](crate::model::InstanceFleetStatus)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) state: std::option::Option<crate::model::InstanceFleetState>,
pub(crate) state_change_reason:
std::option::Option<crate::model::InstanceFleetStateChangeReason>,
pub(crate) timeline: std::option::Option<crate::model::InstanceFleetTimeline>,
}
impl Builder {
/// <p>A code representing the instance fleet status.</p>
/// <ul>
/// <li> <p> <code>PROVISIONING</code>—The instance fleet is provisioning EC2 resources and is not yet ready to run jobs.</p> </li>
/// <li> <p> <code>BOOTSTRAPPING</code>—EC2 instances and other resources have been provisioned and the bootstrap actions specified for the instances are underway.</p> </li>
/// <li> <p> <code>RUNNING</code>—EC2 instances and other resources are running. They are either executing jobs or waiting to execute jobs.</p> </li>
/// <li> <p> <code>RESIZING</code>—A resize operation is underway. EC2 instances are either being added or removed.</p> </li>
/// <li> <p> <code>SUSPENDED</code>—A resize operation could not complete. Existing EC2 instances are running, but instances can't be added or removed.</p> </li>
/// <li> <p> <code>TERMINATING</code>—The instance fleet is terminating EC2 instances.</p> </li>
/// <li> <p> <code>TERMINATED</code>—The instance fleet is no longer active, and all EC2 instances have been terminated.</p> </li>
/// </ul>
pub fn state(mut self, input: crate::model::InstanceFleetState) -> Self {
self.state = Some(input);
self
}
/// <p>A code representing the instance fleet status.</p>
/// <ul>
/// <li> <p> <code>PROVISIONING</code>—The instance fleet is provisioning EC2 resources and is not yet ready to run jobs.</p> </li>
/// <li> <p> <code>BOOTSTRAPPING</code>—EC2 instances and other resources have been provisioned and the bootstrap actions specified for the instances are underway.</p> </li>
/// <li> <p> <code>RUNNING</code>—EC2 instances and other resources are running. They are either executing jobs or waiting to execute jobs.</p> </li>
/// <li> <p> <code>RESIZING</code>—A resize operation is underway. EC2 instances are either being added or removed.</p> </li>
/// <li> <p> <code>SUSPENDED</code>—A resize operation could not complete. Existing EC2 instances are running, but instances can't be added or removed.</p> </li>
/// <li> <p> <code>TERMINATING</code>—The instance fleet is terminating EC2 instances.</p> </li>
/// <li> <p> <code>TERMINATED</code>—The instance fleet is no longer active, and all EC2 instances have been terminated.</p> </li>
/// </ul>
pub fn set_state(
mut self,
input: std::option::Option<crate::model::InstanceFleetState>,
) -> Self {
self.state = input;
self
}
/// <p>Provides status change reason details for the instance fleet.</p>
pub fn state_change_reason(
mut self,
input: crate::model::InstanceFleetStateChangeReason,
) -> Self {
self.state_change_reason = Some(input);
self
}
/// <p>Provides status change reason details for the instance fleet.</p>
pub fn set_state_change_reason(
mut self,
input: std::option::Option<crate::model::InstanceFleetStateChangeReason>,
) -> Self {
self.state_change_reason = input;
self
}
/// <p>Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.</p>
pub fn timeline(mut self, input: crate::model::InstanceFleetTimeline) -> Self {
self.timeline = Some(input);
self
}
/// <p>Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.</p>
pub fn set_timeline(
mut self,
input: std::option::Option<crate::model::InstanceFleetTimeline>,
) -> Self {
self.timeline = input;
self
}
        /// Consumes the builder and constructs an [`InstanceFleetStatus`](crate::model::InstanceFleetStatus)
pub fn build(self) -> crate::model::InstanceFleetStatus {
crate::model::InstanceFleetStatus {
state: self.state,
state_change_reason: self.state_change_reason,
timeline: self.timeline,
}
}
}
}
impl InstanceFleetStatus {
/// Creates a new builder-style object to manufacture [`InstanceFleetStatus`](crate::model::InstanceFleetStatus)
pub fn builder() -> crate::model::instance_fleet_status::Builder {
crate::model::instance_fleet_status::Builder::default()
}
}
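// A hedged sketch of composing an `InstanceFleetStatus` from the nested
// builders defined later in this module (`InstanceFleetStateChangeReason` and
// the `InstanceFleetState` enum). The message text is a placeholder.
#[cfg(test)]
mod instance_fleet_status_usage_sketch {
    #[test]
    fn builds_an_instance_fleet_status() {
        let reason = crate::model::InstanceFleetStateChangeReason::builder()
            .code(crate::model::InstanceFleetStateChangeReasonCode::ValidationError)
            .message("placeholder explanatory message")
            .build();
        let status = crate::model::InstanceFleetStatus::builder()
            .state(crate::model::InstanceFleetState::Suspended)
            .state_change_reason(reason)
            .build();
        assert_eq!(
            status.state(),
            Some(&crate::model::InstanceFleetState::Suspended)
        );
        assert_eq!(
            status.state_change_reason().and_then(|r| r.message()),
            Some("placeholder explanatory message")
        );
    }
}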
/// <p>Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceFleetTimeline {
/// <p>The time and date the instance fleet was created.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The time and date the instance fleet was ready to run jobs.</p>
pub ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The time and date the instance fleet terminated.</p>
pub end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl InstanceFleetTimeline {
/// <p>The time and date the instance fleet was created.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
/// <p>The time and date the instance fleet was ready to run jobs.</p>
pub fn ready_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.ready_date_time.as_ref()
}
/// <p>The time and date the instance fleet terminated.</p>
pub fn end_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_date_time.as_ref()
}
}
impl std::fmt::Debug for InstanceFleetTimeline {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceFleetTimeline");
formatter.field("creation_date_time", &self.creation_date_time);
formatter.field("ready_date_time", &self.ready_date_time);
formatter.field("end_date_time", &self.end_date_time);
formatter.finish()
}
}
/// See [`InstanceFleetTimeline`](crate::model::InstanceFleetTimeline)
pub mod instance_fleet_timeline {
/// A builder for [`InstanceFleetTimeline`](crate::model::InstanceFleetTimeline)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The time and date the instance fleet was created.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The time and date the instance fleet was created.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
/// <p>The time and date the instance fleet was ready to run jobs.</p>
pub fn ready_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.ready_date_time = Some(input);
self
}
/// <p>The time and date the instance fleet was ready to run jobs.</p>
pub fn set_ready_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.ready_date_time = input;
self
}
/// <p>The time and date the instance fleet terminated.</p>
pub fn end_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_date_time = Some(input);
self
}
/// <p>The time and date the instance fleet terminated.</p>
pub fn set_end_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_date_time = input;
self
}
        /// Consumes the builder and constructs an [`InstanceFleetTimeline`](crate::model::InstanceFleetTimeline)
pub fn build(self) -> crate::model::InstanceFleetTimeline {
crate::model::InstanceFleetTimeline {
creation_date_time: self.creation_date_time,
ready_date_time: self.ready_date_time,
end_date_time: self.end_date_time,
}
}
}
}
impl InstanceFleetTimeline {
/// Creates a new builder-style object to manufacture [`InstanceFleetTimeline`](crate::model::InstanceFleetTimeline)
pub fn builder() -> crate::model::instance_fleet_timeline::Builder {
crate::model::instance_fleet_timeline::Builder::default()
}
}
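// A hedged sketch of filling an `InstanceFleetTimeline`. It assumes the
// `aws_smithy_types::DateTime::from_secs` constructor (epoch seconds); the
// timestamps themselves are arbitrary placeholders.
#[cfg(test)]
mod instance_fleet_timeline_usage_sketch {
    #[test]
    fn builds_an_instance_fleet_timeline() {
        let created = aws_smithy_types::DateTime::from_secs(1_600_000_000);
        let ready = aws_smithy_types::DateTime::from_secs(1_600_000_600);
        let timeline = crate::model::InstanceFleetTimeline::builder()
            .creation_date_time(created)
            .ready_date_time(ready)
            .build();
        assert_eq!(
            timeline.creation_date_time(),
            Some(&aws_smithy_types::DateTime::from_secs(1_600_000_000))
        );
        // `end_date_time` was never set, so the accessor reports `None`.
        assert_eq!(timeline.end_date_time(), None);
    }
}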
/// <p>Provides status change reason details for the instance fleet.</p> <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceFleetStateChangeReason {
/// <p>A code corresponding to the reason the state change occurred.</p>
pub code: std::option::Option<crate::model::InstanceFleetStateChangeReasonCode>,
/// <p>An explanatory message.</p>
pub message: std::option::Option<std::string::String>,
}
impl InstanceFleetStateChangeReason {
/// <p>A code corresponding to the reason the state change occurred.</p>
pub fn code(&self) -> std::option::Option<&crate::model::InstanceFleetStateChangeReasonCode> {
self.code.as_ref()
}
/// <p>An explanatory message.</p>
pub fn message(&self) -> std::option::Option<&str> {
self.message.as_deref()
}
}
impl std::fmt::Debug for InstanceFleetStateChangeReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceFleetStateChangeReason");
formatter.field("code", &self.code);
formatter.field("message", &self.message);
formatter.finish()
}
}
/// See [`InstanceFleetStateChangeReason`](crate::model::InstanceFleetStateChangeReason)
pub mod instance_fleet_state_change_reason {
/// A builder for [`InstanceFleetStateChangeReason`](crate::model::InstanceFleetStateChangeReason)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) code: std::option::Option<crate::model::InstanceFleetStateChangeReasonCode>,
pub(crate) message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A code corresponding to the reason the state change occurred.</p>
pub fn code(mut self, input: crate::model::InstanceFleetStateChangeReasonCode) -> Self {
self.code = Some(input);
self
}
/// <p>A code corresponding to the reason the state change occurred.</p>
pub fn set_code(
mut self,
input: std::option::Option<crate::model::InstanceFleetStateChangeReasonCode>,
) -> Self {
self.code = input;
self
}
/// <p>An explanatory message.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
/// <p>An explanatory message.</p>
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
        /// Consumes the builder and constructs an [`InstanceFleetStateChangeReason`](crate::model::InstanceFleetStateChangeReason)
pub fn build(self) -> crate::model::InstanceFleetStateChangeReason {
crate::model::InstanceFleetStateChangeReason {
code: self.code,
message: self.message,
}
}
}
}
impl InstanceFleetStateChangeReason {
/// Creates a new builder-style object to manufacture [`InstanceFleetStateChangeReason`](crate::model::InstanceFleetStateChangeReason)
pub fn builder() -> crate::model::instance_fleet_state_change_reason::Builder {
crate::model::instance_fleet_state_change_reason::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceFleetStateChangeReasonCode {
#[allow(missing_docs)] // documentation missing in model
ClusterTerminated,
#[allow(missing_docs)] // documentation missing in model
InstanceFailure,
#[allow(missing_docs)] // documentation missing in model
InternalError,
#[allow(missing_docs)] // documentation missing in model
ValidationError,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceFleetStateChangeReasonCode {
fn from(s: &str) -> Self {
match s {
"CLUSTER_TERMINATED" => InstanceFleetStateChangeReasonCode::ClusterTerminated,
"INSTANCE_FAILURE" => InstanceFleetStateChangeReasonCode::InstanceFailure,
"INTERNAL_ERROR" => InstanceFleetStateChangeReasonCode::InternalError,
"VALIDATION_ERROR" => InstanceFleetStateChangeReasonCode::ValidationError,
other => InstanceFleetStateChangeReasonCode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceFleetStateChangeReasonCode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceFleetStateChangeReasonCode::from(s))
}
}
impl InstanceFleetStateChangeReasonCode {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
InstanceFleetStateChangeReasonCode::ClusterTerminated => "CLUSTER_TERMINATED",
InstanceFleetStateChangeReasonCode::InstanceFailure => "INSTANCE_FAILURE",
InstanceFleetStateChangeReasonCode::InternalError => "INTERNAL_ERROR",
InstanceFleetStateChangeReasonCode::ValidationError => "VALIDATION_ERROR",
InstanceFleetStateChangeReasonCode::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"CLUSTER_TERMINATED",
"INSTANCE_FAILURE",
"INTERNAL_ERROR",
"VALIDATION_ERROR",
]
}
}
impl AsRef<str> for InstanceFleetStateChangeReasonCode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
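// A hedged sketch of the string round-trip provided by the `From<&str>`,
// `FromStr`, and `as_str` implementations above, including the `Unknown`
// catch-all for values added after code generation. The unrecognized value is
// made up for illustration.
#[cfg(test)]
mod instance_fleet_state_change_reason_code_usage_sketch {
    use crate::model::InstanceFleetStateChangeReasonCode;

    #[test]
    fn round_trips_known_and_unknown_values() {
        // Known value: parses to the matching variant and back to the same string.
        let known: InstanceFleetStateChangeReasonCode = "VALIDATION_ERROR".parse().unwrap();
        assert_eq!(known, InstanceFleetStateChangeReasonCode::ValidationError);
        assert_eq!(known.as_str(), "VALIDATION_ERROR");

        // Unrecognized value: preserved verbatim inside `Unknown` instead of failing.
        let unknown = InstanceFleetStateChangeReasonCode::from("SOME_FUTURE_CODE");
        assert!(matches!(
            unknown,
            InstanceFleetStateChangeReasonCode::Unknown(_)
        ));
        assert_eq!(unknown.as_str(), "SOME_FUTURE_CODE");
    }
}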
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceFleetState {
#[allow(missing_docs)] // documentation missing in model
Bootstrapping,
#[allow(missing_docs)] // documentation missing in model
Provisioning,
#[allow(missing_docs)] // documentation missing in model
Resizing,
#[allow(missing_docs)] // documentation missing in model
Running,
#[allow(missing_docs)] // documentation missing in model
Suspended,
#[allow(missing_docs)] // documentation missing in model
Terminated,
#[allow(missing_docs)] // documentation missing in model
Terminating,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceFleetState {
fn from(s: &str) -> Self {
match s {
"BOOTSTRAPPING" => InstanceFleetState::Bootstrapping,
"PROVISIONING" => InstanceFleetState::Provisioning,
"RESIZING" => InstanceFleetState::Resizing,
"RUNNING" => InstanceFleetState::Running,
"SUSPENDED" => InstanceFleetState::Suspended,
"TERMINATED" => InstanceFleetState::Terminated,
"TERMINATING" => InstanceFleetState::Terminating,
other => InstanceFleetState::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceFleetState {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceFleetState::from(s))
}
}
impl InstanceFleetState {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
InstanceFleetState::Bootstrapping => "BOOTSTRAPPING",
InstanceFleetState::Provisioning => "PROVISIONING",
InstanceFleetState::Resizing => "RESIZING",
InstanceFleetState::Running => "RUNNING",
InstanceFleetState::Suspended => "SUSPENDED",
InstanceFleetState::Terminated => "TERMINATED",
InstanceFleetState::Terminating => "TERMINATING",
InstanceFleetState::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"BOOTSTRAPPING",
"PROVISIONING",
"RESIZING",
"RUNNING",
"SUSPENDED",
"TERMINATED",
"TERMINATING",
]
}
}
impl AsRef<str> for InstanceFleetState {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>The summary description of the cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ClusterSummary {
/// <p>The unique identifier for the cluster.</p>
pub id: std::option::Option<std::string::String>,
/// <p>The name of the cluster.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The details about the current status of the cluster.</p>
pub status: std::option::Option<crate::model::ClusterStatus>,
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.</p>
pub normalized_instance_hours: std::option::Option<i32>,
/// <p>The Amazon Resource Name of the cluster.</p>
pub cluster_arn: std::option::Option<std::string::String>,
/// <p> The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. </p>
pub outpost_arn: std::option::Option<std::string::String>,
}
impl ClusterSummary {
/// <p>The unique identifier for the cluster.</p>
pub fn id(&self) -> std::option::Option<&str> {
self.id.as_deref()
}
/// <p>The name of the cluster.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The details about the current status of the cluster.</p>
pub fn status(&self) -> std::option::Option<&crate::model::ClusterStatus> {
self.status.as_ref()
}
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.</p>
pub fn normalized_instance_hours(&self) -> std::option::Option<i32> {
self.normalized_instance_hours
}
/// <p>The Amazon Resource Name of the cluster.</p>
pub fn cluster_arn(&self) -> std::option::Option<&str> {
self.cluster_arn.as_deref()
}
/// <p> The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. </p>
pub fn outpost_arn(&self) -> std::option::Option<&str> {
self.outpost_arn.as_deref()
}
}
impl std::fmt::Debug for ClusterSummary {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ClusterSummary");
formatter.field("id", &self.id);
formatter.field("name", &self.name);
formatter.field("status", &self.status);
formatter.field("normalized_instance_hours", &self.normalized_instance_hours);
formatter.field("cluster_arn", &self.cluster_arn);
formatter.field("outpost_arn", &self.outpost_arn);
formatter.finish()
}
}
/// See [`ClusterSummary`](crate::model::ClusterSummary)
pub mod cluster_summary {
/// A builder for [`ClusterSummary`](crate::model::ClusterSummary)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::ClusterStatus>,
pub(crate) normalized_instance_hours: std::option::Option<i32>,
pub(crate) cluster_arn: std::option::Option<std::string::String>,
pub(crate) outpost_arn: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The unique identifier for the cluster.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.id = Some(input.into());
self
}
/// <p>The unique identifier for the cluster.</p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.id = input;
self
}
/// <p>The name of the cluster.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the cluster.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The details about the current status of the cluster.</p>
pub fn status(mut self, input: crate::model::ClusterStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The details about the current status of the cluster.</p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::ClusterStatus>,
) -> Self {
self.status = input;
self
}
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.</p>
pub fn normalized_instance_hours(mut self, input: i32) -> Self {
self.normalized_instance_hours = Some(input);
self
}
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.</p>
pub fn set_normalized_instance_hours(mut self, input: std::option::Option<i32>) -> Self {
self.normalized_instance_hours = input;
self
}
/// <p>The Amazon Resource Name of the cluster.</p>
pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name of the cluster.</p>
pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.cluster_arn = input;
self
}
/// <p> The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. </p>
pub fn outpost_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.outpost_arn = Some(input.into());
self
}
/// <p> The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. </p>
pub fn set_outpost_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.outpost_arn = input;
self
}
/// Consumes the builder and constructs a [`ClusterSummary`](crate::model::ClusterSummary)
pub fn build(self) -> crate::model::ClusterSummary {
crate::model::ClusterSummary {
id: self.id,
name: self.name,
status: self.status,
normalized_instance_hours: self.normalized_instance_hours,
cluster_arn: self.cluster_arn,
outpost_arn: self.outpost_arn,
}
}
}
}
impl ClusterSummary {
/// Creates a new builder-style object to manufacture [`ClusterSummary`](crate::model::ClusterSummary)
pub fn builder() -> crate::model::cluster_summary::Builder {
crate::model::cluster_summary::Builder::default()
}
}
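// A hedged sketch of building a `ClusterSummary` and reading it back. The ids,
// ARN, and normalized-hours figure are placeholders; per the doc comment, an
// instance roughly four times the price of an m1.small running for two hours
// would be reported as about 8 normalized instance hours, but that value is
// computed by the service, not by this client-side model.
#[cfg(test)]
mod cluster_summary_usage_sketch {
    #[test]
    fn builds_a_cluster_summary() {
        let summary = crate::model::ClusterSummary::builder()
            .id("j-EXAMPLE") // hypothetical cluster id
            .name("analytics-cluster") // hypothetical cluster name
            .normalized_instance_hours(8)
            .cluster_arn("arn:aws:elasticmapreduce:us-east-1:111122223333:cluster/j-EXAMPLE") // placeholder ARN
            .build();
        assert_eq!(summary.id(), Some("j-EXAMPLE"));
        assert_eq!(summary.normalized_instance_hours(), Some(8));
    }
}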
/// <p>The detailed status of the cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ClusterStatus {
/// <p>The current state of the cluster.</p>
pub state: std::option::Option<crate::model::ClusterState>,
/// <p>The reason for the cluster status change.</p>
pub state_change_reason: std::option::Option<crate::model::ClusterStateChangeReason>,
/// <p>A timeline that represents the status of a cluster over the lifetime of the cluster.</p>
pub timeline: std::option::Option<crate::model::ClusterTimeline>,
}
impl ClusterStatus {
/// <p>The current state of the cluster.</p>
pub fn state(&self) -> std::option::Option<&crate::model::ClusterState> {
self.state.as_ref()
}
/// <p>The reason for the cluster status change.</p>
pub fn state_change_reason(
&self,
) -> std::option::Option<&crate::model::ClusterStateChangeReason> {
self.state_change_reason.as_ref()
}
/// <p>A timeline that represents the status of a cluster over the lifetime of the cluster.</p>
pub fn timeline(&self) -> std::option::Option<&crate::model::ClusterTimeline> {
self.timeline.as_ref()
}
}
impl std::fmt::Debug for ClusterStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ClusterStatus");
formatter.field("state", &self.state);
formatter.field("state_change_reason", &self.state_change_reason);
formatter.field("timeline", &self.timeline);
formatter.finish()
}
}
/// See [`ClusterStatus`](crate::model::ClusterStatus)
pub mod cluster_status {
/// A builder for [`ClusterStatus`](crate::model::ClusterStatus)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) state: std::option::Option<crate::model::ClusterState>,
pub(crate) state_change_reason: std::option::Option<crate::model::ClusterStateChangeReason>,
pub(crate) timeline: std::option::Option<crate::model::ClusterTimeline>,
}
impl Builder {
/// <p>The current state of the cluster.</p>
pub fn state(mut self, input: crate::model::ClusterState) -> Self {
self.state = Some(input);
self
}
/// <p>The current state of the cluster.</p>
pub fn set_state(mut self, input: std::option::Option<crate::model::ClusterState>) -> Self {
self.state = input;
self
}
/// <p>The reason for the cluster status change.</p>
pub fn state_change_reason(
mut self,
input: crate::model::ClusterStateChangeReason,
) -> Self {
self.state_change_reason = Some(input);
self
}
/// <p>The reason for the cluster status change.</p>
pub fn set_state_change_reason(
mut self,
input: std::option::Option<crate::model::ClusterStateChangeReason>,
) -> Self {
self.state_change_reason = input;
self
}
/// <p>A timeline that represents the status of a cluster over the lifetime of the cluster.</p>
pub fn timeline(mut self, input: crate::model::ClusterTimeline) -> Self {
self.timeline = Some(input);
self
}
/// <p>A timeline that represents the status of a cluster over the lifetime of the cluster.</p>
pub fn set_timeline(
mut self,
input: std::option::Option<crate::model::ClusterTimeline>,
) -> Self {
self.timeline = input;
self
}
/// Consumes the builder and constructs a [`ClusterStatus`](crate::model::ClusterStatus)
pub fn build(self) -> crate::model::ClusterStatus {
crate::model::ClusterStatus {
state: self.state,
state_change_reason: self.state_change_reason,
timeline: self.timeline,
}
}
}
}
impl ClusterStatus {
/// Creates a new builder-style object to manufacture [`ClusterStatus`](crate::model::ClusterStatus)
pub fn builder() -> crate::model::cluster_status::Builder {
crate::model::cluster_status::Builder::default()
}
}
/// <p>Represents the timeline of the cluster's lifecycle.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ClusterTimeline {
/// <p>The creation date and time of the cluster.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date and time when the cluster was ready to run steps.</p>
pub ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date and time when the cluster was terminated.</p>
pub end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl ClusterTimeline {
/// <p>The creation date and time of the cluster.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
/// <p>The date and time when the cluster was ready to run steps.</p>
pub fn ready_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.ready_date_time.as_ref()
}
/// <p>The date and time when the cluster was terminated.</p>
pub fn end_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_date_time.as_ref()
}
}
impl std::fmt::Debug for ClusterTimeline {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ClusterTimeline");
formatter.field("creation_date_time", &self.creation_date_time);
formatter.field("ready_date_time", &self.ready_date_time);
formatter.field("end_date_time", &self.end_date_time);
formatter.finish()
}
}
/// See [`ClusterTimeline`](crate::model::ClusterTimeline)
pub mod cluster_timeline {
/// A builder for [`ClusterTimeline`](crate::model::ClusterTimeline)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_date_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The creation date and time of the cluster.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The creation date and time of the cluster.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
/// <p>The date and time when the cluster was ready to run steps.</p>
pub fn ready_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.ready_date_time = Some(input);
self
}
/// <p>The date and time when the cluster was ready to run steps.</p>
pub fn set_ready_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.ready_date_time = input;
self
}
/// <p>The date and time when the cluster was terminated.</p>
pub fn end_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_date_time = Some(input);
self
}
/// <p>The date and time when the cluster was terminated.</p>
pub fn set_end_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_date_time = input;
self
}
/// Consumes the builder and constructs a [`ClusterTimeline`](crate::model::ClusterTimeline)
pub fn build(self) -> crate::model::ClusterTimeline {
crate::model::ClusterTimeline {
creation_date_time: self.creation_date_time,
ready_date_time: self.ready_date_time,
end_date_time: self.end_date_time,
}
}
}
}
impl ClusterTimeline {
/// Creates a new builder-style object to manufacture [`ClusterTimeline`](crate::model::ClusterTimeline)
pub fn builder() -> crate::model::cluster_timeline::Builder {
crate::model::cluster_timeline::Builder::default()
}
}
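// Illustrative sketch: populating a `ClusterTimeline`. The
// `aws_smithy_types::DateTime::from_secs` constructor is assumed to come from
// the `aws-smithy-types` crate; the epoch-second values are hypothetical.
#[cfg(test)]
mod cluster_timeline_builder_example {
    #[test]
    fn builds_a_cluster_timeline() {
        let created = aws_smithy_types::DateTime::from_secs(1_609_459_200);
        let ready = aws_smithy_types::DateTime::from_secs(1_609_459_800);
        let timeline = crate::model::ClusterTimeline::builder()
            .creation_date_time(created)
            .ready_date_time(ready)
            .build();
        // Fields that were never set stay `None`.
        assert!(timeline.creation_date_time().is_some());
        assert!(timeline.end_date_time().is_none());
    }
}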
/// <p>The reason that the cluster changed to its current state.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ClusterStateChangeReason {
/// <p>The programmatic code for the state change reason.</p>
pub code: std::option::Option<crate::model::ClusterStateChangeReasonCode>,
/// <p>The descriptive message for the state change reason.</p>
pub message: std::option::Option<std::string::String>,
}
impl ClusterStateChangeReason {
/// <p>The programmatic code for the state change reason.</p>
pub fn code(&self) -> std::option::Option<&crate::model::ClusterStateChangeReasonCode> {
self.code.as_ref()
}
/// <p>The descriptive message for the state change reason.</p>
pub fn message(&self) -> std::option::Option<&str> {
self.message.as_deref()
}
}
impl std::fmt::Debug for ClusterStateChangeReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ClusterStateChangeReason");
formatter.field("code", &self.code);
formatter.field("message", &self.message);
formatter.finish()
}
}
/// See [`ClusterStateChangeReason`](crate::model::ClusterStateChangeReason)
pub mod cluster_state_change_reason {
/// A builder for [`ClusterStateChangeReason`](crate::model::ClusterStateChangeReason)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) code: std::option::Option<crate::model::ClusterStateChangeReasonCode>,
pub(crate) message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The programmatic code for the state change reason.</p>
pub fn code(mut self, input: crate::model::ClusterStateChangeReasonCode) -> Self {
self.code = Some(input);
self
}
/// <p>The programmatic code for the state change reason.</p>
pub fn set_code(
mut self,
input: std::option::Option<crate::model::ClusterStateChangeReasonCode>,
) -> Self {
self.code = input;
self
}
/// <p>The descriptive message for the state change reason.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
/// <p>The descriptive message for the state change reason.</p>
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// Consumes the builder and constructs a [`ClusterStateChangeReason`](crate::model::ClusterStateChangeReason)
pub fn build(self) -> crate::model::ClusterStateChangeReason {
crate::model::ClusterStateChangeReason {
code: self.code,
message: self.message,
}
}
}
}
impl ClusterStateChangeReason {
/// Creates a new builder-style object to manufacture [`ClusterStateChangeReason`](crate::model::ClusterStateChangeReason)
pub fn builder() -> crate::model::cluster_state_change_reason::Builder {
crate::model::cluster_state_change_reason::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ClusterStateChangeReasonCode {
#[allow(missing_docs)] // documentation missing in model
AllStepsCompleted,
#[allow(missing_docs)] // documentation missing in model
BootstrapFailure,
#[allow(missing_docs)] // documentation missing in model
InstanceFailure,
#[allow(missing_docs)] // documentation missing in model
InstanceFleetTimeout,
#[allow(missing_docs)] // documentation missing in model
InternalError,
#[allow(missing_docs)] // documentation missing in model
StepFailure,
#[allow(missing_docs)] // documentation missing in model
UserRequest,
#[allow(missing_docs)] // documentation missing in model
ValidationError,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ClusterStateChangeReasonCode {
fn from(s: &str) -> Self {
match s {
"ALL_STEPS_COMPLETED" => ClusterStateChangeReasonCode::AllStepsCompleted,
"BOOTSTRAP_FAILURE" => ClusterStateChangeReasonCode::BootstrapFailure,
"INSTANCE_FAILURE" => ClusterStateChangeReasonCode::InstanceFailure,
"INSTANCE_FLEET_TIMEOUT" => ClusterStateChangeReasonCode::InstanceFleetTimeout,
"INTERNAL_ERROR" => ClusterStateChangeReasonCode::InternalError,
"STEP_FAILURE" => ClusterStateChangeReasonCode::StepFailure,
"USER_REQUEST" => ClusterStateChangeReasonCode::UserRequest,
"VALIDATION_ERROR" => ClusterStateChangeReasonCode::ValidationError,
other => ClusterStateChangeReasonCode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ClusterStateChangeReasonCode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ClusterStateChangeReasonCode::from(s))
}
}
impl ClusterStateChangeReasonCode {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ClusterStateChangeReasonCode::AllStepsCompleted => "ALL_STEPS_COMPLETED",
ClusterStateChangeReasonCode::BootstrapFailure => "BOOTSTRAP_FAILURE",
ClusterStateChangeReasonCode::InstanceFailure => "INSTANCE_FAILURE",
ClusterStateChangeReasonCode::InstanceFleetTimeout => "INSTANCE_FLEET_TIMEOUT",
ClusterStateChangeReasonCode::InternalError => "INTERNAL_ERROR",
ClusterStateChangeReasonCode::StepFailure => "STEP_FAILURE",
ClusterStateChangeReasonCode::UserRequest => "USER_REQUEST",
ClusterStateChangeReasonCode::ValidationError => "VALIDATION_ERROR",
ClusterStateChangeReasonCode::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"ALL_STEPS_COMPLETED",
"BOOTSTRAP_FAILURE",
"INSTANCE_FAILURE",
"INSTANCE_FLEET_TIMEOUT",
"INTERNAL_ERROR",
"STEP_FAILURE",
"USER_REQUEST",
"VALIDATION_ERROR",
]
}
}
impl AsRef<str> for ClusterStateChangeReasonCode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
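// Illustrative sketch: how the string conversions above behave, including the
// `Unknown` fallback for values the service adds after code generation.
#[cfg(test)]
mod cluster_state_change_reason_code_example {
    use std::str::FromStr;

    #[test]
    fn round_trips_known_and_unknown_codes() {
        // A documented value parses to its enum member and round-trips.
        let known =
            crate::model::ClusterStateChangeReasonCode::from_str("USER_REQUEST").unwrap();
        assert!(matches!(
            known,
            crate::model::ClusterStateChangeReasonCode::UserRequest
        ));
        assert_eq!(known.as_str(), "USER_REQUEST");

        // An unrecognized value is preserved verbatim in `Unknown`.
        let unknown = crate::model::ClusterStateChangeReasonCode::from("NOT_A_REAL_CODE");
        assert_eq!(unknown.as_str(), "NOT_A_REAL_CODE");
    }
}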
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ClusterState {
#[allow(missing_docs)] // documentation missing in model
Bootstrapping,
#[allow(missing_docs)] // documentation missing in model
Running,
#[allow(missing_docs)] // documentation missing in model
Starting,
#[allow(missing_docs)] // documentation missing in model
Terminated,
#[allow(missing_docs)] // documentation missing in model
TerminatedWithErrors,
#[allow(missing_docs)] // documentation missing in model
Terminating,
#[allow(missing_docs)] // documentation missing in model
Waiting,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ClusterState {
fn from(s: &str) -> Self {
match s {
"BOOTSTRAPPING" => ClusterState::Bootstrapping,
"RUNNING" => ClusterState::Running,
"STARTING" => ClusterState::Starting,
"TERMINATED" => ClusterState::Terminated,
"TERMINATED_WITH_ERRORS" => ClusterState::TerminatedWithErrors,
"TERMINATING" => ClusterState::Terminating,
"WAITING" => ClusterState::Waiting,
other => ClusterState::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ClusterState {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ClusterState::from(s))
}
}
impl ClusterState {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ClusterState::Bootstrapping => "BOOTSTRAPPING",
ClusterState::Running => "RUNNING",
ClusterState::Starting => "STARTING",
ClusterState::Terminated => "TERMINATED",
ClusterState::TerminatedWithErrors => "TERMINATED_WITH_ERRORS",
ClusterState::Terminating => "TERMINATING",
ClusterState::Waiting => "WAITING",
ClusterState::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"BOOTSTRAPPING",
"RUNNING",
"STARTING",
"TERMINATED",
"TERMINATED_WITH_ERRORS",
"TERMINATING",
"WAITING",
]
}
}
impl AsRef<str> for ClusterState {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>An entity describing an executable that runs on a cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Command {
/// <p>The name of the command.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The Amazon S3 location of the command script.</p>
pub script_path: std::option::Option<std::string::String>,
/// <p>Arguments for Amazon EMR to pass to the command for execution.</p>
pub args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Command {
/// <p>The name of the command.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The Amazon S3 location of the command script.</p>
pub fn script_path(&self) -> std::option::Option<&str> {
self.script_path.as_deref()
}
/// <p>Arguments for Amazon EMR to pass to the command for execution.</p>
pub fn args(&self) -> std::option::Option<&[std::string::String]> {
self.args.as_deref()
}
}
impl std::fmt::Debug for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Command");
formatter.field("name", &self.name);
formatter.field("script_path", &self.script_path);
formatter.field("args", &self.args);
formatter.finish()
}
}
/// See [`Command`](crate::model::Command)
pub mod command {
/// A builder for [`Command`](crate::model::Command)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) script_path: std::option::Option<std::string::String>,
pub(crate) args: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The name of the command.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the command.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The Amazon S3 location of the command script.</p>
pub fn script_path(mut self, input: impl Into<std::string::String>) -> Self {
self.script_path = Some(input.into());
self
}
/// <p>The Amazon S3 location of the command script.</p>
pub fn set_script_path(mut self, input: std::option::Option<std::string::String>) -> Self {
self.script_path = input;
self
}
/// Appends an item to `args`.
///
/// To override the contents of this collection use [`set_args`](Self::set_args).
///
/// <p>Arguments for Amazon EMR to pass to the command for execution.</p>
pub fn args(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.args.unwrap_or_default();
v.push(input.into());
self.args = Some(v);
self
}
/// <p>Arguments for Amazon EMR to pass to the command for execution.</p>
pub fn set_args(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.args = input;
self
}
/// Consumes the builder and constructs a [`Command`](crate::model::Command)
pub fn build(self) -> crate::model::Command {
crate::model::Command {
name: self.name,
script_path: self.script_path,
args: self.args,
}
}
}
}
impl Command {
/// Creates a new builder-style object to manufacture [`Command`](crate::model::Command)
pub fn builder() -> crate::model::command::Builder {
crate::model::command::Builder::default()
}
}
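// Illustrative sketch: each call to `args` on the `Command` builder appends one
// argument, while `set_args` replaces the whole collection. The script path and
// argument values are hypothetical.
#[cfg(test)]
mod command_builder_example {
    #[test]
    fn appends_and_overrides_args() {
        // Repeated `args` calls accumulate.
        let appended = crate::model::Command::builder()
            .name("spark-submit")
            .script_path("s3://my-bucket/scripts/job.py")
            .args("--deploy-mode")
            .args("cluster")
            .build();
        assert_eq!(appended.args().map(|a| a.len()), Some(2));

        // `set_args` discards anything appended earlier.
        let overridden = crate::model::Command::builder()
            .args("ignored")
            .set_args(Some(vec!["only-arg".to_string()]))
            .build();
        assert_eq!(overridden.args(), Some(["only-arg".to_string()].as_slice()));
    }
}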
/// <p>Details for an Amazon EMR Studio session mapping including creation time, user or group ID, Studio ID, and so on.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SessionMappingDetail {
/// <p>The ID of the Amazon EMR Studio.</p>
pub studio_id: std::option::Option<std::string::String>,
/// <p>The globally unique identifier (GUID) of the user or group.</p>
pub identity_id: std::option::Option<std::string::String>,
/// <p>The name of the user or group. For more information, see <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName">UserName</a> and <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName">DisplayName</a> in the <i>Amazon Web Services SSO Identity Store API Reference</i>.</p>
pub identity_name: std::option::Option<std::string::String>,
/// <p>Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.</p>
pub identity_type: std::option::Option<crate::model::IdentityType>,
/// <p>The Amazon Resource Name (ARN) of the session policy associated with the user or group.</p>
pub session_policy_arn: std::option::Option<std::string::String>,
/// <p>The time the session mapping was created.</p>
pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The time the session mapping was last modified.</p>
pub last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl SessionMappingDetail {
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn studio_id(&self) -> std::option::Option<&str> {
self.studio_id.as_deref()
}
/// <p>The globally unique identifier (GUID) of the user or group.</p>
pub fn identity_id(&self) -> std::option::Option<&str> {
self.identity_id.as_deref()
}
/// <p>The name of the user or group. For more information, see <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName">UserName</a> and <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName">DisplayName</a> in the <i>Amazon Web Services SSO Identity Store API Reference</i>.</p>
pub fn identity_name(&self) -> std::option::Option<&str> {
self.identity_name.as_deref()
}
/// <p>Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.</p>
pub fn identity_type(&self) -> std::option::Option<&crate::model::IdentityType> {
self.identity_type.as_ref()
}
/// <p>The Amazon Resource Name (ARN) of the session policy associated with the user or group.</p>
pub fn session_policy_arn(&self) -> std::option::Option<&str> {
self.session_policy_arn.as_deref()
}
/// <p>The time the session mapping was created.</p>
pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_time.as_ref()
}
/// <p>The time the session mapping was last modified.</p>
pub fn last_modified_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.last_modified_time.as_ref()
}
}
impl std::fmt::Debug for SessionMappingDetail {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("SessionMappingDetail");
formatter.field("studio_id", &self.studio_id);
formatter.field("identity_id", &self.identity_id);
formatter.field("identity_name", &self.identity_name);
formatter.field("identity_type", &self.identity_type);
formatter.field("session_policy_arn", &self.session_policy_arn);
formatter.field("creation_time", &self.creation_time);
formatter.field("last_modified_time", &self.last_modified_time);
formatter.finish()
}
}
/// See [`SessionMappingDetail`](crate::model::SessionMappingDetail)
pub mod session_mapping_detail {
/// A builder for [`SessionMappingDetail`](crate::model::SessionMappingDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) studio_id: std::option::Option<std::string::String>,
pub(crate) identity_id: std::option::Option<std::string::String>,
pub(crate) identity_name: std::option::Option<std::string::String>,
pub(crate) identity_type: std::option::Option<crate::model::IdentityType>,
pub(crate) session_policy_arn: std::option::Option<std::string::String>,
pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) last_modified_time: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn studio_id(mut self, input: impl Into<std::string::String>) -> Self {
self.studio_id = Some(input.into());
self
}
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn set_studio_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.studio_id = input;
self
}
/// <p>The globally unique identifier (GUID) of the user or group.</p>
pub fn identity_id(mut self, input: impl Into<std::string::String>) -> Self {
self.identity_id = Some(input.into());
self
}
/// <p>The globally unique identifier (GUID) of the user or group.</p>
pub fn set_identity_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.identity_id = input;
self
}
/// <p>The name of the user or group. For more information, see <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName">UserName</a> and <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName">DisplayName</a> in the <i>Amazon Web Services SSO Identity Store API Reference</i>.</p>
pub fn identity_name(mut self, input: impl Into<std::string::String>) -> Self {
self.identity_name = Some(input.into());
self
}
/// <p>The name of the user or group. For more information, see <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName">UserName</a> and <a href="https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName">DisplayName</a> in the <i>Amazon Web Services SSO Identity Store API Reference</i>.</p>
pub fn set_identity_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.identity_name = input;
self
}
/// <p>Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.</p>
pub fn identity_type(mut self, input: crate::model::IdentityType) -> Self {
self.identity_type = Some(input);
self
}
/// <p>Specifies whether the identity mapped to the Amazon EMR Studio is a user or a group.</p>
pub fn set_identity_type(
mut self,
input: std::option::Option<crate::model::IdentityType>,
) -> Self {
self.identity_type = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the session policy associated with the user or group.</p>
pub fn session_policy_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.session_policy_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the session policy associated with the user or group.</p>
pub fn set_session_policy_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.session_policy_arn = input;
self
}
/// <p>The time the session mapping was created.</p>
pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_time = Some(input);
self
}
/// <p>The time the session mapping was created.</p>
pub fn set_creation_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_time = input;
self
}
/// <p>The time the session mapping was last modified.</p>
pub fn last_modified_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.last_modified_time = Some(input);
self
}
/// <p>The time the session mapping was last modified.</p>
pub fn set_last_modified_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.last_modified_time = input;
self
}
/// Consumes the builder and constructs a [`SessionMappingDetail`](crate::model::SessionMappingDetail)
pub fn build(self) -> crate::model::SessionMappingDetail {
crate::model::SessionMappingDetail {
studio_id: self.studio_id,
identity_id: self.identity_id,
identity_name: self.identity_name,
identity_type: self.identity_type,
session_policy_arn: self.session_policy_arn,
creation_time: self.creation_time,
last_modified_time: self.last_modified_time,
}
}
}
}
impl SessionMappingDetail {
/// Creates a new builder-style object to manufacture [`SessionMappingDetail`](crate::model::SessionMappingDetail)
pub fn builder() -> crate::model::session_mapping_detail::Builder {
crate::model::session_mapping_detail::Builder::default()
}
}
/// <p>Properties that describe the Amazon Web Services principal that created the <code>BlockPublicAccessConfiguration</code> using the <code>PutBlockPublicAccessConfiguration</code> action, as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct BlockPublicAccessConfigurationMetadata {
/// <p>The date and time that the configuration was created.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The Amazon Resource Name (ARN) of the principal that created or last modified the configuration.</p>
pub created_by_arn: std::option::Option<std::string::String>,
}
impl BlockPublicAccessConfigurationMetadata {
/// <p>The date and time that the configuration was created.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
    /// <p>The Amazon Resource Name (ARN) of the principal that created or last modified the configuration.</p>
pub fn created_by_arn(&self) -> std::option::Option<&str> {
self.created_by_arn.as_deref()
}
}
impl std::fmt::Debug for BlockPublicAccessConfigurationMetadata {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("BlockPublicAccessConfigurationMetadata");
formatter.field("creation_date_time", &self.creation_date_time);
formatter.field("created_by_arn", &self.created_by_arn);
formatter.finish()
}
}
/// See [`BlockPublicAccessConfigurationMetadata`](crate::model::BlockPublicAccessConfigurationMetadata)
pub mod block_public_access_configuration_metadata {
/// A builder for [`BlockPublicAccessConfigurationMetadata`](crate::model::BlockPublicAccessConfigurationMetadata)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) created_by_arn: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The date and time that the configuration was created.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The date and time that the configuration was created.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
        /// <p>The Amazon Resource Name (ARN) of the principal that created or last modified the configuration.</p>
pub fn created_by_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.created_by_arn = Some(input.into());
self
}
        /// <p>The Amazon Resource Name (ARN) of the principal that created or last modified the configuration.</p>
pub fn set_created_by_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.created_by_arn = input;
self
}
/// Consumes the builder and constructs a [`BlockPublicAccessConfigurationMetadata`](crate::model::BlockPublicAccessConfigurationMetadata)
pub fn build(self) -> crate::model::BlockPublicAccessConfigurationMetadata {
crate::model::BlockPublicAccessConfigurationMetadata {
creation_date_time: self.creation_date_time,
created_by_arn: self.created_by_arn,
}
}
}
}
impl BlockPublicAccessConfigurationMetadata {
/// Creates a new builder-style object to manufacture [`BlockPublicAccessConfigurationMetadata`](crate::model::BlockPublicAccessConfigurationMetadata)
pub fn builder() -> crate::model::block_public_access_configuration_metadata::Builder {
crate::model::block_public_access_configuration_metadata::Builder::default()
}
}
/// <p>Details for an Amazon EMR Studio including ID, creation time, name, and so on.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Studio {
/// <p>The ID of the Amazon EMR Studio.</p>
pub studio_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) of the Amazon EMR Studio.</p>
pub studio_arn: std::option::Option<std::string::String>,
/// <p>The name of the Amazon EMR Studio.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The detailed description of the Amazon EMR Studio.</p>
pub description: std::option::Option<std::string::String>,
/// <p>Specifies whether the Amazon EMR Studio authenticates users using IAM or Amazon Web Services SSO.</p>
pub auth_mode: std::option::Option<crate::model::AuthMode>,
/// <p>The ID of the VPC associated with the Amazon EMR Studio.</p>
pub vpc_id: std::option::Option<std::string::String>,
/// <p>The list of IDs of the subnets associated with the Amazon EMR Studio.</p>
pub subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The name of the IAM role assumed by the Amazon EMR Studio.</p>
pub service_role: std::option::Option<std::string::String>,
/// <p>The name of the IAM role assumed by users logged in to the Amazon EMR Studio. A Studio only requires a <code>UserRole</code> when you use IAM authentication.</p>
pub user_role: std::option::Option<std::string::String>,
/// <p>The ID of the Workspace security group associated with the Amazon EMR Studio. The Workspace security group allows outbound network traffic to resources in the Engine security group and to the internet.</p>
pub workspace_security_group_id: std::option::Option<std::string::String>,
/// <p>The ID of the Engine security group associated with the Amazon EMR Studio. The Engine security group allows inbound network traffic from resources in the Workspace security group.</p>
pub engine_security_group_id: std::option::Option<std::string::String>,
/// <p>The unique access URL of the Amazon EMR Studio.</p>
pub url: std::option::Option<std::string::String>,
/// <p>The time the Amazon EMR Studio was created.</p>
pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.</p>
pub default_s3_location: std::option::Option<std::string::String>,
/// <p>Your identity provider's authentication endpoint. Amazon EMR Studio redirects federated users to this endpoint for authentication when logging in to a Studio with the Studio URL.</p>
pub idp_auth_url: std::option::Option<std::string::String>,
/// <p>The name of your identity provider's <code>RelayState</code> parameter.</p>
pub idp_relay_state_parameter_name: std::option::Option<std::string::String>,
/// <p>A list of tags associated with the Amazon EMR Studio.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Studio {
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn studio_id(&self) -> std::option::Option<&str> {
self.studio_id.as_deref()
}
/// <p>The Amazon Resource Name (ARN) of the Amazon EMR Studio.</p>
pub fn studio_arn(&self) -> std::option::Option<&str> {
self.studio_arn.as_deref()
}
/// <p>The name of the Amazon EMR Studio.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The detailed description of the Amazon EMR Studio.</p>
pub fn description(&self) -> std::option::Option<&str> {
self.description.as_deref()
}
/// <p>Specifies whether the Amazon EMR Studio authenticates users using IAM or Amazon Web Services SSO.</p>
pub fn auth_mode(&self) -> std::option::Option<&crate::model::AuthMode> {
self.auth_mode.as_ref()
}
/// <p>The ID of the VPC associated with the Amazon EMR Studio.</p>
pub fn vpc_id(&self) -> std::option::Option<&str> {
self.vpc_id.as_deref()
}
/// <p>The list of IDs of the subnets associated with the Amazon EMR Studio.</p>
pub fn subnet_ids(&self) -> std::option::Option<&[std::string::String]> {
self.subnet_ids.as_deref()
}
/// <p>The name of the IAM role assumed by the Amazon EMR Studio.</p>
pub fn service_role(&self) -> std::option::Option<&str> {
self.service_role.as_deref()
}
/// <p>The name of the IAM role assumed by users logged in to the Amazon EMR Studio. A Studio only requires a <code>UserRole</code> when you use IAM authentication.</p>
pub fn user_role(&self) -> std::option::Option<&str> {
self.user_role.as_deref()
}
/// <p>The ID of the Workspace security group associated with the Amazon EMR Studio. The Workspace security group allows outbound network traffic to resources in the Engine security group and to the internet.</p>
pub fn workspace_security_group_id(&self) -> std::option::Option<&str> {
self.workspace_security_group_id.as_deref()
}
/// <p>The ID of the Engine security group associated with the Amazon EMR Studio. The Engine security group allows inbound network traffic from resources in the Workspace security group.</p>
pub fn engine_security_group_id(&self) -> std::option::Option<&str> {
self.engine_security_group_id.as_deref()
}
/// <p>The unique access URL of the Amazon EMR Studio.</p>
pub fn url(&self) -> std::option::Option<&str> {
self.url.as_deref()
}
/// <p>The time the Amazon EMR Studio was created.</p>
pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_time.as_ref()
}
/// <p>The Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.</p>
pub fn default_s3_location(&self) -> std::option::Option<&str> {
self.default_s3_location.as_deref()
}
/// <p>Your identity provider's authentication endpoint. Amazon EMR Studio redirects federated users to this endpoint for authentication when logging in to a Studio with the Studio URL.</p>
pub fn idp_auth_url(&self) -> std::option::Option<&str> {
self.idp_auth_url.as_deref()
}
/// <p>The name of your identity provider's <code>RelayState</code> parameter.</p>
pub fn idp_relay_state_parameter_name(&self) -> std::option::Option<&str> {
self.idp_relay_state_parameter_name.as_deref()
}
/// <p>A list of tags associated with the Amazon EMR Studio.</p>
pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
self.tags.as_deref()
}
}
impl std::fmt::Debug for Studio {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Studio");
formatter.field("studio_id", &self.studio_id);
formatter.field("studio_arn", &self.studio_arn);
formatter.field("name", &self.name);
formatter.field("description", &self.description);
formatter.field("auth_mode", &self.auth_mode);
formatter.field("vpc_id", &self.vpc_id);
formatter.field("subnet_ids", &self.subnet_ids);
formatter.field("service_role", &self.service_role);
formatter.field("user_role", &self.user_role);
formatter.field(
"workspace_security_group_id",
&self.workspace_security_group_id,
);
formatter.field("engine_security_group_id", &self.engine_security_group_id);
formatter.field("url", &self.url);
formatter.field("creation_time", &self.creation_time);
formatter.field("default_s3_location", &self.default_s3_location);
formatter.field("idp_auth_url", &self.idp_auth_url);
formatter.field(
"idp_relay_state_parameter_name",
&self.idp_relay_state_parameter_name,
);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
/// See [`Studio`](crate::model::Studio)
pub mod studio {
/// A builder for [`Studio`](crate::model::Studio)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) studio_id: std::option::Option<std::string::String>,
pub(crate) studio_arn: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) auth_mode: std::option::Option<crate::model::AuthMode>,
pub(crate) vpc_id: std::option::Option<std::string::String>,
pub(crate) subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) service_role: std::option::Option<std::string::String>,
pub(crate) user_role: std::option::Option<std::string::String>,
pub(crate) workspace_security_group_id: std::option::Option<std::string::String>,
pub(crate) engine_security_group_id: std::option::Option<std::string::String>,
pub(crate) url: std::option::Option<std::string::String>,
pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) default_s3_location: std::option::Option<std::string::String>,
pub(crate) idp_auth_url: std::option::Option<std::string::String>,
pub(crate) idp_relay_state_parameter_name: std::option::Option<std::string::String>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Builder {
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn studio_id(mut self, input: impl Into<std::string::String>) -> Self {
self.studio_id = Some(input.into());
self
}
/// <p>The ID of the Amazon EMR Studio.</p>
pub fn set_studio_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.studio_id = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the Amazon EMR Studio.</p>
pub fn studio_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.studio_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the Amazon EMR Studio.</p>
pub fn set_studio_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.studio_arn = input;
self
}
/// <p>The name of the Amazon EMR Studio.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the Amazon EMR Studio.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The detailed description of the Amazon EMR Studio.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
/// <p>The detailed description of the Amazon EMR Studio.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
/// <p>Specifies whether the Amazon EMR Studio authenticates users using IAM or Amazon Web Services SSO.</p>
pub fn auth_mode(mut self, input: crate::model::AuthMode) -> Self {
self.auth_mode = Some(input);
self
}
/// <p>Specifies whether the Amazon EMR Studio authenticates users using IAM or Amazon Web Services SSO.</p>
pub fn set_auth_mode(mut self, input: std::option::Option<crate::model::AuthMode>) -> Self {
self.auth_mode = input;
self
}
/// <p>The ID of the VPC associated with the Amazon EMR Studio.</p>
pub fn vpc_id(mut self, input: impl Into<std::string::String>) -> Self {
self.vpc_id = Some(input.into());
self
}
/// <p>The ID of the VPC associated with the Amazon EMR Studio.</p>
pub fn set_vpc_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.vpc_id = input;
self
}
/// Appends an item to `subnet_ids`.
///
/// To override the contents of this collection use [`set_subnet_ids`](Self::set_subnet_ids).
///
/// <p>The list of IDs of the subnets associated with the Amazon EMR Studio.</p>
pub fn subnet_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.subnet_ids.unwrap_or_default();
v.push(input.into());
self.subnet_ids = Some(v);
self
}
/// <p>The list of IDs of the subnets associated with the Amazon EMR Studio.</p>
pub fn set_subnet_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.subnet_ids = input;
self
}
/// <p>The name of the IAM role assumed by the Amazon EMR Studio.</p>
pub fn service_role(mut self, input: impl Into<std::string::String>) -> Self {
self.service_role = Some(input.into());
self
}
/// <p>The name of the IAM role assumed by the Amazon EMR Studio.</p>
pub fn set_service_role(mut self, input: std::option::Option<std::string::String>) -> Self {
self.service_role = input;
self
}
/// <p>The name of the IAM role assumed by users logged in to the Amazon EMR Studio. A Studio only requires a <code>UserRole</code> when you use IAM authentication.</p>
pub fn user_role(mut self, input: impl Into<std::string::String>) -> Self {
self.user_role = Some(input.into());
self
}
/// <p>The name of the IAM role assumed by users logged in to the Amazon EMR Studio. A Studio only requires a <code>UserRole</code> when you use IAM authentication.</p>
pub fn set_user_role(mut self, input: std::option::Option<std::string::String>) -> Self {
self.user_role = input;
self
}
/// <p>The ID of the Workspace security group associated with the Amazon EMR Studio. The Workspace security group allows outbound network traffic to resources in the Engine security group and to the internet.</p>
pub fn workspace_security_group_id(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.workspace_security_group_id = Some(input.into());
self
}
/// <p>The ID of the Workspace security group associated with the Amazon EMR Studio. The Workspace security group allows outbound network traffic to resources in the Engine security group and to the internet.</p>
pub fn set_workspace_security_group_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.workspace_security_group_id = input;
self
}
/// <p>The ID of the Engine security group associated with the Amazon EMR Studio. The Engine security group allows inbound network traffic from resources in the Workspace security group.</p>
pub fn engine_security_group_id(mut self, input: impl Into<std::string::String>) -> Self {
self.engine_security_group_id = Some(input.into());
self
}
/// <p>The ID of the Engine security group associated with the Amazon EMR Studio. The Engine security group allows inbound network traffic from resources in the Workspace security group.</p>
pub fn set_engine_security_group_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.engine_security_group_id = input;
self
}
/// <p>The unique access URL of the Amazon EMR Studio.</p>
pub fn url(mut self, input: impl Into<std::string::String>) -> Self {
self.url = Some(input.into());
self
}
/// <p>The unique access URL of the Amazon EMR Studio.</p>
pub fn set_url(mut self, input: std::option::Option<std::string::String>) -> Self {
self.url = input;
self
}
/// <p>The time the Amazon EMR Studio was created.</p>
pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_time = Some(input);
self
}
/// <p>The time the Amazon EMR Studio was created.</p>
pub fn set_creation_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_time = input;
self
}
/// <p>The Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.</p>
pub fn default_s3_location(mut self, input: impl Into<std::string::String>) -> Self {
self.default_s3_location = Some(input.into());
self
}
/// <p>The Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.</p>
pub fn set_default_s3_location(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.default_s3_location = input;
self
}
/// <p>Your identity provider's authentication endpoint. Amazon EMR Studio redirects federated users to this endpoint for authentication when logging in to a Studio with the Studio URL.</p>
pub fn idp_auth_url(mut self, input: impl Into<std::string::String>) -> Self {
self.idp_auth_url = Some(input.into());
self
}
/// <p>Your identity provider's authentication endpoint. Amazon EMR Studio redirects federated users to this endpoint for authentication when logging in to a Studio with the Studio URL.</p>
pub fn set_idp_auth_url(mut self, input: std::option::Option<std::string::String>) -> Self {
self.idp_auth_url = input;
self
}
/// <p>The name of your identity provider's <code>RelayState</code> parameter.</p>
pub fn idp_relay_state_parameter_name(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.idp_relay_state_parameter_name = Some(input.into());
self
}
/// <p>The name of your identity provider's <code>RelayState</code> parameter.</p>
pub fn set_idp_relay_state_parameter_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.idp_relay_state_parameter_name = input;
self
}
/// Appends an item to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>A list of tags associated with the Amazon EMR Studio.</p>
pub fn tags(mut self, input: crate::model::Tag) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input);
self.tags = Some(v);
self
}
/// <p>A list of tags associated with the Amazon EMR Studio.</p>
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`Studio`](crate::model::Studio)
pub fn build(self) -> crate::model::Studio {
crate::model::Studio {
studio_id: self.studio_id,
studio_arn: self.studio_arn,
name: self.name,
description: self.description,
auth_mode: self.auth_mode,
vpc_id: self.vpc_id,
subnet_ids: self.subnet_ids,
service_role: self.service_role,
user_role: self.user_role,
workspace_security_group_id: self.workspace_security_group_id,
engine_security_group_id: self.engine_security_group_id,
url: self.url,
creation_time: self.creation_time,
default_s3_location: self.default_s3_location,
idp_auth_url: self.idp_auth_url,
idp_relay_state_parameter_name: self.idp_relay_state_parameter_name,
tags: self.tags,
}
}
}
}
impl Studio {
/// Creates a new builder-style object to manufacture [`Studio`](crate::model::Studio)
pub fn builder() -> crate::model::studio::Builder {
crate::model::studio::Builder::default()
}
}
/// <p>This represents a step in a cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Step {
/// <p>The identifier of the cluster step.</p>
pub id: std::option::Option<std::string::String>,
/// <p>The name of the cluster step.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The Hadoop job configuration of the cluster step.</p>
pub config: std::option::Option<crate::model::HadoopStepConfig>,
/// <p>The action to take when the cluster step fails. Possible values are <code>TERMINATE_CLUSTER</code>, <code>CANCEL_AND_WAIT</code>, and <code>CONTINUE</code>. <code>TERMINATE_JOB_FLOW</code> is provided for backward compatibility. We recommend using <code>TERMINATE_CLUSTER</code> instead.</p>
/// <p>If a cluster's <code>StepConcurrencyLevel</code> is greater than <code>1</code>, do not use <code>AddJobFlowSteps</code> to submit a step with this parameter set to <code>CANCEL_AND_WAIT</code> or <code>TERMINATE_CLUSTER</code>. The step is not submitted and the action fails with a message that the <code>ActionOnFailure</code> setting is not valid.</p>
/// <p>If you change a cluster's <code>StepConcurrencyLevel</code> to be greater than 1 while a step is running, the <code>ActionOnFailure</code> parameter may not behave as you expect. In this case, for a step that fails with this parameter set to <code>CANCEL_AND_WAIT</code>, pending steps and the running step are not canceled; for a step that fails with this parameter set to <code>TERMINATE_CLUSTER</code>, the cluster does not terminate.</p>
pub action_on_failure: std::option::Option<crate::model::ActionOnFailure>,
/// <p>The current execution status details of the cluster step.</p>
pub status: std::option::Option<crate::model::StepStatus>,
}
impl Step {
/// <p>The identifier of the cluster step.</p>
pub fn id(&self) -> std::option::Option<&str> {
self.id.as_deref()
}
/// <p>The name of the cluster step.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The Hadoop job configuration of the cluster step.</p>
pub fn config(&self) -> std::option::Option<&crate::model::HadoopStepConfig> {
self.config.as_ref()
}
/// <p>The action to take when the cluster step fails. Possible values are <code>TERMINATE_CLUSTER</code>, <code>CANCEL_AND_WAIT</code>, and <code>CONTINUE</code>. <code>TERMINATE_JOB_FLOW</code> is provided for backward compatibility. We recommend using <code>TERMINATE_CLUSTER</code> instead.</p>
/// <p>If a cluster's <code>StepConcurrencyLevel</code> is greater than <code>1</code>, do not use <code>AddJobFlowSteps</code> to submit a step with this parameter set to <code>CANCEL_AND_WAIT</code> or <code>TERMINATE_CLUSTER</code>. The step is not submitted and the action fails with a message that the <code>ActionOnFailure</code> setting is not valid.</p>
/// <p>If you change a cluster's <code>StepConcurrencyLevel</code> to be greater than 1 while a step is running, the <code>ActionOnFailure</code> parameter may not behave as you expect. In this case, for a step that fails with this parameter set to <code>CANCEL_AND_WAIT</code>, pending steps and the running step are not canceled; for a step that fails with this parameter set to <code>TERMINATE_CLUSTER</code>, the cluster does not terminate.</p>
pub fn action_on_failure(&self) -> std::option::Option<&crate::model::ActionOnFailure> {
self.action_on_failure.as_ref()
}
/// <p>The current execution status details of the cluster step.</p>
pub fn status(&self) -> std::option::Option<&crate::model::StepStatus> {
self.status.as_ref()
}
}
impl std::fmt::Debug for Step {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Step");
formatter.field("id", &self.id);
formatter.field("name", &self.name);
formatter.field("config", &self.config);
formatter.field("action_on_failure", &self.action_on_failure);
formatter.field("status", &self.status);
formatter.finish()
}
}
/// See [`Step`](crate::model::Step)
pub mod step {
/// A builder for [`Step`](crate::model::Step)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) config: std::option::Option<crate::model::HadoopStepConfig>,
pub(crate) action_on_failure: std::option::Option<crate::model::ActionOnFailure>,
pub(crate) status: std::option::Option<crate::model::StepStatus>,
}
impl Builder {
/// <p>The identifier of the cluster step.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.id = Some(input.into());
self
}
/// <p>The identifier of the cluster step.</p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.id = input;
self
}
/// <p>The name of the cluster step.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the cluster step.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The Hadoop job configuration of the cluster step.</p>
pub fn config(mut self, input: crate::model::HadoopStepConfig) -> Self {
self.config = Some(input);
self
}
/// <p>The Hadoop job configuration of the cluster step.</p>
pub fn set_config(
mut self,
input: std::option::Option<crate::model::HadoopStepConfig>,
) -> Self {
self.config = input;
self
}
/// <p>The action to take when the cluster step fails. Possible values are <code>TERMINATE_CLUSTER</code>, <code>CANCEL_AND_WAIT</code>, and <code>CONTINUE</code>. <code>TERMINATE_JOB_FLOW</code> is provided for backward compatibility. We recommend using <code>TERMINATE_CLUSTER</code> instead.</p>
/// <p>If a cluster's <code>StepConcurrencyLevel</code> is greater than <code>1</code>, do not use <code>AddJobFlowSteps</code> to submit a step with this parameter set to <code>CANCEL_AND_WAIT</code> or <code>TERMINATE_CLUSTER</code>. The step is not submitted and the action fails with a message that the <code>ActionOnFailure</code> setting is not valid.</p>
/// <p>If you change a cluster's <code>StepConcurrencyLevel</code> to be greater than 1 while a step is running, the <code>ActionOnFailure</code> parameter may not behave as you expect. In this case, for a step that fails with this parameter set to <code>CANCEL_AND_WAIT</code>, pending steps and the running step are not canceled; for a step that fails with this parameter set to <code>TERMINATE_CLUSTER</code>, the cluster does not terminate.</p>
pub fn action_on_failure(mut self, input: crate::model::ActionOnFailure) -> Self {
self.action_on_failure = Some(input);
self
}
/// <p>The action to take when the cluster step fails. Possible values are <code>TERMINATE_CLUSTER</code>, <code>CANCEL_AND_WAIT</code>, and <code>CONTINUE</code>. <code>TERMINATE_JOB_FLOW</code> is provided for backward compatibility. We recommend using <code>TERMINATE_CLUSTER</code> instead.</p>
/// <p>If a cluster's <code>StepConcurrencyLevel</code> is greater than <code>1</code>, do not use <code>AddJobFlowSteps</code> to submit a step with this parameter set to <code>CANCEL_AND_WAIT</code> or <code>TERMINATE_CLUSTER</code>. The step is not submitted and the action fails with a message that the <code>ActionOnFailure</code> setting is not valid.</p>
/// <p>If you change a cluster's <code>StepConcurrencyLevel</code> to be greater than 1 while a step is running, the <code>ActionOnFailure</code> parameter may not behave as you expect. In this case, for a step that fails with this parameter set to <code>CANCEL_AND_WAIT</code>, pending steps and the running step are not canceled; for a step that fails with this parameter set to <code>TERMINATE_CLUSTER</code>, the cluster does not terminate.</p>
pub fn set_action_on_failure(
mut self,
input: std::option::Option<crate::model::ActionOnFailure>,
) -> Self {
self.action_on_failure = input;
self
}
/// <p>The current execution status details of the cluster step.</p>
pub fn status(mut self, input: crate::model::StepStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The current execution status details of the cluster step.</p>
pub fn set_status(mut self, input: std::option::Option<crate::model::StepStatus>) -> Self {
self.status = input;
self
}
/// Consumes the builder and constructs a [`Step`](crate::model::Step)
pub fn build(self) -> crate::model::Step {
crate::model::Step {
id: self.id,
name: self.name,
config: self.config,
action_on_failure: self.action_on_failure,
status: self.status,
}
}
}
}
impl Step {
/// Creates a new builder-style object to manufacture [`Step`](crate::model::Step)
pub fn builder() -> crate::model::step::Builder {
crate::model::step::Builder::default()
}
}
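// Illustrative sketch: a minimal `Step` assembled with the builder above. Only
// setters defined in this module are used and the identifier/name values are
// hypothetical; `action_on_failure` and `status` would be populated the same
// way with their respective model types.
#[cfg(test)]
mod step_builder_example {
    #[test]
    fn builds_a_minimal_step() {
        let step = crate::model::Step::builder()
            .id("s-1ABCDEFGHIJKL")
            .name("Example step")
            .build();
        assert_eq!(step.id(), Some("s-1ABCDEFGHIJKL"));
        assert_eq!(step.name(), Some("Example step"));
        // Unset fields remain `None`.
        assert!(step.status().is_none());
    }
}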
/// <p>The Amazon Linux release specified for a cluster in the RunJobFlow request.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct OsRelease {
/// <p>The Amazon Linux release specified for a cluster in the RunJobFlow request. The format is as shown in <a href="https://docs.aws.amazon.com/AL2/latest/relnotes/relnotes-20220218.html"> <i>Amazon Linux 2 Release Notes</i> </a>. For example, 2.0.20220218.1.</p>
pub label: std::option::Option<std::string::String>,
}
impl OsRelease {
/// <p>The Amazon Linux release specified for a cluster in the RunJobFlow request. The format is as shown in <a href="https://docs.aws.amazon.com/AL2/latest/relnotes/relnotes-20220218.html"> <i>Amazon Linux 2 Release Notes</i> </a>. For example, 2.0.20220218.1.</p>
pub fn label(&self) -> std::option::Option<&str> {
self.label.as_deref()
}
}
impl std::fmt::Debug for OsRelease {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("OsRelease");
formatter.field("label", &self.label);
formatter.finish()
}
}
/// See [`OsRelease`](crate::model::OsRelease)
pub mod os_release {
/// A builder for [`OsRelease`](crate::model::OsRelease)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) label: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The Amazon Linux release specified for a cluster in the RunJobFlow request. The format is as shown in <a href="https://docs.aws.amazon.com/AL2/latest/relnotes/relnotes-20220218.html"> <i>Amazon Linux 2 Release Notes</i> </a>. For example, 2.0.20220218.1.</p>
pub fn label(mut self, input: impl Into<std::string::String>) -> Self {
self.label = Some(input.into());
self
}
/// <p>The Amazon Linux release specified for a cluster in the RunJobFlow request. The format is as shown in <a href="https://docs.aws.amazon.com/AL2/latest/relnotes/relnotes-20220218.html"> <i>Amazon Linux 2 Release Notes</i> </a>. For example, 2.0.20220218.1.</p>
pub fn set_label(mut self, input: std::option::Option<std::string::String>) -> Self {
self.label = input;
self
}
/// Consumes the builder and constructs a [`OsRelease`](crate::model::OsRelease)
pub fn build(self) -> crate::model::OsRelease {
crate::model::OsRelease { label: self.label }
}
}
}
impl OsRelease {
/// Creates a new builder-style object to manufacture [`OsRelease`](crate::model::OsRelease)
pub fn builder() -> crate::model::os_release::Builder {
crate::model::os_release::Builder::default()
}
}
/// <p>The returned release label application names or versions.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SimplifiedApplication {
/// <p>The returned release label application name. For example, <code>hadoop</code>.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The returned release label application version. For example, <code>3.2.1</code>.</p>
pub version: std::option::Option<std::string::String>,
}
impl SimplifiedApplication {
/// <p>The returned release label application name. For example, <code>hadoop</code>.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The returned release label application version. For example, <code>3.2.1</code>.</p>
pub fn version(&self) -> std::option::Option<&str> {
self.version.as_deref()
}
}
impl std::fmt::Debug for SimplifiedApplication {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("SimplifiedApplication");
formatter.field("name", &self.name);
formatter.field("version", &self.version);
formatter.finish()
}
}
/// See [`SimplifiedApplication`](crate::model::SimplifiedApplication)
pub mod simplified_application {
/// A builder for [`SimplifiedApplication`](crate::model::SimplifiedApplication)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) version: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The returned release label application name. For example, <code>hadoop</code>.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The returned release label application name. For example, <code>hadoop</code>.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The returned release label application version. For example, <code>3.2.1</code>.</p>
pub fn version(mut self, input: impl Into<std::string::String>) -> Self {
self.version = Some(input.into());
self
}
/// <p>The returned release label application version. For example, <code>3.2.1</code>.</p>
pub fn set_version(mut self, input: std::option::Option<std::string::String>) -> Self {
self.version = input;
self
}
/// Consumes the builder and constructs a [`SimplifiedApplication`](crate::model::SimplifiedApplication)
pub fn build(self) -> crate::model::SimplifiedApplication {
crate::model::SimplifiedApplication {
name: self.name,
version: self.version,
}
}
}
}
impl SimplifiedApplication {
/// Creates a new builder-style object to manufacture [`SimplifiedApplication`](crate::model::SimplifiedApplication)
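    ///
    /// A minimal usage sketch; the name and version shown are the illustrative values from the field docs, not output from a real release label query:
    ///
    /// ```ignore
    /// // Build a SimplifiedApplication and read both fields back.
    /// let app = crate::model::SimplifiedApplication::builder()
    ///     .name("hadoop")
    ///     .version("3.2.1")
    ///     .build();
    /// assert_eq!(app.name(), Some("hadoop"));
    /// assert_eq!(app.version(), Some("3.2.1"));
    /// ```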
pub fn builder() -> crate::model::simplified_application::Builder {
crate::model::simplified_application::Builder::default()
}
}
/// <p>A notebook execution. An execution is a specific instance that an EMR Notebook is run using the <code>StartNotebookExecution</code> action.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NotebookExecution {
/// <p>The unique identifier of a notebook execution.</p>
pub notebook_execution_id: std::option::Option<std::string::String>,
/// <p>The unique identifier of the EMR Notebook that is used for the notebook execution.</p>
pub editor_id: std::option::Option<std::string::String>,
/// <p>The execution engine, such as an EMR cluster, used to run the EMR notebook and perform the notebook execution.</p>
pub execution_engine: std::option::Option<crate::model::ExecutionEngineConfig>,
/// <p>A name for the notebook execution.</p>
pub notebook_execution_name: std::option::Option<std::string::String>,
/// <p>Input parameters in JSON format passed to the EMR Notebook at runtime for execution.</p>
pub notebook_params: std::option::Option<std::string::String>,
/// <p>The status of the notebook execution.</p>
/// <ul>
/// <li> <p> <code>START_PENDING</code> indicates that the cluster has received the execution request but execution has not begun.</p> </li>
/// <li> <p> <code>STARTING</code> indicates that the execution is starting on the cluster.</p> </li>
/// <li> <p> <code>RUNNING</code> indicates that the execution is being processed by the cluster.</p> </li>
/// <li> <p> <code>FINISHING</code> indicates that execution processing is in the final stages.</p> </li>
/// <li> <p> <code>FINISHED</code> indicates that the execution has completed without error.</p> </li>
/// <li> <p> <code>FAILING</code> indicates that the execution is failing and will not finish successfully.</p> </li>
/// <li> <p> <code>FAILED</code> indicates that the execution failed.</p> </li>
/// <li> <p> <code>STOP_PENDING</code> indicates that the cluster has received a <code>StopNotebookExecution</code> request and the stop is pending.</p> </li>
/// <li> <p> <code>STOPPING</code> indicates that the cluster is in the process of stopping the execution as a result of a <code>StopNotebookExecution</code> request.</p> </li>
/// <li> <p> <code>STOPPED</code> indicates that the execution stopped because of a <code>StopNotebookExecution</code> request.</p> </li>
/// </ul>
pub status: std::option::Option<crate::model::NotebookExecutionStatus>,
/// <p>The timestamp when notebook execution started.</p>
pub start_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The timestamp when notebook execution ended.</p>
pub end_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The Amazon Resource Name (ARN) of the notebook execution.</p>
pub arn: std::option::Option<std::string::String>,
/// <p>The location of the notebook execution's output file in Amazon S3.</p>
pub output_notebook_uri: std::option::Option<std::string::String>,
/// <p>The reason for the latest status change of the notebook execution.</p>
pub last_state_change_reason: std::option::Option<std::string::String>,
/// <p>The unique identifier of the EC2 security group associated with the EMR Notebook instance. For more information see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html">Specifying EC2 Security Groups for EMR Notebooks</a> in the <i>EMR Management Guide</i>.</p>
pub notebook_instance_security_group_id: std::option::Option<std::string::String>,
/// <p>A list of tags associated with a notebook execution. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl NotebookExecution {
/// <p>The unique identifier of a notebook execution.</p>
pub fn notebook_execution_id(&self) -> std::option::Option<&str> {
self.notebook_execution_id.as_deref()
}
/// <p>The unique identifier of the EMR Notebook that is used for the notebook execution.</p>
pub fn editor_id(&self) -> std::option::Option<&str> {
self.editor_id.as_deref()
}
/// <p>The execution engine, such as an EMR cluster, used to run the EMR notebook and perform the notebook execution.</p>
pub fn execution_engine(&self) -> std::option::Option<&crate::model::ExecutionEngineConfig> {
self.execution_engine.as_ref()
}
/// <p>A name for the notebook execution.</p>
pub fn notebook_execution_name(&self) -> std::option::Option<&str> {
self.notebook_execution_name.as_deref()
}
/// <p>Input parameters in JSON format passed to the EMR Notebook at runtime for execution.</p>
pub fn notebook_params(&self) -> std::option::Option<&str> {
self.notebook_params.as_deref()
}
/// <p>The status of the notebook execution.</p>
/// <ul>
/// <li> <p> <code>START_PENDING</code> indicates that the cluster has received the execution request but execution has not begun.</p> </li>
/// <li> <p> <code>STARTING</code> indicates that the execution is starting on the cluster.</p> </li>
/// <li> <p> <code>RUNNING</code> indicates that the execution is being processed by the cluster.</p> </li>
/// <li> <p> <code>FINISHING</code> indicates that execution processing is in the final stages.</p> </li>
/// <li> <p> <code>FINISHED</code> indicates that the execution has completed without error.</p> </li>
/// <li> <p> <code>FAILING</code> indicates that the execution is failing and will not finish successfully.</p> </li>
/// <li> <p> <code>FAILED</code> indicates that the execution failed.</p> </li>
/// <li> <p> <code>STOP_PENDING</code> indicates that the cluster has received a <code>StopNotebookExecution</code> request and the stop is pending.</p> </li>
/// <li> <p> <code>STOPPING</code> indicates that the cluster is in the process of stopping the execution as a result of a <code>StopNotebookExecution</code> request.</p> </li>
/// <li> <p> <code>STOPPED</code> indicates that the execution stopped because of a <code>StopNotebookExecution</code> request.</p> </li>
/// </ul>
pub fn status(&self) -> std::option::Option<&crate::model::NotebookExecutionStatus> {
self.status.as_ref()
}
/// <p>The timestamp when notebook execution started.</p>
pub fn start_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.start_time.as_ref()
}
/// <p>The timestamp when notebook execution ended.</p>
pub fn end_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_time.as_ref()
}
/// <p>The Amazon Resource Name (ARN) of the notebook execution.</p>
pub fn arn(&self) -> std::option::Option<&str> {
self.arn.as_deref()
}
/// <p>The location of the notebook execution's output file in Amazon S3.</p>
pub fn output_notebook_uri(&self) -> std::option::Option<&str> {
self.output_notebook_uri.as_deref()
}
/// <p>The reason for the latest status change of the notebook execution.</p>
pub fn last_state_change_reason(&self) -> std::option::Option<&str> {
self.last_state_change_reason.as_deref()
}
/// <p>The unique identifier of the EC2 security group associated with the EMR Notebook instance. For more information see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html">Specifying EC2 Security Groups for EMR Notebooks</a> in the <i>EMR Management Guide</i>.</p>
pub fn notebook_instance_security_group_id(&self) -> std::option::Option<&str> {
self.notebook_instance_security_group_id.as_deref()
}
/// <p>A list of tags associated with a notebook execution. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.</p>
pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
self.tags.as_deref()
}
}
impl std::fmt::Debug for NotebookExecution {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("NotebookExecution");
formatter.field("notebook_execution_id", &self.notebook_execution_id);
formatter.field("editor_id", &self.editor_id);
formatter.field("execution_engine", &self.execution_engine);
formatter.field("notebook_execution_name", &self.notebook_execution_name);
formatter.field("notebook_params", &self.notebook_params);
formatter.field("status", &self.status);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.field("arn", &self.arn);
formatter.field("output_notebook_uri", &self.output_notebook_uri);
formatter.field("last_state_change_reason", &self.last_state_change_reason);
formatter.field(
"notebook_instance_security_group_id",
&self.notebook_instance_security_group_id,
);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
/// See [`NotebookExecution`](crate::model::NotebookExecution)
pub mod notebook_execution {
/// A builder for [`NotebookExecution`](crate::model::NotebookExecution)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) notebook_execution_id: std::option::Option<std::string::String>,
pub(crate) editor_id: std::option::Option<std::string::String>,
pub(crate) execution_engine: std::option::Option<crate::model::ExecutionEngineConfig>,
pub(crate) notebook_execution_name: std::option::Option<std::string::String>,
pub(crate) notebook_params: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::NotebookExecutionStatus>,
pub(crate) start_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) arn: std::option::Option<std::string::String>,
pub(crate) output_notebook_uri: std::option::Option<std::string::String>,
pub(crate) last_state_change_reason: std::option::Option<std::string::String>,
pub(crate) notebook_instance_security_group_id: std::option::Option<std::string::String>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Builder {
/// <p>The unique identifier of a notebook execution.</p>
pub fn notebook_execution_id(mut self, input: impl Into<std::string::String>) -> Self {
self.notebook_execution_id = Some(input.into());
self
}
/// <p>The unique identifier of a notebook execution.</p>
pub fn set_notebook_execution_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.notebook_execution_id = input;
self
}
/// <p>The unique identifier of the EMR Notebook that is used for the notebook execution.</p>
pub fn editor_id(mut self, input: impl Into<std::string::String>) -> Self {
self.editor_id = Some(input.into());
self
}
/// <p>The unique identifier of the EMR Notebook that is used for the notebook execution.</p>
pub fn set_editor_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.editor_id = input;
self
}
/// <p>The execution engine, such as an EMR cluster, used to run the EMR notebook and perform the notebook execution.</p>
pub fn execution_engine(mut self, input: crate::model::ExecutionEngineConfig) -> Self {
self.execution_engine = Some(input);
self
}
/// <p>The execution engine, such as an EMR cluster, used to run the EMR notebook and perform the notebook execution.</p>
pub fn set_execution_engine(
mut self,
input: std::option::Option<crate::model::ExecutionEngineConfig>,
) -> Self {
self.execution_engine = input;
self
}
/// <p>A name for the notebook execution.</p>
pub fn notebook_execution_name(mut self, input: impl Into<std::string::String>) -> Self {
self.notebook_execution_name = Some(input.into());
self
}
/// <p>A name for the notebook execution.</p>
pub fn set_notebook_execution_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.notebook_execution_name = input;
self
}
/// <p>Input parameters in JSON format passed to the EMR Notebook at runtime for execution.</p>
pub fn notebook_params(mut self, input: impl Into<std::string::String>) -> Self {
self.notebook_params = Some(input.into());
self
}
/// <p>Input parameters in JSON format passed to the EMR Notebook at runtime for execution.</p>
pub fn set_notebook_params(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.notebook_params = input;
self
}
/// <p>The status of the notebook execution.</p>
/// <ul>
/// <li> <p> <code>START_PENDING</code> indicates that the cluster has received the execution request but execution has not begun.</p> </li>
/// <li> <p> <code>STARTING</code> indicates that the execution is starting on the cluster.</p> </li>
/// <li> <p> <code>RUNNING</code> indicates that the execution is being processed by the cluster.</p> </li>
/// <li> <p> <code>FINISHING</code> indicates that execution processing is in the final stages.</p> </li>
/// <li> <p> <code>FINISHED</code> indicates that the execution has completed without error.</p> </li>
/// <li> <p> <code>FAILING</code> indicates that the execution is failing and will not finish successfully.</p> </li>
/// <li> <p> <code>FAILED</code> indicates that the execution failed.</p> </li>
/// <li> <p> <code>STOP_PENDING</code> indicates that the cluster has received a <code>StopNotebookExecution</code> request and the stop is pending.</p> </li>
/// <li> <p> <code>STOPPING</code> indicates that the cluster is in the process of stopping the execution as a result of a <code>StopNotebookExecution</code> request.</p> </li>
/// <li> <p> <code>STOPPED</code> indicates that the execution stopped because of a <code>StopNotebookExecution</code> request.</p> </li>
/// </ul>
pub fn status(mut self, input: crate::model::NotebookExecutionStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The status of the notebook execution.</p>
/// <ul>
/// <li> <p> <code>START_PENDING</code> indicates that the cluster has received the execution request but execution has not begun.</p> </li>
/// <li> <p> <code>STARTING</code> indicates that the execution is starting on the cluster.</p> </li>
/// <li> <p> <code>RUNNING</code> indicates that the execution is being processed by the cluster.</p> </li>
/// <li> <p> <code>FINISHING</code> indicates that execution processing is in the final stages.</p> </li>
/// <li> <p> <code>FINISHED</code> indicates that the execution has completed without error.</p> </li>
/// <li> <p> <code>FAILING</code> indicates that the execution is failing and will not finish successfully.</p> </li>
/// <li> <p> <code>FAILED</code> indicates that the execution failed.</p> </li>
/// <li> <p> <code>STOP_PENDING</code> indicates that the cluster has received a <code>StopNotebookExecution</code> request and the stop is pending.</p> </li>
/// <li> <p> <code>STOPPING</code> indicates that the cluster is in the process of stopping the execution as a result of a <code>StopNotebookExecution</code> request.</p> </li>
/// <li> <p> <code>STOPPED</code> indicates that the execution stopped because of a <code>StopNotebookExecution</code> request.</p> </li>
/// </ul>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::NotebookExecutionStatus>,
) -> Self {
self.status = input;
self
}
/// <p>The timestamp when notebook execution started.</p>
pub fn start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.start_time = Some(input);
self
}
/// <p>The timestamp when notebook execution started.</p>
pub fn set_start_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.start_time = input;
self
}
/// <p>The timestamp when notebook execution ended.</p>
pub fn end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_time = Some(input);
self
}
/// <p>The timestamp when notebook execution ended.</p>
pub fn set_end_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_time = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the notebook execution.</p>
pub fn arn(mut self, input: impl Into<std::string::String>) -> Self {
self.arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the notebook execution.</p>
pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.arn = input;
self
}
/// <p>The location of the notebook execution's output file in Amazon S3.</p>
pub fn output_notebook_uri(mut self, input: impl Into<std::string::String>) -> Self {
self.output_notebook_uri = Some(input.into());
self
}
/// <p>The location of the notebook execution's output file in Amazon S3.</p>
pub fn set_output_notebook_uri(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.output_notebook_uri = input;
self
}
/// <p>The reason for the latest status change of the notebook execution.</p>
pub fn last_state_change_reason(mut self, input: impl Into<std::string::String>) -> Self {
self.last_state_change_reason = Some(input.into());
self
}
/// <p>The reason for the latest status change of the notebook execution.</p>
pub fn set_last_state_change_reason(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.last_state_change_reason = input;
self
}
/// <p>The unique identifier of the EC2 security group associated with the EMR Notebook instance. For more information see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html">Specifying EC2 Security Groups for EMR Notebooks</a> in the <i>EMR Management Guide</i>.</p>
pub fn notebook_instance_security_group_id(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.notebook_instance_security_group_id = Some(input.into());
self
}
/// <p>The unique identifier of the EC2 security group associated with the EMR Notebook instance. For more information see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html">Specifying EC2 Security Groups for EMR Notebooks</a> in the <i>EMR Management Guide</i>.</p>
pub fn set_notebook_instance_security_group_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.notebook_instance_security_group_id = input;
self
}
/// Appends an item to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>A list of tags associated with a notebook execution. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.</p>
pub fn tags(mut self, input: crate::model::Tag) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input);
self.tags = Some(v);
self
}
/// <p>A list of tags associated with a notebook execution. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.</p>
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`NotebookExecution`](crate::model::NotebookExecution)
pub fn build(self) -> crate::model::NotebookExecution {
crate::model::NotebookExecution {
notebook_execution_id: self.notebook_execution_id,
editor_id: self.editor_id,
execution_engine: self.execution_engine,
notebook_execution_name: self.notebook_execution_name,
notebook_params: self.notebook_params,
status: self.status,
start_time: self.start_time,
end_time: self.end_time,
arn: self.arn,
output_notebook_uri: self.output_notebook_uri,
last_state_change_reason: self.last_state_change_reason,
notebook_instance_security_group_id: self.notebook_instance_security_group_id,
tags: self.tags,
}
}
}
}
impl NotebookExecution {
/// Creates a new builder-style object to manufacture [`NotebookExecution`](crate::model::NotebookExecution)
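    ///
    /// A minimal usage sketch; the identifiers and name below are placeholders, and any field that is never set on the builder remains `None`:
    ///
    /// ```ignore
    /// // Build a NotebookExecution from a few string fields; unset fields stay None.
    /// let execution = crate::model::NotebookExecution::builder()
    ///     .notebook_execution_id("ex-EXAMPLE")
    ///     .editor_id("e-EXAMPLE")
    ///     .notebook_execution_name("my-execution")
    ///     .build();
    /// assert_eq!(execution.notebook_execution_id(), Some("ex-EXAMPLE"));
    /// assert!(execution.status().is_none()); // never set on the builder
    /// ```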
pub fn builder() -> crate::model::notebook_execution::Builder {
crate::model::notebook_execution::Builder::default()
}
}
/// <p>A description of a cluster (job flow).</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobFlowDetail {
/// <p>The job flow identifier.</p>
pub job_flow_id: std::option::Option<std::string::String>,
/// <p>The name of the job flow.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The location in Amazon S3 where log files for the job are stored.</p>
pub log_uri: std::option::Option<std::string::String>,
/// <p>The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.</p>
pub log_encryption_kms_key_id: std::option::Option<std::string::String>,
/// <p>Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, <code>ReleaseLabel</code> is used. To specify a custom AMI, use <code>CustomAmiID</code>.</p>
pub ami_version: std::option::Option<std::string::String>,
/// <p>Describes the execution status of the job flow.</p>
pub execution_status_detail: std::option::Option<crate::model::JobFlowExecutionStatusDetail>,
/// <p>Describes the Amazon EC2 instances of the job flow.</p>
pub instances: std::option::Option<crate::model::JobFlowInstancesDetail>,
/// <p>A list of steps run by the job flow.</p>
pub steps: std::option::Option<std::vec::Vec<crate::model::StepDetail>>,
/// <p>A list of the bootstrap actions run by the job flow.</p>
pub bootstrap_actions: std::option::Option<std::vec::Vec<crate::model::BootstrapActionDetail>>,
/// <p>A list of strings set by third-party software when the job flow is launched. If you are not using third-party software to manage the job flow, this value is empty.</p>
pub supported_products: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When <code>true</code>, IAM principals in the Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When <code>false</code>, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.</p>
/// <p>The default value is <code>true</code> if a value is not provided when creating a cluster using the EMR API <code>RunJobFlow</code> command, the CLI <a href="https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html">create-cluster</a> command, or the Amazon Web Services Management Console.</p>
pub visible_to_all_users: bool,
/// <p>The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.</p>
pub job_flow_role: std::option::Option<std::string::String>,
/// <p>The IAM role that is assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.</p>
pub service_role: std::option::Option<std::string::String>,
/// <p>An IAM role for automatic scaling policies. The default role is <code>EMR_AutoScaling_DefaultRole</code>. The IAM role provides a way for the automatic scaling feature to get the required permissions it needs to launch and terminate EC2 instances in an instance group.</p>
pub auto_scaling_role: std::option::Option<std::string::String>,
    /// <p>The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. <code>TERMINATE_AT_INSTANCE_HOUR</code> indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. <code>TERMINATE_AT_TASK_COMPLETION</code> indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. <code>TERMINATE_AT_TASK_COMPLETION</code> is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.</p>
pub scale_down_behavior: std::option::Option<crate::model::ScaleDownBehavior>,
}
impl JobFlowDetail {
/// <p>The job flow identifier.</p>
pub fn job_flow_id(&self) -> std::option::Option<&str> {
self.job_flow_id.as_deref()
}
/// <p>The name of the job flow.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The location in Amazon S3 where log files for the job are stored.</p>
pub fn log_uri(&self) -> std::option::Option<&str> {
self.log_uri.as_deref()
}
/// <p>The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.</p>
pub fn log_encryption_kms_key_id(&self) -> std::option::Option<&str> {
self.log_encryption_kms_key_id.as_deref()
}
/// <p>Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, <code>ReleaseLabel</code> is used. To specify a custom AMI, use <code>CustomAmiID</code>.</p>
pub fn ami_version(&self) -> std::option::Option<&str> {
self.ami_version.as_deref()
}
/// <p>Describes the execution status of the job flow.</p>
pub fn execution_status_detail(
&self,
) -> std::option::Option<&crate::model::JobFlowExecutionStatusDetail> {
self.execution_status_detail.as_ref()
}
/// <p>Describes the Amazon EC2 instances of the job flow.</p>
pub fn instances(&self) -> std::option::Option<&crate::model::JobFlowInstancesDetail> {
self.instances.as_ref()
}
/// <p>A list of steps run by the job flow.</p>
pub fn steps(&self) -> std::option::Option<&[crate::model::StepDetail]> {
self.steps.as_deref()
}
/// <p>A list of the bootstrap actions run by the job flow.</p>
pub fn bootstrap_actions(&self) -> std::option::Option<&[crate::model::BootstrapActionDetail]> {
self.bootstrap_actions.as_deref()
}
/// <p>A list of strings set by third-party software when the job flow is launched. If you are not using third-party software to manage the job flow, this value is empty.</p>
pub fn supported_products(&self) -> std::option::Option<&[std::string::String]> {
self.supported_products.as_deref()
}
/// <p>Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When <code>true</code>, IAM principals in the Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When <code>false</code>, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.</p>
/// <p>The default value is <code>true</code> if a value is not provided when creating a cluster using the EMR API <code>RunJobFlow</code> command, the CLI <a href="https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html">create-cluster</a> command, or the Amazon Web Services Management Console.</p>
pub fn visible_to_all_users(&self) -> bool {
self.visible_to_all_users
}
/// <p>The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.</p>
pub fn job_flow_role(&self) -> std::option::Option<&str> {
self.job_flow_role.as_deref()
}
/// <p>The IAM role that is assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.</p>
pub fn service_role(&self) -> std::option::Option<&str> {
self.service_role.as_deref()
}
/// <p>An IAM role for automatic scaling policies. The default role is <code>EMR_AutoScaling_DefaultRole</code>. The IAM role provides a way for the automatic scaling feature to get the required permissions it needs to launch and terminate EC2 instances in an instance group.</p>
pub fn auto_scaling_role(&self) -> std::option::Option<&str> {
self.auto_scaling_role.as_deref()
}
    /// <p>The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. <code>TERMINATE_AT_INSTANCE_HOUR</code> indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. <code>TERMINATE_AT_TASK_COMPLETION</code> indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. <code>TERMINATE_AT_TASK_COMPLETION</code> is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.</p>
pub fn scale_down_behavior(&self) -> std::option::Option<&crate::model::ScaleDownBehavior> {
self.scale_down_behavior.as_ref()
}
}
impl std::fmt::Debug for JobFlowDetail {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("JobFlowDetail");
formatter.field("job_flow_id", &self.job_flow_id);
formatter.field("name", &self.name);
formatter.field("log_uri", &self.log_uri);
formatter.field("log_encryption_kms_key_id", &self.log_encryption_kms_key_id);
formatter.field("ami_version", &self.ami_version);
formatter.field("execution_status_detail", &self.execution_status_detail);
formatter.field("instances", &self.instances);
formatter.field("steps", &self.steps);
formatter.field("bootstrap_actions", &self.bootstrap_actions);
formatter.field("supported_products", &self.supported_products);
formatter.field("visible_to_all_users", &self.visible_to_all_users);
formatter.field("job_flow_role", &self.job_flow_role);
formatter.field("service_role", &self.service_role);
formatter.field("auto_scaling_role", &self.auto_scaling_role);
formatter.field("scale_down_behavior", &self.scale_down_behavior);
formatter.finish()
}
}
/// See [`JobFlowDetail`](crate::model::JobFlowDetail)
pub mod job_flow_detail {
/// A builder for [`JobFlowDetail`](crate::model::JobFlowDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) job_flow_id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) log_uri: std::option::Option<std::string::String>,
pub(crate) log_encryption_kms_key_id: std::option::Option<std::string::String>,
pub(crate) ami_version: std::option::Option<std::string::String>,
pub(crate) execution_status_detail:
std::option::Option<crate::model::JobFlowExecutionStatusDetail>,
pub(crate) instances: std::option::Option<crate::model::JobFlowInstancesDetail>,
pub(crate) steps: std::option::Option<std::vec::Vec<crate::model::StepDetail>>,
pub(crate) bootstrap_actions:
std::option::Option<std::vec::Vec<crate::model::BootstrapActionDetail>>,
pub(crate) supported_products: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) visible_to_all_users: std::option::Option<bool>,
pub(crate) job_flow_role: std::option::Option<std::string::String>,
pub(crate) service_role: std::option::Option<std::string::String>,
pub(crate) auto_scaling_role: std::option::Option<std::string::String>,
pub(crate) scale_down_behavior: std::option::Option<crate::model::ScaleDownBehavior>,
}
impl Builder {
/// <p>The job flow identifier.</p>
pub fn job_flow_id(mut self, input: impl Into<std::string::String>) -> Self {
self.job_flow_id = Some(input.into());
self
}
/// <p>The job flow identifier.</p>
pub fn set_job_flow_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.job_flow_id = input;
self
}
/// <p>The name of the job flow.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the job flow.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The location in Amazon S3 where log files for the job are stored.</p>
pub fn log_uri(mut self, input: impl Into<std::string::String>) -> Self {
self.log_uri = Some(input.into());
self
}
/// <p>The location in Amazon S3 where log files for the job are stored.</p>
pub fn set_log_uri(mut self, input: std::option::Option<std::string::String>) -> Self {
self.log_uri = input;
self
}
/// <p>The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.</p>
pub fn log_encryption_kms_key_id(mut self, input: impl Into<std::string::String>) -> Self {
self.log_encryption_kms_key_id = Some(input.into());
self
}
/// <p>The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.</p>
pub fn set_log_encryption_kms_key_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.log_encryption_kms_key_id = input;
self
}
/// <p>Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, <code>ReleaseLabel</code> is used. To specify a custom AMI, use <code>CustomAmiID</code>.</p>
pub fn ami_version(mut self, input: impl Into<std::string::String>) -> Self {
self.ami_version = Some(input.into());
self
}
/// <p>Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, <code>ReleaseLabel</code> is used. To specify a custom AMI, use <code>CustomAmiID</code>.</p>
pub fn set_ami_version(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ami_version = input;
self
}
/// <p>Describes the execution status of the job flow.</p>
pub fn execution_status_detail(
mut self,
input: crate::model::JobFlowExecutionStatusDetail,
) -> Self {
self.execution_status_detail = Some(input);
self
}
/// <p>Describes the execution status of the job flow.</p>
pub fn set_execution_status_detail(
mut self,
input: std::option::Option<crate::model::JobFlowExecutionStatusDetail>,
) -> Self {
self.execution_status_detail = input;
self
}
/// <p>Describes the Amazon EC2 instances of the job flow.</p>
pub fn instances(mut self, input: crate::model::JobFlowInstancesDetail) -> Self {
self.instances = Some(input);
self
}
/// <p>Describes the Amazon EC2 instances of the job flow.</p>
pub fn set_instances(
mut self,
input: std::option::Option<crate::model::JobFlowInstancesDetail>,
) -> Self {
self.instances = input;
self
}
/// Appends an item to `steps`.
///
/// To override the contents of this collection use [`set_steps`](Self::set_steps).
///
/// <p>A list of steps run by the job flow.</p>
pub fn steps(mut self, input: crate::model::StepDetail) -> Self {
let mut v = self.steps.unwrap_or_default();
v.push(input);
self.steps = Some(v);
self
}
/// <p>A list of steps run by the job flow.</p>
pub fn set_steps(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::StepDetail>>,
) -> Self {
self.steps = input;
self
}
/// Appends an item to `bootstrap_actions`.
///
/// To override the contents of this collection use [`set_bootstrap_actions`](Self::set_bootstrap_actions).
///
/// <p>A list of the bootstrap actions run by the job flow.</p>
pub fn bootstrap_actions(mut self, input: crate::model::BootstrapActionDetail) -> Self {
let mut v = self.bootstrap_actions.unwrap_or_default();
v.push(input);
self.bootstrap_actions = Some(v);
self
}
/// <p>A list of the bootstrap actions run by the job flow.</p>
pub fn set_bootstrap_actions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::BootstrapActionDetail>>,
) -> Self {
self.bootstrap_actions = input;
self
}
/// Appends an item to `supported_products`.
///
/// To override the contents of this collection use [`set_supported_products`](Self::set_supported_products).
///
/// <p>A list of strings set by third-party software when the job flow is launched. If you are not using third-party software to manage the job flow, this value is empty.</p>
pub fn supported_products(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.supported_products.unwrap_or_default();
v.push(input.into());
self.supported_products = Some(v);
self
}
/// <p>A list of strings set by third-party software when the job flow is launched. If you are not using third-party software to manage the job flow, this value is empty.</p>
pub fn set_supported_products(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.supported_products = input;
self
}
/// <p>Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When <code>true</code>, IAM principals in the Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When <code>false</code>, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.</p>
/// <p>The default value is <code>true</code> if a value is not provided when creating a cluster using the EMR API <code>RunJobFlow</code> command, the CLI <a href="https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html">create-cluster</a> command, or the Amazon Web Services Management Console.</p>
pub fn visible_to_all_users(mut self, input: bool) -> Self {
self.visible_to_all_users = Some(input);
self
}
/// <p>Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When <code>true</code>, IAM principals in the Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When <code>false</code>, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.</p>
/// <p>The default value is <code>true</code> if a value is not provided when creating a cluster using the EMR API <code>RunJobFlow</code> command, the CLI <a href="https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html">create-cluster</a> command, or the Amazon Web Services Management Console.</p>
pub fn set_visible_to_all_users(mut self, input: std::option::Option<bool>) -> Self {
self.visible_to_all_users = input;
self
}
/// <p>The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.</p>
pub fn job_flow_role(mut self, input: impl Into<std::string::String>) -> Self {
self.job_flow_role = Some(input.into());
self
}
/// <p>The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.</p>
pub fn set_job_flow_role(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.job_flow_role = input;
self
}
/// <p>The IAM role that is assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.</p>
pub fn service_role(mut self, input: impl Into<std::string::String>) -> Self {
self.service_role = Some(input.into());
self
}
/// <p>The IAM role that is assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.</p>
pub fn set_service_role(mut self, input: std::option::Option<std::string::String>) -> Self {
self.service_role = input;
self
}
/// <p>An IAM role for automatic scaling policies. The default role is <code>EMR_AutoScaling_DefaultRole</code>. The IAM role provides a way for the automatic scaling feature to get the required permissions it needs to launch and terminate EC2 instances in an instance group.</p>
pub fn auto_scaling_role(mut self, input: impl Into<std::string::String>) -> Self {
self.auto_scaling_role = Some(input.into());
self
}
/// <p>An IAM role for automatic scaling policies. The default role is <code>EMR_AutoScaling_DefaultRole</code>. The IAM role provides a way for the automatic scaling feature to get the required permissions it needs to launch and terminate EC2 instances in an instance group.</p>
pub fn set_auto_scaling_role(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.auto_scaling_role = input;
self
}
        /// <p>The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. <code>TERMINATE_AT_INSTANCE_HOUR</code> indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. <code>TERMINATE_AT_TASK_COMPLETION</code> indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. <code>TERMINATE_AT_TASK_COMPLETION</code> is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.</p>
pub fn scale_down_behavior(mut self, input: crate::model::ScaleDownBehavior) -> Self {
self.scale_down_behavior = Some(input);
self
}
        /// <p>The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. <code>TERMINATE_AT_INSTANCE_HOUR</code> indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. <code>TERMINATE_AT_TASK_COMPLETION</code> indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. <code>TERMINATE_AT_TASK_COMPLETION</code> is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.</p>
pub fn set_scale_down_behavior(
mut self,
input: std::option::Option<crate::model::ScaleDownBehavior>,
) -> Self {
self.scale_down_behavior = input;
self
}
/// Consumes the builder and constructs a [`JobFlowDetail`](crate::model::JobFlowDetail)
pub fn build(self) -> crate::model::JobFlowDetail {
crate::model::JobFlowDetail {
job_flow_id: self.job_flow_id,
name: self.name,
log_uri: self.log_uri,
log_encryption_kms_key_id: self.log_encryption_kms_key_id,
ami_version: self.ami_version,
execution_status_detail: self.execution_status_detail,
instances: self.instances,
steps: self.steps,
bootstrap_actions: self.bootstrap_actions,
supported_products: self.supported_products,
visible_to_all_users: self.visible_to_all_users.unwrap_or_default(),
job_flow_role: self.job_flow_role,
service_role: self.service_role,
auto_scaling_role: self.auto_scaling_role,
scale_down_behavior: self.scale_down_behavior,
}
}
}
}
impl JobFlowDetail {
/// Creates a new builder-style object to manufacture [`JobFlowDetail`](crate::model::JobFlowDetail)
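    ///
    /// A minimal usage sketch; the identifiers are placeholders. Note that `visible_to_all_users` falls back to `bool::default()` (`false`) when it is never set on the builder:
    ///
    /// ```ignore
    /// // Build a JobFlowDetail from a couple of fields and inspect the defaulted flag.
    /// let job_flow = crate::model::JobFlowDetail::builder()
    ///     .job_flow_id("j-EXAMPLE")
    ///     .name("my-job-flow")
    ///     .build();
    /// assert_eq!(job_flow.job_flow_id(), Some("j-EXAMPLE"));
    /// assert!(!job_flow.visible_to_all_users()); // never set, so it defaults to false
    /// ```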
pub fn builder() -> crate::model::job_flow_detail::Builder {
crate::model::job_flow_detail::Builder::default()
}
}
/// <p>Reports the configuration of a bootstrap action in a cluster (job flow).</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct BootstrapActionDetail {
/// <p>A description of the bootstrap action.</p>
pub bootstrap_action_config: std::option::Option<crate::model::BootstrapActionConfig>,
}
impl BootstrapActionDetail {
/// <p>A description of the bootstrap action.</p>
pub fn bootstrap_action_config(
&self,
) -> std::option::Option<&crate::model::BootstrapActionConfig> {
self.bootstrap_action_config.as_ref()
}
}
impl std::fmt::Debug for BootstrapActionDetail {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("BootstrapActionDetail");
formatter.field("bootstrap_action_config", &self.bootstrap_action_config);
formatter.finish()
}
}
/// See [`BootstrapActionDetail`](crate::model::BootstrapActionDetail)
pub mod bootstrap_action_detail {
/// A builder for [`BootstrapActionDetail`](crate::model::BootstrapActionDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) bootstrap_action_config:
std::option::Option<crate::model::BootstrapActionConfig>,
}
impl Builder {
/// <p>A description of the bootstrap action.</p>
pub fn bootstrap_action_config(
mut self,
input: crate::model::BootstrapActionConfig,
) -> Self {
self.bootstrap_action_config = Some(input);
self
}
/// <p>A description of the bootstrap action.</p>
pub fn set_bootstrap_action_config(
mut self,
input: std::option::Option<crate::model::BootstrapActionConfig>,
) -> Self {
self.bootstrap_action_config = input;
self
}
/// Consumes the builder and constructs a [`BootstrapActionDetail`](crate::model::BootstrapActionDetail)
pub fn build(self) -> crate::model::BootstrapActionDetail {
crate::model::BootstrapActionDetail {
bootstrap_action_config: self.bootstrap_action_config,
}
}
}
}
impl BootstrapActionDetail {
/// Creates a new builder-style object to manufacture [`BootstrapActionDetail`](crate::model::BootstrapActionDetail)
pub fn builder() -> crate::model::bootstrap_action_detail::Builder {
crate::model::bootstrap_action_detail::Builder::default()
}
}
/// <p>Combines the execution state and configuration of a step.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StepDetail {
/// <p>The step configuration.</p>
pub step_config: std::option::Option<crate::model::StepConfig>,
/// <p>The description of the step status.</p>
pub execution_status_detail: std::option::Option<crate::model::StepExecutionStatusDetail>,
}
impl StepDetail {
/// <p>The step configuration.</p>
pub fn step_config(&self) -> std::option::Option<&crate::model::StepConfig> {
self.step_config.as_ref()
}
/// <p>The description of the step status.</p>
pub fn execution_status_detail(
&self,
) -> std::option::Option<&crate::model::StepExecutionStatusDetail> {
self.execution_status_detail.as_ref()
}
}
impl std::fmt::Debug for StepDetail {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StepDetail");
formatter.field("step_config", &self.step_config);
formatter.field("execution_status_detail", &self.execution_status_detail);
formatter.finish()
}
}
/// See [`StepDetail`](crate::model::StepDetail)
pub mod step_detail {
/// A builder for [`StepDetail`](crate::model::StepDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) step_config: std::option::Option<crate::model::StepConfig>,
pub(crate) execution_status_detail:
std::option::Option<crate::model::StepExecutionStatusDetail>,
}
impl Builder {
/// <p>The step configuration.</p>
pub fn step_config(mut self, input: crate::model::StepConfig) -> Self {
self.step_config = Some(input);
self
}
/// <p>The step configuration.</p>
pub fn set_step_config(
mut self,
input: std::option::Option<crate::model::StepConfig>,
) -> Self {
self.step_config = input;
self
}
/// <p>The description of the step status.</p>
pub fn execution_status_detail(
mut self,
input: crate::model::StepExecutionStatusDetail,
) -> Self {
self.execution_status_detail = Some(input);
self
}
/// <p>The description of the step status.</p>
pub fn set_execution_status_detail(
mut self,
input: std::option::Option<crate::model::StepExecutionStatusDetail>,
) -> Self {
self.execution_status_detail = input;
self
}
/// Consumes the builder and constructs a [`StepDetail`](crate::model::StepDetail)
pub fn build(self) -> crate::model::StepDetail {
crate::model::StepDetail {
step_config: self.step_config,
execution_status_detail: self.execution_status_detail,
}
}
}
}
impl StepDetail {
/// Creates a new builder-style object to manufacture [`StepDetail`](crate::model::StepDetail)
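    ///
    /// A minimal usage sketch composing a [`StepExecutionStatusDetail`](crate::model::StepExecutionStatusDetail); the chosen state is illustrative:
    ///
    /// ```ignore
    /// // Attach an execution status to a StepDetail and read it back.
    /// let status = crate::model::StepExecutionStatusDetail::builder()
    ///     .state(crate::model::StepExecutionState::Completed)
    ///     .build();
    /// let step_detail = crate::model::StepDetail::builder()
    ///     .execution_status_detail(status)
    ///     .build();
    /// assert!(step_detail.execution_status_detail().is_some());
    /// assert!(step_detail.step_config().is_none()); // never set on the builder
    /// ```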
pub fn builder() -> crate::model::step_detail::Builder {
crate::model::step_detail::Builder::default()
}
}
/// <p>The execution state of a step.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StepExecutionStatusDetail {
/// <p>The state of the step.</p>
pub state: std::option::Option<crate::model::StepExecutionState>,
/// <p>The creation date and time of the step.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The start date and time of the step.</p>
pub start_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The completion date and time of the step.</p>
pub end_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>A description of the step's current state.</p>
pub last_state_change_reason: std::option::Option<std::string::String>,
}
impl StepExecutionStatusDetail {
/// <p>The state of the step.</p>
pub fn state(&self) -> std::option::Option<&crate::model::StepExecutionState> {
self.state.as_ref()
}
/// <p>The creation date and time of the step.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
/// <p>The start date and time of the step.</p>
pub fn start_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.start_date_time.as_ref()
}
/// <p>The completion date and time of the step.</p>
pub fn end_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_date_time.as_ref()
}
/// <p>A description of the step's current state.</p>
pub fn last_state_change_reason(&self) -> std::option::Option<&str> {
self.last_state_change_reason.as_deref()
}
}
impl std::fmt::Debug for StepExecutionStatusDetail {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StepExecutionStatusDetail");
formatter.field("state", &self.state);
formatter.field("creation_date_time", &self.creation_date_time);
formatter.field("start_date_time", &self.start_date_time);
formatter.field("end_date_time", &self.end_date_time);
formatter.field("last_state_change_reason", &self.last_state_change_reason);
formatter.finish()
}
}
/// See [`StepExecutionStatusDetail`](crate::model::StepExecutionStatusDetail)
pub mod step_execution_status_detail {
/// A builder for [`StepExecutionStatusDetail`](crate::model::StepExecutionStatusDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) state: std::option::Option<crate::model::StepExecutionState>,
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) start_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) last_state_change_reason: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The state of the step.</p>
pub fn state(mut self, input: crate::model::StepExecutionState) -> Self {
self.state = Some(input);
self
}
/// <p>The state of the step.</p>
pub fn set_state(
mut self,
input: std::option::Option<crate::model::StepExecutionState>,
) -> Self {
self.state = input;
self
}
/// <p>The creation date and time of the step.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The creation date and time of the step.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
/// <p>The start date and time of the step.</p>
pub fn start_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.start_date_time = Some(input);
self
}
/// <p>The start date and time of the step.</p>
pub fn set_start_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.start_date_time = input;
self
}
/// <p>The completion date and time of the step.</p>
pub fn end_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_date_time = Some(input);
self
}
/// <p>The completion date and time of the step.</p>
pub fn set_end_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_date_time = input;
self
}
/// <p>A description of the step's current state.</p>
pub fn last_state_change_reason(mut self, input: impl Into<std::string::String>) -> Self {
self.last_state_change_reason = Some(input.into());
self
}
/// <p>A description of the step's current state.</p>
pub fn set_last_state_change_reason(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.last_state_change_reason = input;
self
}
/// Consumes the builder and constructs a [`StepExecutionStatusDetail`](crate::model::StepExecutionStatusDetail)
pub fn build(self) -> crate::model::StepExecutionStatusDetail {
crate::model::StepExecutionStatusDetail {
state: self.state,
creation_date_time: self.creation_date_time,
start_date_time: self.start_date_time,
end_date_time: self.end_date_time,
last_state_change_reason: self.last_state_change_reason,
}
}
}
}
impl StepExecutionStatusDetail {
/// Creates a new builder-style object to manufacture [`StepExecutionStatusDetail`](crate::model::StepExecutionStatusDetail)
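    ///
    /// A minimal usage sketch; the state and reason string are illustrative placeholders:
    ///
    /// ```ignore
    /// // Record a failed step together with an explanatory reason.
    /// let detail = crate::model::StepExecutionStatusDetail::builder()
    ///     .state(crate::model::StepExecutionState::Failed)
    ///     .last_state_change_reason("Step failed; see logs for details")
    ///     .build();
    /// assert_eq!(
    ///     detail.state(),
    ///     Some(&crate::model::StepExecutionState::Failed)
    /// );
    /// ```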
pub fn builder() -> crate::model::step_execution_status_detail::Builder {
crate::model::step_execution_status_detail::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum StepExecutionState {
#[allow(missing_docs)] // documentation missing in model
Cancelled,
#[allow(missing_docs)] // documentation missing in model
Completed,
#[allow(missing_docs)] // documentation missing in model
Continue,
#[allow(missing_docs)] // documentation missing in model
Failed,
#[allow(missing_docs)] // documentation missing in model
Interrupted,
#[allow(missing_docs)] // documentation missing in model
Pending,
#[allow(missing_docs)] // documentation missing in model
Running,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for StepExecutionState {
fn from(s: &str) -> Self {
match s {
"CANCELLED" => StepExecutionState::Cancelled,
"COMPLETED" => StepExecutionState::Completed,
"CONTINUE" => StepExecutionState::Continue,
"FAILED" => StepExecutionState::Failed,
"INTERRUPTED" => StepExecutionState::Interrupted,
"PENDING" => StepExecutionState::Pending,
"RUNNING" => StepExecutionState::Running,
other => StepExecutionState::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for StepExecutionState {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(StepExecutionState::from(s))
}
}
impl StepExecutionState {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
StepExecutionState::Cancelled => "CANCELLED",
StepExecutionState::Completed => "COMPLETED",
StepExecutionState::Continue => "CONTINUE",
StepExecutionState::Failed => "FAILED",
StepExecutionState::Interrupted => "INTERRUPTED",
StepExecutionState::Pending => "PENDING",
StepExecutionState::Running => "RUNNING",
StepExecutionState::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"CANCELLED",
"COMPLETED",
"CONTINUE",
"FAILED",
"INTERRUPTED",
"PENDING",
"RUNNING",
]
}
}
impl AsRef<str> for StepExecutionState {
fn as_ref(&self) -> &str {
self.as_str()
}
}
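// A small illustrative sketch, not generated code: it shows how the string
// conversions above behave, including the `Unknown` fallback for values added
// to the service after this code was generated.
#[cfg(test)]
mod step_execution_state_str_sketch {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn round_trips_known_values_and_preserves_unknown_ones() {
        // A known value maps to a concrete variant and back to the same string.
        let state = StepExecutionState::from_str("RUNNING").unwrap();
        assert_eq!(state, StepExecutionState::Running);
        assert_eq!(state.as_str(), "RUNNING");
        // A value the generator has not seen is kept verbatim in `Unknown`.
        let future = StepExecutionState::from("SOME_FUTURE_STATE");
        assert_eq!(future.as_str(), "SOME_FUTURE_STATE");
        assert!(StepExecutionState::values()
            .iter()
            .all(|v| *v != future.as_str()));
    }
}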
/// <p>Specifies the type of Amazon EC2 instances that the cluster (job flow) runs on.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobFlowInstancesDetail {
/// <p>The Amazon EC2 master node instance type.</p>
pub master_instance_type: std::option::Option<std::string::String>,
/// <p>The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.</p>
pub master_public_dns_name: std::option::Option<std::string::String>,
/// <p>The Amazon EC2 instance identifier of the master node.</p>
pub master_instance_id: std::option::Option<std::string::String>,
/// <p>The Amazon EC2 core and task node instance type.</p>
pub slave_instance_type: std::option::Option<std::string::String>,
/// <p>The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.</p>
pub instance_count: std::option::Option<i32>,
/// <p>Details about the instance groups in a cluster.</p>
pub instance_groups: std::option::Option<std::vec::Vec<crate::model::InstanceGroupDetail>>,
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is increased one time for every hour that an m1.small instance runs. Larger instances are weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being increased incrementally four times. This result is only an approximation and does not reflect the actual billing rate.</p>
pub normalized_instance_hours: std::option::Option<i32>,
/// <p>The name of an Amazon EC2 key pair that can be used to connect to the master node using SSH.</p>
pub ec2_key_name: std::option::Option<std::string::String>,
/// <p>For clusters launched within Amazon Virtual Private Cloud, this is the identifier of the subnet where the cluster was launched.</p>
pub ec2_subnet_id: std::option::Option<std::string::String>,
/// <p>The Amazon EC2 Availability Zone for the cluster.</p>
pub placement: std::option::Option<crate::model::PlacementType>,
/// <p>Specifies whether the cluster should remain available after completing all steps.</p>
pub keep_job_flow_alive_when_no_steps: bool,
/// <p>Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.</p>
pub termination_protected: bool,
/// <p>The Hadoop version for the cluster.</p>
pub hadoop_version: std::option::Option<std::string::String>,
}
impl JobFlowInstancesDetail {
/// <p>The Amazon EC2 master node instance type.</p>
pub fn master_instance_type(&self) -> std::option::Option<&str> {
self.master_instance_type.as_deref()
}
/// <p>The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.</p>
pub fn master_public_dns_name(&self) -> std::option::Option<&str> {
self.master_public_dns_name.as_deref()
}
/// <p>The Amazon EC2 instance identifier of the master node.</p>
pub fn master_instance_id(&self) -> std::option::Option<&str> {
self.master_instance_id.as_deref()
}
/// <p>The Amazon EC2 core and task node instance type.</p>
pub fn slave_instance_type(&self) -> std::option::Option<&str> {
self.slave_instance_type.as_deref()
}
/// <p>The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.</p>
pub fn instance_count(&self) -> std::option::Option<i32> {
self.instance_count
}
/// <p>Details about the instance groups in a cluster.</p>
pub fn instance_groups(&self) -> std::option::Option<&[crate::model::InstanceGroupDetail]> {
self.instance_groups.as_deref()
}
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is increased one time for every hour that an m1.small instance runs. Larger instances are weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being increased incrementally four times. This result is only an approximation and does not reflect the actual billing rate.</p>
pub fn normalized_instance_hours(&self) -> std::option::Option<i32> {
self.normalized_instance_hours
}
/// <p>The name of an Amazon EC2 key pair that can be used to connect to the master node using SSH.</p>
pub fn ec2_key_name(&self) -> std::option::Option<&str> {
self.ec2_key_name.as_deref()
}
/// <p>For clusters launched within Amazon Virtual Private Cloud, this is the identifier of the subnet where the cluster was launched.</p>
pub fn ec2_subnet_id(&self) -> std::option::Option<&str> {
self.ec2_subnet_id.as_deref()
}
/// <p>The Amazon EC2 Availability Zone for the cluster.</p>
pub fn placement(&self) -> std::option::Option<&crate::model::PlacementType> {
self.placement.as_ref()
}
/// <p>Specifies whether the cluster should remain available after completing all steps.</p>
pub fn keep_job_flow_alive_when_no_steps(&self) -> bool {
self.keep_job_flow_alive_when_no_steps
}
/// <p>Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.</p>
pub fn termination_protected(&self) -> bool {
self.termination_protected
}
/// <p>The Hadoop version for the cluster.</p>
pub fn hadoop_version(&self) -> std::option::Option<&str> {
self.hadoop_version.as_deref()
}
}
impl std::fmt::Debug for JobFlowInstancesDetail {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("JobFlowInstancesDetail");
formatter.field("master_instance_type", &self.master_instance_type);
formatter.field("master_public_dns_name", &self.master_public_dns_name);
formatter.field("master_instance_id", &self.master_instance_id);
formatter.field("slave_instance_type", &self.slave_instance_type);
formatter.field("instance_count", &self.instance_count);
formatter.field("instance_groups", &self.instance_groups);
formatter.field("normalized_instance_hours", &self.normalized_instance_hours);
formatter.field("ec2_key_name", &self.ec2_key_name);
formatter.field("ec2_subnet_id", &self.ec2_subnet_id);
formatter.field("placement", &self.placement);
formatter.field(
"keep_job_flow_alive_when_no_steps",
&self.keep_job_flow_alive_when_no_steps,
);
formatter.field("termination_protected", &self.termination_protected);
formatter.field("hadoop_version", &self.hadoop_version);
formatter.finish()
}
}
/// See [`JobFlowInstancesDetail`](crate::model::JobFlowInstancesDetail)
pub mod job_flow_instances_detail {
/// A builder for [`JobFlowInstancesDetail`](crate::model::JobFlowInstancesDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) master_instance_type: std::option::Option<std::string::String>,
pub(crate) master_public_dns_name: std::option::Option<std::string::String>,
pub(crate) master_instance_id: std::option::Option<std::string::String>,
pub(crate) slave_instance_type: std::option::Option<std::string::String>,
pub(crate) instance_count: std::option::Option<i32>,
pub(crate) instance_groups:
std::option::Option<std::vec::Vec<crate::model::InstanceGroupDetail>>,
pub(crate) normalized_instance_hours: std::option::Option<i32>,
pub(crate) ec2_key_name: std::option::Option<std::string::String>,
pub(crate) ec2_subnet_id: std::option::Option<std::string::String>,
pub(crate) placement: std::option::Option<crate::model::PlacementType>,
pub(crate) keep_job_flow_alive_when_no_steps: std::option::Option<bool>,
pub(crate) termination_protected: std::option::Option<bool>,
pub(crate) hadoop_version: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The Amazon EC2 master node instance type.</p>
pub fn master_instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.master_instance_type = Some(input.into());
self
}
/// <p>The Amazon EC2 master node instance type.</p>
pub fn set_master_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.master_instance_type = input;
self
}
/// <p>The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.</p>
pub fn master_public_dns_name(mut self, input: impl Into<std::string::String>) -> Self {
self.master_public_dns_name = Some(input.into());
self
}
/// <p>The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.</p>
pub fn set_master_public_dns_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.master_public_dns_name = input;
self
}
/// <p>The Amazon EC2 instance identifier of the master node.</p>
pub fn master_instance_id(mut self, input: impl Into<std::string::String>) -> Self {
self.master_instance_id = Some(input.into());
self
}
/// <p>The Amazon EC2 instance identifier of the master node.</p>
pub fn set_master_instance_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.master_instance_id = input;
self
}
/// <p>The Amazon EC2 core and task node instance type.</p>
pub fn slave_instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.slave_instance_type = Some(input.into());
self
}
/// <p>The Amazon EC2 core and task node instance type.</p>
pub fn set_slave_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.slave_instance_type = input;
self
}
/// <p>The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.</p>
pub fn instance_count(mut self, input: i32) -> Self {
self.instance_count = Some(input);
self
}
/// <p>The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.</p>
pub fn set_instance_count(mut self, input: std::option::Option<i32>) -> Self {
self.instance_count = input;
self
}
/// Appends an item to `instance_groups`.
///
/// To override the contents of this collection use [`set_instance_groups`](Self::set_instance_groups).
///
/// <p>Details about the instance groups in a cluster.</p>
pub fn instance_groups(mut self, input: crate::model::InstanceGroupDetail) -> Self {
let mut v = self.instance_groups.unwrap_or_default();
v.push(input);
self.instance_groups = Some(v);
self
}
/// <p>Details about the instance groups in a cluster.</p>
pub fn set_instance_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::InstanceGroupDetail>>,
) -> Self {
self.instance_groups = input;
self
}
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is increased one time for every hour that an m1.small instance runs. Larger instances are weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being increased incrementally four times. This result is only an approximation and does not reflect the actual billing rate.</p>
pub fn normalized_instance_hours(mut self, input: i32) -> Self {
self.normalized_instance_hours = Some(input);
self
}
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is increased one time for every hour that an m1.small instance runs. Larger instances are weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being increased incrementally four times. This result is only an approximation and does not reflect the actual billing rate.</p>
pub fn set_normalized_instance_hours(mut self, input: std::option::Option<i32>) -> Self {
self.normalized_instance_hours = input;
self
}
/// <p>The name of an Amazon EC2 key pair that can be used to connect to the master node using SSH.</p>
pub fn ec2_key_name(mut self, input: impl Into<std::string::String>) -> Self {
self.ec2_key_name = Some(input.into());
self
}
/// <p>The name of an Amazon EC2 key pair that can be used to connect to the master node using SSH.</p>
pub fn set_ec2_key_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ec2_key_name = input;
self
}
/// <p>For clusters launched within Amazon Virtual Private Cloud, this is the identifier of the subnet where the cluster was launched.</p>
pub fn ec2_subnet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.ec2_subnet_id = Some(input.into());
self
}
/// <p>For clusters launched within Amazon Virtual Private Cloud, this is the identifier of the subnet where the cluster was launched.</p>
pub fn set_ec2_subnet_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.ec2_subnet_id = input;
self
}
/// <p>The Amazon EC2 Availability Zone for the cluster.</p>
pub fn placement(mut self, input: crate::model::PlacementType) -> Self {
self.placement = Some(input);
self
}
/// <p>The Amazon EC2 Availability Zone for the cluster.</p>
pub fn set_placement(
mut self,
input: std::option::Option<crate::model::PlacementType>,
) -> Self {
self.placement = input;
self
}
/// <p>Specifies whether the cluster should remain available after completing all steps.</p>
pub fn keep_job_flow_alive_when_no_steps(mut self, input: bool) -> Self {
self.keep_job_flow_alive_when_no_steps = Some(input);
self
}
/// <p>Specifies whether the cluster should remain available after completing all steps.</p>
pub fn set_keep_job_flow_alive_when_no_steps(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.keep_job_flow_alive_when_no_steps = input;
self
}
/// <p>Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.</p>
pub fn termination_protected(mut self, input: bool) -> Self {
self.termination_protected = Some(input);
self
}
/// <p>Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.</p>
pub fn set_termination_protected(mut self, input: std::option::Option<bool>) -> Self {
self.termination_protected = input;
self
}
/// <p>The Hadoop version for the cluster.</p>
pub fn hadoop_version(mut self, input: impl Into<std::string::String>) -> Self {
self.hadoop_version = Some(input.into());
self
}
/// <p>The Hadoop version for the cluster.</p>
pub fn set_hadoop_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.hadoop_version = input;
self
}
/// Consumes the builder and constructs a [`JobFlowInstancesDetail`](crate::model::JobFlowInstancesDetail)
pub fn build(self) -> crate::model::JobFlowInstancesDetail {
crate::model::JobFlowInstancesDetail {
master_instance_type: self.master_instance_type,
master_public_dns_name: self.master_public_dns_name,
master_instance_id: self.master_instance_id,
slave_instance_type: self.slave_instance_type,
instance_count: self.instance_count,
instance_groups: self.instance_groups,
normalized_instance_hours: self.normalized_instance_hours,
ec2_key_name: self.ec2_key_name,
ec2_subnet_id: self.ec2_subnet_id,
placement: self.placement,
keep_job_flow_alive_when_no_steps: self
.keep_job_flow_alive_when_no_steps
.unwrap_or_default(),
termination_protected: self.termination_protected.unwrap_or_default(),
hadoop_version: self.hadoop_version,
}
}
}
}
impl JobFlowInstancesDetail {
/// Creates a new builder-style object to manufacture [`JobFlowInstancesDetail`](crate::model::JobFlowInstancesDetail)
pub fn builder() -> crate::model::job_flow_instances_detail::Builder {
crate::model::job_flow_instances_detail::Builder::default()
}
}
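// An illustrative sketch, not generated code: it builds a `JobFlowInstancesDetail`
// to show that repeated `instance_groups` calls append to the list and that the
// two boolean fields fall back to `false` when never set. Instance types and
// group names are hypothetical.
#[cfg(test)]
mod job_flow_instances_detail_builder_sketch {
    use super::*;

    #[test]
    fn appends_instance_groups_and_defaults_booleans() {
        let detail = JobFlowInstancesDetail::builder()
            .master_instance_type("m5.xlarge")
            .slave_instance_type("m5.xlarge")
            .instance_count(3)
            .instance_groups(InstanceGroupDetail::builder().name("core").build())
            .instance_groups(InstanceGroupDetail::builder().name("task").build())
            .build();
        // Each call to `instance_groups` appended one entry.
        assert_eq!(detail.instance_groups.as_ref().map(|g| g.len()), Some(2));
        // Booleans that were never set default to `false` in `build()`.
        assert!(!detail.keep_job_flow_alive_when_no_steps);
        assert!(!detail.termination_protected);
    }
}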
/// <p>Detailed information about an instance group.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceGroupDetail {
/// <p>Unique identifier for the instance group.</p>
pub instance_group_id: std::option::Option<std::string::String>,
/// <p>Friendly name for the instance group.</p>
pub name: std::option::Option<std::string::String>,
/// <p>Market type of the EC2 instances used to create a cluster node.</p>
pub market: std::option::Option<crate::model::MarketType>,
    /// <p>Instance group role in the cluster.</p>
pub instance_role: std::option::Option<crate::model::InstanceRoleType>,
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub bid_price: std::option::Option<std::string::String>,
/// <p>EC2 instance type.</p>
pub instance_type: std::option::Option<std::string::String>,
/// <p>Target number of instances to run in the instance group.</p>
pub instance_request_count: std::option::Option<i32>,
/// <p>Actual count of running instances.</p>
pub instance_running_count: std::option::Option<i32>,
/// <p>State of instance group. The following values are no longer supported: STARTING, TERMINATED, and FAILED.</p>
pub state: std::option::Option<crate::model::InstanceGroupState>,
/// <p>Details regarding the state of the instance group.</p>
pub last_state_change_reason: std::option::Option<std::string::String>,
/// <p>The date/time the instance group was created.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date/time the instance group was started.</p>
pub start_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date/time the instance group was available to the cluster.</p>
pub ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date/time the instance group was terminated.</p>
pub end_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub custom_ami_id: std::option::Option<std::string::String>,
}
impl InstanceGroupDetail {
/// <p>Unique identifier for the instance group.</p>
pub fn instance_group_id(&self) -> std::option::Option<&str> {
self.instance_group_id.as_deref()
}
/// <p>Friendly name for the instance group.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>Market type of the EC2 instances used to create a cluster node.</p>
pub fn market(&self) -> std::option::Option<&crate::model::MarketType> {
self.market.as_ref()
}
    /// <p>Instance group role in the cluster.</p>
pub fn instance_role(&self) -> std::option::Option<&crate::model::InstanceRoleType> {
self.instance_role.as_ref()
}
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub fn bid_price(&self) -> std::option::Option<&str> {
self.bid_price.as_deref()
}
/// <p>EC2 instance type.</p>
pub fn instance_type(&self) -> std::option::Option<&str> {
self.instance_type.as_deref()
}
/// <p>Target number of instances to run in the instance group.</p>
pub fn instance_request_count(&self) -> std::option::Option<i32> {
self.instance_request_count
}
/// <p>Actual count of running instances.</p>
pub fn instance_running_count(&self) -> std::option::Option<i32> {
self.instance_running_count
}
/// <p>State of instance group. The following values are no longer supported: STARTING, TERMINATED, and FAILED.</p>
pub fn state(&self) -> std::option::Option<&crate::model::InstanceGroupState> {
self.state.as_ref()
}
/// <p>Details regarding the state of the instance group.</p>
pub fn last_state_change_reason(&self) -> std::option::Option<&str> {
self.last_state_change_reason.as_deref()
}
/// <p>The date/time the instance group was created.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
/// <p>The date/time the instance group was started.</p>
pub fn start_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.start_date_time.as_ref()
}
/// <p>The date/time the instance group was available to the cluster.</p>
pub fn ready_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.ready_date_time.as_ref()
}
/// <p>The date/time the instance group was terminated.</p>
pub fn end_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_date_time.as_ref()
}
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub fn custom_ami_id(&self) -> std::option::Option<&str> {
self.custom_ami_id.as_deref()
}
}
impl std::fmt::Debug for InstanceGroupDetail {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceGroupDetail");
formatter.field("instance_group_id", &self.instance_group_id);
formatter.field("name", &self.name);
formatter.field("market", &self.market);
formatter.field("instance_role", &self.instance_role);
formatter.field("bid_price", &self.bid_price);
formatter.field("instance_type", &self.instance_type);
formatter.field("instance_request_count", &self.instance_request_count);
formatter.field("instance_running_count", &self.instance_running_count);
formatter.field("state", &self.state);
formatter.field("last_state_change_reason", &self.last_state_change_reason);
formatter.field("creation_date_time", &self.creation_date_time);
formatter.field("start_date_time", &self.start_date_time);
formatter.field("ready_date_time", &self.ready_date_time);
formatter.field("end_date_time", &self.end_date_time);
formatter.field("custom_ami_id", &self.custom_ami_id);
formatter.finish()
}
}
/// See [`InstanceGroupDetail`](crate::model::InstanceGroupDetail)
pub mod instance_group_detail {
/// A builder for [`InstanceGroupDetail`](crate::model::InstanceGroupDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instance_group_id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) market: std::option::Option<crate::model::MarketType>,
pub(crate) instance_role: std::option::Option<crate::model::InstanceRoleType>,
pub(crate) bid_price: std::option::Option<std::string::String>,
pub(crate) instance_type: std::option::Option<std::string::String>,
pub(crate) instance_request_count: std::option::Option<i32>,
pub(crate) instance_running_count: std::option::Option<i32>,
pub(crate) state: std::option::Option<crate::model::InstanceGroupState>,
pub(crate) last_state_change_reason: std::option::Option<std::string::String>,
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) start_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) custom_ami_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>Unique identifier for the instance group.</p>
pub fn instance_group_id(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_group_id = Some(input.into());
self
}
/// <p>Unique identifier for the instance group.</p>
pub fn set_instance_group_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_group_id = input;
self
}
/// <p>Friendly name for the instance group.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>Friendly name for the instance group.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>Market type of the EC2 instances used to create a cluster node.</p>
pub fn market(mut self, input: crate::model::MarketType) -> Self {
self.market = Some(input);
self
}
/// <p>Market type of the EC2 instances used to create a cluster node.</p>
pub fn set_market(mut self, input: std::option::Option<crate::model::MarketType>) -> Self {
self.market = input;
self
}
        /// <p>Instance group role in the cluster.</p>
pub fn instance_role(mut self, input: crate::model::InstanceRoleType) -> Self {
self.instance_role = Some(input);
self
}
        /// <p>Instance group role in the cluster.</p>
pub fn set_instance_role(
mut self,
input: std::option::Option<crate::model::InstanceRoleType>,
) -> Self {
self.instance_role = input;
self
}
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub fn bid_price(mut self, input: impl Into<std::string::String>) -> Self {
self.bid_price = Some(input.into());
self
}
/// <p>If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify <code>OnDemandPrice</code> to set the amount equal to the On-Demand price, or specify an amount in USD.</p>
pub fn set_bid_price(mut self, input: std::option::Option<std::string::String>) -> Self {
self.bid_price = input;
self
}
/// <p>EC2 instance type.</p>
pub fn instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_type = Some(input.into());
self
}
/// <p>EC2 instance type.</p>
pub fn set_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_type = input;
self
}
/// <p>Target number of instances to run in the instance group.</p>
pub fn instance_request_count(mut self, input: i32) -> Self {
self.instance_request_count = Some(input);
self
}
/// <p>Target number of instances to run in the instance group.</p>
pub fn set_instance_request_count(mut self, input: std::option::Option<i32>) -> Self {
self.instance_request_count = input;
self
}
/// <p>Actual count of running instances.</p>
pub fn instance_running_count(mut self, input: i32) -> Self {
self.instance_running_count = Some(input);
self
}
/// <p>Actual count of running instances.</p>
pub fn set_instance_running_count(mut self, input: std::option::Option<i32>) -> Self {
self.instance_running_count = input;
self
}
/// <p>State of instance group. The following values are no longer supported: STARTING, TERMINATED, and FAILED.</p>
pub fn state(mut self, input: crate::model::InstanceGroupState) -> Self {
self.state = Some(input);
self
}
/// <p>State of instance group. The following values are no longer supported: STARTING, TERMINATED, and FAILED.</p>
pub fn set_state(
mut self,
input: std::option::Option<crate::model::InstanceGroupState>,
) -> Self {
self.state = input;
self
}
/// <p>Details regarding the state of the instance group.</p>
pub fn last_state_change_reason(mut self, input: impl Into<std::string::String>) -> Self {
self.last_state_change_reason = Some(input.into());
self
}
/// <p>Details regarding the state of the instance group.</p>
pub fn set_last_state_change_reason(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.last_state_change_reason = input;
self
}
/// <p>The date/time the instance group was created.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The date/time the instance group was created.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
/// <p>The date/time the instance group was started.</p>
pub fn start_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.start_date_time = Some(input);
self
}
/// <p>The date/time the instance group was started.</p>
pub fn set_start_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.start_date_time = input;
self
}
/// <p>The date/time the instance group was available to the cluster.</p>
pub fn ready_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.ready_date_time = Some(input);
self
}
/// <p>The date/time the instance group was available to the cluster.</p>
pub fn set_ready_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.ready_date_time = input;
self
}
/// <p>The date/time the instance group was terminated.</p>
pub fn end_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_date_time = Some(input);
self
}
/// <p>The date/time the instance group was terminated.</p>
pub fn set_end_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_date_time = input;
self
}
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub fn custom_ami_id(mut self, input: impl Into<std::string::String>) -> Self {
self.custom_ami_id = Some(input.into());
self
}
/// <p>The custom AMI ID to use for the provisioned instance group.</p>
pub fn set_custom_ami_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.custom_ami_id = input;
self
}
/// Consumes the builder and constructs a [`InstanceGroupDetail`](crate::model::InstanceGroupDetail)
pub fn build(self) -> crate::model::InstanceGroupDetail {
crate::model::InstanceGroupDetail {
instance_group_id: self.instance_group_id,
name: self.name,
market: self.market,
instance_role: self.instance_role,
bid_price: self.bid_price,
instance_type: self.instance_type,
instance_request_count: self.instance_request_count,
instance_running_count: self.instance_running_count,
state: self.state,
last_state_change_reason: self.last_state_change_reason,
creation_date_time: self.creation_date_time,
start_date_time: self.start_date_time,
ready_date_time: self.ready_date_time,
end_date_time: self.end_date_time,
custom_ami_id: self.custom_ami_id,
}
}
}
}
impl InstanceGroupDetail {
/// Creates a new builder-style object to manufacture [`InstanceGroupDetail`](crate::model::InstanceGroupDetail)
pub fn builder() -> crate::model::instance_group_detail::Builder {
crate::model::instance_group_detail::Builder::default()
}
}
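// A minimal sketch, not generated code: it assembles an `InstanceGroupDetail`.
// The market and role are built through the `From<&str>` conversion that the
// generated enums in this module provide (assumed to exist for `MarketType`
// and `InstanceRoleType` as well); the identifiers and counts are hypothetical.
#[cfg(test)]
mod instance_group_detail_builder_sketch {
    use super::*;

    #[test]
    fn builds_a_core_on_demand_group() {
        let group = InstanceGroupDetail::builder()
            .instance_group_id("ig-hypothetical")
            .name("core")
            .market(crate::model::MarketType::from("ON_DEMAND"))
            .instance_role(crate::model::InstanceRoleType::from("CORE"))
            .instance_type("m5.xlarge")
            .instance_request_count(4)
            .instance_running_count(4)
            .build();
        assert_eq!(group.name.as_deref(), Some("core"));
        // Requested and running counts were set to the same hypothetical value.
        assert_eq!(group.instance_request_count, group.instance_running_count);
    }
}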
/// <p>Describes the status of the cluster (job flow).</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobFlowExecutionStatusDetail {
/// <p>The state of the job flow.</p>
pub state: std::option::Option<crate::model::JobFlowExecutionState>,
/// <p>The creation date and time of the job flow.</p>
pub creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The start date and time of the job flow.</p>
pub start_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The date and time when the job flow was ready to start running bootstrap actions.</p>
pub ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The completion date and time of the job flow.</p>
pub end_date_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>A description of the reason for the job flow's last state change.</p>
pub last_state_change_reason: std::option::Option<std::string::String>,
}
impl JobFlowExecutionStatusDetail {
/// <p>The state of the job flow.</p>
pub fn state(&self) -> std::option::Option<&crate::model::JobFlowExecutionState> {
self.state.as_ref()
}
/// <p>The creation date and time of the job flow.</p>
pub fn creation_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.creation_date_time.as_ref()
}
/// <p>The start date and time of the job flow.</p>
pub fn start_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.start_date_time.as_ref()
}
/// <p>The date and time when the job flow was ready to start running bootstrap actions.</p>
pub fn ready_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.ready_date_time.as_ref()
}
/// <p>The completion date and time of the job flow.</p>
pub fn end_date_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.end_date_time.as_ref()
}
    /// <p>A description of the reason for the job flow's last state change.</p>
pub fn last_state_change_reason(&self) -> std::option::Option<&str> {
self.last_state_change_reason.as_deref()
}
}
impl std::fmt::Debug for JobFlowExecutionStatusDetail {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("JobFlowExecutionStatusDetail");
formatter.field("state", &self.state);
formatter.field("creation_date_time", &self.creation_date_time);
formatter.field("start_date_time", &self.start_date_time);
formatter.field("ready_date_time", &self.ready_date_time);
formatter.field("end_date_time", &self.end_date_time);
formatter.field("last_state_change_reason", &self.last_state_change_reason);
formatter.finish()
}
}
/// See [`JobFlowExecutionStatusDetail`](crate::model::JobFlowExecutionStatusDetail)
pub mod job_flow_execution_status_detail {
/// A builder for [`JobFlowExecutionStatusDetail`](crate::model::JobFlowExecutionStatusDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) state: std::option::Option<crate::model::JobFlowExecutionState>,
pub(crate) creation_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) start_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) ready_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) end_date_time: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) last_state_change_reason: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The state of the job flow.</p>
pub fn state(mut self, input: crate::model::JobFlowExecutionState) -> Self {
self.state = Some(input);
self
}
/// <p>The state of the job flow.</p>
pub fn set_state(
mut self,
input: std::option::Option<crate::model::JobFlowExecutionState>,
) -> Self {
self.state = input;
self
}
/// <p>The creation date and time of the job flow.</p>
pub fn creation_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.creation_date_time = Some(input);
self
}
/// <p>The creation date and time of the job flow.</p>
pub fn set_creation_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.creation_date_time = input;
self
}
/// <p>The start date and time of the job flow.</p>
pub fn start_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.start_date_time = Some(input);
self
}
/// <p>The start date and time of the job flow.</p>
pub fn set_start_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.start_date_time = input;
self
}
/// <p>The date and time when the job flow was ready to start running bootstrap actions.</p>
pub fn ready_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.ready_date_time = Some(input);
self
}
/// <p>The date and time when the job flow was ready to start running bootstrap actions.</p>
pub fn set_ready_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.ready_date_time = input;
self
}
/// <p>The completion date and time of the job flow.</p>
pub fn end_date_time(mut self, input: aws_smithy_types::DateTime) -> Self {
self.end_date_time = Some(input);
self
}
/// <p>The completion date and time of the job flow.</p>
pub fn set_end_date_time(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.end_date_time = input;
self
}
        /// <p>A description of the reason for the job flow's last state change.</p>
pub fn last_state_change_reason(mut self, input: impl Into<std::string::String>) -> Self {
self.last_state_change_reason = Some(input.into());
self
}
        /// <p>A description of the reason for the job flow's last state change.</p>
pub fn set_last_state_change_reason(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.last_state_change_reason = input;
self
}
/// Consumes the builder and constructs a [`JobFlowExecutionStatusDetail`](crate::model::JobFlowExecutionStatusDetail)
pub fn build(self) -> crate::model::JobFlowExecutionStatusDetail {
crate::model::JobFlowExecutionStatusDetail {
state: self.state,
creation_date_time: self.creation_date_time,
start_date_time: self.start_date_time,
ready_date_time: self.ready_date_time,
end_date_time: self.end_date_time,
last_state_change_reason: self.last_state_change_reason,
}
}
}
}
impl JobFlowExecutionStatusDetail {
/// Creates a new builder-style object to manufacture [`JobFlowExecutionStatusDetail`](crate::model::JobFlowExecutionStatusDetail)
pub fn builder() -> crate::model::job_flow_execution_status_detail::Builder {
crate::model::job_flow_execution_status_detail::Builder::default()
}
}
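// An illustrative sketch, not generated code: it populates the status detail
// above. It assumes the `DateTime::from_secs` constructor from `aws_smithy_types`;
// the state string, timestamps, and reason are hypothetical.
#[cfg(test)]
mod job_flow_execution_status_detail_builder_sketch {
    use super::*;

    #[test]
    fn builds_a_waiting_status() {
        let status = JobFlowExecutionStatusDetail::builder()
            .state(JobFlowExecutionState::from("WAITING"))
            .creation_date_time(aws_smithy_types::DateTime::from_secs(1_577_836_800))
            .ready_date_time(aws_smithy_types::DateTime::from_secs(1_577_837_400))
            .last_state_change_reason("Cluster ready to run steps")
            .build();
        assert_eq!(status.state, Some(JobFlowExecutionState::Waiting));
        // The end time was never set, so the built value keeps it as `None`.
        assert!(status.end_date_time.is_none());
    }
}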
/// <p>The execution state of the job flow (cluster).</p>
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum JobFlowExecutionState {
#[allow(missing_docs)] // documentation missing in model
Bootstrapping,
#[allow(missing_docs)] // documentation missing in model
Completed,
#[allow(missing_docs)] // documentation missing in model
Failed,
#[allow(missing_docs)] // documentation missing in model
Running,
#[allow(missing_docs)] // documentation missing in model
ShuttingDown,
#[allow(missing_docs)] // documentation missing in model
Starting,
#[allow(missing_docs)] // documentation missing in model
Terminated,
#[allow(missing_docs)] // documentation missing in model
Waiting,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for JobFlowExecutionState {
fn from(s: &str) -> Self {
match s {
"BOOTSTRAPPING" => JobFlowExecutionState::Bootstrapping,
"COMPLETED" => JobFlowExecutionState::Completed,
"FAILED" => JobFlowExecutionState::Failed,
"RUNNING" => JobFlowExecutionState::Running,
"SHUTTING_DOWN" => JobFlowExecutionState::ShuttingDown,
"STARTING" => JobFlowExecutionState::Starting,
"TERMINATED" => JobFlowExecutionState::Terminated,
"WAITING" => JobFlowExecutionState::Waiting,
other => JobFlowExecutionState::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for JobFlowExecutionState {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(JobFlowExecutionState::from(s))
}
}
impl JobFlowExecutionState {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
JobFlowExecutionState::Bootstrapping => "BOOTSTRAPPING",
JobFlowExecutionState::Completed => "COMPLETED",
JobFlowExecutionState::Failed => "FAILED",
JobFlowExecutionState::Running => "RUNNING",
JobFlowExecutionState::ShuttingDown => "SHUTTING_DOWN",
JobFlowExecutionState::Starting => "STARTING",
JobFlowExecutionState::Terminated => "TERMINATED",
JobFlowExecutionState::Waiting => "WAITING",
JobFlowExecutionState::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"BOOTSTRAPPING",
"COMPLETED",
"FAILED",
"RUNNING",
"SHUTTING_DOWN",
"STARTING",
"TERMINATED",
"WAITING",
]
}
}
impl AsRef<str> for JobFlowExecutionState {
fn as_ref(&self) -> &str {
self.as_str()
}
}
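// A sketch of how calling code might branch on the parsed state, e.g. to decide
// whether a described job flow has finished. This helper is not part of the SDK;
// the choice of "terminal" states is this example's own assumption.
#[cfg(test)]
mod job_flow_execution_state_sketch {
    use super::*;

    fn is_terminal(state: &JobFlowExecutionState) -> bool {
        matches!(
            state,
            JobFlowExecutionState::Completed
                | JobFlowExecutionState::Failed
                | JobFlowExecutionState::Terminated
        )
    }

    #[test]
    fn classifies_states() {
        assert!(is_terminal(&JobFlowExecutionState::from("TERMINATED")));
        assert!(!is_terminal(&JobFlowExecutionState::from("RUNNING")));
        // Unrecognized values become `Unknown` and are treated as non-terminal here.
        assert!(!is_terminal(&JobFlowExecutionState::from("SOME_NEW_STATE")));
    }
}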
/// <p>The detailed description of the cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Cluster {
/// <p>The unique identifier for the cluster.</p>
pub id: std::option::Option<std::string::String>,
/// <p>The name of the cluster.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The current status details about the cluster.</p>
pub status: std::option::Option<crate::model::ClusterStatus>,
/// <p>Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.</p>
pub ec2_instance_attributes: std::option::Option<crate::model::Ec2InstanceAttributes>,
/// <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
/// <p>The instance group configuration of the cluster. A value of <code>INSTANCE_GROUP</code> indicates a uniform instance group configuration. A value of <code>INSTANCE_FLEET</code> indicates an instance fleets configuration.</p>
pub instance_collection_type: std::option::Option<crate::model::InstanceCollectionType>,
/// <p>The path to the Amazon S3 location where logs for this cluster are stored.</p>
pub log_uri: std::option::Option<std::string::String>,
/// <p> The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0. </p>
pub log_encryption_kms_key_id: std::option::Option<std::string::String>,
/// <p>The AMI version requested for this cluster.</p>
pub requested_ami_version: std::option::Option<std::string::String>,
/// <p>The AMI version running on this cluster.</p>
pub running_ami_version: std::option::Option<std::string::String>,
/// <p>The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form <code>emr-x.x.x</code>, where x.x.x is an Amazon EMR release version such as <code>emr-5.14.0</code>. For more information about Amazon EMR release versions and included application versions and features, see <a href="https://docs.aws.amazon.com/emr/latest/ReleaseGuide/">https://docs.aws.amazon.com/emr/latest/ReleaseGuide/</a>. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use <code>AmiVersion</code>.</p>
pub release_label: std::option::Option<std::string::String>,
/// <p>Specifies whether the cluster should terminate after completing all steps.</p>
pub auto_terminate: bool,
/// <p>Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.</p>
pub termination_protected: bool,
/// <p>Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When <code>true</code>, IAM principals in the Amazon Web Services account can perform EMR cluster actions on the cluster that their IAM policies allow. When <code>false</code>, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.</p>
/// <p>The default value is <code>true</code> if a value is not provided when creating a cluster using the EMR API <code>RunJobFlow</code> command, the CLI <a href="https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html">create-cluster</a> command, or the Amazon Web Services Management Console.</p>
pub visible_to_all_users: bool,
/// <p>The applications installed on this cluster.</p>
pub applications: std::option::Option<std::vec::Vec<crate::model::Application>>,
/// <p>A list of tags associated with a cluster.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
/// <p>The IAM role that Amazon EMR assumes in order to access Amazon Web Services resources on your behalf.</p>
pub service_role: std::option::Option<std::string::String>,
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.</p>
pub normalized_instance_hours: std::option::Option<i32>,
/// <p>The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.</p>
pub master_public_dns_name: std::option::Option<std::string::String>,
/// <p>Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.</p>
pub configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
/// <p>The name of the security configuration applied to the cluster.</p>
pub security_configuration: std::option::Option<std::string::String>,
/// <p>An IAM role for automatic scaling policies. The default role is <code>EMR_AutoScaling_DefaultRole</code>. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.</p>
pub auto_scaling_role: std::option::Option<std::string::String>,
/// <p>The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. <code>TERMINATE_AT_INSTANCE_HOUR</code> indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. <code>TERMINATE_AT_TASK_COMPLETION</code> indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. <code>TERMINATE_AT_TASK_COMPLETION</code> is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.</p>
pub scale_down_behavior: std::option::Option<crate::model::ScaleDownBehavior>,
/// <p>Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.</p>
pub custom_ami_id: std::option::Option<std::string::String>,
/// <p>The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.</p>
pub ebs_root_volume_size: std::option::Option<i32>,
/// <p>Applies only when <code>CustomAmiID</code> is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI.</p>
pub repo_upgrade_on_boot: std::option::Option<crate::model::RepoUpgradeOnBoot>,
/// <p>Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-kerberos.html">Use Kerberos Authentication</a> in the <i>Amazon EMR Management Guide</i>.</p>
pub kerberos_attributes: std::option::Option<crate::model::KerberosAttributes>,
/// <p>The Amazon Resource Name of the cluster.</p>
pub cluster_arn: std::option::Option<std::string::String>,
/// <p> The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. </p>
pub outpost_arn: std::option::Option<std::string::String>,
/// <p>Specifies the number of steps that can be executed concurrently.</p>
pub step_concurrency_level: std::option::Option<i32>,
/// <p>Placement group configured for an Amazon EMR cluster.</p>
pub placement_groups: std::option::Option<std::vec::Vec<crate::model::PlacementGroupConfig>>,
/// <p>The Amazon Linux release specified in a cluster launch RunJobFlow request. If no Amazon Linux release was specified, the default Amazon Linux release is shown in the response.</p>
pub os_release_label: std::option::Option<std::string::String>,
}
impl Cluster {
/// <p>The unique identifier for the cluster.</p>
pub fn id(&self) -> std::option::Option<&str> {
self.id.as_deref()
}
/// <p>The name of the cluster.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>The current status details about the cluster.</p>
pub fn status(&self) -> std::option::Option<&crate::model::ClusterStatus> {
self.status.as_ref()
}
/// <p>Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.</p>
pub fn ec2_instance_attributes(
&self,
) -> std::option::Option<&crate::model::Ec2InstanceAttributes> {
self.ec2_instance_attributes.as_ref()
}
/// <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
/// <p>The instance group configuration of the cluster. A value of <code>INSTANCE_GROUP</code> indicates a uniform instance group configuration. A value of <code>INSTANCE_FLEET</code> indicates an instance fleets configuration.</p>
pub fn instance_collection_type(
&self,
) -> std::option::Option<&crate::model::InstanceCollectionType> {
self.instance_collection_type.as_ref()
}
/// <p>The path to the Amazon S3 location where logs for this cluster are stored.</p>
pub fn log_uri(&self) -> std::option::Option<&str> {
self.log_uri.as_deref()
}
/// <p> The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0. </p>
pub fn log_encryption_kms_key_id(&self) -> std::option::Option<&str> {
self.log_encryption_kms_key_id.as_deref()
}
/// <p>The AMI version requested for this cluster.</p>
pub fn requested_ami_version(&self) -> std::option::Option<&str> {
self.requested_ami_version.as_deref()
}
/// <p>The AMI version running on this cluster.</p>
pub fn running_ami_version(&self) -> std::option::Option<&str> {
self.running_ami_version.as_deref()
}
/// <p>The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form <code>emr-x.x.x</code>, where x.x.x is an Amazon EMR release version such as <code>emr-5.14.0</code>. For more information about Amazon EMR release versions and included application versions and features, see <a href="https://docs.aws.amazon.com/emr/latest/ReleaseGuide/">https://docs.aws.amazon.com/emr/latest/ReleaseGuide/</a>. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use <code>AmiVersion</code>.</p>
pub fn release_label(&self) -> std::option::Option<&str> {
self.release_label.as_deref()
}
/// <p>Specifies whether the cluster should terminate after completing all steps.</p>
pub fn auto_terminate(&self) -> bool {
self.auto_terminate
}
/// <p>Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.</p>
pub fn termination_protected(&self) -> bool {
self.termination_protected
}
/// <p>Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When <code>true</code>, IAM principals in the Amazon Web Services account can perform EMR cluster actions on the cluster that their IAM policies allow. When <code>false</code>, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.</p>
/// <p>The default value is <code>true</code> if a value is not provided when creating a cluster using the EMR API <code>RunJobFlow</code> command, the CLI <a href="https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html">create-cluster</a> command, or the Amazon Web Services Management Console.</p>
pub fn visible_to_all_users(&self) -> bool {
self.visible_to_all_users
}
/// <p>The applications installed on this cluster.</p>
pub fn applications(&self) -> std::option::Option<&[crate::model::Application]> {
self.applications.as_deref()
}
/// <p>A list of tags associated with a cluster.</p>
pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
self.tags.as_deref()
}
/// <p>The IAM role that Amazon EMR assumes in order to access Amazon Web Services resources on your behalf.</p>
pub fn service_role(&self) -> std::option::Option<&str> {
self.service_role.as_deref()
}
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.</p>
pub fn normalized_instance_hours(&self) -> std::option::Option<i32> {
self.normalized_instance_hours
}
/// <p>The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.</p>
pub fn master_public_dns_name(&self) -> std::option::Option<&str> {
self.master_public_dns_name.as_deref()
}
/// <p>Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.</p>
pub fn configurations(&self) -> std::option::Option<&[crate::model::Configuration]> {
self.configurations.as_deref()
}
/// <p>The name of the security configuration applied to the cluster.</p>
pub fn security_configuration(&self) -> std::option::Option<&str> {
self.security_configuration.as_deref()
}
/// <p>An IAM role for automatic scaling policies. The default role is <code>EMR_AutoScaling_DefaultRole</code>. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.</p>
pub fn auto_scaling_role(&self) -> std::option::Option<&str> {
self.auto_scaling_role.as_deref()
}
/// <p>The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. <code>TERMINATE_AT_INSTANCE_HOUR</code> indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. <code>TERMINATE_AT_TASK_COMPLETION</code> indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. <code>TERMINATE_AT_TASK_COMPLETION</code> is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.</p>
pub fn scale_down_behavior(&self) -> std::option::Option<&crate::model::ScaleDownBehavior> {
self.scale_down_behavior.as_ref()
}
/// <p>Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.</p>
pub fn custom_ami_id(&self) -> std::option::Option<&str> {
self.custom_ami_id.as_deref()
}
/// <p>The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.</p>
pub fn ebs_root_volume_size(&self) -> std::option::Option<i32> {
self.ebs_root_volume_size
}
/// <p>Applies only when <code>CustomAmiID</code> is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI.</p>
pub fn repo_upgrade_on_boot(&self) -> std::option::Option<&crate::model::RepoUpgradeOnBoot> {
self.repo_upgrade_on_boot.as_ref()
}
/// <p>Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-kerberos.html">Use Kerberos Authentication</a> in the <i>Amazon EMR Management Guide</i>.</p>
pub fn kerberos_attributes(&self) -> std::option::Option<&crate::model::KerberosAttributes> {
self.kerberos_attributes.as_ref()
}
/// <p>The Amazon Resource Name of the cluster.</p>
pub fn cluster_arn(&self) -> std::option::Option<&str> {
self.cluster_arn.as_deref()
}
/// <p> The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. </p>
pub fn outpost_arn(&self) -> std::option::Option<&str> {
self.outpost_arn.as_deref()
}
/// <p>Specifies the number of steps that can be executed concurrently.</p>
pub fn step_concurrency_level(&self) -> std::option::Option<i32> {
self.step_concurrency_level
}
/// <p>Placement group configured for an Amazon EMR cluster.</p>
pub fn placement_groups(&self) -> std::option::Option<&[crate::model::PlacementGroupConfig]> {
self.placement_groups.as_deref()
}
/// <p>The Amazon Linux release specified in a cluster launch RunJobFlow request. If no Amazon Linux release was specified, the default Amazon Linux release is shown in the response.</p>
pub fn os_release_label(&self) -> std::option::Option<&str> {
self.os_release_label.as_deref()
}
}
impl std::fmt::Debug for Cluster {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Cluster");
formatter.field("id", &self.id);
formatter.field("name", &self.name);
formatter.field("status", &self.status);
formatter.field("ec2_instance_attributes", &self.ec2_instance_attributes);
formatter.field("instance_collection_type", &self.instance_collection_type);
formatter.field("log_uri", &self.log_uri);
formatter.field("log_encryption_kms_key_id", &self.log_encryption_kms_key_id);
formatter.field("requested_ami_version", &self.requested_ami_version);
formatter.field("running_ami_version", &self.running_ami_version);
formatter.field("release_label", &self.release_label);
formatter.field("auto_terminate", &self.auto_terminate);
formatter.field("termination_protected", &self.termination_protected);
formatter.field("visible_to_all_users", &self.visible_to_all_users);
formatter.field("applications", &self.applications);
formatter.field("tags", &self.tags);
formatter.field("service_role", &self.service_role);
formatter.field("normalized_instance_hours", &self.normalized_instance_hours);
formatter.field("master_public_dns_name", &self.master_public_dns_name);
formatter.field("configurations", &self.configurations);
formatter.field("security_configuration", &self.security_configuration);
formatter.field("auto_scaling_role", &self.auto_scaling_role);
formatter.field("scale_down_behavior", &self.scale_down_behavior);
formatter.field("custom_ami_id", &self.custom_ami_id);
formatter.field("ebs_root_volume_size", &self.ebs_root_volume_size);
formatter.field("repo_upgrade_on_boot", &self.repo_upgrade_on_boot);
formatter.field("kerberos_attributes", &self.kerberos_attributes);
formatter.field("cluster_arn", &self.cluster_arn);
formatter.field("outpost_arn", &self.outpost_arn);
formatter.field("step_concurrency_level", &self.step_concurrency_level);
formatter.field("placement_groups", &self.placement_groups);
formatter.field("os_release_label", &self.os_release_label);
formatter.finish()
}
}
/// See [`Cluster`](crate::model::Cluster)
pub mod cluster {
/// A builder for [`Cluster`](crate::model::Cluster)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::ClusterStatus>,
pub(crate) ec2_instance_attributes:
std::option::Option<crate::model::Ec2InstanceAttributes>,
pub(crate) instance_collection_type:
std::option::Option<crate::model::InstanceCollectionType>,
pub(crate) log_uri: std::option::Option<std::string::String>,
pub(crate) log_encryption_kms_key_id: std::option::Option<std::string::String>,
pub(crate) requested_ami_version: std::option::Option<std::string::String>,
pub(crate) running_ami_version: std::option::Option<std::string::String>,
pub(crate) release_label: std::option::Option<std::string::String>,
pub(crate) auto_terminate: std::option::Option<bool>,
pub(crate) termination_protected: std::option::Option<bool>,
pub(crate) visible_to_all_users: std::option::Option<bool>,
pub(crate) applications: std::option::Option<std::vec::Vec<crate::model::Application>>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
pub(crate) service_role: std::option::Option<std::string::String>,
pub(crate) normalized_instance_hours: std::option::Option<i32>,
pub(crate) master_public_dns_name: std::option::Option<std::string::String>,
pub(crate) configurations: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
pub(crate) security_configuration: std::option::Option<std::string::String>,
pub(crate) auto_scaling_role: std::option::Option<std::string::String>,
pub(crate) scale_down_behavior: std::option::Option<crate::model::ScaleDownBehavior>,
pub(crate) custom_ami_id: std::option::Option<std::string::String>,
pub(crate) ebs_root_volume_size: std::option::Option<i32>,
pub(crate) repo_upgrade_on_boot: std::option::Option<crate::model::RepoUpgradeOnBoot>,
pub(crate) kerberos_attributes: std::option::Option<crate::model::KerberosAttributes>,
pub(crate) cluster_arn: std::option::Option<std::string::String>,
pub(crate) outpost_arn: std::option::Option<std::string::String>,
pub(crate) step_concurrency_level: std::option::Option<i32>,
pub(crate) placement_groups:
std::option::Option<std::vec::Vec<crate::model::PlacementGroupConfig>>,
pub(crate) os_release_label: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The unique identifier for the cluster.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.id = Some(input.into());
self
}
/// <p>The unique identifier for the cluster.</p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.id = input;
self
}
/// <p>The name of the cluster.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the cluster.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The current status details about the cluster.</p>
pub fn status(mut self, input: crate::model::ClusterStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The current status details about the cluster.</p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::ClusterStatus>,
) -> Self {
self.status = input;
self
}
/// <p>Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.</p>
pub fn ec2_instance_attributes(
mut self,
input: crate::model::Ec2InstanceAttributes,
) -> Self {
self.ec2_instance_attributes = Some(input);
self
}
/// <p>Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.</p>
pub fn set_ec2_instance_attributes(
mut self,
input: std::option::Option<crate::model::Ec2InstanceAttributes>,
) -> Self {
self.ec2_instance_attributes = input;
self
}
/// <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
/// <p>The instance group configuration of the cluster. A value of <code>INSTANCE_GROUP</code> indicates a uniform instance group configuration. A value of <code>INSTANCE_FLEET</code> indicates an instance fleets configuration.</p>
pub fn instance_collection_type(
mut self,
input: crate::model::InstanceCollectionType,
) -> Self {
self.instance_collection_type = Some(input);
self
}
/// <note>
/// <p>The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.</p>
/// </note>
/// <p>The instance group configuration of the cluster. A value of <code>INSTANCE_GROUP</code> indicates a uniform instance group configuration. A value of <code>INSTANCE_FLEET</code> indicates an instance fleets configuration.</p>
pub fn set_instance_collection_type(
mut self,
input: std::option::Option<crate::model::InstanceCollectionType>,
) -> Self {
self.instance_collection_type = input;
self
}
/// <p>The path to the Amazon S3 location where logs for this cluster are stored.</p>
pub fn log_uri(mut self, input: impl Into<std::string::String>) -> Self {
self.log_uri = Some(input.into());
self
}
/// <p>The path to the Amazon S3 location where logs for this cluster are stored.</p>
pub fn set_log_uri(mut self, input: std::option::Option<std::string::String>) -> Self {
self.log_uri = input;
self
}
/// <p> The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0. </p>
pub fn log_encryption_kms_key_id(mut self, input: impl Into<std::string::String>) -> Self {
self.log_encryption_kms_key_id = Some(input.into());
self
}
/// <p> The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0. </p>
pub fn set_log_encryption_kms_key_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.log_encryption_kms_key_id = input;
self
}
/// <p>The AMI version requested for this cluster.</p>
pub fn requested_ami_version(mut self, input: impl Into<std::string::String>) -> Self {
self.requested_ami_version = Some(input.into());
self
}
/// <p>The AMI version requested for this cluster.</p>
pub fn set_requested_ami_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.requested_ami_version = input;
self
}
/// <p>The AMI version running on this cluster.</p>
pub fn running_ami_version(mut self, input: impl Into<std::string::String>) -> Self {
self.running_ami_version = Some(input.into());
self
}
/// <p>The AMI version running on this cluster.</p>
pub fn set_running_ami_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.running_ami_version = input;
self
}
/// <p>The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form <code>emr-x.x.x</code>, where x.x.x is an Amazon EMR release version such as <code>emr-5.14.0</code>. For more information about Amazon EMR release versions and included application versions and features, see <a href="https://docs.aws.amazon.com/emr/latest/ReleaseGuide/">https://docs.aws.amazon.com/emr/latest/ReleaseGuide/</a>. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use <code>AmiVersion</code>.</p>
pub fn release_label(mut self, input: impl Into<std::string::String>) -> Self {
self.release_label = Some(input.into());
self
}
/// <p>The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form <code>emr-x.x.x</code>, where x.x.x is an Amazon EMR release version such as <code>emr-5.14.0</code>. For more information about Amazon EMR release versions and included application versions and features, see <a href="https://docs.aws.amazon.com/emr/latest/ReleaseGuide/">https://docs.aws.amazon.com/emr/latest/ReleaseGuide/</a>. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use <code>AmiVersion</code>.</p>
pub fn set_release_label(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.release_label = input;
self
}
/// <p>Specifies whether the cluster should terminate after completing all steps.</p>
pub fn auto_terminate(mut self, input: bool) -> Self {
self.auto_terminate = Some(input);
self
}
/// <p>Specifies whether the cluster should terminate after completing all steps.</p>
pub fn set_auto_terminate(mut self, input: std::option::Option<bool>) -> Self {
self.auto_terminate = input;
self
}
/// <p>Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.</p>
pub fn termination_protected(mut self, input: bool) -> Self {
self.termination_protected = Some(input);
self
}
/// <p>Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.</p>
pub fn set_termination_protected(mut self, input: std::option::Option<bool>) -> Self {
self.termination_protected = input;
self
}
/// <p>Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When <code>true</code>, IAM principals in the Amazon Web Services account can perform EMR cluster actions on the cluster that their IAM policies allow. When <code>false</code>, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.</p>
/// <p>The default value is <code>true</code> if a value is not provided when creating a cluster using the EMR API <code>RunJobFlow</code> command, the CLI <a href="https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html">create-cluster</a> command, or the Amazon Web Services Management Console.</p>
pub fn visible_to_all_users(mut self, input: bool) -> Self {
self.visible_to_all_users = Some(input);
self
}
/// <p>Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When <code>true</code>, IAM principals in the Amazon Web Services account can perform EMR cluster actions on the cluster that their IAM policies allow. When <code>false</code>, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.</p>
/// <p>The default value is <code>true</code> if a value is not provided when creating a cluster using the EMR API <code>RunJobFlow</code> command, the CLI <a href="https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html">create-cluster</a> command, or the Amazon Web Services Management Console.</p>
pub fn set_visible_to_all_users(mut self, input: std::option::Option<bool>) -> Self {
self.visible_to_all_users = input;
self
}
/// Appends an item to `applications`.
///
/// To override the contents of this collection use [`set_applications`](Self::set_applications).
///
/// <p>The applications installed on this cluster.</p>
pub fn applications(mut self, input: crate::model::Application) -> Self {
let mut v = self.applications.unwrap_or_default();
v.push(input);
self.applications = Some(v);
self
}
/// <p>The applications installed on this cluster.</p>
pub fn set_applications(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Application>>,
) -> Self {
self.applications = input;
self
}
/// Appends an item to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>A list of tags associated with a cluster.</p>
pub fn tags(mut self, input: crate::model::Tag) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input);
self.tags = Some(v);
self
}
/// <p>A list of tags associated with a cluster.</p>
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// <p>The IAM role that Amazon EMR assumes in order to access Amazon Web Services resources on your behalf.</p>
pub fn service_role(mut self, input: impl Into<std::string::String>) -> Self {
self.service_role = Some(input.into());
self
}
/// <p>The IAM role that Amazon EMR assumes in order to access Amazon Web Services resources on your behalf.</p>
pub fn set_service_role(mut self, input: std::option::Option<std::string::String>) -> Self {
self.service_role = input;
self
}
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.</p>
pub fn normalized_instance_hours(mut self, input: i32) -> Self {
self.normalized_instance_hours = Some(input);
self
}
/// <p>An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.</p>
pub fn set_normalized_instance_hours(mut self, input: std::option::Option<i32>) -> Self {
self.normalized_instance_hours = input;
self
}
/// <p>The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.</p>
pub fn master_public_dns_name(mut self, input: impl Into<std::string::String>) -> Self {
self.master_public_dns_name = Some(input.into());
self
}
/// <p>The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.</p>
pub fn set_master_public_dns_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.master_public_dns_name = input;
self
}
/// Appends an item to `configurations`.
///
/// To override the contents of this collection use [`set_configurations`](Self::set_configurations).
///
/// <p>Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.</p>
pub fn configurations(mut self, input: crate::model::Configuration) -> Self {
let mut v = self.configurations.unwrap_or_default();
v.push(input);
self.configurations = Some(v);
self
}
/// <p>Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.</p>
pub fn set_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Configuration>>,
) -> Self {
self.configurations = input;
self
}
/// <p>The name of the security configuration applied to the cluster.</p>
pub fn security_configuration(mut self, input: impl Into<std::string::String>) -> Self {
self.security_configuration = Some(input.into());
self
}
/// <p>The name of the security configuration applied to the cluster.</p>
pub fn set_security_configuration(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.security_configuration = input;
self
}
/// <p>An IAM role for automatic scaling policies. The default role is <code>EMR_AutoScaling_DefaultRole</code>. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.</p>
pub fn auto_scaling_role(mut self, input: impl Into<std::string::String>) -> Self {
self.auto_scaling_role = Some(input.into());
self
}
/// <p>An IAM role for automatic scaling policies. The default role is <code>EMR_AutoScaling_DefaultRole</code>. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.</p>
pub fn set_auto_scaling_role(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.auto_scaling_role = input;
self
}
/// <p>The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. <code>TERMINATE_AT_INSTANCE_HOUR</code> indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. <code>TERMINATE_AT_TASK_COMPLETION</code> indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. <code>TERMINATE_AT_TASK_COMPLETION</code> is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.</p>
pub fn scale_down_behavior(mut self, input: crate::model::ScaleDownBehavior) -> Self {
self.scale_down_behavior = Some(input);
self
}
/// <p>The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. <code>TERMINATE_AT_INSTANCE_HOUR</code> indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. <code>TERMINATE_AT_TASK_COMPLETION</code> indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. <code>TERMINATE_AT_TASK_COMPLETION</code> is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.</p>
pub fn set_scale_down_behavior(
mut self,
input: std::option::Option<crate::model::ScaleDownBehavior>,
) -> Self {
self.scale_down_behavior = input;
self
}
/// <p>Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.</p>
pub fn custom_ami_id(mut self, input: impl Into<std::string::String>) -> Self {
self.custom_ami_id = Some(input.into());
self
}
/// <p>Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.</p>
pub fn set_custom_ami_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.custom_ami_id = input;
self
}
/// <p>The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.</p>
pub fn ebs_root_volume_size(mut self, input: i32) -> Self {
self.ebs_root_volume_size = Some(input);
self
}
/// <p>The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.</p>
pub fn set_ebs_root_volume_size(mut self, input: std::option::Option<i32>) -> Self {
self.ebs_root_volume_size = input;
self
}
/// <p>Applies only when <code>CustomAmiID</code> is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI.</p>
pub fn repo_upgrade_on_boot(mut self, input: crate::model::RepoUpgradeOnBoot) -> Self {
self.repo_upgrade_on_boot = Some(input);
self
}
/// <p>Applies only when <code>CustomAmiID</code> is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI.</p>
pub fn set_repo_upgrade_on_boot(
mut self,
input: std::option::Option<crate::model::RepoUpgradeOnBoot>,
) -> Self {
self.repo_upgrade_on_boot = input;
self
}
/// <p>Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-kerberos.html">Use Kerberos Authentication</a> in the <i>Amazon EMR Management Guide</i>.</p>
pub fn kerberos_attributes(mut self, input: crate::model::KerberosAttributes) -> Self {
self.kerberos_attributes = Some(input);
self
}
/// <p>Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see <a href="https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-kerberos.html">Use Kerberos Authentication</a> in the <i>Amazon EMR Management Guide</i>.</p>
pub fn set_kerberos_attributes(
mut self,
input: std::option::Option<crate::model::KerberosAttributes>,
) -> Self {
self.kerberos_attributes = input;
self
}
/// <p>The Amazon Resource Name of the cluster.</p>
pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name of the cluster.</p>
pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.cluster_arn = input;
self
}
/// <p> The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. </p>
pub fn outpost_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.outpost_arn = Some(input.into());
self
}
/// <p> The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. </p>
pub fn set_outpost_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.outpost_arn = input;
self
}
/// <p>Specifies the number of steps that can be executed concurrently.</p>
pub fn step_concurrency_level(mut self, input: i32) -> Self {
self.step_concurrency_level = Some(input);
self
}
/// <p>Specifies the number of steps that can be executed concurrently.</p>
pub fn set_step_concurrency_level(mut self, input: std::option::Option<i32>) -> Self {
self.step_concurrency_level = input;
self
}
/// Appends an item to `placement_groups`.
///
/// To override the contents of this collection use [`set_placement_groups`](Self::set_placement_groups).
///
/// <p>Placement group configured for an Amazon EMR cluster.</p>
pub fn placement_groups(mut self, input: crate::model::PlacementGroupConfig) -> Self {
let mut v = self.placement_groups.unwrap_or_default();
v.push(input);
self.placement_groups = Some(v);
self
}
/// <p>Placement group configured for an Amazon EMR cluster.</p>
pub fn set_placement_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::PlacementGroupConfig>>,
) -> Self {
self.placement_groups = input;
self
}
/// <p>The Amazon Linux release specified in a cluster launch RunJobFlow request. If no Amazon Linux release was specified, the default Amazon Linux release is shown in the response.</p>
pub fn os_release_label(mut self, input: impl Into<std::string::String>) -> Self {
self.os_release_label = Some(input.into());
self
}
/// <p>The Amazon Linux release specified in a cluster launch RunJobFlow request. If no Amazon Linux release was specified, the default Amazon Linux release is shown in the response.</p>
pub fn set_os_release_label(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.os_release_label = input;
self
}
/// Consumes the builder and constructs a [`Cluster`](crate::model::Cluster)
pub fn build(self) -> crate::model::Cluster {
crate::model::Cluster {
id: self.id,
name: self.name,
status: self.status,
ec2_instance_attributes: self.ec2_instance_attributes,
instance_collection_type: self.instance_collection_type,
log_uri: self.log_uri,
log_encryption_kms_key_id: self.log_encryption_kms_key_id,
requested_ami_version: self.requested_ami_version,
running_ami_version: self.running_ami_version,
release_label: self.release_label,
auto_terminate: self.auto_terminate.unwrap_or_default(),
termination_protected: self.termination_protected.unwrap_or_default(),
visible_to_all_users: self.visible_to_all_users.unwrap_or_default(),
applications: self.applications,
tags: self.tags,
service_role: self.service_role,
normalized_instance_hours: self.normalized_instance_hours,
master_public_dns_name: self.master_public_dns_name,
configurations: self.configurations,
security_configuration: self.security_configuration,
auto_scaling_role: self.auto_scaling_role,
scale_down_behavior: self.scale_down_behavior,
custom_ami_id: self.custom_ami_id,
ebs_root_volume_size: self.ebs_root_volume_size,
repo_upgrade_on_boot: self.repo_upgrade_on_boot,
kerberos_attributes: self.kerberos_attributes,
cluster_arn: self.cluster_arn,
outpost_arn: self.outpost_arn,
step_concurrency_level: self.step_concurrency_level,
placement_groups: self.placement_groups,
os_release_label: self.os_release_label,
}
}
}
}
impl Cluster {
/// Creates a new builder-style object to manufacture [`Cluster`](crate::model::Cluster)
pub fn builder() -> crate::model::cluster::Builder {
crate::model::cluster::Builder::default()
}
}
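// Illustrative usage sketch (not part of the generated model): it shows how the
// builder defined above assembles a `Cluster` and how values come back through
// the accessors. The string and numeric values are placeholders for this example.
#[cfg(test)]
mod cluster_builder_usage_example {
    #[test]
    fn builds_a_cluster_and_reads_it_back() {
        let cluster = crate::model::Cluster::builder()
            .service_role("EMR_DefaultRole")
            .cluster_arn("arn:aws:elasticmapreduce:us-east-1:111122223333:cluster/j-EXAMPLE")
            .step_concurrency_level(1)
            .build();
        // Optional string fields are exposed as `Option<&str>` by the accessors.
        assert_eq!(cluster.service_role(), Some("EMR_DefaultRole"));
        assert_eq!(cluster.step_concurrency_level(), Some(1));
    }
}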
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceCollectionType {
#[allow(missing_docs)] // documentation missing in model
InstanceFleet,
#[allow(missing_docs)] // documentation missing in model
InstanceGroup,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceCollectionType {
fn from(s: &str) -> Self {
match s {
"INSTANCE_FLEET" => InstanceCollectionType::InstanceFleet,
"INSTANCE_GROUP" => InstanceCollectionType::InstanceGroup,
other => InstanceCollectionType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceCollectionType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceCollectionType::from(s))
}
}
impl InstanceCollectionType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
InstanceCollectionType::InstanceFleet => "INSTANCE_FLEET",
InstanceCollectionType::InstanceGroup => "INSTANCE_GROUP",
InstanceCollectionType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["INSTANCE_FLEET", "INSTANCE_GROUP"]
}
}
impl AsRef<str> for InstanceCollectionType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
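// Illustrative sketch (not part of the generated model): string round-tripping
// for `InstanceCollectionType`. Known values map to named variants, anything
// else is preserved in `Unknown`, and `values()` lists the known strings.
#[cfg(test)]
mod instance_collection_type_usage_example {
    use std::str::FromStr;

    #[test]
    fn round_trips_known_and_unknown_values() {
        let fleet = crate::model::InstanceCollectionType::from_str("INSTANCE_FLEET").unwrap();
        assert_eq!(fleet.as_str(), "INSTANCE_FLEET");

        let future_variant = crate::model::InstanceCollectionType::from("SOMETHING_NEW");
        assert_eq!(future_variant.as_str(), "SOMETHING_NEW");

        assert_eq!(
            crate::model::InstanceCollectionType::values(),
            &["INSTANCE_FLEET", "INSTANCE_GROUP"]
        );
    }
}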
/// <p>Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Ec2InstanceAttributes {
/// <p>The name of the Amazon EC2 key pair to use when connecting with SSH into the master node as a user named "hadoop".</p>
pub ec2_key_name: std::option::Option<std::string::String>,
/// <p>Set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, and your account supports EC2-Classic, the cluster launches in EC2-Classic.</p>
pub ec2_subnet_id: std::option::Option<std::string::String>,
/// <p>Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of <code>RequestedEc2SubnetIds</code>, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and Region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses <code>RequestedEc2AvailabilityZones</code> instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. <code>RequestedEc2SubnetIDs</code> and <code>RequestedEc2AvailabilityZones</code> cannot be specified together.</p>
pub requested_ec2_subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The Availability Zone in which the cluster will run. </p>
pub ec2_availability_zone: std::option::Option<std::string::String>,
/// <p>Applies to clusters configured with the instance fleets option. Specifies one or more Availability Zones in which to launch EC2 cluster instances when the EC2-Classic network configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from among the list of <code>RequestedEc2AvailabilityZones</code>, and then launches all cluster instances within that Availability Zone. If you do not specify this value, Amazon EMR chooses the Availability Zone for you. <code>RequestedEc2SubnetIDs</code> and <code>RequestedEc2AvailabilityZones</code> cannot be specified together.</p>
pub requested_ec2_availability_zones: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The IAM role that was specified when the cluster was launched. The EC2 instances of the cluster assume this role.</p>
pub iam_instance_profile: std::option::Option<std::string::String>,
/// <p>The identifier of the Amazon EC2 security group for the master node.</p>
pub emr_managed_master_security_group: std::option::Option<std::string::String>,
/// <p>The identifier of the Amazon EC2 security group for the core and task nodes.</p>
pub emr_managed_slave_security_group: std::option::Option<std::string::String>,
/// <p>The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.</p>
pub service_access_security_group: std::option::Option<std::string::String>,
/// <p>A list of additional Amazon EC2 security group IDs for the master node.</p>
pub additional_master_security_groups: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>A list of additional Amazon EC2 security group IDs for the core and task nodes.</p>
pub additional_slave_security_groups: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Ec2InstanceAttributes {
/// <p>The name of the Amazon EC2 key pair to use when connecting with SSH into the master node as a user named "hadoop".</p>
pub fn ec2_key_name(&self) -> std::option::Option<&str> {
self.ec2_key_name.as_deref()
}
/// <p>Set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, and your account supports EC2-Classic, the cluster launches in EC2-Classic.</p>
pub fn ec2_subnet_id(&self) -> std::option::Option<&str> {
self.ec2_subnet_id.as_deref()
}
/// <p>Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of <code>RequestedEc2SubnetIds</code>, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and Region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses <code>RequestedEc2AvailabilityZones</code> instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. <code>RequestedEc2SubnetIDs</code> and <code>RequestedEc2AvailabilityZones</code> cannot be specified together.</p>
pub fn requested_ec2_subnet_ids(&self) -> std::option::Option<&[std::string::String]> {
self.requested_ec2_subnet_ids.as_deref()
}
/// <p>The Availability Zone in which the cluster will run. </p>
pub fn ec2_availability_zone(&self) -> std::option::Option<&str> {
self.ec2_availability_zone.as_deref()
}
/// <p>Applies to clusters configured with the instance fleets option. Specifies one or more Availability Zones in which to launch EC2 cluster instances when the EC2-Classic network configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from among the list of <code>RequestedEc2AvailabilityZones</code>, and then launches all cluster instances within that Availability Zone. If you do not specify this value, Amazon EMR chooses the Availability Zone for you. <code>RequestedEc2SubnetIDs</code> and <code>RequestedEc2AvailabilityZones</code> cannot be specified together.</p>
pub fn requested_ec2_availability_zones(&self) -> std::option::Option<&[std::string::String]> {
self.requested_ec2_availability_zones.as_deref()
}
/// <p>The IAM role that was specified when the cluster was launched. The EC2 instances of the cluster assume this role.</p>
pub fn iam_instance_profile(&self) -> std::option::Option<&str> {
self.iam_instance_profile.as_deref()
}
/// <p>The identifier of the Amazon EC2 security group for the master node.</p>
pub fn emr_managed_master_security_group(&self) -> std::option::Option<&str> {
self.emr_managed_master_security_group.as_deref()
}
/// <p>The identifier of the Amazon EC2 security group for the core and task nodes.</p>
pub fn emr_managed_slave_security_group(&self) -> std::option::Option<&str> {
self.emr_managed_slave_security_group.as_deref()
}
/// <p>The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.</p>
pub fn service_access_security_group(&self) -> std::option::Option<&str> {
self.service_access_security_group.as_deref()
}
/// <p>A list of additional Amazon EC2 security group IDs for the master node.</p>
pub fn additional_master_security_groups(&self) -> std::option::Option<&[std::string::String]> {
self.additional_master_security_groups.as_deref()
}
/// <p>A list of additional Amazon EC2 security group IDs for the core and task nodes.</p>
pub fn additional_slave_security_groups(&self) -> std::option::Option<&[std::string::String]> {
self.additional_slave_security_groups.as_deref()
}
}
impl std::fmt::Debug for Ec2InstanceAttributes {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Ec2InstanceAttributes");
formatter.field("ec2_key_name", &self.ec2_key_name);
formatter.field("ec2_subnet_id", &self.ec2_subnet_id);
formatter.field("requested_ec2_subnet_ids", &self.requested_ec2_subnet_ids);
formatter.field("ec2_availability_zone", &self.ec2_availability_zone);
formatter.field(
"requested_ec2_availability_zones",
&self.requested_ec2_availability_zones,
);
formatter.field("iam_instance_profile", &self.iam_instance_profile);
formatter.field(
"emr_managed_master_security_group",
&self.emr_managed_master_security_group,
);
formatter.field(
"emr_managed_slave_security_group",
&self.emr_managed_slave_security_group,
);
formatter.field(
"service_access_security_group",
&self.service_access_security_group,
);
formatter.field(
"additional_master_security_groups",
&self.additional_master_security_groups,
);
formatter.field(
"additional_slave_security_groups",
&self.additional_slave_security_groups,
);
formatter.finish()
}
}
/// See [`Ec2InstanceAttributes`](crate::model::Ec2InstanceAttributes)
pub mod ec2_instance_attributes {
/// A builder for [`Ec2InstanceAttributes`](crate::model::Ec2InstanceAttributes)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) ec2_key_name: std::option::Option<std::string::String>,
pub(crate) ec2_subnet_id: std::option::Option<std::string::String>,
pub(crate) requested_ec2_subnet_ids:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) ec2_availability_zone: std::option::Option<std::string::String>,
pub(crate) requested_ec2_availability_zones:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) iam_instance_profile: std::option::Option<std::string::String>,
pub(crate) emr_managed_master_security_group: std::option::Option<std::string::String>,
pub(crate) emr_managed_slave_security_group: std::option::Option<std::string::String>,
pub(crate) service_access_security_group: std::option::Option<std::string::String>,
pub(crate) additional_master_security_groups:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) additional_slave_security_groups:
std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The name of the Amazon EC2 key pair to use when connecting with SSH into the master node as a user named "hadoop".</p>
pub fn ec2_key_name(mut self, input: impl Into<std::string::String>) -> Self {
self.ec2_key_name = Some(input.into());
self
}
/// <p>The name of the Amazon EC2 key pair to use when connecting with SSH into the master node as a user named "hadoop".</p>
pub fn set_ec2_key_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ec2_key_name = input;
self
}
/// <p>Set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, and your account supports EC2-Classic, the cluster launches in EC2-Classic.</p>
pub fn ec2_subnet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.ec2_subnet_id = Some(input.into());
self
}
/// <p>Set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, and your account supports EC2-Classic, the cluster launches in EC2-Classic.</p>
pub fn set_ec2_subnet_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.ec2_subnet_id = input;
self
}
/// Appends an item to `requested_ec2_subnet_ids`.
///
/// To override the contents of this collection use [`set_requested_ec2_subnet_ids`](Self::set_requested_ec2_subnet_ids).
///
/// <p>Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of <code>RequestedEc2SubnetIds</code>, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and Region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses <code>RequestedEc2AvailabilityZones</code> instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. <code>RequestedEc2SubnetIDs</code> and <code>RequestedEc2AvailabilityZones</code> cannot be specified together.</p>
pub fn requested_ec2_subnet_ids(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.requested_ec2_subnet_ids.unwrap_or_default();
v.push(input.into());
self.requested_ec2_subnet_ids = Some(v);
self
}
/// <p>Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of <code>RequestedEc2SubnetIds</code>, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and Region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses <code>RequestedEc2AvailabilityZones</code> instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. <code>RequestedEc2SubnetIDs</code> and <code>RequestedEc2AvailabilityZones</code> cannot be specified together.</p>
pub fn set_requested_ec2_subnet_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.requested_ec2_subnet_ids = input;
self
}
/// <p>The Availability Zone in which the cluster will run. </p>
pub fn ec2_availability_zone(mut self, input: impl Into<std::string::String>) -> Self {
self.ec2_availability_zone = Some(input.into());
self
}
/// <p>The Availability Zone in which the cluster will run. </p>
pub fn set_ec2_availability_zone(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.ec2_availability_zone = input;
self
}
/// Appends an item to `requested_ec2_availability_zones`.
///
/// To override the contents of this collection use [`set_requested_ec2_availability_zones`](Self::set_requested_ec2_availability_zones).
///
/// <p>Applies to clusters configured with the instance fleets option. Specifies one or more Availability Zones in which to launch EC2 cluster instances when the EC2-Classic network configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from among the list of <code>RequestedEc2AvailabilityZones</code>, and then launches all cluster instances within that Availability Zone. If you do not specify this value, Amazon EMR chooses the Availability Zone for you. <code>RequestedEc2SubnetIDs</code> and <code>RequestedEc2AvailabilityZones</code> cannot be specified together.</p>
pub fn requested_ec2_availability_zones(
mut self,
input: impl Into<std::string::String>,
) -> Self {
let mut v = self.requested_ec2_availability_zones.unwrap_or_default();
v.push(input.into());
self.requested_ec2_availability_zones = Some(v);
self
}
/// <p>Applies to clusters configured with the instance fleets option. Specifies one or more Availability Zones in which to launch EC2 cluster instances when the EC2-Classic network configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from among the list of <code>RequestedEc2AvailabilityZones</code>, and then launches all cluster instances within that Availability Zone. If you do not specify this value, Amazon EMR chooses the Availability Zone for you. <code>RequestedEc2SubnetIDs</code> and <code>RequestedEc2AvailabilityZones</code> cannot be specified together.</p>
pub fn set_requested_ec2_availability_zones(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.requested_ec2_availability_zones = input;
self
}
/// <p>The IAM role that was specified when the cluster was launched. The EC2 instances of the cluster assume this role.</p>
pub fn iam_instance_profile(mut self, input: impl Into<std::string::String>) -> Self {
self.iam_instance_profile = Some(input.into());
self
}
/// <p>The IAM role that was specified when the cluster was launched. The EC2 instances of the cluster assume this role.</p>
pub fn set_iam_instance_profile(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.iam_instance_profile = input;
self
}
/// <p>The identifier of the Amazon EC2 security group for the master node.</p>
pub fn emr_managed_master_security_group(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.emr_managed_master_security_group = Some(input.into());
self
}
/// <p>The identifier of the Amazon EC2 security group for the master node.</p>
pub fn set_emr_managed_master_security_group(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.emr_managed_master_security_group = input;
self
}
/// <p>The identifier of the Amazon EC2 security group for the core and task nodes.</p>
pub fn emr_managed_slave_security_group(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.emr_managed_slave_security_group = Some(input.into());
self
}
/// <p>The identifier of the Amazon EC2 security group for the core and task nodes.</p>
pub fn set_emr_managed_slave_security_group(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.emr_managed_slave_security_group = input;
self
}
/// <p>The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.</p>
pub fn service_access_security_group(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.service_access_security_group = Some(input.into());
self
}
/// <p>The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.</p>
pub fn set_service_access_security_group(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.service_access_security_group = input;
self
}
/// Appends an item to `additional_master_security_groups`.
///
/// To override the contents of this collection use [`set_additional_master_security_groups`](Self::set_additional_master_security_groups).
///
/// <p>A list of additional Amazon EC2 security group IDs for the master node.</p>
pub fn additional_master_security_groups(
mut self,
input: impl Into<std::string::String>,
) -> Self {
let mut v = self.additional_master_security_groups.unwrap_or_default();
v.push(input.into());
self.additional_master_security_groups = Some(v);
self
}
/// <p>A list of additional Amazon EC2 security group IDs for the master node.</p>
pub fn set_additional_master_security_groups(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.additional_master_security_groups = input;
self
}
/// Appends an item to `additional_slave_security_groups`.
///
/// To override the contents of this collection use [`set_additional_slave_security_groups`](Self::set_additional_slave_security_groups).
///
/// <p>A list of additional Amazon EC2 security group IDs for the core and task nodes.</p>
pub fn additional_slave_security_groups(
mut self,
input: impl Into<std::string::String>,
) -> Self {
let mut v = self.additional_slave_security_groups.unwrap_or_default();
v.push(input.into());
self.additional_slave_security_groups = Some(v);
self
}
/// <p>A list of additional Amazon EC2 security group IDs for the core and task nodes.</p>
pub fn set_additional_slave_security_groups(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.additional_slave_security_groups = input;
self
}
/// Consumes the builder and constructs a [`Ec2InstanceAttributes`](crate::model::Ec2InstanceAttributes)
pub fn build(self) -> crate::model::Ec2InstanceAttributes {
crate::model::Ec2InstanceAttributes {
ec2_key_name: self.ec2_key_name,
ec2_subnet_id: self.ec2_subnet_id,
requested_ec2_subnet_ids: self.requested_ec2_subnet_ids,
ec2_availability_zone: self.ec2_availability_zone,
requested_ec2_availability_zones: self.requested_ec2_availability_zones,
iam_instance_profile: self.iam_instance_profile,
emr_managed_master_security_group: self.emr_managed_master_security_group,
emr_managed_slave_security_group: self.emr_managed_slave_security_group,
service_access_security_group: self.service_access_security_group,
additional_master_security_groups: self.additional_master_security_groups,
additional_slave_security_groups: self.additional_slave_security_groups,
}
}
}
}
impl Ec2InstanceAttributes {
/// Creates a new builder-style object to manufacture [`Ec2InstanceAttributes`](crate::model::Ec2InstanceAttributes)
pub fn builder() -> crate::model::ec2_instance_attributes::Builder {
crate::model::ec2_instance_attributes::Builder::default()
}
}
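// Illustrative sketch (not part of the generated model): the vector-valued
// builder methods above append one element per call, while the `set_*`
// variants replace the whole collection. Identifiers below are placeholders.
#[cfg(test)]
mod ec2_instance_attributes_usage_example {
    #[test]
    fn appends_one_subnet_id_per_call() {
        let attrs = crate::model::Ec2InstanceAttributes::builder()
            .ec2_key_name("example-key-pair")
            .requested_ec2_subnet_ids("subnet-aaaa1111")
            .requested_ec2_subnet_ids("subnet-bbbb2222")
            .build();
        assert_eq!(attrs.ec2_key_name(), Some("example-key-pair"));
        assert_eq!(
            attrs.requested_ec2_subnet_ids(),
            Some(&["subnet-aaaa1111".to_string(), "subnet-bbbb2222".to_string()][..])
        );
    }
}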
/// <p>Specification of the status of a CancelSteps request. Available only in Amazon EMR version 4.8.0 and later, excluding version 5.0.0.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CancelStepsInfo {
/// <p>The encrypted StepId of a step.</p>
pub step_id: std::option::Option<std::string::String>,
/// <p>The status of a CancelSteps Request. The value may be SUBMITTED or FAILED.</p>
pub status: std::option::Option<crate::model::CancelStepsRequestStatus>,
/// <p>The reason for the failure if the CancelSteps request fails.</p>
pub reason: std::option::Option<std::string::String>,
}
impl CancelStepsInfo {
/// <p>The encrypted StepId of a step.</p>
pub fn step_id(&self) -> std::option::Option<&str> {
self.step_id.as_deref()
}
/// <p>The status of a CancelSteps Request. The value may be SUBMITTED or FAILED.</p>
pub fn status(&self) -> std::option::Option<&crate::model::CancelStepsRequestStatus> {
self.status.as_ref()
}
/// <p>The reason for the failure if the CancelSteps request fails.</p>
pub fn reason(&self) -> std::option::Option<&str> {
self.reason.as_deref()
}
}
impl std::fmt::Debug for CancelStepsInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CancelStepsInfo");
formatter.field("step_id", &self.step_id);
formatter.field("status", &self.status);
formatter.field("reason", &self.reason);
formatter.finish()
}
}
/// See [`CancelStepsInfo`](crate::model::CancelStepsInfo)
pub mod cancel_steps_info {
/// A builder for [`CancelStepsInfo`](crate::model::CancelStepsInfo)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) step_id: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::CancelStepsRequestStatus>,
pub(crate) reason: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The encrypted StepId of a step.</p>
pub fn step_id(mut self, input: impl Into<std::string::String>) -> Self {
self.step_id = Some(input.into());
self
}
/// <p>The encrypted StepId of a step.</p>
pub fn set_step_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.step_id = input;
self
}
/// <p>The status of a CancelSteps Request. The value may be SUBMITTED or FAILED.</p>
pub fn status(mut self, input: crate::model::CancelStepsRequestStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The status of a CancelSteps Request. The value may be SUBMITTED or FAILED.</p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::CancelStepsRequestStatus>,
) -> Self {
self.status = input;
self
}
/// <p>The reason for the failure if the CancelSteps request fails.</p>
pub fn reason(mut self, input: impl Into<std::string::String>) -> Self {
self.reason = Some(input.into());
self
}
/// <p>The reason for the failure if the CancelSteps request fails.</p>
pub fn set_reason(mut self, input: std::option::Option<std::string::String>) -> Self {
self.reason = input;
self
}
/// Consumes the builder and constructs a [`CancelStepsInfo`](crate::model::CancelStepsInfo)
pub fn build(self) -> crate::model::CancelStepsInfo {
crate::model::CancelStepsInfo {
step_id: self.step_id,
status: self.status,
reason: self.reason,
}
}
}
}
impl CancelStepsInfo {
/// Creates a new builder-style object to manufacture [`CancelStepsInfo`](crate::model::CancelStepsInfo)
pub fn builder() -> crate::model::cancel_steps_info::Builder {
crate::model::cancel_steps_info::Builder::default()
}
}
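// Illustrative sketch (not part of the generated model): assembling a
// `CancelStepsInfo` whose status comes from the `CancelStepsRequestStatus`
// enum defined just below. The step id and reason are placeholders.
#[cfg(test)]
mod cancel_steps_info_usage_example {
    #[test]
    fn builds_cancel_steps_info() {
        let info = crate::model::CancelStepsInfo::builder()
            .step_id("s-EXAMPLE12345")
            .status(crate::model::CancelStepsRequestStatus::Submitted)
            .reason("example reason")
            .build();
        assert_eq!(info.step_id(), Some("s-EXAMPLE12345"));
        assert_eq!(
            info.status(),
            Some(&crate::model::CancelStepsRequestStatus::Submitted)
        );
    }
}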
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum CancelStepsRequestStatus {
#[allow(missing_docs)] // documentation missing in model
Failed,
#[allow(missing_docs)] // documentation missing in model
Submitted,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for CancelStepsRequestStatus {
fn from(s: &str) -> Self {
match s {
"FAILED" => CancelStepsRequestStatus::Failed,
"SUBMITTED" => CancelStepsRequestStatus::Submitted,
other => CancelStepsRequestStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for CancelStepsRequestStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(CancelStepsRequestStatus::from(s))
}
}
impl CancelStepsRequestStatus {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
CancelStepsRequestStatus::Failed => "FAILED",
CancelStepsRequestStatus::Submitted => "SUBMITTED",
CancelStepsRequestStatus::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["FAILED", "SUBMITTED"]
}
}
impl AsRef<str> for CancelStepsRequestStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum StepCancellationOption {
#[allow(missing_docs)] // documentation missing in model
SendInterrupt,
#[allow(missing_docs)] // documentation missing in model
TerminateProcess,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for StepCancellationOption {
fn from(s: &str) -> Self {
match s {
"SEND_INTERRUPT" => StepCancellationOption::SendInterrupt,
"TERMINATE_PROCESS" => StepCancellationOption::TerminateProcess,
other => StepCancellationOption::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for StepCancellationOption {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(StepCancellationOption::from(s))
}
}
impl StepCancellationOption {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
StepCancellationOption::SendInterrupt => "SEND_INTERRUPT",
StepCancellationOption::TerminateProcess => "TERMINATE_PROCESS",
StepCancellationOption::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["SEND_INTERRUPT", "TERMINATE_PROCESS"]
}
}
impl AsRef<str> for StepCancellationOption {
fn as_ref(&self) -> &str {
self.as_str()
}
}
| 52.801318 | 981 | 0.653549 |
debe3d5df96557d45ad4c7985961b9d148bbd057 | 3,848 | use crate::Result;
/// Represents a single NBT tag
#[derive(Clone, PartialEq, Debug)]
pub enum NBT {
End,
Byte(i8),
Short(i16),
Int(i32),
Long(i64),
Float(f32),
Double(f64),
ByteArray(Vec<i8>),
String(Vec<u8>),
List(Vec<NBT>),
Compound(Vec<(Vec<u8>, NBT)>),
IntArray(Vec<i32>),
}
impl NBT {
pub fn get<S: AsRef<[u8]>>(&self, val: S) -> Option<&NBT> {
let s = match self {
NBT::Compound(s) => s,
_ => return None,
};
for (i, v) in s {
if i == &val.as_ref() {
return Some(v);
}
}
None
}
pub fn get_err(&self, val: &[u8]) -> Result<&NBT> {
match self {
NBT::Compound(_) => (),
_ => bail!("NBT was {}, not compound", self.type_string()),
}
self.get(val).ok_or_else(|| format_err!("No value in compound {}", String::from_utf8_lossy(val)))
}
/// Returns the type of the tag as an English string
pub fn type_string(&self) -> &str {
match self {
&NBT::End => "End",
&NBT::Byte(..) => "Byte",
&NBT::Short(..) => "Short",
&NBT::Int(..) => "Int",
&NBT::Long(..) => "Long",
&NBT::Float(..) => "Float",
&NBT::Double(..) => "Double",
&NBT::ByteArray(..) => "ByteArray",
&NBT::String(..) => "String",
&NBT::List(..) => "List",
&NBT::Compound(..) => "Compound",
&NBT::IntArray(..) => "IntArray",
}
}
/// Returns the type of the tag as a single u8
pub fn type_byte(&self) -> u8 {
match self {
&NBT::End => 0,
&NBT::Byte(..) => 1,
&NBT::Short(..) => 2,
&NBT::Int(..) => 3,
&NBT::Long(..) => 4,
&NBT::Float(..) => 5,
&NBT::Double(..) => 6,
&NBT::ByteArray(..) => 7,
&NBT::String(..) => 8,
&NBT::List(..) => 9,
&NBT::Compound(..) => 10,
&NBT::IntArray(..) => 11,
}
}
}
/// Represents the different compression formats NBT files can be in
#[derive(Clone, PartialEq, Debug)]
pub enum Compression {
None,
Gzip,
Zlib,
}
impl Compression {
/// Returns the type of compression as an English string
pub fn to_str(&self) -> &str {
match self {
&Compression::None => "None",
&Compression::Gzip => "Gzip",
&Compression::Zlib => "Zlib",
}
}
/// Given the name of a type of compression, return the corresponding
/// Compression enum. Returns Some(Compression) if the name matches a known
/// compression type, and None otherwise
pub fn from_str(string: &str) -> Option<Self> {
match string {
"None" => Some(Compression::None),
"Gzip" => Some(Compression::Gzip),
"Zlib" => Some(Compression::Zlib),
_ => None,
}
}
/// Given the first byte from an NBT file, return the type of Compression
/// used in that file. Returns Some(Compression) if the type of compression
/// is known, and None otherwise.
pub fn from_first_byte(byte: u8) -> Option<Self> {
/* On compression: To identify how an nbt file is compressed, peek
* at the first byte in the file, with the following meanings: */
match byte {
0x0a => Some(Compression::None),
0x1f => Some(Compression::Gzip),
0x78 => Some(Compression::Zlib),
_ => None,
}
}
}
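// Illustrative sketch (not part of the original file): sniffing the compression of an
// NBT file from its first byte, as described in the comment inside from_first_byte
// (0x0a = uncompressed root Compound tag, 0x1f = gzip magic, 0x78 = zlib header).
//
// let first_byte: u8 = 0x1f; // e.g. the first byte read from level.dat
// match Compression::from_first_byte(first_byte) {
//     Some(c) => println!("detected {} compression", c.to_str()),
//     None => println!("unrecognised first byte"),
// }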
/// Represents a single NBT file, that is all the NBT data, as well as a
/// compression type.
///
/// The root NBT tag will always be an NBT::Compound
#[derive(PartialEq, Debug)]
pub struct NBTFile {
pub root: NBT,
pub compression: Compression,
}
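// Illustrative sketch (not part of the original file): building an NBTFile by hand.
// As the doc comment above says, the root tag is a Compound; keys are raw byte strings.
//
// let file = NBTFile {
//     root: NBT::Compound(vec![(b"Name".to_vec(), NBT::String(b"Steve".to_vec()))]),
//     compression: Compression::Gzip,
// };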
| 29.6 | 105 | 0.494802 |
89d44692b46e0e503eab9274108e0a7b69a31bef | 1,732 | // fn main() {
// println!("Hello, world!");
// }
/**
* Notes on copying in Rust
*
* Assigning a value to a second binding is not a shallow copy: it invalidates the
* original binding, i.e. the value (the pointer to the heap data) is moved.
* When a deep copy is really wanted, Rust expects an explicit call to the `clone`
* function. When you see a call to `clone`, you know that some specific code is
* being executed and that this code may be expensive, so it is easy to tell that
* something unusual is going on.
*/
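// A minimal sketch of the difference described above (kept in comments so the file
// still compiles): the first snippet is rejected by the compiler, the second is fine
// because i32 is Copy.
//
// let s1 = String::from("hello");
// let s2 = s1;            // the heap allocation moves to s2; s1 is invalidated
// println!("{}", s1);     // error[E0382]: borrow of moved value: `s1`
//
// let a = 5;
// let b = a;              // i32 is Copy, so both a and b remain usable
// println!("{} {}", a, b);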
fn main() {
let mut s = String::from("hello");
s.push_str("sdadad");
print!("{}", s);
main_clone();
main_copy();
main2();
main3();
main4();
}
fn main_clone() {
let s = String::from("11-23");
let q = s.clone(); // the data on the heap really is copied
print!("q={}, s={}", q, s);
}
fn main_copy() {
let x = 5;
let y = x; // i32 has a known, fixed size and is Copy, so no clone call is needed
println!("x = {}, y = {}", x, y);
}
fn main2() {
let s = String::from("hello"); // s 进入作用域
takes_ownership(s); // s 的值移动到函数里 ...
// ... 所以到这里不再有效
let x = 5; // x 进入作用域
makes_copy(x); // x 应该移动函数里,
// 但 i32 是 Copy 的,
// 所以在后面可继续使用 x
} // 这里, x 先移出了作用域,然后是 s。但因为 s 的值已被移走,
// 没有特殊之处
fn takes_ownership(some_string: String) {
// some_string 进入作用域
println!("{}", some_string);
} // 这里,some_string 移出作用域并调用 `drop` 方法。
// 占用的内存被释放
fn makes_copy(some_integer: i32) {
// some_integer 进入作用域
println!("{}", some_integer);
} // 这里,some_integer 移出作用域。没有特殊之处
fn main3() {
let s1 = String::from("hello");
let (s2, len) = calculate_length(s1);
println!("The length of '{}' is {}.", s2, len);
}
fn calculate_length(s: String) -> (String, usize) {
let length = s.len(); // len() returns the length of the string
(s, length)
}
// References
fn main4() {
let s1 = String::from("hello world");
let len = calculate_length2(&s1);
println!("The length of '{}' is {}.", s1, len);
}
fn calculate_length2(s: &String) -> usize {
s.len()
}
| 18.826087 | 65 | 0.54388 |
f90131a59ecbdbd379a92d54a34ef03f6a7228af | 3,464 | //! Tests for the `cargo fetch` command.
use cargo_test_support::registry::Package;
use cargo_test_support::rustc_host;
use cargo_test_support::{basic_manifest, cross_compile, project};
#[cargo_test]
fn no_deps() {
let p = project()
.file("src/main.rs", "mod a; fn main() {}")
.file("src/a.rs", "")
.build();
p.cargo("fetch").with_stdout("").run();
}
#[cargo_test]
fn fetch_all_platform_dependencies_when_no_target_is_given() {
if cross_compile::disabled() {
return;
}
Package::new("d1", "1.2.3")
.file("Cargo.toml", &basic_manifest("d1", "1.2.3"))
.file("src/lib.rs", "")
.publish();
Package::new("d2", "0.1.2")
.file("Cargo.toml", &basic_manifest("d2", "0.1.2"))
.file("src/lib.rs", "")
.publish();
let target = cross_compile::alternate();
let host = rustc_host();
let p = project()
.file(
"Cargo.toml",
&format!(
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[target.{host}.dependencies]
d1 = "1.2.3"
[target.{target}.dependencies]
d2 = "0.1.2"
"#,
host = host,
target = target
),
)
.file("src/lib.rs", "")
.build();
p.cargo("fetch")
.with_stderr_contains("[DOWNLOADED] d1 v1.2.3 [..]")
.with_stderr_contains("[DOWNLOADED] d2 v0.1.2 [..]")
.run();
}
#[cargo_test]
fn fetch_platform_specific_dependencies() {
if cross_compile::disabled() {
return;
}
Package::new("d1", "1.2.3")
.file("Cargo.toml", &basic_manifest("d1", "1.2.3"))
.file("src/lib.rs", "")
.publish();
Package::new("d2", "0.1.2")
.file("Cargo.toml", &basic_manifest("d2", "0.1.2"))
.file("src/lib.rs", "")
.publish();
let target = cross_compile::alternate();
let host = rustc_host();
let p = project()
.file(
"Cargo.toml",
&format!(
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[target.{host}.dependencies]
d1 = "1.2.3"
[target.{target}.dependencies]
d2 = "0.1.2"
"#,
host = host,
target = target
),
)
.file("src/lib.rs", "")
.build();
p.cargo("fetch --target")
.arg(&host)
.with_stderr_contains("[DOWNLOADED] d1 v1.2.3 [..]")
.with_stderr_does_not_contain("[DOWNLOADED] d2 v0.1.2 [..]")
.run();
p.cargo("fetch --target")
.arg(&target)
.with_stderr_contains("[DOWNLOADED] d2 v0.1.2[..]")
.with_stderr_does_not_contain("[DOWNLOADED] d1 v1.2.3 [..]")
.run();
}
#[cargo_test]
fn fetch_warning() {
let p = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "1.0.0"
misspelled = "wut"
"#,
)
.file("src/lib.rs", "")
.build();
p.cargo("fetch")
.with_stderr("[WARNING] unused manifest key: package.misspelled")
.run();
}
| 25.470588 | 73 | 0.449769 |
9cc81ef6cb33fa0bcd2ca00957dcb9cf5c0a4ef6 | 14,540 | use super::*;
use crate::errors::GraderResult;
use crate::utils::tests::get_example_dir;
use dotenv::dotenv;
use std::fs;
#[test]
fn should_complete_initialize_submission() {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.cpp")).unwrap();
let _submission = Submission::from("a_plus_b", "000000", "cpp", &[code], None);
}
#[test]
fn should_compile_cpp_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.cpp")).unwrap();
let mut submission = Submission::from("a_plus_b", "000001", "cpp", &vec![code], None)?;
submission.compile()?;
Ok(())
}
#[test]
fn should_compile_python_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.py")).unwrap();
let mut submission = Submission::from("a_plus_b", "000002", "python", &vec![code], None)?;
submission.compile()?;
Ok(())
}
#[test]
fn should_compile_rust_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.rs")).unwrap();
let mut submission = Submission::from("a_plus_b", "000003", "rust", &vec![code], None)?;
submission.compile()?;
Ok(())
}
#[test]
fn should_remove_tmp_dir_after_out_of_scope() -> GraderResult<()> {
dotenv().ok();
let tmp_path;
{
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.cpp")).unwrap();
let mut submission = Submission::from("a_plus_b", "000004", "cpp", &vec![code], None)?;
submission.compile()?;
tmp_path = submission.tmp_path.clone();
}
assert!(!tmp_path.exists());
Ok(())
}
#[test]
fn should_run_cpp_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.cpp")).unwrap();
let mut submission = Submission::from("a_plus_b", "000005", "cpp", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 100.0);
Ok(())
}
#[test]
fn should_run_cpp_tle_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_TLE.cpp")).unwrap();
let mut submission = Submission::from("a_plus_b", "000006", "cpp", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(
_result.group_result[0].run_result[0].status,
"Time Limit Exceeded"
);
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(
_result.group_result[1].run_result[0].status,
"Time Limit Exceeded"
);
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_cpp_mle_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_MLE.cpp")).unwrap();
let mut submission = Submission::from("a_plus_b", "000007", "cpp", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(
_result.group_result[0].run_result[0].status,
"Memory Limit Exceeded"
);
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(
_result.group_result[1].run_result[0].status,
"Memory Limit Exceeded"
);
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_cpp_re_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_RE.cpp")).unwrap();
let mut submission = Submission::from("a_plus_b", "000008", "cpp", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(
_result.group_result[0].run_result[0].status,
"Runtime Error"
);
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(
_result.group_result[1].run_result[0].status,
"Runtime Error"
);
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_cpp_sg_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_SG.cpp")).unwrap();
let mut submission = Submission::from("a_plus_b", "000009", "cpp", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(_result.group_result[0].run_result[0].status, "Signal Error");
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(_result.group_result[1].run_result[0].status, "Signal Error");
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_cpp_with_header_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_h.cpp")).unwrap();
let mut submission = Submission::from("a_plus_b_h", "000010", "cpp", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 100.0);
Ok(())
}
#[test]
fn should_run_python_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.py")).unwrap();
let mut submission = Submission::from("a_plus_b", "000011", "python", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 100.0);
Ok(())
}
#[test]
fn should_run_python_tle_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_TLE.py")).unwrap();
let mut submission = Submission::from("a_plus_b", "000012", "python", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(
_result.group_result[0].run_result[0].status,
"Time Limit Exceeded"
);
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(
_result.group_result[1].run_result[0].status,
"Time Limit Exceeded"
);
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_python_mle_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_MLE.py")).unwrap();
let mut submission = Submission::from("a_plus_b", "000013", "python", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(
_result.group_result[0].run_result[0].status,
"Memory Limit Exceeded"
);
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(
_result.group_result[1].run_result[0].status,
"Memory Limit Exceeded"
);
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_python_re_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_RE.py")).unwrap();
let mut submission = Submission::from("a_plus_b", "000014", "python", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(
_result.group_result[0].run_result[0].status,
"Runtime Error"
);
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(
_result.group_result[1].run_result[0].status,
"Runtime Error"
);
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_rust_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.rs")).unwrap();
let mut submission = Submission::from("a_plus_b", "000015", "rust", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 100.0);
Ok(())
}
#[test]
fn should_run_rust_tle_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_TLE.rs")).unwrap();
let mut submission = Submission::from("a_plus_b", "000016", "rust", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(
_result.group_result[0].run_result[0].status,
"Time Limit Exceeded"
);
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(
_result.group_result[1].run_result[0].status,
"Time Limit Exceeded"
);
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_rust_mle_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_MLE.rs")).unwrap();
let mut submission = Submission::from("a_plus_b", "000017", "rust", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(
_result.group_result[0].run_result[0].status,
"Memory Limit Exceeded"
);
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(
_result.group_result[1].run_result[0].status,
"Memory Limit Exceeded"
);
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_rust_re_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_RE.rs")).unwrap();
let mut submission = Submission::from("a_plus_b", "000018", "rust", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(
_result.group_result[0].run_result[0].status,
"Runtime Error"
);
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(
_result.group_result[1].run_result[0].status,
"Runtime Error"
);
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_run_rust_sg_skipped() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b_SG.rs")).unwrap();
let mut submission = Submission::from("a_plus_b", "000019", "rust", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 0.0);
assert_eq!(_result.group_result[0].score, 0.0);
assert_eq!(_result.group_result[0].run_result[0].status, "Signal Error");
assert_eq!(_result.group_result[0].run_result[1].status, "");
assert_eq!(_result.group_result[1].score, 0.0);
assert_eq!(_result.group_result[1].run_result[0].status, "Signal Error");
assert_eq!(_result.group_result[1].run_result[1].status, "");
Ok(())
}
#[test]
fn should_compile_go_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.go")).unwrap();
let mut submission = Submission::from("a_plus_b", "000020", "go", &vec![code], None)?;
submission.compile()?;
Ok(())
}
#[test]
fn should_run_go_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.go")).unwrap();
let mut submission = Submission::from("a_plus_b", "000021", "go", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 100.0);
Ok(())
}
#[test]
fn should_compile_java_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.java")).unwrap();
let mut submission = Submission::from("a_plus_b", "000022", "java", &vec![code], None)?;
submission.compile()?;
Ok(())
}
#[test]
fn should_run_java_successfully() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.java")).unwrap();
let mut submission = Submission::from("a_plus_b", "000023", "java", &vec![code], None)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 100.0);
Ok(())
}
#[test]
fn should_handle_messaging() -> GraderResult<()> {
dotenv().ok();
let code = fs::read_to_string(get_example_dir().join("etc").join("a_plus_b.cpp")).unwrap();
let mut v: Vec<SubmissionMessage> = Vec::new();
{
let mut submission = Submission::from(
"a_plus_b",
"000024",
"cpp",
&vec![code],
Some(Box::new(|msg| {
v.push(msg);
})),
)?;
submission.compile()?;
let _result = submission.run()?;
assert_eq!(_result.score, 100.0);
}
Ok(())
}
| 27.590133 | 99 | 0.63033 |
298dc171f09d63a4d3d776f9af8935545f30ed2a | 2,539 | // Copyright 2021 The Hypatia Authors
// All rights reserved
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.
use core::marker::PhantomData;
pub trait PortSized {}
impl PortSized for u8 {}
impl PortSized for u16 {}
impl PortSized for u32 {}
pub trait PortMarker<T: PortSized> {
fn addr(&mut self) -> u16;
}
pub trait Out<T: PortSized>: PortMarker<T> {}
pub trait In<T: PortSized>: PortMarker<T> {}
pub trait Receiver<T: PortSized> {
fn recv(&mut self) -> T;
}
impl<T: PortMarker<u8> + In<u8>> Receiver<u8> for T {
fn recv(&mut self) -> u8 {
unsafe { x86::io::inb(self.addr()) }
}
}
impl<T: PortMarker<u16> + In<u16>> Receiver<u16> for T {
fn recv(&mut self) -> u16 {
unsafe { x86::io::inw(self.addr()) }
}
}
impl<T: PortMarker<u32> + In<u32>> Receiver<u32> for T {
fn recv(&mut self) -> u32 {
unsafe { x86::io::inl(self.addr()) }
}
}
pub trait Sender<T: PortSized> {
fn send(&mut self, datum: T);
}
impl<T: PortMarker<u8> + Out<u8>> Sender<u8> for T {
fn send(&mut self, datum: u8) {
unsafe {
x86::io::outb(self.addr(), datum);
}
}
}
impl<T: PortMarker<u16> + Out<u16>> Sender<u16> for T {
fn send(&mut self, datum: u16) {
unsafe {
x86::io::outw(self.addr(), datum);
}
}
}
impl<T: PortMarker<u32> + Out<u32>> Sender<u32> for T {
fn send(&mut self, datum: u32) {
unsafe {
x86::io::outl(self.addr(), datum);
}
}
}
pub struct OutPort<T>(u16, PhantomData<T>);
impl<T> OutPort<T> {
pub const fn new(addr: u16) -> OutPort<T> {
OutPort(addr, PhantomData)
}
}
impl<T: PortSized> PortMarker<T> for OutPort<T> {
fn addr(&mut self) -> u16 {
self.0
}
}
impl<T: PortSized> Out<T> for OutPort<T> {}
pub struct InPort<T>(u16, PhantomData<T>);
impl<T> InPort<T> {
pub const fn new(addr: u16) -> InPort<T> {
InPort(addr, PhantomData)
}
}
impl<T: PortSized> PortMarker<T> for InPort<T> {
fn addr(&mut self) -> u16 {
self.0
}
}
impl<T: PortSized> In<T> for InPort<T> {}
pub struct Port<T>(u16, PhantomData<T>);
impl<T> Port<T> {
pub const fn new(port: u16) -> Port<T> {
Port(port, PhantomData)
}
}
impl<T: PortSized> PortMarker<T> for Port<T> {
fn addr(&mut self) -> u16 {
self.0
}
}
impl<T: PortSized> Out<T> for Port<T> {}
impl<T: PortSized> In<T> for Port<T> {}
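// Illustrative usage (not part of the original file). The port numbers below are
// conventional x86 examples chosen for the sketch, not something this module defines:
// 0xE9 is the QEMU/Bochs debug console, 0x64 the PS/2 controller status register.
//
// let mut debug_out: OutPort<u8> = OutPort::new(0xE9);
// debug_out.send(b'!');
//
// let mut ps2_status: InPort<u8> = InPort::new(0x64);
// let _status = ps2_status.recv();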
| 21.158333 | 56 | 0.577393 |
1191cfa2600b8a231f1d5d3c5dc4e148e492fcf5 | 727 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unboxed_closures)]
// Test that an unboxed closure that mutates a free variable will
// cause borrow conflicts.
fn main() {
let mut x = 0_usize;
let f = || x += 1;
let _y = x; //~ ERROR cannot use `x` because it was mutably borrowed
}
| 34.619048 | 72 | 0.711142 |
c1250035a16e571e48d39392831e50a3956b627c | 243,722 | #![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! Reset and clock control
use crate::{RORegister, RWRegister};
#[cfg(not(feature = "nosync"))]
use core::marker::PhantomData;
/// clock control register
pub mod CR {
/// Internal high-speed clock enable
pub mod HSION {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Clock Off
pub const Off: u32 = 0b0;
/// 0b1: Clock On
pub const On: u32 = 0b1;
}
}
/// High Speed Internal clock enable in Stop mode
pub mod HSIKERON {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HSION::RW;
}
/// HSI clock ready flag
pub mod HSIRDY {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values
pub mod R {
/// 0b0: Clock not ready
pub const NotReady: u32 = 0b0;
/// 0b1: Clock ready
pub const Ready: u32 = 0b1;
}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// HSI clock divider
pub mod HSIDIV {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (2 bits: 0b11 << 3)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: No division
pub const Div1: u32 = 0b00;
/// 0b01: Division by 2
pub const Div2: u32 = 0b01;
/// 0b10: Division by 4
pub const Div4: u32 = 0b10;
/// 0b11: Division by 8
pub const Div8: u32 = 0b11;
}
}
/// HSI divider flag
pub mod HSIDIVF {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values
pub mod R {
/// 0b0: New HSIDIV ratio has not yet propagated to hsi_ck
pub const NotPropagated: u32 = 0b0;
/// 0b1: HSIDIV ratio has propagated to hsi_ck
pub const Propagated: u32 = 0b1;
}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// CSI clock enable
pub mod CSION {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HSION::RW;
}
/// CSI clock ready flag
pub mod CSIRDY {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
pub use super::HSIRDY::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// CSI clock enable in Stop mode
pub mod CSIKERON {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HSION::RW;
}
/// RC48 clock enable
pub mod HSI48ON {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HSION::RW;
}
/// RC48 clock ready flag
pub mod HSI48RDY {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
pub use super::HSIRDY::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// D1 domain clocks ready flag
pub mod D1CKRDY {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
pub use super::HSIRDY::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// D2 domain clocks ready flag
pub mod D2CKRDY {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
pub use super::HSIRDY::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// HSE clock enable
pub mod HSEON {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HSION::RW;
}
/// HSE clock ready flag
pub mod HSERDY {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
pub use super::HSIRDY::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// HSE clock bypass
pub mod HSEBYP {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: HSE crystal oscillator not bypassed
pub const NotBypassed: u32 = 0b0;
/// 0b1: HSE crystal oscillator bypassed with external clock
pub const Bypassed: u32 = 0b1;
}
}
/// HSE Clock Security System enable
pub mod HSECSSON {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HSION::RW;
}
/// PLL1 enable
pub mod PLL1ON {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HSION::RW;
}
/// PLL1 clock ready flag
pub mod PLL1RDY {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
pub use super::HSIRDY::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL2 enable
pub mod PLL2ON {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HSION::RW;
}
/// PLL2 clock ready flag
pub mod PLL2RDY {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
pub use super::HSIRDY::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL3 enable
pub mod PLL3ON {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HSION::RW;
}
/// PLL3 clock ready flag
pub mod PLL3RDY {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
pub use super::HSIRDY::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
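// Illustrative sketch (not part of the generated file): the offset/mask/value constants
// above are plain integers meant to be combined by the caller. For example, a
// read-modify-write that turns the HSI oscillator on, assuming `cr` holds a value
// previously read from this CR register and is written back afterwards:
//
// let mut cr: u32 = 0x0000_0025;
// cr = (cr & !CR::HSION::mask) | ((CR::HSION::RW::On << CR::HSION::offset) & CR::HSION::mask);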
/// RCC Clock Recovery RC Register
pub mod CRRCR {
/// Internal RC 48 MHz clock calibration
pub mod HSI48CAL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (10 bits: 0x3ff << 0)
pub const mask: u32 = 0x3ff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// RCC Clock Configuration Register
pub mod CFGR {
/// System clock switch
pub mod SW {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (3 bits: 0b111 << 0)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: HSI selected as system clock
pub const HSI: u32 = 0b000;
/// 0b001: CSI selected as system clock
pub const CSI: u32 = 0b001;
/// 0b010: HSE selected as system clock
pub const HSE: u32 = 0b010;
/// 0b011: PLL1 selected as system clock
pub const PLL1: u32 = 0b011;
}
}
/// System clock switch status
pub mod SWS {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (3 bits: 0b111 << 3)
pub const mask: u32 = 0b111 << offset;
/// Read-only values
pub mod R {
/// 0b000: HSI oscillator used as system clock
pub const HSI: u32 = 0b000;
/// 0b001: CSI oscillator used as system clock
pub const CSI: u32 = 0b001;
/// 0b010: HSE oscillator used as system clock
pub const HSE: u32 = 0b010;
/// 0b011: PLL1 used as system clock
pub const PLL1: u32 = 0b011;
}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// System clock selection after a wake up from system Stop
pub mod STOPWUCK {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: HSI selected as wake up clock from system Stop
pub const HSI: u32 = 0b0;
/// 0b1: CSI selected as wake up clock from system Stop
pub const CSI: u32 = 0b1;
}
}
/// Kernel clock selection after a wake up from system Stop
pub mod STOPKERWUCK {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::STOPWUCK::RW;
}
/// HSE division factor for RTC clock
pub mod RTCPRE {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (6 bits: 0x3f << 8)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// High Resolution Timer clock prescaler selection
pub mod HRTIMSEL {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The HRTIM prescaler clock source is the same as other timers (rcc_timy_ker_ck)
pub const TIMY_KER: u32 = 0b0;
/// 0b1: The HRTIM prescaler clock source is the CPU clock (c_ck)
pub const C_CK: u32 = 0b1;
}
}
/// Timers clocks prescaler selection
pub mod TIMPRE {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Timer kernel clock equal to 2x pclk by default
pub const DefaultX2: u32 = 0b0;
/// 0b1: Timer kernel clock equal to 4x pclk by default
pub const DefaultX4: u32 = 0b1;
}
}
/// MCO1 prescaler
pub mod MCO1PRE {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (4 bits: 0b1111 << 18)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Micro-controller clock output 1
pub mod MCO1 {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (3 bits: 0b111 << 22)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: HSI selected for micro-controller clock output
pub const HSI: u32 = 0b000;
/// 0b001: LSE selected for micro-controller clock output
pub const LSE: u32 = 0b001;
/// 0b010: HSE selected for micro-controller clock output
pub const HSE: u32 = 0b010;
/// 0b011: pll1_q selected for micro-controller clock output
pub const PLL1_Q: u32 = 0b011;
/// 0b100: HSI48 selected for micro-controller clock output
pub const HSI48: u32 = 0b100;
}
}
/// MCO2 prescaler
pub mod MCO2PRE {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (4 bits: 0b1111 << 25)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Micro-controller clock output 2
pub mod MCO2 {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (3 bits: 0b111 << 29)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: System clock selected for micro-controller clock output
pub const SYSCLK: u32 = 0b000;
/// 0b001: pll2_p selected for micro-controller clock output
pub const PLL2_P: u32 = 0b001;
/// 0b010: HSE selected for micro-controller clock output
pub const HSE: u32 = 0b010;
/// 0b011: pll1_p selected for micro-controller clock output
pub const PLL1_P: u32 = 0b011;
/// 0b100: CSI selected for micro-controller clock output
pub const CSI: u32 = 0b100;
/// 0b101: LSI selected for micro-controller clock output
pub const LSI: u32 = 0b101;
}
}
}
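// Illustrative sketch (not part of the generated file): decoding the SWS field to see
// which oscillator the system clock is currently running from, assuming `cfgr` holds a
// value previously read from this CFGR register.
//
// let cfgr: u32 = 0x0000_001B;
// let sws = (cfgr & CFGR::SWS::mask) >> CFGR::SWS::offset;
// let running_on_pll1 = sws == CFGR::SWS::R::PLL1;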
/// RCC Domain 1 Clock Configuration Register
pub mod D1CFGR {
/// D1 domain AHB prescaler
pub mod HPRE {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (4 bits: 0b1111 << 0)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0000: sys_ck not divided
pub const Div1: u32 = 0b0000;
/// 0b1000: sys_ck divided by 2
pub const Div2: u32 = 0b1000;
/// 0b1001: sys_ck divided by 4
pub const Div4: u32 = 0b1001;
/// 0b1010: sys_ck divided by 8
pub const Div8: u32 = 0b1010;
/// 0b1011: sys_ck divided by 16
pub const Div16: u32 = 0b1011;
/// 0b1100: sys_ck divided by 64
pub const Div64: u32 = 0b1100;
/// 0b1101: sys_ck divided by 128
pub const Div128: u32 = 0b1101;
/// 0b1110: sys_ck divided by 256
pub const Div256: u32 = 0b1110;
/// 0b1111: sys_ck divided by 512
pub const Div512: u32 = 0b1111;
}
}
/// D1 domain APB3 prescaler
pub mod D1PPRE {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (3 bits: 0b111 << 4)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: rcc_hclk not divided
pub const Div1: u32 = 0b000;
/// 0b100: rcc_hclk divided by 2
pub const Div2: u32 = 0b100;
/// 0b101: rcc_hclk divided by 4
pub const Div4: u32 = 0b101;
/// 0b110: rcc_hclk divided by 8
pub const Div8: u32 = 0b110;
/// 0b111: rcc_hclk divided by 16
pub const Div16: u32 = 0b111;
}
}
/// D1 domain Core prescaler
pub mod D1CPRE {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (4 bits: 0b1111 << 8)
pub const mask: u32 = 0b1111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::HPRE::RW;
}
}
/// RCC Domain 2 Clock Configuration Register
pub mod D2CFGR {
/// D2 domain APB1 prescaler
pub mod D2PPRE1 {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (3 bits: 0b111 << 4)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: rcc_hclk not divided
pub const Div1: u32 = 0b000;
/// 0b100: rcc_hclk divided by 2
pub const Div2: u32 = 0b100;
/// 0b101: rcc_hclk divided by 4
pub const Div4: u32 = 0b101;
/// 0b110: rcc_hclk divided by 8
pub const Div8: u32 = 0b110;
/// 0b111: rcc_hclk divided by 16
pub const Div16: u32 = 0b111;
}
}
/// D2 domain APB2 prescaler
pub mod D2PPRE2 {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (3 bits: 0b111 << 8)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::D2PPRE1::RW;
}
}
/// RCC Domain 3 Clock Configuration Register
pub mod D3CFGR {
/// D3 domain APB4 prescaler
pub mod D3PPRE {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (3 bits: 0b111 << 4)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: rcc_hclk not divided
pub const Div1: u32 = 0b000;
/// 0b100: rcc_hclk divided by 2
pub const Div2: u32 = 0b100;
/// 0b101: rcc_hclk divided by 4
pub const Div4: u32 = 0b101;
/// 0b110: rcc_hclk divided by 8
pub const Div8: u32 = 0b110;
/// 0b111: rcc_hclk divided by 16
pub const Div16: u32 = 0b111;
}
}
}
/// RCC PLLs Clock Source Selection Register
pub mod PLLCKSELR {
/// DIVMx and PLLs clock source selection
pub mod PLLSRC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (2 bits: 0b11 << 0)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: HSI selected as PLL clock
pub const HSI: u32 = 0b00;
/// 0b01: CSI selected as PLL clock
pub const CSI: u32 = 0b01;
/// 0b10: HSE selected as PLL clock
pub const HSE: u32 = 0b10;
/// 0b11: No clock sent to DIVMx dividers and PLLs
pub const None: u32 = 0b11;
}
}
/// Prescaler for PLL1
pub mod DIVM1 {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (6 bits: 0x3f << 4)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Prescaler for PLL2
pub mod DIVM2 {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (6 bits: 0x3f << 12)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Prescaler for PLL3
pub mod DIVM3 {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (6 bits: 0x3f << 20)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// RCC PLLs Configuration Register
pub mod PLLCFGR {
/// PLL1 fractional latch enable
pub mod PLL1FRACEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Reset latch to transfer FRACN to the Sigma-Delta modulator
pub const Reset: u32 = 0b0;
/// 0b1: Set latch to transfer FRACN to the Sigma-Delta modulator
pub const Set: u32 = 0b1;
}
}
/// PLL1 VCO selection
pub mod PLL1VCOSEL {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: VCO frequency range 192 to 836 MHz
pub const WideVCO: u32 = 0b0;
/// 0b1: VCO frequency range 150 to 420 MHz
pub const MediumVCO: u32 = 0b1;
}
}
/// PLL1 input frequency range
pub mod PLL1RGE {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (2 bits: 0b11 << 2)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: Frequency is between 1 and 2 MHz
pub const Range1: u32 = 0b00;
/// 0b01: Frequency is between 2 and 4 MHz
pub const Range2: u32 = 0b01;
/// 0b10: Frequency is between 4 and 8 MHz
pub const Range4: u32 = 0b10;
/// 0b11: Frequency is between 8 and 16 MHz
pub const Range8: u32 = 0b11;
}
}
/// PLL2 fractional latch enable
pub mod PLL2FRACEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::PLL1FRACEN::RW;
}
/// PLL2 VCO selection
pub mod PLL2VCOSEL {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::PLL1VCOSEL::RW;
}
/// PLL2 input frequency range
pub mod PLL2RGE {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (2 bits: 0b11 << 6)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::PLL1RGE::RW;
}
/// PLL3 fractional latch enable
pub mod PLL3FRACEN {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::PLL1FRACEN::RW;
}
/// PLL3 VCO selection
pub mod PLL3VCOSEL {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::PLL1VCOSEL::RW;
}
/// PLL3 input frequency range
pub mod PLL3RGE {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (2 bits: 0b11 << 10)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::PLL1RGE::RW;
}
/// PLL1 DIVP divider output enable
pub mod DIVP1EN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Clock output is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: Clock output is enabled
pub const Enabled: u32 = 0b1;
}
}
/// PLL1 DIVQ divider output enable
pub mod DIVQ1EN {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DIVP1EN::RW;
}
/// PLL1 DIVR divider output enable
pub mod DIVR1EN {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DIVP1EN::RW;
}
/// PLL2 DIVP divider output enable
pub mod DIVP2EN {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DIVP1EN::RW;
}
/// PLL2 DIVQ divider output enable
pub mod DIVQ2EN {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DIVP1EN::RW;
}
/// PLL2 DIVR divider output enable
pub mod DIVR2EN {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DIVP1EN::RW;
}
/// PLL3 DIVP divider output enable
pub mod DIVP3EN {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DIVP1EN::RW;
}
/// PLL3 DIVQ divider output enable
pub mod DIVQ3EN {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DIVP1EN::RW;
}
/// PLL3 DIVR divider output enable
pub mod DIVR3EN {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DIVP1EN::RW;
}
}
/// RCC PLL1 Dividers Configuration Register
pub mod PLL1DIVR {
/// Multiplication factor for PLL1 VCO
pub mod DIVN1 {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (9 bits: 0x1ff << 0)
pub const mask: u32 = 0x1ff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL1 DIVP division factor
pub mod DIVP1 {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (7 bits: 0x7f << 9)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0000000: pll_p_ck = vco_ck
pub const Div1: u32 = 0b0000000;
/// 0b0000001: pll_p_ck = vco_ck / 2
pub const Div2: u32 = 0b0000001;
/// 0b0000011: pll_p_ck = vco_ck / 4
pub const Div4: u32 = 0b0000011;
/// 0b0000101: pll_p_ck = vco_ck / 6
pub const Div6: u32 = 0b0000101;
/// 0b0000111: pll_p_ck = vco_ck / 8
pub const Div8: u32 = 0b0000111;
/// 0b0001001: pll_p_ck = vco_ck / 10
pub const Div10: u32 = 0b0001001;
/// 0b0001011: pll_p_ck = vco_ck / 12
pub const Div12: u32 = 0b0001011;
/// 0b0001101: pll_p_ck = vco_ck / 14
pub const Div14: u32 = 0b0001101;
/// 0b0001111: pll_p_ck = vco_ck / 16
pub const Div16: u32 = 0b0001111;
/// 0b0010001: pll_p_ck = vco_ck / 18
pub const Div18: u32 = 0b0010001;
/// 0b0010011: pll_p_ck = vco_ck / 20
pub const Div20: u32 = 0b0010011;
/// 0b0010101: pll_p_ck = vco_ck / 22
pub const Div22: u32 = 0b0010101;
/// 0b0010111: pll_p_ck = vco_ck / 24
pub const Div24: u32 = 0b0010111;
/// 0b0011001: pll_p_ck = vco_ck / 26
pub const Div26: u32 = 0b0011001;
/// 0b0011011: pll_p_ck = vco_ck / 28
pub const Div28: u32 = 0b0011011;
/// 0b0011101: pll_p_ck = vco_ck / 30
pub const Div30: u32 = 0b0011101;
/// 0b0011111: pll_p_ck = vco_ck / 32
pub const Div32: u32 = 0b0011111;
/// 0b0100001: pll_p_ck = vco_ck / 34
pub const Div34: u32 = 0b0100001;
/// 0b0100011: pll_p_ck = vco_ck / 36
pub const Div36: u32 = 0b0100011;
/// 0b0100101: pll_p_ck = vco_ck / 38
pub const Div38: u32 = 0b0100101;
/// 0b0100111: pll_p_ck = vco_ck / 40
pub const Div40: u32 = 0b0100111;
/// 0b0101001: pll_p_ck = vco_ck / 42
pub const Div42: u32 = 0b0101001;
/// 0b0101011: pll_p_ck = vco_ck / 44
pub const Div44: u32 = 0b0101011;
/// 0b0101101: pll_p_ck = vco_ck / 46
pub const Div46: u32 = 0b0101101;
/// 0b0101111: pll_p_ck = vco_ck / 48
pub const Div48: u32 = 0b0101111;
/// 0b0110001: pll_p_ck = vco_ck / 50
pub const Div50: u32 = 0b0110001;
/// 0b0110011: pll_p_ck = vco_ck / 52
pub const Div52: u32 = 0b0110011;
/// 0b0110101: pll_p_ck = vco_ck / 54
pub const Div54: u32 = 0b0110101;
/// 0b0110111: pll_p_ck = vco_ck / 56
pub const Div56: u32 = 0b0110111;
/// 0b0111001: pll_p_ck = vco_ck / 58
pub const Div58: u32 = 0b0111001;
/// 0b0111011: pll_p_ck = vco_ck / 60
pub const Div60: u32 = 0b0111011;
/// 0b0111101: pll_p_ck = vco_ck / 62
pub const Div62: u32 = 0b0111101;
/// 0b0111111: pll_p_ck = vco_ck / 64
pub const Div64: u32 = 0b0111111;
/// 0b1000001: pll_p_ck = vco_ck / 66
pub const Div66: u32 = 0b1000001;
/// 0b1000011: pll_p_ck = vco_ck / 68
pub const Div68: u32 = 0b1000011;
/// 0b1000101: pll_p_ck = vco_ck / 70
pub const Div70: u32 = 0b1000101;
/// 0b1000111: pll_p_ck = vco_ck / 72
pub const Div72: u32 = 0b1000111;
/// 0b1001001: pll_p_ck = vco_ck / 74
pub const Div74: u32 = 0b1001001;
/// 0b1001011: pll_p_ck = vco_ck / 76
pub const Div76: u32 = 0b1001011;
/// 0b1001101: pll_p_ck = vco_ck / 78
pub const Div78: u32 = 0b1001101;
/// 0b1001111: pll_p_ck = vco_ck / 80
pub const Div80: u32 = 0b1001111;
/// 0b1010001: pll_p_ck = vco_ck / 82
pub const Div82: u32 = 0b1010001;
/// 0b1010011: pll_p_ck = vco_ck / 84
pub const Div84: u32 = 0b1010011;
/// 0b1010101: pll_p_ck = vco_ck / 86
pub const Div86: u32 = 0b1010101;
/// 0b1010111: pll_p_ck = vco_ck / 88
pub const Div88: u32 = 0b1010111;
/// 0b1011001: pll_p_ck = vco_ck / 90
pub const Div90: u32 = 0b1011001;
/// 0b1011011: pll_p_ck = vco_ck / 92
pub const Div92: u32 = 0b1011011;
/// 0b1011101: pll_p_ck = vco_ck / 94
pub const Div94: u32 = 0b1011101;
/// 0b1011111: pll_p_ck = vco_ck / 96
pub const Div96: u32 = 0b1011111;
/// 0b1100001: pll_p_ck = vco_ck / 98
pub const Div98: u32 = 0b1100001;
/// 0b1100011: pll_p_ck = vco_ck / 100
pub const Div100: u32 = 0b1100011;
/// 0b1100101: pll_p_ck = vco_ck / 102
pub const Div102: u32 = 0b1100101;
/// 0b1100111: pll_p_ck = vco_ck / 104
pub const Div104: u32 = 0b1100111;
/// 0b1101001: pll_p_ck = vco_ck / 106
pub const Div106: u32 = 0b1101001;
/// 0b1101011: pll_p_ck = vco_ck / 108
pub const Div108: u32 = 0b1101011;
/// 0b1101101: pll_p_ck = vco_ck / 110
pub const Div110: u32 = 0b1101101;
/// 0b1101111: pll_p_ck = vco_ck / 112
pub const Div112: u32 = 0b1101111;
/// 0b1110001: pll_p_ck = vco_ck / 114
pub const Div114: u32 = 0b1110001;
/// 0b1110011: pll_p_ck = vco_ck / 116
pub const Div116: u32 = 0b1110011;
/// 0b1110101: pll_p_ck = vco_ck / 118
pub const Div118: u32 = 0b1110101;
/// 0b1110111: pll_p_ck = vco_ck / 120
pub const Div120: u32 = 0b1110111;
/// 0b1111001: pll_p_ck = vco_ck / 122
pub const Div122: u32 = 0b1111001;
/// 0b1111011: pll_p_ck = vco_ck / 124
pub const Div124: u32 = 0b1111011;
/// 0b1111101: pll_p_ck = vco_ck / 126
pub const Div126: u32 = 0b1111101;
/// 0b1111111: pll_p_ck = vco_ck / 128
pub const Div128: u32 = 0b1111111;
}
}
/// PLL1 DIVQ division factor
pub mod DIVQ1 {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (7 bits: 0x7f << 16)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL1 DIVR division factor
pub mod DIVR1 {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (7 bits: 0x7f << 24)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// RCC PLL1 Fractional Divider Register
pub mod PLL1FRACR {
/// Fractional part of the multiplication factor for PLL1 VCO
pub mod FRACN1 {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (13 bits: 0x1fff << 3)
pub const mask: u32 = 0x1fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// RCC PLL2 Dividers Configuration Register
pub mod PLL2DIVR {
/// Multiplication factor for PLL2 VCO
pub mod DIVN2 {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (9 bits: 0x1ff << 0)
pub const mask: u32 = 0x1ff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL2 DIVP division factor
pub mod DIVP2 {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (7 bits: 0x7f << 9)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL2 DIVQ division factor
pub mod DIVQ2 {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (7 bits: 0x7f << 16)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL2 DIVR division factor
pub mod DIVR2 {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (7 bits: 0x7f << 24)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// RCC PLL2 Fractional Divider Register
pub mod PLL2FRACR {
/// Fractional part of the multiplication factor for PLL2 VCO
pub mod FRACN2 {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (13 bits: 0x1fff << 3)
pub const mask: u32 = 0x1fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// RCC PLL3 Dividers Configuration Register
pub mod PLL3DIVR {
/// Multiplication factor for PLL3 VCO
pub mod DIVN3 {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (9 bits: 0x1ff << 0)
pub const mask: u32 = 0x1ff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL3 DIVP division factor
pub mod DIVP3 {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (7 bits: 0x7f << 9)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL3 DIVQ division factor
pub mod DIVQ3 {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (7 bits: 0x7f << 16)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL3 DIVR division factor
pub mod DIVR3 {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (7 bits: 0x7f << 24)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// RCC PLL3 Fractional Divider Register
pub mod PLL3FRACR {
/// Fractional part of the multiplication factor for PLL3 VCO
pub mod FRACN3 {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (13 bits: 0x1fff << 3)
pub const mask: u32 = 0x1fff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// RCC Domain 1 Kernel Clock Configuration Register
pub mod D1CCIPR {
/// FMC kernel clock source selection
pub mod FMCSEL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (2 bits: 0b11 << 0)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: rcc_hclk3 selected as peripheral clock
pub const RCC_HCLK3: u32 = 0b00;
/// 0b01: pll1_q selected as peripheral clock
pub const PLL1_Q: u32 = 0b01;
/// 0b10: pll2_r selected as peripheral clock
pub const PLL2_R: u32 = 0b10;
/// 0b11: PER selected as peripheral clock
pub const PER: u32 = 0b11;
}
}
/// QUADSPI kernel clock source selection
pub mod QSPISEL {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (2 bits: 0b11 << 4)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::FMCSEL::RW;
}
/// SDMMC kernel clock source selection
pub mod SDMMCSEL {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: pll1_q selected as peripheral clock
pub const PLL1_Q: u32 = 0b0;
/// 0b1: pll2_r selected as peripheral clock
pub const PLL2_R: u32 = 0b1;
}
}
/// per_ck clock source selection
pub mod CKPERSEL {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (2 bits: 0b11 << 28)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: HSI selected as peripheral clock
pub const HSI: u32 = 0b00;
/// 0b01: CSI selected as peripheral clock
pub const CSI: u32 = 0b01;
/// 0b10: HSE selected as peripheral clock
pub const HSE: u32 = 0b10;
}
}
/// DSI kernel clock source selection
pub mod DSISEL {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
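// Sketch only: a typical read-modify-write of the FMCSEL field using the
// constants defined above. The `current` register value and the volatile
// read/write of D1CCIPR itself are assumed to be handled elsewhere.
#[allow(dead_code)]
fn example_d1ccipr_select_fmc_pll1_q(current: u32) -> u32 {
    // Clear the FMCSEL field, then insert the pll1_q selection shifted into place.
    (current & !D1CCIPR::FMCSEL::mask)
        | (D1CCIPR::FMCSEL::RW::PLL1_Q << D1CCIPR::FMCSEL::offset)
}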
/// RCC Domain 2 Kernel Clock Configuration Register
pub mod D2CCIP1R {
/// SAI1 and DFSDM1 kernel Aclk clock source selection
pub mod SAI1SEL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (3 bits: 0b111 << 0)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: pll1_q selected as peripheral clock
pub const PLL1_Q: u32 = 0b000;
/// 0b001: pll2_p selected as peripheral clock
pub const PLL2_P: u32 = 0b001;
/// 0b010: pll3_p selected as peripheral clock
pub const PLL3_P: u32 = 0b010;
/// 0b011: I2S_CKIN selected as peripheral clock
pub const I2S_CKIN: u32 = 0b011;
/// 0b100: PER selected as peripheral clock
pub const PER: u32 = 0b100;
}
}
/// SAI2 and SAI3 kernel clock source selection
pub mod SAI23SEL {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (3 bits: 0b111 << 6)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SAI1SEL::RW;
}
/// SPI/I2S 1, 2 and 3 kernel clock source selection
pub mod SPI123SEL {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (3 bits: 0b111 << 12)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SAI1SEL::RW;
}
/// SPI4 and 5 kernel clock source selection
pub mod SPI45SEL {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (3 bits: 0b111 << 16)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: APB clock selected as peripheral clock
pub const APB: u32 = 0b000;
/// 0b001: pll2_q selected as peripheral clock
pub const PLL2_Q: u32 = 0b001;
/// 0b010: pll3_q selected as peripheral clock
pub const PLL3_Q: u32 = 0b010;
/// 0b011: hsi_ker selected as peripheral clock
pub const HSI_KER: u32 = 0b011;
/// 0b100: csi_ker selected as peripheral clock
pub const CSI_KER: u32 = 0b100;
/// 0b101: HSE selected as peripheral clock
pub const HSE: u32 = 0b101;
}
}
/// SPDIFRX kernel clock source selection
pub mod SPDIFSEL {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (2 bits: 0b11 << 20)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: pll1_q selected as peripheral clock
pub const PLL1_Q: u32 = 0b00;
/// 0b01: pll2_r selected as peripheral clock
pub const PLL2_R: u32 = 0b01;
/// 0b10: pll3_r selected as peripheral clock
pub const PLL3_R: u32 = 0b10;
/// 0b11: hsi_ker selected as peripheral clock
pub const HSI_KER: u32 = 0b11;
}
}
/// DFSDM1 kernel Clk clock source selection
pub mod DFSDM1SEL {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: rcc_pclk2 selected as peripheral clock
pub const RCC_PCLK2: u32 = 0b0;
/// 0b1: System clock selected as peripheral clock
pub const SYS: u32 = 0b1;
}
}
/// FDCAN kernel clock source selection
pub mod FDCANSEL {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (2 bits: 0b11 << 28)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: HSE selected as peripheral clock
pub const HSE: u32 = 0b00;
/// 0b01: pll1_q selected as peripheral clock
pub const PLL1_Q: u32 = 0b01;
/// 0b10: pll2_q selected as peripheral clock
pub const PLL2_Q: u32 = 0b10;
}
}
/// SWPMI kernel clock source selection
pub mod SWPSEL {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: pclk selected as peripheral clock
pub const PCLK: u32 = 0b0;
/// 0b1: hsi_ker selected as peripheral clock
pub const HSI_KER: u32 = 0b1;
}
}
}
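// Sketch only: shows how a field value is read back out and compared against
// the shared RW constants (SPI123SEL re-exports SAI1SEL::RW above). The raw
// D2CCIP1R value is assumed to come from a volatile register read elsewhere.
#[allow(dead_code)]
fn example_d2ccip1r_spi123_uses_pll1_q(d2ccip1r: u32) -> bool {
    // Mask out the field and shift it down to its natural position.
    let sel = (d2ccip1r & D2CCIP1R::SPI123SEL::mask) >> D2CCIP1R::SPI123SEL::offset;
    sel == D2CCIP1R::SPI123SEL::RW::PLL1_Q
}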
/// RCC Domain 2 Kernel Clock Configuration Register
pub mod D2CCIP2R {
/// USART2/3 and UART4/5/7/8 (APB1) kernel clock source selection
pub mod USART234578SEL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (3 bits: 0b111 << 0)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: rcc_pclk1 selected as peripheral clock
pub const RCC_PCLK1: u32 = 0b000;
/// 0b001: pll2_q selected as peripheral clock
pub const PLL2_Q: u32 = 0b001;
/// 0b010: pll3_q selected as peripheral clock
pub const PLL3_Q: u32 = 0b010;
/// 0b011: hsi_ker selected as peripheral clock
pub const HSI_KER: u32 = 0b011;
/// 0b100: csi_ker selected as peripheral clock
pub const CSI_KER: u32 = 0b100;
/// 0b101: LSE selected as peripheral clock
pub const LSE: u32 = 0b101;
}
}
/// USART1 and 6 kernel clock source selection
pub mod USART16SEL {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (3 bits: 0b111 << 3)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: rcc_pclk2 selected as peripheral clock
pub const RCC_PCLK2: u32 = 0b000;
/// 0b001: pll2_q selected as peripheral clock
pub const PLL2_Q: u32 = 0b001;
/// 0b010: pll3_q selected as peripheral clock
pub const PLL3_Q: u32 = 0b010;
/// 0b011: hsi_ker selected as peripheral clock
pub const HSI_KER: u32 = 0b011;
/// 0b100: csi_ker selected as peripheral clock
pub const CSI_KER: u32 = 0b100;
/// 0b101: LSE selected as peripheral clock
pub const LSE: u32 = 0b101;
}
}
/// RNG kernel clock source selection
pub mod RNGSEL {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (2 bits: 0b11 << 8)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: HSI48 selected as peripheral clock
pub const HSI48: u32 = 0b00;
/// 0b01: pll1_q selected as peripheral clock
pub const PLL1_Q: u32 = 0b01;
/// 0b10: LSE selected as peripheral clock
pub const LSE: u32 = 0b10;
/// 0b11: LSI selected as peripheral clock
pub const LSI: u32 = 0b11;
}
}
/// I2C1,2,3 kernel clock source selection
pub mod I2C123SEL {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (2 bits: 0b11 << 12)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: rcc_pclk1 selected as peripheral clock
pub const RCC_PCLK1: u32 = 0b00;
/// 0b01: pll3_r selected as peripheral clock
pub const PLL3_R: u32 = 0b01;
/// 0b10: hsi_ker selected as peripheral clock
pub const HSI_KER: u32 = 0b10;
/// 0b11: csi_ker selected as peripheral clock
pub const CSI_KER: u32 = 0b11;
}
}
/// USBOTG 1 and 2 kernel clock source selection
pub mod USBSEL {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (2 bits: 0b11 << 20)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: Disable the kernel clock
pub const DISABLE: u32 = 0b00;
/// 0b01: pll1_q selected as peripheral clock
pub const PLL1_Q: u32 = 0b01;
/// 0b10: pll3_q selected as peripheral clock
pub const PLL3_Q: u32 = 0b10;
/// 0b11: HSI48 selected as peripheral clock
pub const HSI48: u32 = 0b11;
}
}
/// HDMI-CEC kernel clock source selection
pub mod CECSEL {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (2 bits: 0b11 << 22)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: LSE selected as peripheral clock
pub const LSE: u32 = 0b00;
/// 0b01: LSI selected as peripheral clock
pub const LSI: u32 = 0b01;
/// 0b10: csi_ker selected as peripheral clock
pub const CSI_KER: u32 = 0b10;
}
}
/// LPTIM1 kernel clock source selection
pub mod LPTIM1SEL {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (3 bits: 0b111 << 28)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: rcc_pclk1 selected as peripheral clock
pub const RCC_PCLK1: u32 = 0b000;
/// 0b001: pll2_p selected as peripheral clock
pub const PLL2_P: u32 = 0b001;
/// 0b010: pll3_r selected as peripheral clock
pub const PLL3_R: u32 = 0b010;
/// 0b011: LSE selected as peripheral clock
pub const LSE: u32 = 0b011;
/// 0b100: LSI selected as peripheral clock
pub const LSI: u32 = 0b100;
/// 0b101: PER selected as peripheral clock
pub const PER: u32 = 0b101;
}
}
}
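// Sketch only: decodes the RNGSEL field of D2CCIP2R by matching the extracted
// bits against the RW constants above; obtaining the register value from the
// hardware is assumed to happen elsewhere.
#[allow(dead_code)]
fn example_d2ccip2r_rng_clock_name(d2ccip2r: u32) -> &'static str {
    let sel = (d2ccip2r & D2CCIP2R::RNGSEL::mask) >> D2CCIP2R::RNGSEL::offset;
    match sel {
        D2CCIP2R::RNGSEL::RW::HSI48 => "HSI48",
        D2CCIP2R::RNGSEL::RW::PLL1_Q => "pll1_q",
        D2CCIP2R::RNGSEL::RW::LSE => "LSE",
        // The field is two bits wide, so any remaining encoding is LSI.
        _ => "LSI",
    }
}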
/// RCC Domain 3 Kernel Clock Configuration Register
pub mod D3CCIPR {
/// LPUART1 kernel clock source selection
pub mod LPUART1SEL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (3 bits: 0b111 << 0)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: rcc_pclk_d3 selected as peripheral clock
pub const RCC_PCLK_D3: u32 = 0b000;
/// 0b001: pll2_q selected as peripheral clock
pub const PLL2_Q: u32 = 0b001;
/// 0b010: pll3_q selected as peripheral clock
pub const PLL3_Q: u32 = 0b010;
/// 0b011: hsi_ker selected as peripheral clock
pub const HSI_KER: u32 = 0b011;
/// 0b100: csi_ker selected as peripheral clock
pub const CSI_KER: u32 = 0b100;
/// 0b101: LSE selected as peripheral clock
pub const LSE: u32 = 0b101;
}
}
/// I2C4 kernel clock source selection
pub mod I2C4SEL {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (2 bits: 0b11 << 8)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: rcc_pclk4 selected as peripheral clock
pub const RCC_PCLK4: u32 = 0b00;
/// 0b01: pll3_r selected as peripheral clock
pub const PLL3_R: u32 = 0b01;
/// 0b10: hsi_ker selected as peripheral clock
pub const HSI_KER: u32 = 0b10;
/// 0b11: csi_ker selected as peripheral clock
pub const CSI_KER: u32 = 0b11;
}
}
/// LPTIM2 kernel clock source selection
pub mod LPTIM2SEL {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (3 bits: 0b111 << 10)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: rcc_pclk4 selected as peripheral clock
pub const RCC_PCLK4: u32 = 0b000;
/// 0b001: pll2_p selected as peripheral clock
pub const PLL2_P: u32 = 0b001;
/// 0b010: pll3_r selected as peripheral clock
pub const PLL3_R: u32 = 0b010;
/// 0b011: LSE selected as peripheral clock
pub const LSE: u32 = 0b011;
/// 0b100: LSI selected as peripheral clock
pub const LSI: u32 = 0b100;
/// 0b101: PER selected as peripheral clock
pub const PER: u32 = 0b101;
}
}
/// LPTIM3,4,5 kernel clock source selection
pub mod LPTIM345SEL {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (3 bits: 0b111 << 13)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LPTIM2SEL::RW;
}
/// SAR ADC kernel clock source selection
pub mod ADCSEL {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (2 bits: 0b11 << 16)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: pll2_p selected as peripheral clock
pub const PLL2_P: u32 = 0b00;
/// 0b01: pll3_r selected as peripheral clock
pub const PLL3_R: u32 = 0b01;
/// 0b10: PER selected as peripheral clock
pub const PER: u32 = 0b10;
}
}
/// Sub-Block A of SAI4 kernel clock source selection
pub mod SAI4ASEL {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (3 bits: 0b111 << 21)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: pll1_q selected as peripheral clock
pub const PLL1_Q: u32 = 0b000;
/// 0b001: pll2_p selected as peripheral clock
pub const PLL2_P: u32 = 0b001;
/// 0b010: pll3_p selected as peripheral clock
pub const PLL3_P: u32 = 0b010;
/// 0b011: i2s_ckin selected as peripheral clock
pub const I2S_CKIN: u32 = 0b011;
/// 0b100: PER selected as peripheral clock
pub const PER: u32 = 0b100;
}
}
/// Sub-Block B of SAI4 kernel clock source selection
pub mod SAI4BSEL {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (3 bits: 0b111 << 24)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SAI4ASEL::RW;
}
/// SPI6 kernel clock source selection
pub mod SPI6SEL {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (3 bits: 0b111 << 28)
pub const mask: u32 = 0b111 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b000: rcc_pclk4 selected as peripheral clock
pub const RCC_PCLK4: u32 = 0b000;
/// 0b001: pll2_q selected as peripheral clock
pub const PLL2_Q: u32 = 0b001;
/// 0b010: pll3_q selected as peripheral clock
pub const PLL3_Q: u32 = 0b010;
/// 0b011: hsi_ker selected as peripheral clock
pub const HSI_KER: u32 = 0b011;
/// 0b100: csi_ker selected as peripheral clock
pub const CSI_KER: u32 = 0b100;
/// 0b101: HSE selected as peripheral clock
pub const HSE: u32 = 0b101;
}
}
}
/// RCC Clock Source Interrupt Enable Register
pub mod CIER {
/// LSI ready Interrupt Enable
pub mod LSIRDYIE {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Interrupt disabled
pub const Disabled: u32 = 0b0;
/// 0b1: Interrupt enabled
pub const Enabled: u32 = 0b1;
}
}
/// LSE ready Interrupt Enable
pub mod LSERDYIE {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYIE::RW;
}
/// HSI ready Interrupt Enable
pub mod HSIRDYIE {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYIE::RW;
}
/// HSE ready Interrupt Enable
pub mod HSERDYIE {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYIE::RW;
}
/// CSI ready Interrupt Enable
pub mod CSIRDYIE {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYIE::RW;
}
/// RC48 ready Interrupt Enable
pub mod HSI48RDYIE {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYIE::RW;
}
/// PLL1 ready Interrupt Enable
pub mod PLL1RDYIE {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYIE::RW;
}
/// PLL2 ready Interrupt Enable
pub mod PLL2RDYIE {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYIE::RW;
}
/// PLL3 ready Interrupt Enable
pub mod PLL3RDYIE {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYIE::RW;
}
/// LSE clock security system Interrupt Enable
pub mod LSECSSIE {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYIE::RW;
}
}
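// Sketch only: enables one ready interrupt by OR-ing its mask into a CIER
// value; the actual volatile read/write of CIER is assumed to happen elsewhere.
#[allow(dead_code)]
fn example_cier_enable_hse_ready_irq(cier: u32) -> u32 {
    // Setting the single HSERDYIE bit corresponds to RW::Enabled above.
    cier | CIER::HSERDYIE::mask
}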
/// RCC Clock Source Interrupt Flag Register
pub mod CIFR {
/// LSI ready Interrupt Flag
pub mod LSIRDYF {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// LSE ready Interrupt Flag
pub mod LSERDYF {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// HSI ready Interrupt Flag
pub mod HSIRDYF {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// HSE ready Interrupt Flag
pub mod HSERDYF {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// CSI ready Interrupt Flag
pub mod CSIRDY {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// RC48 ready Interrupt Flag
pub mod HSI48RDYF {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL1 ready Interrupt Flag
pub mod PLL1RDYF {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL2 ready Interrupt Flag
pub mod PLL2RDYF {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// PLL3 ready Interrupt Flag
pub mod PLL3RDYF {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// LSE clock security system Interrupt Flag
pub mod LSECSSF {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// HSE clock security system Interrupt Flag
pub mod HSECSSF {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
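// Sketch only: tests a ready flag in a CIFR snapshot using the field mask;
// obtaining the snapshot from the hardware register is assumed external.
#[allow(dead_code)]
fn example_cifr_pll1_ready(cifr: u32) -> bool {
    cifr & CIFR::PLL1RDYF::mask != 0
}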
/// RCC Clock Source Interrupt Clear Register
pub mod CICR {
/// LSI ready Interrupt Clear
pub mod LSIRDYC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Clear interrupt flag
pub const Clear: u32 = 0b1;
}
}
/// LSE ready Interrupt Clear
pub mod LSERDYC {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYC::RW;
}
/// HSI ready Interrupt Clear
pub mod HSIRDYC {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYC::RW;
}
/// HSE ready Interrupt Clear
pub mod HSERDYC {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYC::RW;
}
/// CSI ready Interrupt Clear
pub mod CSIRDYC {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// RC48 ready Interrupt Clear
pub mod HSI48RDYC {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYC::RW;
}
/// PLL1 ready Interrupt Clear
pub mod PLL1RDYC {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYC::RW;
}
/// PLL2 ready Interrupt Clear
pub mod PLL2RDYC {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYC::RW;
}
/// PLL3 ready Interrupt Clear
pub mod PLL3RDYC {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYC::RW;
}
/// LSE clock security system Interrupt Clear
pub mod LSECSSC {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYC::RW;
}
/// HSE clock security system Interrupt Clear
pub mod HSECSSC {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LSIRDYC::RW;
}
}
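// Sketch only: builds a CICR write value that clears the LSI and HSE ready
// flags by setting their Clear bits; how the value is written out to the
// register is assumed to be handled by the surrounding access code.
#[allow(dead_code)]
fn example_cicr_clear_lsi_and_hse_ready() -> u32 {
    (CICR::LSIRDYC::RW::Clear << CICR::LSIRDYC::offset)
        | (CICR::HSERDYC::RW::Clear << CICR::HSERDYC::offset)
}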
/// RCC Backup Domain Control Register
pub mod BDCR {
/// LSE oscillator enable
pub mod LSEON {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: LSE oscillator Off
pub const Off: u32 = 0b0;
/// 0b1: LSE oscillator On
pub const On: u32 = 0b1;
}
}
/// LSE oscillator ready
pub mod LSERDY {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values
pub mod R {
/// 0b0: LSE oscillator not ready
pub const NotReady: u32 = 0b0;
/// 0b1: LSE oscillator ready
pub const Ready: u32 = 0b1;
}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// LSE oscillator bypass
pub mod LSEBYP {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: LSE crystal oscillator not bypassed
pub const NotBypassed: u32 = 0b0;
/// 0b1: LSE crystal oscillator bypassed with external clock
pub const Bypassed: u32 = 0b1;
}
}
/// LSE oscillator driving capability
pub mod LSEDRV {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (2 bits: 0b11 << 3)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: Lowest LSE oscillator driving capability
pub const Lowest: u32 = 0b00;
/// 0b01: Medium low LSE oscillator driving capability
pub const MediumLow: u32 = 0b01;
/// 0b10: Medium high LSE oscillator driving capability
pub const MediumHigh: u32 = 0b10;
/// 0b11: Highest LSE oscillator driving capability
pub const Highest: u32 = 0b11;
}
}
/// LSE clock security system enable
pub mod LSECSSON {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Clock security system on 32 kHz oscillator off
pub const SecurityOff: u32 = 0b0;
/// 0b1: Clock security system on 32 kHz oscillator on
pub const SecurityOn: u32 = 0b1;
}
}
/// LSE clock security system failure detection
pub mod LSECSSD {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values
pub mod R {
/// 0b0: No failure detected on 32 kHz oscillator
pub const NoFailure: u32 = 0b0;
/// 0b1: Failure detected on 32 kHz oscillator
pub const Failure: u32 = 0b1;
}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// RTC clock source selection
pub mod RTCSEL {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (2 bits: 0b11 << 8)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b00: No clock
pub const NoClock: u32 = 0b00;
/// 0b01: LSE oscillator clock used as RTC clock
pub const LSE: u32 = 0b01;
/// 0b10: LSI oscillator clock used as RTC clock
pub const LSI: u32 = 0b10;
/// 0b11: HSE oscillator clock divided by a prescaler used as RTC clock
pub const HSE: u32 = 0b11;
}
}
/// RTC clock enable
pub mod RTCEN {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: RTC clock disabled
pub const Disabled: u32 = 0b0;
/// 0b1: RTC clock enabled
pub const Enabled: u32 = 0b1;
}
}
/// VSwitch domain software reset
pub mod BDRST {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Resets the entire VSW domain
pub const Reset: u32 = 0b1;
}
}
}
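// Sketch only: composes a BDCR read-modify-write value that selects LSE as the
// RTC clock and enables it, using the field constants above; unlocking the
// backup-domain write protection and writing the register are assumed to be
// done elsewhere.
#[allow(dead_code)]
fn example_bdcr_rtc_from_lse(bdcr: u32) -> u32 {
    let cleared = bdcr & !(BDCR::RTCSEL::mask | BDCR::RTCEN::mask);
    cleared
        | (BDCR::RTCSEL::RW::LSE << BDCR::RTCSEL::offset)
        | (BDCR::RTCEN::RW::Enabled << BDCR::RTCEN::offset)
}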
/// RCC Clock Control and Status Register
pub mod CSR {
/// LSI oscillator enable
pub mod LSION {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: LSI oscillator Off
pub const Off: u32 = 0b0;
/// 0b1: LSI oscillator On
pub const On: u32 = 0b1;
}
}
/// LSI oscillator ready
pub mod LSIRDY {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values
pub mod R {
/// 0b0: LSI oscillator not ready
pub const NotReady: u32 = 0b0;
/// 0b1: LSI oscillator ready
pub const Ready: u32 = 0b1;
}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// RCC AHB3 Reset Register
pub mod AHB3RSTR {
/// MDMA block reset
pub mod MDMARST {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Reset the selected module
pub const Reset: u32 = 0b1;
}
}
/// DMA2D block reset
pub mod DMA2DRST {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMARST::RW;
}
/// JPGDEC block reset
pub mod JPGDECRST {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMARST::RW;
}
/// FMC block reset
pub mod FMCRST {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMARST::RW;
}
/// QUADSPI and QUADSPI delay block reset
pub mod QSPIRST {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMARST::RW;
}
/// SDMMC1 and SDMMC1 delay block reset
pub mod SDMMC1RST {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMARST::RW;
}
/// CPU reset
pub mod CPURST {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMARST::RW;
}
}
/// RCC AHB1 Peripheral Reset Register
pub mod AHB1RSTR {
/// DMA1 block reset
pub mod DMA1RST {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Reset the selected module
pub const Reset: u32 = 0b1;
}
}
/// DMA2 block reset
pub mod DMA2RST {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1RST::RW;
}
/// ADC1&2 block reset
pub mod ADC12RST {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1RST::RW;
}
/// ETH1MAC block reset
pub mod ETH1MACRST {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1RST::RW;
}
/// USB1OTG block reset
pub mod USB1OTGRST {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1RST::RW;
}
/// USB2OTG block reset
pub mod USB2OTGRST {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1RST::RW;
}
/// ART block reset
pub mod ARTRST {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1RST::RW;
}
}
/// RCC AHB2 Peripheral Reset Register
pub mod AHB2RSTR {
/// CAMITF block reset
pub mod CAMITFRST {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Reset the selected module
pub const Reset: u32 = 0b1;
}
}
/// Cryptography block reset
pub mod CRYPTRST {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CAMITFRST::RW;
}
/// Hash block reset
pub mod HASHRST {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CAMITFRST::RW;
}
/// Random Number Generator block reset
pub mod RNGRST {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CAMITFRST::RW;
}
/// SDMMC2 and SDMMC2 Delay block reset
pub mod SDMMC2RST {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CAMITFRST::RW;
}
}
/// RCC AHB4 Peripheral Reset Register
pub mod AHB4RSTR {
/// GPIOA block reset
pub mod GPIOARST {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Reset the selected module
pub const Reset: u32 = 0b1;
}
}
/// GPIOB block reset
pub mod GPIOBRST {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// GPIOC block reset
pub mod GPIOCRST {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// GPIOD block reset
pub mod GPIODRST {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// GPIOE block reset
pub mod GPIOERST {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// GPIOF block reset
pub mod GPIOFRST {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// GPIOG block reset
pub mod GPIOGRST {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// GPIOH block reset
pub mod GPIOHRST {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// GPIOI block reset
pub mod GPIOIRST {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// GPIOJ block reset
pub mod GPIOJRST {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// GPIOK block reset
pub mod GPIOKRST {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// CRC block reset
pub mod CRCRST {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// BDMA block reset
pub mod BDMARST {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// ADC3 block reset
pub mod ADC3RST {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
/// HSEM block reset
pub mod HSEMRST {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOARST::RW;
}
}
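// Sketch only: shows the two values involved in pulsing a peripheral reset
// through AHB4RSTR (set the GPIOARST bit, then clear it again); issuing the
// two register writes and any required delay are assumed to be handled by
// the caller.
#[allow(dead_code)]
fn example_ahb4rstr_gpioa_reset_values(ahb4rstr: u32) -> (u32, u32) {
    let assert_reset = ahb4rstr | (AHB4RSTR::GPIOARST::RW::Reset << AHB4RSTR::GPIOARST::offset);
    let release_reset = ahb4rstr & !AHB4RSTR::GPIOARST::mask;
    (assert_reset, release_reset)
}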
/// RCC APB3 Peripheral Reset Register
pub mod APB3RSTR {
/// LTDC block reset
pub mod LTDCRST {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Reset the selected module
pub const Reset: u32 = 0b1;
}
}
/// DSI block reset
pub mod DSIRST {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LTDCRST::RW;
}
}
/// RCC APB1 Peripheral Reset Register
pub mod APB1LRSTR {
/// TIM2 block reset
pub mod TIM2RST {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Reset the selected module
pub const Reset: u32 = 0b1;
}
}
/// TIM3 block reset
pub mod TIM3RST {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// TIM4 block reset
pub mod TIM4RST {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// TIM5 block reset
pub mod TIM5RST {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// TIM6 block reset
pub mod TIM6RST {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// TIM7 block reset
pub mod TIM7RST {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// TIM12 block reset
pub mod TIM12RST {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// TIM13 block reset
pub mod TIM13RST {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// TIM14 block reset
pub mod TIM14RST {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// LPTIM1 block reset
pub mod LPTIM1RST {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// SPI2 block reset
pub mod SPI2RST {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// SPI3 block reset
pub mod SPI3RST {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// SPDIFRX block reset
pub mod SPDIFRXRST {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// USART2 block reset
pub mod USART2RST {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// USART3 block reset
pub mod USART3RST {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// UART4 block reset
pub mod UART4RST {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// UART5 block reset
pub mod UART5RST {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// I2C1 block reset
pub mod I2C1RST {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// I2C2 block reset
pub mod I2C2RST {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// I2C3 block reset
pub mod I2C3RST {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// HDMI-CEC block reset
pub mod CECRST {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// DAC1 and 2 block reset
pub mod DAC12RST {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// UART7 block reset
pub mod UART7RST {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
/// UART8 block reset
pub mod UART8RST {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2RST::RW;
}
}
/// RCC APB1 Peripheral Reset Register
pub mod APB1HRSTR {
/// Clock Recovery System reset
pub mod CRSRST {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Reset the selected module
pub const Reset: u32 = 0b1;
}
}
/// SWPMI block reset
pub mod SWPRST {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSRST::RW;
}
/// OPAMP block reset
pub mod OPAMPRST {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSRST::RW;
}
/// MDIOS block reset
pub mod MDIOSRST {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSRST::RW;
}
/// FDCAN block reset
pub mod FDCANRST {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSRST::RW;
}
}
/// RCC APB2 Peripheral Reset Register
pub mod APB2RSTR {
/// TIM1 block reset
pub mod TIM1RST {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Reset the selected module
pub const Reset: u32 = 0b1;
}
}
/// TIM8 block reset
pub mod TIM8RST {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// USART1 block reset
pub mod USART1RST {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// USART6 block reset
pub mod USART6RST {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// SPI1 block reset
pub mod SPI1RST {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// SPI4 block reset
pub mod SPI4RST {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// TIM15 block reset
pub mod TIM15RST {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// TIM16 block reset
pub mod TIM16RST {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// TIM17 block reset
pub mod TIM17RST {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// SPI5 block reset
pub mod SPI5RST {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// SAI1 block reset
pub mod SAI1RST {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// SAI2 block reset
pub mod SAI2RST {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// SAI3 block reset
pub mod SAI3RST {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// DFSDM1 block reset
pub mod DFSDM1RST {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
/// HRTIM block reset
pub mod HRTIMRST {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1RST::RW;
}
}
/// RCC APB4 Peripheral Reset Register
pub mod APB4RSTR {
/// SYSCFG block reset
pub mod SYSCFGRST {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b1: Reset the selected module
pub const Reset: u32 = 0b1;
}
}
/// LPUART1 block reset
pub mod LPUART1RST {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
/// SPI6 block reset
pub mod SPI6RST {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
/// I2C4 block reset
pub mod I2C4RST {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
/// LPTIM2 block reset
pub mod LPTIM2RST {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
/// LPTIM3 block reset
pub mod LPTIM3RST {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
/// LPTIM4 block reset
pub mod LPTIM4RST {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
/// LPTIM5 block reset
pub mod LPTIM5RST {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
/// COMP12 block reset
pub mod COMP12RST {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
/// VREF block reset
pub mod VREFRST {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
/// SAI4 block reset
pub mod SAI4RST {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGRST::RW;
}
}
/// RCC Global Control Register
pub mod GCR {
/// WWDG1 reset scope control
pub mod WW1RSC {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Clear WWDG1 scope control
pub const Clear: u32 = 0b0;
/// 0b1: Set WWDG1 scope control
pub const Set: u32 = 0b1;
}
}
/// WWDG2 reset scope control
pub mod WW2RSC {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Force allow CPU1 to boot
pub mod BOOT_C1 {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Force allow CPU2 to boot
pub mod BOOT_C2 {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
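// Illustrative sketch only (not part of the generated register API): the
// offset/mask constants in each field module above are meant to be combined
// with ordinary bit operations. The helper below shows the usual
// read-modify-write pattern for a single field, here GCR::WW1RSC; the raw
// register value is taken and returned as a plain u32, since the actual
// register access mechanism is outside this snippet.
#[allow(dead_code)]
fn example_set_ww1rsc(gcr_value: u32) -> u32 {
    // Clear the WW1RSC bit, then OR in the Set value shifted into position.
    (gcr_value & !GCR::WW1RSC::mask) | (GCR::WW1RSC::RW::Set << GCR::WW1RSC::offset)
}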
/// RCC D3 Autonomous mode Register
pub mod D3AMR {
/// BDMA and DMAMUX Autonomous mode enable
pub mod BDMAAMEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Clock disabled in autonomous mode
pub const Disabled: u32 = 0b0;
/// 0b1: Clock enabled in autonomous mode
pub const Enabled: u32 = 0b1;
}
}
/// LPUART1 Autonomous mode enable
pub mod LPUART1AMEN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// SPI6 Autonomous mode enable
pub mod SPI6AMEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// I2C4 Autonomous mode enable
pub mod I2C4AMEN {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// LPTIM2 Autonomous mode enable
pub mod LPTIM2AMEN {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// LPTIM3 Autonomous mode enable
pub mod LPTIM3AMEN {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// LPTIM4 Autonomous mode enable
pub mod LPTIM4AMEN {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// LPTIM5 Autonomous mode enable
pub mod LPTIM5AMEN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// COMP12 Autonomous mode enable
pub mod COMP12AMEN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// VREF Autonomous mode enable
pub mod VREFAMEN {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// RTC Autonomous mode enable
pub mod RTCAMEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// CRC Autonomous mode enable
pub mod CRCAMEN {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// SAI4 Autonomous mode enable
pub mod SAI4AMEN {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// ADC3 Autonomous mode enable
pub mod ADC3AMEN {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// Backup RAM Autonomous mode enable
pub mod BKPSRAMAMEN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
/// SRAM4 Autonomous mode enable
pub mod SRAM4AMEN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::BDMAAMEN::RW;
}
}
/// RCC Reset Status Register
pub mod RSR {
/// Remove reset flag
pub mod RMVF {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: Not clearing the reset flags
pub const NotActive: u32 = 0b0;
/// 0b1: Clear the reset flags
pub const Clear: u32 = 0b1;
}
}
/// CPU reset flag
pub mod CPURSTF {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values
pub mod R {
/// 0b0: No reset occurred for block
pub const NoResetOccoured: u32 = 0b0;
/// 0b1: Reset occurred for block
pub const ResetOccourred: u32 = 0b1;
}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// D1 domain power switch reset flag
pub mod D1RSTF {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
pub use super::CPURSTF::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// D2 domain power switch reset flag
pub mod D2RSTF {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
pub use super::CPURSTF::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// BOR reset flag
pub mod BORRSTF {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
pub use super::CPURSTF::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Pin reset flag (NRST)
pub mod PINRSTF {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
pub use super::CPURSTF::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// POR/PDR reset flag
pub mod PORRSTF {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
pub use super::CPURSTF::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// System reset from CPU reset flag
pub mod SFTRSTF {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
pub use super::CPURSTF::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Independent Watchdog reset flag
pub mod IWDG1RSTF {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
pub use super::CPURSTF::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Window Watchdog reset flag
pub mod WWDG1RSTF {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
pub use super::CPURSTF::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// Reset due to illegal D1 DStandby or CPU CStop flag
pub mod LPWRRSTF {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
pub use super::CPURSTF::R;
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
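// Illustrative sketch only (not generated code): reset-cause flags in RSR are
// read by masking the register value with the field's mask constant. The raw
// RSR value is passed in as a plain u32; how it is read from the peripheral
// is assumed and not shown here.
#[allow(dead_code)]
fn example_pin_reset_occurred(rsr_value: u32) -> bool {
    // PINRSTF is set by hardware when a reset was caused by the NRST pin.
    (rsr_value & RSR::PINRSTF::mask) != 0
}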
/// RCC Reset Status Register
pub mod C1_RSR {
pub use super::RSR::BORRSTF;
pub use super::RSR::CPURSTF;
pub use super::RSR::D1RSTF;
pub use super::RSR::D2RSTF;
pub use super::RSR::IWDG1RSTF;
pub use super::RSR::LPWRRSTF;
pub use super::RSR::PINRSTF;
pub use super::RSR::PORRSTF;
pub use super::RSR::RMVF;
pub use super::RSR::SFTRSTF;
pub use super::RSR::WWDG1RSTF;
}
/// RCC AHB3 Clock Register
pub mod C1_AHB3ENR {
/// MDMA Peripheral Clock Enable
pub mod MDMAEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// DMA2D Peripheral Clock Enable
pub mod DMA2DEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// JPGDEC Peripheral Clock Enable
pub mod JPGDECEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// FMC Peripheral Clocks Enable
pub mod FMCEN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// QUADSPI and QUADSPI Delay Clock Enable
pub mod QSPIEN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// SDMMC1 and SDMMC1 Delay Clock Enable
pub mod SDMMC1EN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
}
/// RCC AHB3 Clock Register
pub mod AHB3ENR {
/// MDMA Peripheral Clock Enable
pub mod MDMAEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// DMA2D Peripheral Clock Enable
pub mod DMA2DEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// JPGDEC Peripheral Clock Enable
pub mod JPGDECEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// FMC Peripheral Clocks Enable
pub mod FMCEN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// QUADSPI and QUADSPI Delay Clock Enable
pub mod QSPIEN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// SDMMC1 and SDMMC1 Delay Clock Enable
pub mod SDMMC1EN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// D1 DTCM1 block enable
pub mod DTCM1EN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// D1 DTCM2 block enable
pub mod DTCM2EN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// D1 ITCM block enable
pub mod ITCM1EN {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
/// AXISRAM block enable
pub mod AXISRAMEN {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMAEN::RW;
}
}
/// RCC AHB1 Clock Register
pub mod AHB1ENR {
/// DMA1 Clock Enable
pub mod DMA1EN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// DMA2 Clock Enable
pub mod DMA2EN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
/// ADC1/2 Peripheral Clocks Enable
pub mod ADC12EN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
/// Ethernet MAC bus interface Clock Enable
pub mod ETH1MACEN {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
/// Ethernet Transmission Clock Enable
pub mod ETH1TXEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
/// Ethernet Reception Clock Enable
pub mod ETH1RXEN {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
/// USB1OTG Peripheral Clocks Enable
pub mod USB1OTGEN {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
/// USB_PHY1 Clocks Enable
pub mod USB1ULPIEN {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
/// USB2OTG Peripheral Clocks Enable
pub mod USB2OTGEN {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
/// USB_PHY2 Clocks Enable
pub mod USB2ULPIEN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
/// ART Clock Enable
pub mod ARTEN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1EN::RW;
}
}
/// RCC AHB1 Clock Register
pub mod C1_AHB1ENR {
pub use super::AHB1ENR::ADC12EN;
pub use super::AHB1ENR::ARTEN;
pub use super::AHB1ENR::DMA1EN;
pub use super::AHB1ENR::DMA2EN;
pub use super::AHB1ENR::ETH1MACEN;
pub use super::AHB1ENR::ETH1RXEN;
pub use super::AHB1ENR::ETH1TXEN;
pub use super::AHB1ENR::USB1OTGEN;
pub use super::AHB1ENR::USB1ULPIEN;
pub use super::AHB1ENR::USB2OTGEN;
pub use super::AHB1ENR::USB2ULPIEN;
}
/// RCC AHB2 Clock Register
pub mod C1_AHB2ENR {
/// DCMI peripheral clock
pub mod DCMIEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// CRYPT peripheral clock enable
pub mod CRYPTEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMIEN::RW;
}
/// HASH peripheral clock enable
pub mod HASHEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMIEN::RW;
}
/// RNG peripheral clocks enable
pub mod RNGEN {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMIEN::RW;
}
/// SDMMC2 and SDMMC2 delay clock enable
pub mod SDMMC2EN {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMIEN::RW;
}
/// SRAM1 block enable
pub mod SRAM1EN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMIEN::RW;
}
/// SRAM2 block enable
pub mod SRAM2EN {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMIEN::RW;
}
/// SRAM3 block enable
pub mod SRAM3EN {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMIEN::RW;
}
}
/// RCC AHB2 Clock Register
pub mod AHB2ENR {
pub use super::C1_AHB2ENR::CRYPTEN;
pub use super::C1_AHB2ENR::DCMIEN;
pub use super::C1_AHB2ENR::HASHEN;
pub use super::C1_AHB2ENR::RNGEN;
pub use super::C1_AHB2ENR::SDMMC2EN;
pub use super::C1_AHB2ENR::SRAM1EN;
pub use super::C1_AHB2ENR::SRAM2EN;
pub use super::C1_AHB2ENR::SRAM3EN;
}
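// Illustrative sketch only (not generated code): enabling a peripheral clock
// is a read-modify-write of the corresponding ENR register. The closures
// passed in stand for whatever register read/write mechanism the surrounding
// crate provides; they are placeholders, not real API.
#[allow(dead_code)]
fn example_enable_rng(read_ahb2enr: impl Fn() -> u32, mut write_ahb2enr: impl FnMut(u32)) {
    let val = read_ahb2enr();
    // Clear the RNGEN field, then set it to Enabled (0b1) at its offset.
    let new = (val & !AHB2ENR::RNGEN::mask)
        | (AHB2ENR::RNGEN::RW::Enabled << AHB2ENR::RNGEN::offset);
    write_ahb2enr(new);
}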
/// RCC AHB4 Clock Register
pub mod AHB4ENR {
/// GPIO peripheral clock enable
pub mod GPIOAEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// GPIO peripheral clock enable
pub mod GPIOBEN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// GPIO peripheral clock enable
pub mod GPIOCEN {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// GPIO peripheral clock enable
pub mod GPIODEN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// GPIO peripheral clock enable
pub mod GPIOEEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// GPIO peripheral clock enable
pub mod GPIOFEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// GPIO peripheral clock enable
pub mod GPIOGEN {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// GPIO peripheral clock enable
pub mod GPIOHEN {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// GPIO peripheral clock enable
pub mod GPIOIEN {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// GPIO peripheral clock enable
pub mod GPIOJEN {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// GPIO peripheral clock enable
pub mod GPIOKEN {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// CRC peripheral clock enable
pub mod CRCEN {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// BDMA and DMAMUX2 Clock Enable
pub mod BDMAEN {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// ADC3 Peripheral Clocks Enable
pub mod ADC3EN {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// HSEM peripheral clock enable
pub mod HSEMEN {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
/// Backup RAM Clock Enable
pub mod BKPRAMEN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOAEN::RW;
}
}
/// RCC AHB4 Clock Register
pub mod C1_AHB4ENR {
pub use super::AHB4ENR::ADC3EN;
pub use super::AHB4ENR::BDMAEN;
pub use super::AHB4ENR::BKPRAMEN;
pub use super::AHB4ENR::CRCEN;
pub use super::AHB4ENR::GPIOAEN;
pub use super::AHB4ENR::GPIOBEN;
pub use super::AHB4ENR::GPIOCEN;
pub use super::AHB4ENR::GPIODEN;
pub use super::AHB4ENR::GPIOEEN;
pub use super::AHB4ENR::GPIOFEN;
pub use super::AHB4ENR::GPIOGEN;
pub use super::AHB4ENR::GPIOHEN;
pub use super::AHB4ENR::GPIOIEN;
pub use super::AHB4ENR::GPIOJEN;
pub use super::AHB4ENR::GPIOKEN;
pub use super::AHB4ENR::HSEMEN;
}
/// RCC APB3 Clock Register
pub mod C1_APB3ENR {
/// LTDC peripheral clock enable
pub mod LTDCEN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// WWDG1 Clock Enable
pub mod WWDG1EN {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// DSI Peripheral clocks enable
pub mod DSIEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::WWDG1EN::RW;
}
}
/// RCC APB3 Clock Register
pub mod APB3ENR {
pub use super::C1_APB3ENR::DSIEN;
pub use super::C1_APB3ENR::LTDCEN;
pub use super::C1_APB3ENR::WWDG1EN;
}
/// RCC APB1 Clock Register
pub mod APB1LENR {
/// TIM peripheral clock enable
pub mod TIM2EN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// TIM peripheral clock enable
pub mod TIM3EN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// TIM peripheral clock enable
pub mod TIM4EN {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// TIM peripheral clock enable
pub mod TIM5EN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// TIM peripheral clock enable
pub mod TIM6EN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// TIM peripheral clock enable
pub mod TIM7EN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// TIM peripheral clock enable
pub mod TIM12EN {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// TIM peripheral clock enable
pub mod TIM13EN {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// TIM peripheral clock enable
pub mod TIM14EN {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// LPTIM1 Peripheral Clocks Enable
pub mod LPTIM1EN {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// SPI2 Peripheral Clocks Enable
pub mod SPI2EN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// SPI3 Peripheral Clocks Enable
pub mod SPI3EN {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// SPDIFRX Peripheral Clocks Enable
pub mod SPDIFRXEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// USART2 Peripheral Clocks Enable
pub mod USART2EN {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// USART3 Peripheral Clocks Enable
pub mod USART3EN {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// UART4 Peripheral Clocks Enable
pub mod UART4EN {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// UART5 Peripheral Clocks Enable
pub mod UART5EN {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// I2C1 Peripheral Clocks Enable
pub mod I2C1EN {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// I2C2 Peripheral Clocks Enable
pub mod I2C2EN {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// I2C3 Peripheral Clocks Enable
pub mod I2C3EN {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// HDMI-CEC peripheral clock enable
pub mod CECEN {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// DAC1&2 peripheral clock enable
pub mod DAC12EN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// UART7 Peripheral Clocks Enable
pub mod UART7EN {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// UART8 Peripheral Clocks Enable
pub mod UART8EN {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
/// WWDG2 peripheral clock enable
pub mod WWDG2EN {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2EN::RW;
}
}
/// RCC APB1 Clock Register
pub mod C1_APB1LENR {
pub use super::APB1LENR::CECEN;
pub use super::APB1LENR::DAC12EN;
pub use super::APB1LENR::I2C1EN;
pub use super::APB1LENR::I2C2EN;
pub use super::APB1LENR::I2C3EN;
pub use super::APB1LENR::LPTIM1EN;
pub use super::APB1LENR::SPDIFRXEN;
pub use super::APB1LENR::SPI2EN;
pub use super::APB1LENR::SPI3EN;
pub use super::APB1LENR::TIM12EN;
pub use super::APB1LENR::TIM13EN;
pub use super::APB1LENR::TIM14EN;
pub use super::APB1LENR::TIM2EN;
pub use super::APB1LENR::TIM3EN;
pub use super::APB1LENR::TIM4EN;
pub use super::APB1LENR::TIM5EN;
pub use super::APB1LENR::TIM6EN;
pub use super::APB1LENR::TIM7EN;
pub use super::APB1LENR::UART4EN;
pub use super::APB1LENR::UART5EN;
pub use super::APB1LENR::UART7EN;
pub use super::APB1LENR::UART8EN;
pub use super::APB1LENR::USART2EN;
pub use super::APB1LENR::USART3EN;
pub use super::APB1LENR::WWDG2EN;
}
/// RCC APB1 Clock Register
pub mod APB1HENR {
/// Clock Recovery System peripheral clock enable
pub mod CRSEN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// SWPMI Peripheral Clocks Enable
pub mod SWPEN {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSEN::RW;
}
/// OPAMP peripheral clock enable
pub mod OPAMPEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSEN::RW;
}
/// MDIOS peripheral clock enable
pub mod MDIOSEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSEN::RW;
}
/// FDCAN Peripheral Clocks Enable
pub mod FDCANEN {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSEN::RW;
}
}
/// RCC APB1 Clock Register
pub mod C1_APB1HENR {
pub use super::APB1HENR::CRSEN;
pub use super::APB1HENR::FDCANEN;
pub use super::APB1HENR::MDIOSEN;
pub use super::APB1HENR::OPAMPEN;
pub use super::APB1HENR::SWPEN;
}
/// RCC APB2 Clock Register
pub mod C1_APB2ENR {
/// TIM1 peripheral clock enable
pub mod TIM1EN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// TIM8 peripheral clock enable
pub mod TIM8EN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// USART1 Peripheral Clocks Enable
pub mod USART1EN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// USART6 Peripheral Clocks Enable
pub mod USART6EN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// SPI1 Peripheral Clocks Enable
pub mod SPI1EN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// SPI4 Peripheral Clocks Enable
pub mod SPI4EN {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// TIM16 peripheral clock enable
pub mod TIM16EN {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// TIM15 peripheral clock enable
pub mod TIM15EN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// TIM17 peripheral clock enable
pub mod TIM17EN {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// SPI5 Peripheral Clocks Enable
pub mod SPI5EN {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// SAI1 Peripheral Clocks Enable
pub mod SAI1EN {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// SAI2 Peripheral Clocks Enable
pub mod SAI2EN {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// SAI3 Peripheral Clocks Enable
pub mod SAI3EN {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// DFSDM1 Peripheral Clocks Enable
pub mod DFSDM1EN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
/// HRTIM peripheral clock enable
pub mod HRTIMEN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1EN::RW;
}
}
/// RCC APB2 Clock Register
pub mod APB2ENR {
pub use super::C1_APB2ENR::DFSDM1EN;
pub use super::C1_APB2ENR::HRTIMEN;
pub use super::C1_APB2ENR::SAI1EN;
pub use super::C1_APB2ENR::SAI2EN;
pub use super::C1_APB2ENR::SAI3EN;
pub use super::C1_APB2ENR::SPI1EN;
pub use super::C1_APB2ENR::SPI4EN;
pub use super::C1_APB2ENR::SPI5EN;
pub use super::C1_APB2ENR::TIM15EN;
pub use super::C1_APB2ENR::TIM16EN;
pub use super::C1_APB2ENR::TIM17EN;
pub use super::C1_APB2ENR::TIM1EN;
pub use super::C1_APB2ENR::TIM8EN;
pub use super::C1_APB2ENR::USART1EN;
pub use super::C1_APB2ENR::USART6EN;
}
/// RCC APB4 Clock Register
pub mod APB4ENR {
/// SYSCFG peripheral clock enable
pub mod SYSCFGEN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled
pub const Enabled: u32 = 0b1;
}
}
/// LPUART1 Peripheral Clocks Enable
pub mod LPUART1EN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// SPI6 Peripheral Clocks Enable
pub mod SPI6EN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// I2C4 Peripheral Clocks Enable
pub mod I2C4EN {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// LPTIM2 Peripheral Clocks Enable
pub mod LPTIM2EN {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// LPTIM3 Peripheral Clocks Enable
pub mod LPTIM3EN {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// LPTIM4 Peripheral Clocks Enable
pub mod LPTIM4EN {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// LPTIM5 Peripheral Clocks Enable
pub mod LPTIM5EN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// COMP1/2 peripheral clock enable
pub mod COMP12EN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// VREF peripheral clock enable
pub mod VREFEN {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// RTC APB Clock Enable
pub mod RTCAPBEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
/// SAI4 Peripheral Clocks Enable
pub mod SAI4EN {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGEN::RW;
}
}
/// RCC APB4 Clock Register
pub mod C1_APB4ENR {
pub use super::APB4ENR::COMP12EN;
pub use super::APB4ENR::I2C4EN;
pub use super::APB4ENR::LPTIM2EN;
pub use super::APB4ENR::LPTIM3EN;
pub use super::APB4ENR::LPTIM4EN;
pub use super::APB4ENR::LPTIM5EN;
pub use super::APB4ENR::LPUART1EN;
pub use super::APB4ENR::RTCAPBEN;
pub use super::APB4ENR::SAI4EN;
pub use super::APB4ENR::SPI6EN;
pub use super::APB4ENR::SYSCFGEN;
pub use super::APB4ENR::VREFEN;
}
/// RCC AHB3 Sleep Clock Register
pub mod C1_AHB3LPENR {
/// MDMA Clock Enable During CSleep Mode
pub mod MDMALPEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// DMA2D Clock Enable During CSleep Mode
pub mod DMA2DLPEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// JPGDEC Clock Enable During CSleep Mode
pub mod JPGDECLPEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// Flash interface clock enable during csleep mode
pub mod FLASHPREN {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// FMC Peripheral Clocks Enable During CSleep Mode
pub mod FMCLPEN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// QUADSPI and QUADSPI Delay Clock Enable During CSleep Mode
pub mod QSPILPEN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// SDMMC1 and SDMMC1 Delay Clock Enable During CSleep Mode
pub mod SDMMC1LPEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// D1DTCM1 Block Clock Enable During CSleep mode
pub mod D1DTCM1LPEN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// D1 DTCM2 Block Clock Enable During CSleep mode
pub mod DTCM2LPEN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// D1ITCM Block Clock Enable During CSleep mode
pub mod ITCMLPEN {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// AXISRAM Block Clock Enable During CSleep mode
pub mod AXISRAMLPEN {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
}
/// RCC AHB3 Sleep Clock Register
pub mod AHB3LPENR {
/// MDMA Clock Enable During CSleep Mode
pub mod MDMALPEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// DMA2D Clock Enable During CSleep Mode
pub mod DMA2DLPEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// JPGDEC Clock Enable During CSleep Mode
pub mod JPGDECLPEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// FLITF Clock Enable During CSleep Mode
pub mod FLITFLPEN {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// FMC Peripheral Clocks Enable During CSleep Mode
pub mod FMCLPEN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// QUADSPI and QUADSPI Delay Clock Enable During CSleep Mode
pub mod QSPILPEN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// SDMMC1 and SDMMC1 Delay Clock Enable During CSleep Mode
pub mod SDMMC1LPEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// D1DTCM1 Block Clock Enable During CSleep mode
pub mod D1DTCM1LPEN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// D1 DTCM2 Block Clock Enable During CSleep mode
pub mod DTCM2LPEN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// D1ITCM Block Clock Enable During CSleep mode
pub mod ITCMLPEN {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
/// AXISRAM Block Clock Enable During CSleep mode
pub mod AXISRAMLPEN {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::MDMALPEN::RW;
}
}
/// RCC AHB1 Sleep Clock Register
pub mod AHB1LPENR {
/// DMA1 Clock Enable During CSleep Mode
pub mod DMA1LPEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// DMA2 Clock Enable During CSleep Mode
pub mod DMA2LPEN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
/// ADC1/2 Peripheral Clocks Enable During CSleep Mode
pub mod ADC12LPEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
/// Ethernet MAC bus interface Clock Enable During CSleep Mode
pub mod ETH1MACLPEN {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
/// Ethernet Transmission Clock Enable During CSleep Mode
pub mod ETH1TXLPEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
/// Ethernet Reception Clock Enable During CSleep Mode
pub mod ETH1RXLPEN {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
/// USB1OTG peripheral clock enable during CSleep mode
pub mod USB1OTGLPEN {
/// Offset (25 bits)
pub const offset: u32 = 25;
/// Mask (1 bit: 1 << 25)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
/// USB_PHY1 clock enable during CSleep mode
pub mod USB1ULPILPEN {
/// Offset (26 bits)
pub const offset: u32 = 26;
/// Mask (1 bit: 1 << 26)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
/// USB2OTG peripheral clock enable during CSleep mode
pub mod USB2OTGLPEN {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
/// USB_PHY2 clocks enable during CSleep mode
pub mod USB2ULPILPEN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
/// ART Clock Enable During CSleep Mode
pub mod ARTLPEN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DMA1LPEN::RW;
}
}
/// RCC AHB1 Sleep Clock Register
pub mod C1_AHB1LPENR {
pub use super::AHB1LPENR::ADC12LPEN;
pub use super::AHB1LPENR::ARTLPEN;
pub use super::AHB1LPENR::DMA1LPEN;
pub use super::AHB1LPENR::DMA2LPEN;
pub use super::AHB1LPENR::ETH1MACLPEN;
pub use super::AHB1LPENR::ETH1RXLPEN;
pub use super::AHB1LPENR::ETH1TXLPEN;
pub use super::AHB1LPENR::USB1OTGLPEN;
pub use super::AHB1LPENR::USB1ULPILPEN;
pub use super::AHB1LPENR::USB2OTGLPEN;
pub use super::AHB1LPENR::USB2ULPILPEN;
}
/// RCC AHB2 Sleep Clock Register
pub mod C1_AHB2LPENR {
/// DCMI peripheral clock enable during csleep mode
pub mod DCMILPEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// CRYPT peripheral clock enable during CSleep mode
pub mod CRYPTLPEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMILPEN::RW;
}
/// HASH peripheral clock enable during CSleep mode
pub mod HASHLPEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMILPEN::RW;
}
/// SDMMC2 and SDMMC2 Delay Clock Enable During CSleep Mode
pub mod SDMMC2LPEN {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMILPEN::RW;
}
/// RNG peripheral clock enable during CSleep mode
pub mod RNGLPEN {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMILPEN::RW;
}
/// SRAM1 Clock Enable During CSleep Mode
pub mod SRAM1LPEN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMILPEN::RW;
}
/// SRAM2 Clock Enable During CSleep Mode
pub mod SRAM2LPEN {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMILPEN::RW;
}
/// SRAM3 Clock Enable During CSleep Mode
pub mod SRAM3LPEN {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::DCMILPEN::RW;
}
}
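// Illustrative sketch (not part of the generated API; the helper name is made up): it shows how a
// field's `offset`/`mask`/`RW` constants are meant to be combined in a read-modify-write of the
// 32-bit register value. The actual register read/write would normally go through this crate's
// register-access macros; only the bit arithmetic on an already-read value is shown here.
#[allow(dead_code)]
fn example_enable_dcmi_in_csleep(ahb2lpenr: u32) -> u32 {
    // Clear the DCMILPEN bit, then set it to `Enabled` (keep the DCMI clock running in CSleep).
    (ahb2lpenr & !C1_AHB2LPENR::DCMILPEN::mask)
        | (C1_AHB2LPENR::DCMILPEN::RW::Enabled << C1_AHB2LPENR::DCMILPEN::offset)
}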
/// RCC AHB2 Sleep Clock Register
pub mod AHB2LPENR {
pub use super::C1_AHB2LPENR::CRYPTLPEN;
pub use super::C1_AHB2LPENR::DCMILPEN;
pub use super::C1_AHB2LPENR::HASHLPEN;
pub use super::C1_AHB2LPENR::RNGLPEN;
pub use super::C1_AHB2LPENR::SDMMC2LPEN;
pub use super::C1_AHB2LPENR::SRAM1LPEN;
pub use super::C1_AHB2LPENR::SRAM2LPEN;
pub use super::C1_AHB2LPENR::SRAM3LPEN;
}
/// RCC AHB4 Sleep Clock Register
pub mod AHB4LPENR {
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOALPEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOBLPEN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOCLPEN {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIODLPEN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOELPEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOFLPEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOGLPEN {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOHLPEN {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOILPEN {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOJLPEN {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// GPIO peripheral clock enable during CSleep mode
pub mod GPIOKLPEN {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// CRC peripheral clock enable during CSleep mode
pub mod CRCLPEN {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// BDMA Clock Enable During CSleep Mode
pub mod BDMALPEN {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// ADC3 Peripheral Clocks Enable During CSleep Mode
pub mod ADC3LPEN {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// Backup RAM Clock Enable During CSleep Mode
pub mod BKPRAMLPEN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
/// SRAM4 Clock Enable During CSleep Mode
pub mod SRAM4LPEN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::GPIOALPEN::RW;
}
}
/// RCC AHB4 Sleep Clock Register
pub mod C1_AHB4LPENR {
pub use super::AHB4LPENR::ADC3LPEN;
pub use super::AHB4LPENR::BDMALPEN;
pub use super::AHB4LPENR::BKPRAMLPEN;
pub use super::AHB4LPENR::CRCLPEN;
pub use super::AHB4LPENR::GPIOALPEN;
pub use super::AHB4LPENR::GPIOBLPEN;
pub use super::AHB4LPENR::GPIOCLPEN;
pub use super::AHB4LPENR::GPIODLPEN;
pub use super::AHB4LPENR::GPIOELPEN;
pub use super::AHB4LPENR::GPIOFLPEN;
pub use super::AHB4LPENR::GPIOGLPEN;
pub use super::AHB4LPENR::GPIOHLPEN;
pub use super::AHB4LPENR::GPIOILPEN;
pub use super::AHB4LPENR::GPIOJLPEN;
pub use super::AHB4LPENR::GPIOKLPEN;
pub use super::AHB4LPENR::SRAM4LPEN;
}
/// RCC APB3 Sleep Clock Register
pub mod C1_APB3LPENR {
/// LTDC peripheral clock enable during CSleep mode
pub mod LTDCLPEN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// WWDG1 Clock Enable During CSleep Mode
pub mod WWDG1LPEN {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LTDCLPEN::RW;
}
/// DSI Peripheral Clock Enable During CSleep Mode
pub mod DSILPEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::LTDCLPEN::RW;
}
}
/// RCC APB3 Sleep Clock Register
pub mod APB3LPENR {
pub use super::C1_APB3LPENR::DSILPEN;
pub use super::C1_APB3LPENR::LTDCLPEN;
pub use super::C1_APB3LPENR::WWDG1LPEN;
}
/// RCC APB1 Low Sleep Clock Register
pub mod APB1LLPENR {
/// TIM2 peripheral clock enable during CSleep mode
pub mod TIM2LPEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// TIM3 peripheral clock enable during CSleep mode
pub mod TIM3LPEN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// TIM4 peripheral clock enable during CSleep mode
pub mod TIM4LPEN {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// TIM5 peripheral clock enable during CSleep mode
pub mod TIM5LPEN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// TIM6 peripheral clock enable during CSleep mode
pub mod TIM6LPEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// TIM7 peripheral clock enable during CSleep mode
pub mod TIM7LPEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// TIM12 peripheral clock enable during CSleep mode
pub mod TIM12LPEN {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (1 bit: 1 << 6)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// TIM13 peripheral clock enable during CSleep mode
pub mod TIM13LPEN {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// TIM14 peripheral clock enable during CSleep mode
pub mod TIM14LPEN {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// LPTIM1 Peripheral Clocks Enable During CSleep Mode
pub mod LPTIM1LPEN {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// SPI2 Peripheral Clocks Enable During CSleep Mode
pub mod SPI2LPEN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// SPI3 Peripheral Clocks Enable During CSleep Mode
pub mod SPI3LPEN {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// SPDIFRX Peripheral Clocks Enable During CSleep Mode
pub mod SPDIFRXLPEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// USART2 Peripheral Clocks Enable During CSleep Mode
pub mod USART2LPEN {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// USART3 Peripheral Clocks Enable During CSleep Mode
pub mod USART3LPEN {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// UART4 Peripheral Clocks Enable During CSleep Mode
pub mod UART4LPEN {
/// Offset (19 bits)
pub const offset: u32 = 19;
/// Mask (1 bit: 1 << 19)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// UART5 Peripheral Clocks Enable During CSleep Mode
pub mod UART5LPEN {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// I2C1 Peripheral Clocks Enable During CSleep Mode
pub mod I2C1LPEN {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// I2C2 Peripheral Clocks Enable During CSleep Mode
pub mod I2C2LPEN {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// I2C3 Peripheral Clocks Enable During CSleep Mode
pub mod I2C3LPEN {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// HDMI-CEC Peripheral Clocks Enable During CSleep Mode
pub mod CECLPEN {
/// Offset (27 bits)
pub const offset: u32 = 27;
/// Mask (1 bit: 1 << 27)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// DAC1/2 peripheral clock enable during CSleep mode
pub mod DAC12LPEN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// UART7 Peripheral Clocks Enable During CSleep Mode
pub mod UART7LPEN {
/// Offset (30 bits)
pub const offset: u32 = 30;
/// Mask (1 bit: 1 << 30)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// UART8 Peripheral Clocks Enable During CSleep Mode
pub mod UART8LPEN {
/// Offset (31 bits)
pub const offset: u32 = 31;
/// Mask (1 bit: 1 << 31)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
/// WWDG2 peripheral Clocks Enable During CSleep Mode
pub mod WWDG2LPEN {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM2LPEN::RW;
}
}
/// RCC APB1 Low Sleep Clock Register
pub mod C1_APB1LLPENR {
pub use super::APB1LLPENR::CECLPEN;
pub use super::APB1LLPENR::DAC12LPEN;
pub use super::APB1LLPENR::I2C1LPEN;
pub use super::APB1LLPENR::I2C2LPEN;
pub use super::APB1LLPENR::I2C3LPEN;
pub use super::APB1LLPENR::LPTIM1LPEN;
pub use super::APB1LLPENR::SPDIFRXLPEN;
pub use super::APB1LLPENR::SPI2LPEN;
pub use super::APB1LLPENR::SPI3LPEN;
pub use super::APB1LLPENR::TIM12LPEN;
pub use super::APB1LLPENR::TIM13LPEN;
pub use super::APB1LLPENR::TIM14LPEN;
pub use super::APB1LLPENR::TIM2LPEN;
pub use super::APB1LLPENR::TIM3LPEN;
pub use super::APB1LLPENR::TIM4LPEN;
pub use super::APB1LLPENR::TIM5LPEN;
pub use super::APB1LLPENR::TIM6LPEN;
pub use super::APB1LLPENR::TIM7LPEN;
pub use super::APB1LLPENR::UART4LPEN;
pub use super::APB1LLPENR::UART5LPEN;
pub use super::APB1LLPENR::UART7LPEN;
pub use super::APB1LLPENR::UART8LPEN;
pub use super::APB1LLPENR::USART2LPEN;
pub use super::APB1LLPENR::USART3LPEN;
pub use super::APB1LLPENR::WWDG2LPEN;
}
/// RCC APB1 High Sleep Clock Register
pub mod C1_APB1HLPENR {
/// Clock Recovery System peripheral clock enable during CSleep mode
pub mod CRSLPEN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// SWPMI Peripheral Clocks Enable During CSleep Mode
pub mod SWPLPEN {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSLPEN::RW;
}
/// OPAMP peripheral clock enable during CSleep mode
pub mod OPAMPLPEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSLPEN::RW;
}
/// MDIOS peripheral clock enable during CSleep mode
pub mod MDIOSLPEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSLPEN::RW;
}
/// FDCAN Peripheral Clocks Enable During CSleep Mode
pub mod FDCANLPEN {
/// Offset (8 bits)
pub const offset: u32 = 8;
/// Mask (1 bit: 1 << 8)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::CRSLPEN::RW;
}
}
/// RCC APB1 High Sleep Clock Register
pub mod APB1HLPENR {
pub use super::C1_APB1HLPENR::CRSLPEN;
pub use super::C1_APB1HLPENR::FDCANLPEN;
pub use super::C1_APB1HLPENR::MDIOSLPEN;
pub use super::C1_APB1HLPENR::OPAMPLPEN;
pub use super::C1_APB1HLPENR::SWPLPEN;
}
/// RCC APB2 Sleep Clock Register
pub mod APB2LPENR {
/// TIM1 peripheral clock enable during CSleep mode
pub mod TIM1LPEN {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// TIM8 peripheral clock enable during CSleep mode
pub mod TIM8LPEN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// USART1 Peripheral Clocks Enable During CSleep Mode
pub mod USART1LPEN {
/// Offset (4 bits)
pub const offset: u32 = 4;
/// Mask (1 bit: 1 << 4)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// USART6 Peripheral Clocks Enable During CSleep Mode
pub mod USART6LPEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// SPI1 Peripheral Clocks Enable During CSleep Mode
pub mod SPI1LPEN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// SPI4 Peripheral Clocks Enable During CSleep Mode
pub mod SPI4LPEN {
/// Offset (13 bits)
pub const offset: u32 = 13;
/// Mask (1 bit: 1 << 13)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// TIM15 peripheral clock enable during CSleep mode
pub mod TIM15LPEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// TIM16 peripheral clock enable during CSleep mode
pub mod TIM16LPEN {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// TIM17 peripheral clock enable during CSleep mode
pub mod TIM17LPEN {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// SPI5 Peripheral Clocks Enable During CSleep Mode
pub mod SPI5LPEN {
/// Offset (20 bits)
pub const offset: u32 = 20;
/// Mask (1 bit: 1 << 20)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// SAI1 Peripheral Clocks Enable During CSleep Mode
pub mod SAI1LPEN {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// SAI2 Peripheral Clocks Enable During CSleep Mode
pub mod SAI2LPEN {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// SAI3 Peripheral Clocks Enable During CSleep Mode
pub mod SAI3LPEN {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (1 bit: 1 << 24)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// DFSDM1 Peripheral Clocks Enable During CSleep Mode
pub mod DFSDM1LPEN {
/// Offset (28 bits)
pub const offset: u32 = 28;
/// Mask (1 bit: 1 << 28)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
/// HRTIM peripheral clock enable during CSleep mode
pub mod HRTIMLPEN {
/// Offset (29 bits)
pub const offset: u32 = 29;
/// Mask (1 bit: 1 << 29)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::TIM1LPEN::RW;
}
}
/// RCC APB2 Sleep Clock Register
pub mod C1_APB2LPENR {
pub use super::APB2LPENR::DFSDM1LPEN;
pub use super::APB2LPENR::HRTIMLPEN;
pub use super::APB2LPENR::SAI1LPEN;
pub use super::APB2LPENR::SAI2LPEN;
pub use super::APB2LPENR::SAI3LPEN;
pub use super::APB2LPENR::SPI1LPEN;
pub use super::APB2LPENR::SPI4LPEN;
pub use super::APB2LPENR::SPI5LPEN;
pub use super::APB2LPENR::TIM15LPEN;
pub use super::APB2LPENR::TIM16LPEN;
pub use super::APB2LPENR::TIM17LPEN;
pub use super::APB2LPENR::TIM1LPEN;
pub use super::APB2LPENR::TIM8LPEN;
pub use super::APB2LPENR::USART1LPEN;
pub use super::APB2LPENR::USART6LPEN;
}
/// RCC APB4 Sleep Clock Register
pub mod C1_APB4LPENR {
/// SYSCFG peripheral clock enable during CSleep mode
pub mod SYSCFGLPEN {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values
pub mod RW {
/// 0b0: The selected clock is disabled during csleep mode
pub const Disabled: u32 = 0b0;
/// 0b1: The selected clock is enabled during csleep mode
pub const Enabled: u32 = 0b1;
}
}
/// LPUART1 Peripheral Clocks Enable During CSleep Mode
pub mod LPUART1LPEN {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// SPI6 Peripheral Clocks Enable During CSleep Mode
pub mod SPI6LPEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// I2C4 Peripheral Clocks Enable During CSleep Mode
pub mod I2C4LPEN {
/// Offset (7 bits)
pub const offset: u32 = 7;
/// Mask (1 bit: 1 << 7)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// LPTIM2 Peripheral Clocks Enable During CSleep Mode
pub mod LPTIM2LPEN {
/// Offset (9 bits)
pub const offset: u32 = 9;
/// Mask (1 bit: 1 << 9)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// LPTIM3 Peripheral Clocks Enable During CSleep Mode
pub mod LPTIM3LPEN {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// LPTIM4 Peripheral Clocks Enable During CSleep Mode
pub mod LPTIM4LPEN {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// LPTIM5 Peripheral Clocks Enable During CSleep Mode
pub mod LPTIM5LPEN {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// COMP1/2 peripheral clock enable during CSleep mode
pub mod COMP12LPEN {
/// Offset (14 bits)
pub const offset: u32 = 14;
/// Mask (1 bit: 1 << 14)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// VREF peripheral clock enable during CSleep mode
pub mod VREFLPEN {
/// Offset (15 bits)
pub const offset: u32 = 15;
/// Mask (1 bit: 1 << 15)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// RTC APB Clock Enable During CSleep Mode
pub mod RTCAPBLPEN {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
/// SAI4 Peripheral Clocks Enable During CSleep Mode
pub mod SAI4LPEN {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
pub use super::SYSCFGLPEN::RW;
}
}
/// RCC APB4 Sleep Clock Register
pub mod APB4LPENR {
pub use super::C1_APB4LPENR::COMP12LPEN;
pub use super::C1_APB4LPENR::I2C4LPEN;
pub use super::C1_APB4LPENR::LPTIM2LPEN;
pub use super::C1_APB4LPENR::LPTIM3LPEN;
pub use super::C1_APB4LPENR::LPTIM4LPEN;
pub use super::C1_APB4LPENR::LPTIM5LPEN;
pub use super::C1_APB4LPENR::LPUART1LPEN;
pub use super::C1_APB4LPENR::RTCAPBLPEN;
pub use super::C1_APB4LPENR::SAI4LPEN;
pub use super::C1_APB4LPENR::SPI6LPEN;
pub use super::C1_APB4LPENR::SYSCFGLPEN;
pub use super::C1_APB4LPENR::VREFLPEN;
}
/// RCC HSI configuration register
pub mod HSICFGR {
/// HSI clock trimming
pub mod HSITRIM {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (7 bits: 0x7f << 24)
pub const mask: u32 = 0x7f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// HSI clock calibration
pub mod HSICAL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (12 bits: 0xfff << 0)
pub const mask: u32 = 0xfff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
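// Illustrative sketch (not part of the generated API; the helper name is made up): multi-bit
// fields such as HSITRIM use the same `offset`/`mask` constants, with the mask applied before
// shifting the value down by the offset.
#[allow(dead_code)]
fn example_read_hsitrim(hsicfgr: u32) -> u32 {
    (hsicfgr & HSICFGR::HSITRIM::mask) >> HSICFGR::HSITRIM::offset
}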
/// RCC CSI configuration register
pub mod CSICFGR {
/// CSI clock trimming
pub mod CSITRIM {
/// Offset (24 bits)
pub const offset: u32 = 24;
/// Mask (6 bits: 0x3f << 24)
pub const mask: u32 = 0x3f << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// CSI clock calibration
pub mod CSICAL {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (10 bits: 0x3ff << 0)
pub const mask: u32 = 0x3ff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
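// Note on the layout below: `#[repr(C)]` plus 4-byte register fields means the struct mirrors the
// peripheral's register map directly, so e.g. CR sits at offset 0x00 and HSICFGR at 0x04 from the
// instance base address, with the `_reservedN` arrays holding the gaps at their documented positions.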
#[repr(C)]
pub struct RegisterBlock {
/// clock control register
pub CR: RWRegister<u32>,
/// RCC HSI configuration register
pub HSICFGR: RWRegister<u32>,
/// RCC Clock Recovery RC Register
pub CRRCR: RORegister<u32>,
/// RCC CSI configuration register
pub CSICFGR: RWRegister<u32>,
/// RCC Clock Configuration Register
pub CFGR: RWRegister<u32>,
_reserved1: [u32; 1],
/// RCC Domain 1 Clock Configuration Register
pub D1CFGR: RWRegister<u32>,
/// RCC Domain 2 Clock Configuration Register
pub D2CFGR: RWRegister<u32>,
/// RCC Domain 3 Clock Configuration Register
pub D3CFGR: RWRegister<u32>,
_reserved2: [u32; 1],
/// RCC PLLs Clock Source Selection Register
pub PLLCKSELR: RWRegister<u32>,
/// RCC PLLs Configuration Register
pub PLLCFGR: RWRegister<u32>,
/// RCC PLL1 Dividers Configuration Register
pub PLL1DIVR: RWRegister<u32>,
/// RCC PLL1 Fractional Divider Register
pub PLL1FRACR: RWRegister<u32>,
/// RCC PLL2 Dividers Configuration Register
pub PLL2DIVR: RWRegister<u32>,
/// RCC PLL2 Fractional Divider Register
pub PLL2FRACR: RWRegister<u32>,
/// RCC PLL3 Dividers Configuration Register
pub PLL3DIVR: RWRegister<u32>,
/// RCC PLL3 Fractional Divider Register
pub PLL3FRACR: RWRegister<u32>,
_reserved3: [u32; 1],
/// RCC Domain 1 Kernel Clock Configuration Register
pub D1CCIPR: RWRegister<u32>,
/// RCC Domain 2 Kernel Clock Configuration Register
pub D2CCIP1R: RWRegister<u32>,
/// RCC Domain 2 Kernel Clock Configuration Register
pub D2CCIP2R: RWRegister<u32>,
/// RCC Domain 3 Kernel Clock Configuration Register
pub D3CCIPR: RWRegister<u32>,
_reserved4: [u32; 1],
/// RCC Clock Source Interrupt Enable Register
pub CIER: RWRegister<u32>,
/// RCC Clock Source Interrupt Flag Register
pub CIFR: RORegister<u32>,
/// RCC Clock Source Interrupt Clear Register
pub CICR: RWRegister<u32>,
_reserved5: [u32; 1],
/// RCC Backup Domain Control Register
pub BDCR: RWRegister<u32>,
/// RCC Clock Control and Status Register
pub CSR: RWRegister<u32>,
_reserved6: [u32; 1],
/// RCC AHB3 Reset Register
pub AHB3RSTR: RWRegister<u32>,
/// RCC AHB1 Peripheral Reset Register
pub AHB1RSTR: RWRegister<u32>,
/// RCC AHB2 Peripheral Reset Register
pub AHB2RSTR: RWRegister<u32>,
/// RCC AHB4 Peripheral Reset Register
pub AHB4RSTR: RWRegister<u32>,
/// RCC APB3 Peripheral Reset Register
pub APB3RSTR: RWRegister<u32>,
/// RCC APB1 Peripheral Reset Register
pub APB1LRSTR: RWRegister<u32>,
/// RCC APB1 Peripheral Reset Register
pub APB1HRSTR: RWRegister<u32>,
/// RCC APB2 Peripheral Reset Register
pub APB2RSTR: RWRegister<u32>,
/// RCC APB4 Peripheral Reset Register
pub APB4RSTR: RWRegister<u32>,
/// RCC Global Control Register
pub GCR: RWRegister<u32>,
_reserved7: [u32; 1],
/// RCC D3 Autonomous mode Register
pub D3AMR: RWRegister<u32>,
_reserved8: [u32; 9],
/// RCC Reset Status Register
pub RSR: RWRegister<u32>,
/// RCC AHB3 Clock Register
pub AHB3ENR: RWRegister<u32>,
/// RCC AHB1 Clock Register
pub AHB1ENR: RWRegister<u32>,
/// RCC AHB2 Clock Register
pub AHB2ENR: RWRegister<u32>,
/// RCC AHB4 Clock Register
pub AHB4ENR: RWRegister<u32>,
/// RCC APB3 Clock Register
pub APB3ENR: RWRegister<u32>,
/// RCC APB1 Clock Register
pub APB1LENR: RWRegister<u32>,
/// RCC APB1 Clock Register
pub APB1HENR: RWRegister<u32>,
/// RCC APB2 Clock Register
pub APB2ENR: RWRegister<u32>,
/// RCC APB4 Clock Register
pub APB4ENR: RWRegister<u32>,
_reserved9: [u32; 1],
/// RCC AHB3 Sleep Clock Register
pub AHB3LPENR: RWRegister<u32>,
/// RCC AHB1 Sleep Clock Register
pub AHB1LPENR: RWRegister<u32>,
/// RCC AHB2 Sleep Clock Register
pub AHB2LPENR: RWRegister<u32>,
/// RCC AHB4 Sleep Clock Register
pub AHB4LPENR: RWRegister<u32>,
/// RCC APB3 Sleep Clock Register
pub APB3LPENR: RWRegister<u32>,
/// RCC APB1 Low Sleep Clock Register
pub APB1LLPENR: RWRegister<u32>,
/// RCC APB1 High Sleep Clock Register
pub APB1HLPENR: RWRegister<u32>,
/// RCC APB2 Sleep Clock Register
pub APB2LPENR: RWRegister<u32>,
/// RCC APB4 Sleep Clock Register
pub APB4LPENR: RWRegister<u32>,
_reserved10: [u32; 4],
/// RCC Reset Status Register
pub C1_RSR: RWRegister<u32>,
/// RCC AHB3 Clock Register
pub C1_AHB3ENR: RWRegister<u32>,
/// RCC AHB1 Clock Register
pub C1_AHB1ENR: RWRegister<u32>,
/// RCC AHB2 Clock Register
pub C1_AHB2ENR: RWRegister<u32>,
/// RCC AHB4 Clock Register
pub C1_AHB4ENR: RWRegister<u32>,
/// RCC APB3 Clock Register
pub C1_APB3ENR: RWRegister<u32>,
/// RCC APB1 Clock Register
pub C1_APB1LENR: RWRegister<u32>,
/// RCC APB1 Clock Register
pub C1_APB1HENR: RWRegister<u32>,
/// RCC APB2 Clock Register
pub C1_APB2ENR: RWRegister<u32>,
/// RCC APB4 Clock Register
pub C1_APB4ENR: RWRegister<u32>,
_reserved11: [u32; 1],
/// RCC AHB3 Sleep Clock Register
pub C1_AHB3LPENR: RWRegister<u32>,
/// RCC AHB1 Sleep Clock Register
pub C1_AHB1LPENR: RWRegister<u32>,
/// RCC AHB2 Sleep Clock Register
pub C1_AHB2LPENR: RWRegister<u32>,
/// RCC AHB4 Sleep Clock Register
pub C1_AHB4LPENR: RWRegister<u32>,
/// RCC APB3 Sleep Clock Register
pub C1_APB3LPENR: RWRegister<u32>,
/// RCC APB1 Low Sleep Clock Register
pub C1_APB1LLPENR: RWRegister<u32>,
/// RCC APB1 High Sleep Clock Register
pub C1_APB1HLPENR: RWRegister<u32>,
/// RCC APB2 Sleep Clock Register
pub C1_APB2LPENR: RWRegister<u32>,
/// RCC APB4 Sleep Clock Register
pub C1_APB4LPENR: RWRegister<u32>,
}
pub struct ResetValues {
pub CR: u32,
pub HSICFGR: u32,
pub CRRCR: u32,
pub CSICFGR: u32,
pub CFGR: u32,
pub D1CFGR: u32,
pub D2CFGR: u32,
pub D3CFGR: u32,
pub PLLCKSELR: u32,
pub PLLCFGR: u32,
pub PLL1DIVR: u32,
pub PLL1FRACR: u32,
pub PLL2DIVR: u32,
pub PLL2FRACR: u32,
pub PLL3DIVR: u32,
pub PLL3FRACR: u32,
pub D1CCIPR: u32,
pub D2CCIP1R: u32,
pub D2CCIP2R: u32,
pub D3CCIPR: u32,
pub CIER: u32,
pub CIFR: u32,
pub CICR: u32,
pub BDCR: u32,
pub CSR: u32,
pub AHB3RSTR: u32,
pub AHB1RSTR: u32,
pub AHB2RSTR: u32,
pub AHB4RSTR: u32,
pub APB3RSTR: u32,
pub APB1LRSTR: u32,
pub APB1HRSTR: u32,
pub APB2RSTR: u32,
pub APB4RSTR: u32,
pub GCR: u32,
pub D3AMR: u32,
pub RSR: u32,
pub AHB3ENR: u32,
pub AHB1ENR: u32,
pub AHB2ENR: u32,
pub AHB4ENR: u32,
pub APB3ENR: u32,
pub APB1LENR: u32,
pub APB1HENR: u32,
pub APB2ENR: u32,
pub APB4ENR: u32,
pub AHB3LPENR: u32,
pub AHB1LPENR: u32,
pub AHB2LPENR: u32,
pub AHB4LPENR: u32,
pub APB3LPENR: u32,
pub APB1LLPENR: u32,
pub APB1HLPENR: u32,
pub APB2LPENR: u32,
pub APB4LPENR: u32,
pub C1_RSR: u32,
pub C1_AHB3ENR: u32,
pub C1_AHB1ENR: u32,
pub C1_AHB2ENR: u32,
pub C1_AHB4ENR: u32,
pub C1_APB3ENR: u32,
pub C1_APB1LENR: u32,
pub C1_APB1HENR: u32,
pub C1_APB2ENR: u32,
pub C1_APB4ENR: u32,
pub C1_AHB3LPENR: u32,
pub C1_AHB1LPENR: u32,
pub C1_AHB2LPENR: u32,
pub C1_AHB4LPENR: u32,
pub C1_APB3LPENR: u32,
pub C1_APB1LLPENR: u32,
pub C1_APB1HLPENR: u32,
pub C1_APB2LPENR: u32,
pub C1_APB4LPENR: u32,
}
#[cfg(not(feature = "nosync"))]
pub struct Instance {
pub(crate) addr: u32,
pub(crate) _marker: PhantomData<*const RegisterBlock>,
}
#[cfg(not(feature = "nosync"))]
impl ::core::ops::Deref for Instance {
type Target = RegisterBlock;
#[inline(always)]
fn deref(&self) -> &RegisterBlock {
unsafe { &*(self.addr as *const _) }
}
}
#[cfg(feature = "rtic")]
unsafe impl Send for Instance {}
/// Access functions for the RCC peripheral instance
pub mod RCC {
use super::ResetValues;
#[cfg(not(feature = "nosync"))]
use super::Instance;
#[cfg(not(feature = "nosync"))]
const INSTANCE: Instance = Instance {
addr: 0x58024400,
_marker: ::core::marker::PhantomData,
};
/// Reset values for each field in RCC
pub const reset: ResetValues = ResetValues {
CR: 0x00000083,
CRRCR: 0x00000000,
CFGR: 0x00000000,
D1CFGR: 0x00000000,
D2CFGR: 0x00000000,
D3CFGR: 0x00000000,
PLLCKSELR: 0x02020200,
PLLCFGR: 0x01FF0000,
PLL1DIVR: 0x01010280,
PLL1FRACR: 0x00000000,
PLL2DIVR: 0x01010280,
PLL2FRACR: 0x00000000,
PLL3DIVR: 0x01010280,
PLL3FRACR: 0x00000000,
D1CCIPR: 0x00000000,
D2CCIP1R: 0x00000000,
D2CCIP2R: 0x00000000,
D3CCIPR: 0x00000000,
CIER: 0x00000000,
CIFR: 0x00000000,
CICR: 0x00000000,
BDCR: 0x00000000,
CSR: 0x00000000,
AHB3RSTR: 0x00000000,
AHB1RSTR: 0x00000000,
AHB2RSTR: 0x00000000,
AHB4RSTR: 0x00000000,
APB3RSTR: 0x00000000,
APB1LRSTR: 0x00000000,
APB1HRSTR: 0x00000000,
APB2RSTR: 0x00000000,
APB4RSTR: 0x00000000,
GCR: 0x00000000,
D3AMR: 0x00000000,
RSR: 0x00000000,
C1_RSR: 0x00000000,
C1_AHB3ENR: 0x00000000,
AHB3ENR: 0x00000000,
AHB1ENR: 0x00000000,
C1_AHB1ENR: 0x00000000,
C1_AHB2ENR: 0x00000000,
AHB2ENR: 0x00000000,
AHB4ENR: 0x00000000,
C1_AHB4ENR: 0x00000000,
C1_APB3ENR: 0x00000000,
APB3ENR: 0x00000000,
APB1LENR: 0x00000000,
C1_APB1LENR: 0x00000000,
APB1HENR: 0x00000000,
C1_APB1HENR: 0x00000000,
C1_APB2ENR: 0x00000000,
APB2ENR: 0x00000000,
APB4ENR: 0x00000000,
C1_APB4ENR: 0x00000000,
C1_AHB3LPENR: 0x00000000,
AHB3LPENR: 0x00000000,
AHB1LPENR: 0x00000000,
C1_AHB1LPENR: 0x00000000,
C1_AHB2LPENR: 0x00000000,
AHB2LPENR: 0x00000000,
AHB4LPENR: 0x00000000,
C1_AHB4LPENR: 0x00000000,
C1_APB3LPENR: 0x00000000,
APB3LPENR: 0x00000000,
APB1LLPENR: 0x00000000,
C1_APB1LLPENR: 0x00000000,
C1_APB1HLPENR: 0x00000000,
APB1HLPENR: 0x00000000,
APB2LPENR: 0x00000000,
C1_APB2LPENR: 0x00000000,
C1_APB4LPENR: 0x00000000,
APB4LPENR: 0x00000000,
HSICFGR: 0x00000000,
CSICFGR: 0x00000000,
};
#[cfg(not(feature = "nosync"))]
#[allow(renamed_and_removed_lints)]
#[allow(private_no_mangle_statics)]
#[no_mangle]
static mut RCC_TAKEN: bool = false;
/// Safe access to RCC
///
/// This function returns `Some(Instance)` if this instance is not
/// currently taken, and `None` if it is. This ensures that if you
/// do get `Some(Instance)`, you are guaranteed unique access to
/// the peripheral and there cannot be data races (unless other
/// code uses `unsafe`, of course). You can then pass the
/// `Instance` around to other functions as required. When you're
/// done with it, you can call `release(instance)` to return it.
///
/// `Instance` itself dereferences to a `RegisterBlock`, which
/// provides access to the peripheral's registers.
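/// A minimal usage sketch (illustrative only; nothing is assumed here beyond the
/// `take`/`release` API defined in this module):
///
/// ```ignore
/// if let Some(rcc) = RCC::take() {
///     // `rcc` dereferences to the `RegisterBlock`, so its registers can be accessed here.
///     RCC::release(rcc);
/// }
/// ```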
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn take() -> Option<Instance> {
external_cortex_m::interrupt::free(|_| unsafe {
if RCC_TAKEN {
None
} else {
RCC_TAKEN = true;
Some(INSTANCE)
}
})
}
/// Release exclusive access to RCC
///
/// This function allows you to return an `Instance` so that it
/// is available to `take()` again. This function will panic if
/// you return a different `Instance` or if this instance is not
/// already taken.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn release(inst: Instance) {
external_cortex_m::interrupt::free(|_| unsafe {
if RCC_TAKEN && inst.addr == INSTANCE.addr {
RCC_TAKEN = false;
} else {
panic!("Released a peripheral which was not taken");
}
});
}
/// Unsafely steal RCC
///
/// This function is similar to take() but forcibly takes the
/// Instance, marking it as taken regardless of its previous
/// state.
#[cfg(not(feature = "nosync"))]
#[inline]
pub unsafe fn steal() -> Instance {
RCC_TAKEN = true;
INSTANCE
}
}
/// Raw pointer to RCC
///
/// Dereferencing this is unsafe because you are not guaranteed unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const RCC: *const RegisterBlock = 0x58024400 as *const _;
| 27.952976 | 99 | 0.50885 |
11f3ff62274dfeca203e8640e4cf7337cf33ec20 | 79926 |
// Newer IR code gen.
//
// NOTE: This is converting IR to Vec<Op> first, and then to finalized VM bytecode much like the
// original code. This is to keep things simple, and to reuse the current tools like DataSection.
//
// But this is not ideal and needs to be refactored:
// - AsmNamespace is tied to data structures from other stages like Ident and Literal.
use std::collections::HashMap;
use crate::{
asm_generation::{
build_contract_abi_switch, build_preamble, finalized_asm::FinalizedAsm,
register_sequencer::RegisterSequencer, AbstractInstructionSet, DataId, DataSection,
SwayAsmSet,
},
asm_lang::{virtual_register::*, Label, Op, VirtualImmediate12, VirtualImmediate24, VirtualOp},
error::*,
parse_tree::Literal,
BuildConfig,
};
use sway_ir::*;
use sway_types::span::Span;
use either::Either;
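// High-level flow of `compile_ir_to_asm` below: emit the preamble ops, compile the (single)
// module into a data section plus bytecode ops, wrap them in a `SwayAsmSet` according to the
// module kind, then remove unnecessary jumps, allocate registers, optimize, and finally check
// for invalid opcodes before returning the finalized ASM.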
pub fn compile_ir_to_asm(ir: &Context, build_config: &BuildConfig) -> CompileResult<FinalizedAsm> {
let mut warnings: Vec<CompileWarning> = Vec::new();
let mut errors: Vec<CompileError> = Vec::new();
let mut reg_seqr = RegisterSequencer::new();
let mut bytecode: Vec<Op> = build_preamble(&mut reg_seqr).to_vec();
// Eventually when we get this 'correct' with no hacks we'll want to compile all the modules
// separately and then use a linker to connect them. This way we could also keep binary caches
// of libraries and link against them, rather than recompile everything each time.
assert!(ir.module_iter().count() == 1);
let module = ir.module_iter().next().unwrap();
let (data_section, mut ops, mut reg_seqr) = check!(
compile_module_to_asm(reg_seqr, ir, module),
return err(warnings, errors),
warnings,
errors
);
bytecode.append(&mut ops);
let asm = match module.get_kind(ir) {
Kind::Script => SwayAsmSet::ScriptMain {
program_section: AbstractInstructionSet { ops: bytecode },
data_section,
},
Kind::Contract => SwayAsmSet::ContractAbi {
program_section: AbstractInstructionSet { ops: bytecode },
data_section,
},
Kind::Library | Kind::Predicate => todo!("libraries and predicates coming soon!"),
};
if build_config.print_intermediate_asm {
println!("{}", asm);
}
let finalized_asm = asm
.remove_unnecessary_jumps()
.allocate_registers(&mut reg_seqr)
.optimize();
if build_config.print_finalized_asm {
println!("{}", finalized_asm);
}
check!(
crate::checks::check_invalid_opcodes(&finalized_asm),
return err(warnings, errors),
warnings,
errors
);
ok(finalized_asm, warnings, errors)
}
fn compile_module_to_asm(
reg_seqr: RegisterSequencer,
context: &Context,
module: Module,
) -> CompileResult<(DataSection, Vec<Op>, RegisterSequencer)> {
let mut builder = AsmBuilder::new(DataSection::default(), reg_seqr, context);
match module.get_kind(context) {
Kind::Script => {
// We can't do function calls yet, so we expect everything to be inlined into `main`.
let function = module
.function_iter(context)
.find(|func| &context.functions[func.0].name == "main")
.expect("Can't find main function!");
builder
.compile_function(function)
.flat_map(|_| builder.finalize())
}
Kind::Contract => {
let mut warnings = Vec::new();
let mut errors = Vec::new();
let mut selectors_and_labels: Vec<([u8; 4], Label)> = Vec::new();
// Compile only the functions which have selectors and gather the selectors and labels.
for function in module.function_iter(context) {
if function.has_selector(context) {
let selector = function.get_selector(context).unwrap();
let label = builder.add_label();
check!(
builder.compile_function(function),
return err(warnings, errors),
warnings,
errors
);
selectors_and_labels.push((selector, label));
}
}
let (mut data_section, mut funcs_bytecode, mut reg_seqr) = check!(
builder.finalize(),
return err(warnings, errors),
warnings,
errors
);
let mut bytecode_with_switch =
build_contract_abi_switch(&mut reg_seqr, &mut data_section, selectors_and_labels);
bytecode_with_switch.append(&mut funcs_bytecode);
ok(
(data_section, bytecode_with_switch, reg_seqr),
warnings,
errors,
)
}
Kind::Library | Kind::Predicate => todo!("libraries and predicates coming soon!"),
}
}
// -------------------------------------------------------------------------------------------------
macro_rules! size_bytes_in_words {
($bytes_expr: expr) => {
($bytes_expr + 7) / 8
};
}
// This is a mouthful...
macro_rules! size_bytes_round_up_to_word_alignment {
($bytes_expr: expr) => {
($bytes_expr + 7) - (($bytes_expr + 7) % 8)
};
}
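// Worked examples for the macros above (FuelVM words are 8 bytes wide):
//   size_bytes_in_words!(9)                   == (9 + 7) / 8              == 2 words
//   size_bytes_round_up_to_word_alignment!(9) == (9 + 7) - ((9 + 7) % 8)  == 16 bytes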
struct AsmBuilder<'ir> {
// Data section is used by the rest of code gen to lay out const memory.
data_section: DataSection,
// Register sequencer dishes out new registers and labels.
reg_seqr: RegisterSequencer,
// Label map is from IR block to label name.
label_map: HashMap<Block, Label>,
// Reg map tracks the VM register assigned to each IR value; ptr map tracks where each IR
// pointer (i.e. each local) is stored (a register, the data section, or a stack offset; see `Storage`).
reg_map: HashMap<Value, VirtualRegister>,
ptr_map: HashMap<Pointer, Storage>,
// Stack base register, copied from $SP at the start, but only if we have stack storage.
stack_base_reg: Option<VirtualRegister>,
// The layouts of each aggregate; their whole size in bytes and field offsets in words.
aggregate_layouts: HashMap<Aggregate, (u64, Vec<FieldLayout>)>,
// IR context we're compiling.
context: &'ir Context,
// Final resulting VM bytecode ops.
bytecode: Vec<Op>,
}
struct FieldLayout {
offset_in_words: u64, // Use words because LW/SW do.
size_in_bytes: u64, // Use bytes because CFEI/MCP do.
}
// NOTE: For stack storage we need to be aware:
// - sizes are in bytes; CFEI reserves in bytes.
// - offsets are in 64-bit words; LW/SW reads/writes to word offsets. XXX Wrap in a WordOffset struct.
#[derive(Clone, Debug)]
pub(super) enum Storage {
Data(DataId), // Const storage in the data section.
Register(VirtualRegister), // Storage in a register.
Stack(u64), // Storage in the runtime stack starting at an absolute word offset. Essentially a global.
}
impl<'ir> AsmBuilder<'ir> {
fn new(data_section: DataSection, reg_seqr: RegisterSequencer, context: &'ir Context) -> Self {
AsmBuilder {
data_section,
reg_seqr,
label_map: HashMap::new(),
reg_map: HashMap::new(),
ptr_map: HashMap::new(),
stack_base_reg: None,
aggregate_layouts: HashMap::new(),
context,
bytecode: Vec::new(),
}
}
// This is here temporarily for the case when the IR can't reliably provide a valid span, until
// we can improve ASM block parsing and verification. It's needed mostly for returning failure
// errors. If we move ASM verification to the parser and semantic analysis then ASM block
// conversion shouldn't/can't fail and we won't need to provide a guaranteed-to-be-available
// span.
fn empty_span() -> Span {
let msg = "unknown source location";
Span {
span: pest::Span::new(std::sync::Arc::from(msg), 0, msg.len()).unwrap(),
path: None,
}
}
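// A sketch of the layout policy implemented below, for a hypothetical function with three locals
// (names and types are illustrative, not taken from any real program):
// - `a: u64`, mutable, no initializer         -> Storage::Register (kept purely in a VM register)
// - `b: u64`, immutable, constant initializer -> Storage::Data (placed in the data section)
// - `c: b256`                                 -> Storage::Stack(0); `stack_base` grows by 4 words,
//   and the whole stack area is reserved at the end with a single CFEI of `stack_base * 8` bytes.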
fn add_locals(&mut self, function: Function) {
// If they're immutable and have a constant initialiser then they go in the data section.
// Otherwise they go in runtime allocated space, either a register or on the stack.
//
// Stack offsets are in words to both enforce alignment and simplify use with LW/SW.
let mut stack_base = 0_u64;
for (_name, ptr) in function.locals_iter(self.context) {
let ptr_content = &self.context.pointers[ptr.0];
if !ptr_content.is_mutable && ptr_content.initializer.is_some() {
let constant = ptr_content.initializer.as_ref().unwrap();
let lit = ir_constant_to_ast_literal(constant);
let data_id = self.data_section.insert_data_value(&lit);
self.ptr_map.insert(*ptr, Storage::Data(data_id));
} else {
match ptr_content.ty {
Type::Unit | Type::Bool | Type::Uint(_) => {
let reg = self.reg_seqr.next();
self.ptr_map.insert(*ptr, Storage::Register(reg));
}
Type::B256 => {
self.ptr_map.insert(*ptr, Storage::Stack(stack_base));
stack_base += 4;
}
Type::String(count) => {
self.ptr_map.insert(*ptr, Storage::Stack(stack_base));
// XXX `count` is a CHAR count, not BYTE count. We need to count the size
// of the string before allocating. For now assuming CHAR == BYTE.
stack_base += size_bytes_in_words!(count);
}
Type::Array(aggregate) => {
// Store this aggregate at the current stack base.
self.ptr_map.insert(*ptr, Storage::Stack(stack_base));
// Reserve space by incrementing the base.
stack_base += size_bytes_in_words!(self.aggregate_size(&aggregate));
}
Type::Struct(aggregate) => {
// Store this aggregate at the current stack base.
self.ptr_map.insert(*ptr, Storage::Stack(stack_base));
// Reserve space by incrementing the base.
stack_base += size_bytes_in_words!(self.aggregate_size(&aggregate));
}
Type::Union(aggregate) => {
// Store this aggregate AND a 64bit tag at the current stack base.
self.ptr_map.insert(*ptr, Storage::Stack(stack_base));
// Reserve space by incrementing the base.
stack_base +=
size_bytes_in_words!(self.aggregate_max_field_size(&aggregate));
}
Type::ContractCaller(_) => {
self.ptr_map.insert(*ptr, Storage::Stack(stack_base));
// Reserve space for the contract address only.
stack_base += 4;
}
Type::Contract => {
unimplemented!("contract on the stack?")
}
};
}
}
// Reserve space on the stack for ALL our locals which require it.
if !self.ptr_map.is_empty() {
let base_reg = self.reg_seqr.next();
self.bytecode.push(Op::unowned_register_move_comment(
base_reg.clone(),
VirtualRegister::Constant(ConstantRegister::StackPointer),
"save locals base register",
));
// It's possible (though undesirable) to have empty local data structures only.
if stack_base != 0 {
if stack_base * 8 > crate::asm_generation::compiler_constants::TWENTY_FOUR_BITS {
todo!("Enormous stack usage for locals.");
}
let mut alloc_op = Op::unowned_stack_allocate_memory(VirtualImmediate24 {
value: (stack_base * 8) as u32,
});
alloc_op.comment = format!("allocate {} bytes for all locals", stack_base * 8);
self.bytecode.push(alloc_op);
}
self.stack_base_reg = Some(base_reg);
}
}
fn add_block_label(&mut self, block: Block) {
if &block.get_label(self.context) != "entry" {
let label = self.block_to_label(&block);
self.bytecode.push(Op::unowned_jump_label(label))
}
}
fn add_label(&mut self) -> Label {
let label = self.reg_seqr.get_label();
self.bytecode.push(Op::unowned_jump_label(label.clone()));
label
}
fn finalize(self) -> CompileResult<(DataSection, Vec<Op>, RegisterSequencer)> {
// XXX Assuming no warnings...
ok(
(self.data_section, self.bytecode, self.reg_seqr),
Vec::new(),
Vec::new(),
)
}
fn compile_function(&mut self, function: Function) -> CompileResult<()> {
// Compile instructions.
self.add_locals(function);
let mut warnings = Vec::new();
let mut errors = Vec::new();
for block in function.block_iter(self.context) {
self.add_block_label(block);
for instr_val in block.instruction_iter(self.context) {
check!(
self.compile_instruction(&block, &instr_val),
return err(warnings, errors),
warnings,
errors
);
}
}
ok((), warnings, errors)
}
fn compile_instruction(&mut self, block: &Block, instr_val: &Value) -> CompileResult<()> {
let mut warnings = Vec::new();
let mut errors = Vec::new();
if let ValueDatum::Instruction(instruction) = &self.context.values[instr_val.0].value {
match instruction {
Instruction::AsmBlock(asm, args) => {
check!(
self.compile_asm_block(instr_val, asm, args),
return err(warnings, errors),
warnings,
errors
)
}
Instruction::Branch(to_block) => self.compile_branch(block, to_block),
Instruction::Call(..) => {
errors.push(CompileError::Internal(
"Calls are not yet supported.",
instr_val
.get_span(self.context)
.unwrap_or_else(Self::empty_span),
));
return err(warnings, errors);
}
Instruction::ConditionalBranch {
cond_value,
true_block,
false_block,
} => self.compile_conditional_branch(cond_value, block, true_block, false_block),
Instruction::ExtractElement {
array,
ty,
index_val,
} => self.compile_extract_element(instr_val, array, ty, index_val),
Instruction::ExtractValue {
aggregate,
ty,
indices,
} => self.compile_extract_value(instr_val, aggregate, ty, indices),
Instruction::GetPointer(ptr) => self.compile_get_pointer(instr_val, ptr),
Instruction::InsertElement {
array,
ty,
value,
index_val,
} => self.compile_insert_element(instr_val, array, ty, value, index_val),
Instruction::InsertValue {
aggregate,
ty,
value,
indices,
} => self.compile_insert_value(instr_val, aggregate, ty, value, indices),
Instruction::Load(ptr) => self.compile_load(instr_val, ptr),
Instruction::Nop => (),
Instruction::Phi(_) => (), // Managing the phi value is done in br and cbr compilation.
Instruction::Ret(ret_val, ty) => self.compile_ret(instr_val, ret_val, ty),
Instruction::Store { ptr, stored_val } => {
self.compile_store(instr_val, ptr, stored_val)
}
}
} else {
errors.push(CompileError::Internal(
"Value not an instruction.",
instr_val
.get_span(self.context)
.unwrap_or_else(Self::empty_span),
));
}
ok((), warnings, errors)
}
// OK, I began by trying to translate the IR ASM block data structures back into AST data
// structures which I could feed to the code in asm_generation/expression/mod.rs where it
// compiles the inline ASM. But it's more work to do that than to just re-implement that
// algorithm with the IR data here.
fn compile_asm_block(
&mut self,
instr_val: &Value,
asm: &AsmBlock,
asm_args: &[AsmArg],
) -> CompileResult<()> {
let mut warnings: Vec<CompileWarning> = Vec::new();
let mut errors: Vec<CompileError> = Vec::new();
let mut inline_reg_map = HashMap::new();
let mut inline_ops = Vec::new();
for AsmArg { name, initializer } in asm_args {
assert_or_warn!(
ConstantRegister::parse_register_name(name.as_str()).is_none(),
warnings,
name.span().clone(),
Warning::ShadowingReservedRegister {
reg_name: name.clone()
}
);
let arg_reg = initializer
.map(|init_val| self.value_to_register(&init_val))
.unwrap_or_else(|| self.reg_seqr.next());
inline_reg_map.insert(name.as_str(), arg_reg);
}
let realize_register = |reg_name: &str| {
inline_reg_map.get(reg_name).cloned().or_else(|| {
ConstantRegister::parse_register_name(reg_name).map(&VirtualRegister::Constant)
})
};
// For each opcode in the asm expression, attempt to parse it into an opcode and
// replace references to the above registers with the newly allocated ones.
let asm_block = &self.context.asm_blocks[asm.0];
for op in &asm_block.body {
let replaced_registers = op
.args
.iter()
.map(|reg_name| -> Result<_, CompileError> {
realize_register(reg_name.as_str()).ok_or_else(|| {
CompileError::UnknownRegister {
span: reg_name.span().clone(),
initialized_registers: inline_reg_map
.iter()
.map(|(name, _)| *name)
.collect::<Vec<_>>()
.join("\n"),
}
})
})
.filter_map(|res| match res {
Err(e) => {
errors.push(e);
None
}
Ok(o) => Some(o),
})
.collect::<Vec<VirtualRegister>>();
// Parse the actual op and registers.
let op_span = match op.span_md_idx {
None => {
// XXX This sucks. We have two options: not needing a span to parse the opcode
// (which is used for the error) or force a span from the IR somehow, maybe by
// using a .ir file? OK, we have a third and best option: do the parsing of
// asm blocks in the parser itself, so we can verify them all the way back then
// and not have to worry about them being malformed all the way down here in
// codegen.
Self::empty_span()
}
Some(span_md_idx) => match span_md_idx.to_span(self.context) {
Ok(span) => span,
Err(ir_error) => {
errors.push(CompileError::InternalOwned(
ir_error.to_string(),
instr_val
.get_span(self.context)
.unwrap_or_else(Self::empty_span),
));
return err(warnings, errors);
}
},
};
let opcode = check!(
Op::parse_opcode(
&op.name,
&replaced_registers,
&op.immediate,
op_span.clone(),
),
return err(warnings, errors),
warnings,
errors
);
inline_ops.push(Op {
opcode: either::Either::Left(opcode),
comment: "asm block".into(),
owning_span: Some(op_span),
});
}
// Now, load the designated asm return register into the desired return register, but only
// if it was named.
if let Some(ret_reg_name) = &asm_block.return_name {
// Lookup and replace the return register.
let ret_reg = match realize_register(ret_reg_name.as_str()) {
Some(reg) => reg,
None => {
errors.push(CompileError::UnknownRegister {
initialized_registers: inline_reg_map
.iter()
.map(|(name, _)| name.to_string())
.collect::<Vec<_>>()
.join("\n"),
span: ret_reg_name.span().clone(),
});
return err(warnings, errors);
}
};
let instr_reg = self.reg_seqr.next();
inline_ops.push(Op {
opcode: Either::Left(VirtualOp::MOVE(instr_reg.clone(), ret_reg)),
comment: "return value from inline asm".into(),
owning_span: instr_val.get_span(self.context),
});
self.reg_map.insert(*instr_val, instr_reg);
}
self.bytecode.append(&mut inline_ops);
ok((), warnings, errors)
}
fn compile_branch(&mut self, from_block: &Block, to_block: &Block) {
self.compile_branch_to_phi_value(from_block, to_block);
let label = self.block_to_label(to_block);
self.bytecode.push(Op::jump_to_label(label));
}
fn compile_conditional_branch(
&mut self,
cond_value: &Value,
from_block: &Block,
true_block: &Block,
false_block: &Block,
) {
self.compile_branch_to_phi_value(from_block, true_block);
self.compile_branch_to_phi_value(from_block, false_block);
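        // Branch to the false block when the condition register doesn't hold `one`; otherwise
        // fall through to the unconditional jump to the true block below.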
let cond_reg = self.value_to_register(cond_value);
let false_label = self.block_to_label(false_block);
self.bytecode.push(Op::jump_if_not_equal(
cond_reg,
VirtualRegister::Constant(ConstantRegister::One),
false_label,
));
let true_label = self.block_to_label(true_block);
self.bytecode.push(Op::jump_to_label(true_label));
}
fn compile_branch_to_phi_value(&mut self, from_block: &Block, to_block: &Block) {
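        // Phi values are lowered by copying the incoming value into the phi's register in the
        // predecessor block, just before the branch instruction.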
if let Some(local_val) = to_block.get_phi_val_coming_from(self.context, from_block) {
let local_reg = self.value_to_register(&local_val);
let phi_reg = self.value_to_register(&to_block.get_phi(self.context));
self.bytecode
.push(Op::unowned_register_move(phi_reg, local_reg));
}
}
fn compile_extract_element(
&mut self,
instr_val: &Value,
array: &Value,
ty: &Aggregate,
index_val: &Value,
) {
        // Base register should point to some stack allocated memory.
let base_reg = self.value_to_register(array);
        // Index value is the array element index, not a byte or word offset.
let index_reg = self.value_to_register(index_val);
// We could put the OOB check here, though I'm now thinking it would be too wasteful.
// See compile_bounds_assertion() in expression/array.rs (or look in Git history).
let instr_reg = self.reg_seqr.next();
let elem_size = self.ir_type_size_in_bytes(&ty.get_elem_type(self.context).unwrap());
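        // Elements which fit in a single word are loaded by value; larger elements are instead
        // referenced by their computed address (the `else` branch below).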
if elem_size <= 8 {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MULI(
index_reg.clone(),
index_reg.clone(),
VirtualImmediate12 { value: 8 },
)),
comment: "extract_element relative offset".into(),
owning_span: instr_val.get_span(self.context),
});
let elem_offs_reg = self.reg_seqr.next();
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::ADD(elem_offs_reg.clone(), base_reg, index_reg)),
comment: "extract_element absolute offset".into(),
owning_span: instr_val.get_span(self.context),
});
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::LW(
instr_reg.clone(),
elem_offs_reg,
VirtualImmediate12 { value: 0 },
)),
comment: "extract_element".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
// Value too big for a register, so we return the memory offset.
if elem_size > crate::asm_generation::compiler_constants::TWELVE_BITS {
let size_data_id = self
.data_section
.insert_data_value(&Literal::U64(elem_size));
let size_reg = self.reg_seqr.next();
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)),
owning_span: instr_val.get_span(self.context),
comment: "loading element size for relative offset".into(),
});
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MUL(instr_reg.clone(), index_reg, size_reg)),
comment: "extract_element relative offset".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MULI(
instr_reg.clone(),
index_reg,
VirtualImmediate12 {
value: elem_size as u16,
},
)),
comment: "extract_element relative offset".into(),
owning_span: instr_val.get_span(self.context),
});
}
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::ADD(
instr_reg.clone(),
base_reg,
instr_reg.clone(),
)),
comment: "extract_element absolute offset".into(),
owning_span: instr_val.get_span(self.context),
});
}
self.reg_map.insert(*instr_val, instr_reg);
}
fn compile_extract_value(
&mut self,
instr_val: &Value,
aggregate: &Value,
ty: &Aggregate,
indices: &[u64],
) {
        // Base register should point to some stack allocated memory.
let base_reg = self.value_to_register(aggregate);
let (extract_offset, value_size) = self.aggregate_idcs_to_field_layout(ty, indices);
let instr_reg = self.reg_seqr.next();
if value_size <= 8 {
if extract_offset > crate::asm_generation::compiler_constants::TWELVE_BITS {
let offset_reg = self.reg_seqr.next();
self.number_to_reg(
extract_offset,
&offset_reg,
instr_val.get_span(self.context),
);
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::ADD(
offset_reg.clone(),
base_reg.clone(),
                        offset_reg.clone(),
)),
comment: "add array base to offset".into(),
owning_span: instr_val.get_span(self.context),
});
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::LW(
instr_reg.clone(),
offset_reg,
VirtualImmediate12 { value: 0 },
)),
comment: format!(
"extract_value @ {}",
indices
.iter()
.map(|idx| format!("{}", idx))
.collect::<Vec<String>>()
.join(",")
),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::LW(
instr_reg.clone(),
base_reg,
VirtualImmediate12 {
value: extract_offset as u16,
},
)),
comment: format!(
"extract_value @ {}",
indices
.iter()
.map(|idx| format!("{}", idx))
.collect::<Vec<String>>()
.join(",")
),
owning_span: instr_val.get_span(self.context),
});
}
} else {
// Value too big for a register, so we return the memory offset.
if extract_offset * 8 > crate::asm_generation::compiler_constants::TWELVE_BITS {
let offset_reg = self.reg_seqr.next();
self.number_to_reg(
extract_offset * 8,
&offset_reg,
instr_val.get_span(self.context),
);
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADD(
instr_reg.clone(),
base_reg,
offset_reg,
)),
comment: "extract address".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADDI(
instr_reg.clone(),
base_reg,
VirtualImmediate12 {
value: (extract_offset * 8) as u16,
},
)),
comment: "extract address".into(),
owning_span: instr_val.get_span(self.context),
});
}
}
self.reg_map.insert(*instr_val, instr_reg);
}
fn compile_get_pointer(&mut self, instr_val: &Value, ptr: &Pointer) {
// `get_ptr` is like a `load` except the value isn't dereferenced.
match self.ptr_map.get(ptr) {
None => unimplemented!("BUG? Uninitialised pointer."),
Some(storage) => match storage.clone() {
Storage::Data(_data_id) => {
// Not sure if we'll ever need this.
unimplemented!("TODO get_ptr() into the data section.");
}
Storage::Register(var_reg) => {
self.reg_map.insert(*instr_val, var_reg);
}
Storage::Stack(word_offs) => {
let word_offs = word_offs * 8;
let instr_reg = self.reg_seqr.next();
                    if word_offs > crate::asm_generation::compiler_constants::TWELVE_BITS {
                        self.number_to_reg(word_offs, &instr_reg, instr_val.get_span(self.context));
                        self.bytecode.push(Op {
                            opcode: either::Either::Left(VirtualOp::ADD(
                                instr_reg.clone(),
                                self.stack_base_reg.as_ref().unwrap().clone(),
                                instr_reg.clone(),
                            )),
                            comment: "get_ptr".into(),
                            owning_span: instr_val.get_span(self.context),
                        });
} else {
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADDI(
instr_reg.clone(),
self.stack_base_reg.as_ref().unwrap().clone(),
VirtualImmediate12 {
value: (word_offs) as u16,
},
)),
comment: "get_ptr".into(),
owning_span: instr_val.get_span(self.context),
});
}
self.reg_map.insert(*instr_val, instr_reg);
}
},
}
}
fn compile_insert_element(
&mut self,
instr_val: &Value,
array: &Value,
ty: &Aggregate,
value: &Value,
index_val: &Value,
) {
// Base register should point to some stack allocated memory.
let base_reg = self.value_to_register(array);
let insert_reg = self.value_to_register(value);
        // Index value is the array element index, not a byte or word offset.
let index_reg = self.value_to_register(index_val);
let elem_size = self.ir_type_size_in_bytes(&ty.get_elem_type(self.context).unwrap());
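        // Word sized elements are stored directly with SW; larger elements are copied into the
        // array slot bytewise in the `else` branch below.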
if elem_size <= 8 {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MULI(
index_reg.clone(),
index_reg.clone(),
VirtualImmediate12 { value: 8 },
)),
comment: "insert_element relative offset".into(),
owning_span: instr_val.get_span(self.context),
});
let elem_offs_reg = self.reg_seqr.next();
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::ADD(
elem_offs_reg.clone(),
base_reg.clone(),
index_reg,
)),
comment: "insert_element absolute offset".into(),
owning_span: instr_val.get_span(self.context),
});
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::SW(
elem_offs_reg,
insert_reg,
VirtualImmediate12 { value: 0 },
)),
comment: "insert_element".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
// Element size is larger than 8; we switch to bytewise offsets and sizes and use MCP.
if elem_size > crate::asm_generation::compiler_constants::TWELVE_BITS {
todo!("array element size bigger than 4k")
} else {
let elem_index_offs_reg = self.reg_seqr.next();
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MULI(
elem_index_offs_reg.clone(),
index_reg,
VirtualImmediate12 {
value: elem_size as u16,
},
)),
comment: "insert_element relative offset".into(),
owning_span: instr_val.get_span(self.context),
});
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::ADD(
elem_index_offs_reg.clone(),
base_reg.clone(),
elem_index_offs_reg.clone(),
)),
comment: "insert_element absolute offset".into(),
owning_span: instr_val.get_span(self.context),
});
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MCPI(
elem_index_offs_reg,
insert_reg,
VirtualImmediate12 {
value: elem_size as u16,
},
)),
comment: "insert_element store value".into(),
owning_span: instr_val.get_span(self.context),
});
}
}
// We set the 'instruction' register to the base register, so that cascading inserts will
// work.
self.reg_map.insert(*instr_val, base_reg);
}
fn compile_insert_value(
&mut self,
instr_val: &Value,
aggregate: &Value,
ty: &Aggregate,
value: &Value,
indices: &[u64],
) {
// Base register should point to some stack allocated memory.
let base_reg = self.value_to_register(aggregate);
let insert_reg = self.value_to_register(value);
let (insert_offs, value_size) = self.aggregate_idcs_to_field_layout(ty, indices);
let indices_str = indices
.iter()
.map(|idx| format!("{}", idx))
.collect::<Vec<String>>()
.join(",");
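        // Word sized fields are stored directly with SW; larger fields are copied bytewise from
        // the value into the field's offset within the aggregate.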
if value_size <= 8 {
if insert_offs > crate::asm_generation::compiler_constants::TWELVE_BITS {
let insert_offs_reg = self.reg_seqr.next();
self.number_to_reg(
insert_offs,
&insert_offs_reg,
instr_val.get_span(self.context),
);
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::ADD(
base_reg.clone(),
base_reg.clone(),
insert_offs_reg,
)),
comment: "insert_value absolute offset".into(),
owning_span: instr_val.get_span(self.context),
});
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::SW(
base_reg.clone(),
insert_reg,
VirtualImmediate12 { value: 0 },
)),
comment: format!("insert_value @ {}", indices_str),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::SW(
base_reg.clone(),
insert_reg,
VirtualImmediate12 {
value: insert_offs as u16,
},
)),
comment: format!("insert_value @ {}", indices_str),
owning_span: instr_val.get_span(self.context),
});
}
} else {
let offs_reg = self.reg_seqr.next();
if insert_offs * 8 > crate::asm_generation::compiler_constants::TWELVE_BITS {
self.number_to_reg(insert_offs * 8, &offs_reg, instr_val.get_span(self.context));
} else {
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADDI(
offs_reg.clone(),
base_reg.clone(),
VirtualImmediate12 {
value: (insert_offs * 8) as u16,
},
)),
comment: format!("get struct field(s) {} offset", indices_str),
owning_span: instr_val.get_span(self.context),
});
}
if value_size > crate::asm_generation::compiler_constants::TWELVE_BITS {
let size_reg = self.reg_seqr.next();
self.number_to_reg(value_size, &size_reg, instr_val.get_span(self.context));
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MCP(offs_reg, insert_reg, size_reg)),
comment: "store struct field value".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MCPI(
offs_reg,
insert_reg,
VirtualImmediate12 {
value: value_size as u16,
},
)),
comment: "store struct field value".into(),
owning_span: instr_val.get_span(self.context),
});
}
}
// We set the 'instruction' register to the base register, so that cascading inserts will
// work.
self.reg_map.insert(*instr_val, base_reg);
}
fn compile_load(&mut self, instr_val: &Value, ptr: &Pointer) {
let load_size_in_words =
size_bytes_in_words!(self.ir_type_size_in_bytes(ptr.get_type(self.context)));
let instr_reg = self.reg_seqr.next();
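        // For stack allocated values: word sized values are loaded directly, while larger
        // values resolve to the address of their slot (see the Storage::Stack arm below).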
match self.ptr_map.get(ptr) {
None => unimplemented!("BUG? Uninitialised pointer."),
Some(storage) => match storage.clone() {
Storage::Data(data_id) => {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::LWDataId(instr_reg.clone(), data_id)),
comment: "load constant".into(),
owning_span: instr_val.get_span(self.context),
});
}
Storage::Register(var_reg) => {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MOVE(instr_reg.clone(), var_reg)),
comment: String::new(),
owning_span: instr_val.get_span(self.context),
});
}
Storage::Stack(word_offs) => {
let base_reg = self.stack_base_reg.as_ref().unwrap().clone();
// XXX Need to check for zero sized types?
if load_size_in_words == 1 {
// Value can fit in a register, so we load the value.
if word_offs > crate::asm_generation::compiler_constants::TWELVE_BITS {
                            let offs_reg = self.reg_seqr.next();
                            self.number_to_reg(
                                word_offs,
                                &offs_reg,
                                instr_val.get_span(self.context),
                            );
                            self.bytecode.push(Op {
                                opcode: Either::Left(VirtualOp::ADD(
                                    offs_reg.clone(),
                                    base_reg,
                                    offs_reg.clone(),
                                )),
                                comment: "load absolute offset".into(),
                                owning_span: instr_val.get_span(self.context),
                            });
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::LW(
instr_reg.clone(),
offs_reg,
VirtualImmediate12 { value: 0 },
)),
comment: "load value".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::LW(
instr_reg.clone(),
base_reg,
VirtualImmediate12 {
value: word_offs as u16,
},
)),
comment: "load value".into(),
owning_span: instr_val.get_span(self.context),
});
}
} else {
// Value too big for a register, so we return the memory offset. This is
// what LW to the data section does, via LWDataId.
let word_offs = word_offs * 8;
if word_offs > crate::asm_generation::compiler_constants::TWELVE_BITS {
let offs_reg = self.reg_seqr.next();
self.number_to_reg(
word_offs,
&offs_reg,
instr_val.get_span(self.context),
);
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADD(
instr_reg.clone(),
base_reg,
offs_reg,
)),
comment: "load address".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADDI(
instr_reg.clone(),
base_reg,
VirtualImmediate12 {
value: word_offs as u16,
},
)),
comment: "load address".into(),
owning_span: instr_val.get_span(self.context),
});
}
}
}
},
}
self.reg_map.insert(*instr_val, instr_reg);
}
fn compile_ret(&mut self, instr_val: &Value, ret_val: &Value, ret_type: &Type) {
if ret_type == &Type::Unit {
// Unit returns should always be zero, although because they can be omitted from
// functions, the register is sometimes uninitialized. Manually return zero in this
// case.
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::RET(VirtualRegister::Constant(
ConstantRegister::Zero,
))),
owning_span: instr_val.get_span(self.context),
comment: "returning unit as zero".into(),
});
} else {
let ret_reg = self.value_to_register(ret_val);
let size_in_bytes = self.ir_type_size_in_bytes(ret_type);
if size_in_bytes <= 8 {
self.bytecode.push(Op {
owning_span: instr_val.get_span(self.context),
opcode: Either::Left(VirtualOp::RET(ret_reg)),
comment: "".into(),
});
} else {
// If the type is larger than one word, then we use RETD to return data. First put
// the size into the data section, then add a LW to get it, then add a RETD which
// uses it.
let size_reg = self.reg_seqr.next();
let size_data_id = self
.data_section
.insert_data_value(&Literal::U64(size_in_bytes));
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)),
owning_span: instr_val.get_span(self.context),
comment: "loading size for RETD".into(),
});
self.bytecode.push(Op {
owning_span: instr_val.get_span(self.context),
opcode: Either::Left(VirtualOp::RETD(ret_reg, size_reg)),
comment: "".into(),
});
}
}
}
fn compile_store(&mut self, instr_val: &Value, ptr: &Pointer, stored_val: &Value) {
let stored_reg = self.value_to_register(stored_val);
let is_struct_ptr = ptr.is_struct_ptr(self.context);
match self.ptr_map.get(ptr) {
None => unreachable!("Bug! Trying to store to an unknown pointer."),
Some(storage) => match storage {
Storage::Data(_) => unreachable!("BUG! Trying to store to the data section."),
Storage::Register(reg) => {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MOVE(reg.clone(), stored_reg)),
comment: String::new(),
owning_span: instr_val.get_span(self.context),
});
}
Storage::Stack(word_offs) => {
let word_offs = *word_offs;
let store_size_in_words = size_bytes_in_words!(
self.ir_type_size_in_bytes(ptr.get_type(self.context))
);
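                    // Dispatch on the stored size: zero sized values are a no-op, single words
                    // use SW, and anything larger is copied with MCP/MCPI.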
match store_size_in_words {
// We can have empty sized types which we can ignore.
0 => (),
1 => {
let base_reg = self.stack_base_reg.as_ref().unwrap().clone();
// A single word can be stored with SW.
let stored_reg = if !is_struct_ptr {
// stored_reg is a value.
stored_reg
} else {
// stored_reg is a pointer, even though size is 1. We need to load it.
let tmp_reg = self.reg_seqr.next();
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::LW(
tmp_reg.clone(),
stored_reg,
VirtualImmediate12 { value: 0 },
)),
comment: "load for store".into(),
owning_span: instr_val.get_span(self.context),
});
tmp_reg
};
if word_offs > crate::asm_generation::compiler_constants::TWELVE_BITS {
let offs_reg = self.reg_seqr.next();
self.number_to_reg(
word_offs,
&offs_reg,
instr_val.get_span(self.context),
);
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::ADD(
                                        offs_reg.clone(),
                                        base_reg,
                                        offs_reg.clone(),
)),
comment: "store absolute offset".into(),
owning_span: instr_val.get_span(self.context),
});
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::SW(
offs_reg,
stored_reg,
VirtualImmediate12 { value: 0 },
)),
comment: "store value".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::SW(
base_reg,
stored_reg,
VirtualImmediate12 {
value: word_offs as u16,
},
)),
comment: "store value".into(),
owning_span: instr_val.get_span(self.context),
});
}
}
_ => {
let base_reg = self.stack_base_reg.as_ref().unwrap().clone();
// Bigger than 1 word needs a MCPI. XXX Or MCP if it's huge.
let dest_offs_reg = self.reg_seqr.next();
if word_offs * 8
> crate::asm_generation::compiler_constants::TWELVE_BITS
{
self.number_to_reg(
word_offs * 8,
&dest_offs_reg,
instr_val.get_span(self.context),
);
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADD(
dest_offs_reg.clone(),
base_reg,
dest_offs_reg.clone(),
)),
comment: "get store offset".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADDI(
dest_offs_reg.clone(),
base_reg,
VirtualImmediate12 {
value: (word_offs * 8) as u16,
},
)),
comment: "get store offset".into(),
owning_span: instr_val.get_span(self.context),
});
}
if store_size_in_words * 8
> crate::asm_generation::compiler_constants::TWELVE_BITS
{
let size_reg = self.reg_seqr.next();
self.number_to_reg(
store_size_in_words * 8,
&size_reg,
instr_val.get_span(self.context),
);
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MCP(
dest_offs_reg,
stored_reg,
size_reg,
)),
comment: "store value".into(),
owning_span: instr_val.get_span(self.context),
});
} else {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MCPI(
dest_offs_reg,
stored_reg,
VirtualImmediate12 {
value: (store_size_in_words * 8) as u16,
},
)),
comment: "store value".into(),
owning_span: instr_val.get_span(self.context),
});
}
}
}
}
},
};
}
fn value_to_register(&mut self, value: &Value) -> VirtualRegister {
match self.reg_map.get(value) {
Some(reg) => reg.clone(),
None => {
match &self.context.values[value.0].value {
// Handle constants.
ValueDatum::Constant(constant) => {
match &constant.value {
ConstantValue::Struct(_) | ConstantValue::Array(_) => {
// A constant struct or array. We still allocate space for it on
// the stack, but create the field or element initialisers
// recursively.
// Get the total size.
let total_size = size_bytes_round_up_to_word_alignment!(
self.constant_size_in_bytes(constant)
);
if total_size
> crate::asm_generation::compiler_constants::TWENTY_FOUR_BITS
{
todo!("Enormous stack usage for locals.");
}
let start_reg = self.reg_seqr.next();
// We can have zero sized structs and maybe arrays?
if total_size > 0 {
// Save the stack pointer.
self.bytecode.push(Op::unowned_register_move_comment(
start_reg.clone(),
VirtualRegister::Constant(ConstantRegister::StackPointer),
"save register for temporary stack value",
));
let mut alloc_op =
Op::unowned_stack_allocate_memory(VirtualImmediate24 {
value: total_size as u32,
});
alloc_op.comment = format!(
"allocate {} bytes for temporary {}",
total_size,
if matches!(&constant.value, ConstantValue::Struct(_)) {
"struct"
} else {
"array"
},
);
self.bytecode.push(alloc_op);
// Fill in the fields.
self.initialise_constant_memory(
constant,
&start_reg,
0,
value.get_span(self.context),
);
}
// Return the start ptr.
start_reg
}
ConstantValue::Undef
| ConstantValue::Unit
| ConstantValue::Bool(_)
| ConstantValue::Uint(_)
| ConstantValue::B256(_)
| ConstantValue::String(_) => {
// Get the constant into the namespace.
let lit = ir_constant_to_ast_literal(constant);
let data_id = self.data_section.insert_data_value(&lit);
// Allocate a register for it, and a load instruction.
let reg = self.reg_seqr.next();
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::LWDataId(
reg.clone(),
data_id,
)),
comment: "literal instantiation".into(),
owning_span: value.get_span(self.context),
});
// Insert the value into the map.
//self.reg_map.insert(*value, reg.clone());
//
// Actually, no, don't. It's possible for constant values to be
// reused in the IR, especially with transforms which copy blocks
// around, like inlining. The `LW`/`LWDataId` instruction above
// initialises that constant value but it may be in a conditional
// block and not actually get evaluated for every possible
// execution. So using the register later on by pulling it from
// `self.reg_map` will have a potentially uninitialised register.
//
// By not putting it in the map we recreate the `LW` each time it's
// used, which also isn't ideal. A better solution is to put this
// initialisation into the IR itself, and allow for analysis there
// to determine when it may be initialised and/or reused.
// Return register.
reg
}
}
}
_otherwise => {
// Just make a new register for this value.
let reg = self.reg_seqr.next();
self.reg_map.insert(*value, reg.clone());
reg
}
}
}
}
}
fn number_to_reg(&mut self, offset: u64, offset_reg: &VirtualRegister, span: Option<Span>) {
if offset > crate::asm_generation::compiler_constants::TWENTY_FOUR_BITS {
todo!("Absolutely giant arrays.");
}
        // Use bitwise ORs and SHIFTs to create a 24-bit value in a register.
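        // For example (hypothetical offset 0x12345): the first ORI puts the high bits 0x12 in
        // the register, SLLI by 12 shifts them to 0x12000, and the final ORI merges in the low
        // bits 0x345, yielding 0x12345.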
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ORI(
offset_reg.clone(),
VirtualRegister::Constant(ConstantRegister::Zero),
VirtualImmediate12 {
value: (offset >> 12) as u16,
},
)),
comment: "get extract offset high bits".into(),
owning_span: span.clone(),
});
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::SLLI(
offset_reg.clone(),
offset_reg.clone(),
VirtualImmediate12 { value: 12 },
)),
comment: "shift extract offset high bits".into(),
owning_span: span.clone(),
});
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ORI(
offset_reg.clone(),
offset_reg.clone(),
VirtualImmediate12 {
value: (offset & 0xfff) as u16,
},
)),
comment: "get extract offset low bits".into(),
owning_span: span,
});
}
fn constant_size_in_bytes(&mut self, constant: &Constant) -> u64 {
match &constant.value {
ConstantValue::Undef => self.ir_type_size_in_bytes(&constant.ty),
ConstantValue::Unit => 8,
ConstantValue::Bool(_) => 8,
ConstantValue::Uint(_) => 8,
ConstantValue::B256(_) => 32,
ConstantValue::String(s) => s.len() as u64, // String::len() returns the byte size, not char count.
ConstantValue::Array(elems) => {
if elems.is_empty() {
0
} else {
self.constant_size_in_bytes(&elems[0]) * elems.len() as u64
}
}
ConstantValue::Struct(fields) => fields
.iter()
.fold(0, |acc, field| acc + self.constant_size_in_bytes(field)),
}
}
fn initialise_constant_memory(
&mut self,
constant: &Constant,
start_reg: &VirtualRegister,
offs_in_words: u64,
span: Option<Span>,
) -> u64 {
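        // Recursively write `constant` into the stack region based at `start_reg`, starting
        // `offs_in_words` words in, and return the size of the written field in words.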
match &constant.value {
ConstantValue::Undef => {
// We don't need to actually create an initialiser, but we do need to return the
// field size in words.
size_bytes_in_words!(self.ir_type_size_in_bytes(&constant.ty))
}
ConstantValue::Unit
| ConstantValue::Bool(_)
| ConstantValue::Uint(_)
| ConstantValue::B256(_) => {
// Get the constant into the namespace.
let lit = ir_constant_to_ast_literal(constant);
let data_id = self.data_section.insert_data_value(&lit);
// Load the initialiser value.
let init_reg = self.reg_seqr.next();
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::LWDataId(init_reg.clone(), data_id)),
comment: "literal instantiation for aggregate field".into(),
owning_span: span.clone(),
});
// Write the initialiser to memory. Most Literals are 1 word, B256 is 32 bytes and
// needs to use a MCP instruction.
if matches!(lit, Literal::B256(_)) {
let offs_reg = self.reg_seqr.next();
if offs_in_words * 8 > crate::asm_generation::compiler_constants::TWELVE_BITS {
self.number_to_reg(offs_in_words * 8, &offs_reg, span.clone());
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADD(
offs_reg.clone(),
start_reg.clone(),
offs_reg.clone(),
)),
comment: "calculate byte offset to aggregate field".into(),
owning_span: span.clone(),
});
} else {
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADDI(
offs_reg.clone(),
start_reg.clone(),
VirtualImmediate12 {
value: (offs_in_words * 8) as u16,
},
)),
comment: "calculate byte offset to aggregate field".into(),
owning_span: span.clone(),
});
}
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::MCPI(
offs_reg,
init_reg,
VirtualImmediate12 { value: 32 },
)),
comment: "initialise aggregate field".into(),
owning_span: span,
});
4 // 32 bytes is 4 words.
} else {
if offs_in_words > crate::asm_generation::compiler_constants::TWELVE_BITS {
let offs_reg = self.reg_seqr.next();
self.number_to_reg(offs_in_words, &offs_reg, span.clone());
self.bytecode.push(Op {
opcode: either::Either::Left(VirtualOp::ADD(
start_reg.clone(),
start_reg.clone(),
offs_reg.clone(),
)),
comment: "calculate byte offset to aggregate field".into(),
owning_span: span.clone(),
});
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::SW(
start_reg.clone(),
init_reg,
VirtualImmediate12 { value: 0 },
)),
comment: "initialise aggregate field".into(),
owning_span: span,
});
} else {
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::SW(
start_reg.clone(),
init_reg,
VirtualImmediate12 {
value: offs_in_words as u16,
},
)),
comment: "initialise aggregate field".into(),
owning_span: span,
});
}
1
}
}
ConstantValue::String(_) => {
// These are still not properly implemented until we refactor for spans! There's
// an issue on GitHub for it.
self.bytecode.push(Op {
opcode: Either::Left(VirtualOp::NOOP),
comment: "strings aren't implemented!".into(),
owning_span: span,
});
0
}
ConstantValue::Array(items) | ConstantValue::Struct(items) => {
let mut cur_offs = offs_in_words;
for item in items {
let item_size =
self.initialise_constant_memory(item, start_reg, cur_offs, span.clone());
cur_offs += item_size;
}
                cur_offs - offs_in_words
}
}
}
fn block_to_label(&mut self, block: &Block) -> Label {
match self.label_map.get(block) {
Some(label) => label.clone(),
None => {
let label = self.reg_seqr.get_label();
self.label_map.insert(*block, label.clone());
label
}
}
}
// Aggregate size in bytes.
fn aggregate_size(&mut self, aggregate: &Aggregate) -> u64 {
self.analyze_aggregate(aggregate);
self.aggregate_layouts.get(aggregate).unwrap().0
}
// Size of largest aggregate field in bytes.
fn aggregate_max_field_size(&mut self, aggregate: &Aggregate) -> u64 {
self.analyze_aggregate(aggregate);
self.aggregate_layouts
.get(aggregate)
.unwrap()
.1
.iter()
.map(|layout| layout.size_in_bytes)
.max()
.unwrap_or(0)
}
// Aggregate (nested) field offset in words and size in bytes.
fn aggregate_idcs_to_field_layout(
&mut self,
aggregate: &Aggregate,
idcs: &[u64],
) -> (u64, u64) {
self.analyze_aggregate(aggregate);
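        // Walk the nested indices, accumulating the field's offset in words and carrying the
        // field type forward so that nested structs can be resolved at each step.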
idcs.iter()
.fold(
((0, 0), Type::Struct(*aggregate)),
|((offs, _), ty), idx| match ty {
Type::Struct(aggregate) => {
let agg_content = &self.context.aggregates[aggregate.0];
let field_type = agg_content.field_types()[*idx as usize];
let field_layout =
&self.aggregate_layouts.get(&aggregate).unwrap().1[*idx as usize];
(
(
offs + field_layout.offset_in_words,
field_layout.size_in_bytes,
),
field_type,
)
}
_otherwise => panic!("Attempt to access field in non-aggregate."),
},
)
.0
}
fn analyze_aggregate(&mut self, aggregate: &Aggregate) {
if self.aggregate_layouts.contains_key(aggregate) {
return;
}
match &self.context.aggregates[aggregate.0] {
AggregateContent::FieldTypes(field_types) => {
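                // Lay each field out at the next word aligned offset, recording its offset in
                // words and its size in bytes; the running total gives the aggregate size.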
let (total_in_words, offsets) =
field_types
.iter()
.fold((0, Vec::new()), |(cur_offset, mut layouts), ty| {
let field_size_in_bytes = self.ir_type_size_in_bytes(ty);
layouts.push(FieldLayout {
offset_in_words: cur_offset,
size_in_bytes: field_size_in_bytes,
});
(
cur_offset + size_bytes_in_words!(field_size_in_bytes),
layouts,
)
});
self.aggregate_layouts
.insert(*aggregate, (total_in_words * 8, offsets));
}
AggregateContent::ArrayType(el_type, count) => {
// Careful! We *could* wrap the aggregate in Type::Array and call
// ir_type_size_in_bytes() BUT we'd then enter a recursive loop.
let el_size = self.ir_type_size_in_bytes(el_type);
self.aggregate_layouts
.insert(*aggregate, (count * el_size, Vec::new()));
}
}
}
fn ir_type_size_in_bytes(&mut self, ty: &Type) -> u64 {
match ty {
Type::Unit | Type::Bool | Type::Uint(_) => 8,
Type::B256 => 32,
Type::String(n) => *n,
Type::Array(aggregate) | Type::Struct(aggregate) => {
self.analyze_aggregate(aggregate);
self.aggregate_size(aggregate)
}
Type::Union(aggregate) => {
self.analyze_aggregate(aggregate);
self.aggregate_max_field_size(aggregate)
}
Type::ContractCaller(_) => {
// We only store the address.
32
}
Type::Contract => {
unimplemented!("do contract/contract caller have/need a size?")
}
}
}
}
fn ir_constant_to_ast_literal(constant: &Constant) -> Literal {
match &constant.value {
ConstantValue::Undef => unreachable!("Cannot convert 'undef' to a literal."),
ConstantValue::Unit => Literal::U64(0), // No unit.
ConstantValue::Bool(b) => Literal::Boolean(*b),
ConstantValue::Uint(n) => Literal::U64(*n),
ConstantValue::B256(bs) => Literal::B256(*bs),
ConstantValue::String(_) => Literal::String(crate::span::Span {
span: pest::Span::new(
"STRINGS ARE UNIMPLEMENTED UNTIL WE REDO DATASECTION".into(),
0,
51,
)
.unwrap(),
path: None,
}),
ConstantValue::Array(_) => unimplemented!(),
ConstantValue::Struct(_) => unimplemented!(),
}
}
// -------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use sway_ir::parser::parse;
use std::path::PathBuf;
#[test]
fn ir_to_asm_tests() {
let manifest_dir = env!("CARGO_MANIFEST_DIR");
let dir: PathBuf = format!("{}/tests/ir_to_asm", manifest_dir).into();
for entry in std::fs::read_dir(dir).unwrap() {
            // We're only interested in the `.ir` files here.
let path = entry.unwrap().path();
match path.extension().unwrap().to_str() {
Some("ir") => {
//
// Run the tests!
//
println!("---- IR To ASM: {:?} ----", path);
test_ir_to_asm(path);
}
Some("asm") | Some("disabled") => (),
_ => panic!(
"File with invalid extension in tests dir: {:?}",
path.file_name().unwrap_or(path.as_os_str())
),
}
}
}
fn test_ir_to_asm(mut path: PathBuf) {
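        // Golden file test: compile the `.ir` input to ASM and compare it against the matching
        // `.asm` file, printing a line diff on mismatch.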
let input_bytes = std::fs::read(&path).unwrap();
let input = String::from_utf8_lossy(&input_bytes);
path.set_extension("asm");
let expected_bytes = std::fs::read(&path).unwrap();
let expected = String::from_utf8_lossy(&expected_bytes);
let ir = parse(&input).expect("parsed ir");
let asm_result = compile_ir_to_asm(
&ir,
&BuildConfig {
file_name: std::sync::Arc::new("".into()),
dir_of_code: std::sync::Arc::new("".into()),
manifest_path: std::sync::Arc::new("".into()),
use_ir: false,
print_intermediate_asm: false,
print_finalized_asm: false,
print_ir: false,
generated_names: std::sync::Arc::new(std::sync::Mutex::new(vec![])),
},
);
let mut warnings = Vec::new();
let mut errors = Vec::new();
let asm = asm_result.unwrap(&mut warnings, &mut errors);
assert!(warnings.is_empty() && errors.is_empty());
let asm_script = format!("{}", asm);
if asm_script != expected {
println!("{}", prettydiff::diff_lines(&expected, &asm_script));
panic!();
}
}
}
// =================================================================================================
| 43.320325 | 111 | 0.452931 |
fe60acb7c76079bdfaee9cca49bd9aa26c5e0c99 | 1,104 | #[doc = "Reader of register SM1_INSTR"]
pub type R = crate::R<u32, super::SM1_INSTR>;
#[doc = "Writer for register SM1_INSTR"]
pub type W = crate::W<u32, super::SM1_INSTR>;
#[doc = "Register SM1_INSTR `reset()`'s with value 0"]
impl crate::ResetValue for super::SM1_INSTR {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `SM1_INSTR`"]
pub type SM1_INSTR_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `SM1_INSTR`"]
pub struct SM1_INSTR_W<'a> {
w: &'a mut W,
}
impl<'a> SM1_INSTR_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
self.w
}
}
impl R {
#[doc = "Bits 0:15"]
#[inline(always)]
pub fn sm1_instr(&self) -> SM1_INSTR_R {
SM1_INSTR_R::new((self.bits & 0xffff) as u16)
}
}
impl W {
#[doc = "Bits 0:15"]
#[inline(always)]
pub fn sm1_instr(&mut self) -> SM1_INSTR_W {
SM1_INSTR_W { w: self }
}
}
| 26.926829 | 74 | 0.584239 |
e8f26b949bbd0b2bcba41c413fabbc1e43937008 | 1,606 | fn main() {
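    // Compute the same area three ways: with separate values, a tuple, and a `Rectangle` struct.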
let width1 = 30;
let height1 = 50;
println!(
"The area of the rectangle is {} square pixels.",
area(width1, height1)
);
let rect = (30, 50);
println!(
"The area of the rectangle is {} square pixels.",
area_tuple(rect)
);
let rect1 = Rectangle {
width: 30,
height: 50,
};
println!(
"The area of the rectangle is {:#?} square pixels.",
area_rectangle(&rect1)
);
println!(
"The area of the rectangle is {:#?} square pixels.",
rect1.area()
);
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
let square = Rectangle::square(3);
println!("Square area is {}", square.area());
}
fn area(width: u32, height: u32) -> u32 {
width * height
}
fn area_tuple(dimensions: (u32, u32)) -> u32 {
dimensions.0 * dimensions.1
}
fn area_rectangle(rectangle: &Rectangle) -> u32 {
rectangle.width * rectangle.height
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
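    /// Computes the rectangle's area in square pixels.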
fn area(&self) -> u32 {
self.width * self.height
}
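    /// Returns true if `other` fits entirely within `self`.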
fn can_hold(&self, other: &Rectangle) -> bool {
other.width < self.width && other.height < self.height
}
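    /// Associated function: creates a square `Rectangle` with the given side length.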
fn square(size: u32) -> Rectangle {
Rectangle {
width: size,
height: size,
}
}
}
| 20.589744 | 65 | 0.541096 |