hexsha (stringlengths 40-40) | size (int64 4-1.05M) | content (stringlengths 4-1.05M) | avg_line_length (float64 1.33-100) | max_line_length (int64 1-1k) | alphanum_fraction (float64 0.25-1) |
---|---|---|---|---|---
29f0b3e7b7d92b77a73bbebe85ed22d382436472
| 3,906 |
use std::sync::{
atomic::{AtomicU32, Ordering},
Arc,
};
use super::WeakTopology;
use crate::{
cmap::{options::ConnectionPoolOptions, ConnectionPool},
error::Error,
options::{ClientOptions, ServerAddress},
runtime::{AcknowledgedMessage, HttpClient},
sdam::monitor::Monitor,
};
/// Contains the state for a given server in the topology.
#[derive(Debug)]
pub(crate) struct Server {
pub(crate) address: ServerAddress,
/// The connection pool for the server.
pub(crate) pool: ConnectionPool,
/// Number of operations currently using this server.
operation_count: AtomicU32,
}
impl Server {
#[cfg(test)]
pub(crate) fn new_mocked(address: ServerAddress, operation_count: u32) -> Self {
Self {
address: address.clone(),
pool: ConnectionPool::new_mocked(address),
operation_count: AtomicU32::new(operation_count),
}
}
/// Create a new reference counted `Server` instance and a `Monitor` for that server.
/// The monitor is not started as part of this; call `Monitor::execute` to start it.
pub(crate) fn create(
address: ServerAddress,
options: &ClientOptions,
topology: WeakTopology,
http_client: HttpClient,
) -> (Arc<Self>, Monitor) {
let (update_sender, update_receiver) = ServerUpdateSender::channel();
let server = Arc::new(Self {
pool: ConnectionPool::new(
address.clone(),
http_client,
update_sender,
Some(ConnectionPoolOptions::from_client_options(options)),
),
address: address.clone(),
operation_count: AtomicU32::new(0),
});
let monitor = Monitor::new(address, &server, topology, options.clone(), update_receiver);
(server, monitor)
}
pub(crate) fn increment_operation_count(&self) {
self.operation_count.fetch_add(1, Ordering::SeqCst);
}
pub(crate) fn decrement_operation_count(&self) {
self.operation_count.fetch_sub(1, Ordering::SeqCst);
}
pub(crate) fn operation_count(&self) -> u32 {
self.operation_count.load(Ordering::SeqCst)
}
}
/// An event that could update the topology's view of a server.
/// TODO: add success cases from application handshakes.
#[derive(Debug)]
pub(crate) enum ServerUpdate {
Error { error: Error, error_generation: u32 },
}
#[derive(Debug)]
pub(crate) struct ServerUpdateReceiver {
receiver: tokio::sync::mpsc::Receiver<AcknowledgedMessage<ServerUpdate>>,
}
impl ServerUpdateReceiver {
pub(crate) async fn recv(&mut self) -> Option<AcknowledgedMessage<ServerUpdate>> {
self.receiver.recv().await
}
}
/// Struct used to update the topology's view of a given server.
#[derive(Clone, Debug)]
pub(crate) struct ServerUpdateSender {
sender: tokio::sync::mpsc::Sender<AcknowledgedMessage<ServerUpdate>>,
}
impl ServerUpdateSender {
/// Create a new sender/receiver pair.
pub(crate) fn channel() -> (Self, ServerUpdateReceiver) {
let (sender, receiver) = tokio::sync::mpsc::channel(1);
(
ServerUpdateSender { sender },
ServerUpdateReceiver { receiver },
)
}
/// Update the server based on the given error.
/// This will block until the topology has processed the error.
pub(crate) async fn handle_error(&mut self, error: Error, error_generation: u32) {
let reason = ServerUpdate::Error {
error,
error_generation,
};
let (message, callback) = AcknowledgedMessage::package(reason);
        // This only fails if the other end hangs up, which means the monitor is
        // stopped, so we can just discard this update.
let _: std::result::Result<_, _> = self.sender.send(message).await;
callback.wait_for_acknowledgment().await;
}
}
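// A minimal, self-contained sketch (not part of the original file) of the
// acknowledge-on-receipt pattern that `AcknowledgedMessage` appears to implement:
// the payload travels over an mpsc channel together with a oneshot sender that
// the receiving side fires once it has processed the message. The names
// `AckedMessage` and `send_acked` are hypothetical; `#[tokio::test]` assumes
// tokio's `rt` and `macros` features are enabled.
#[cfg(test)]
mod acked_message_sketch {
    use tokio::sync::{mpsc, oneshot};

    struct AckedMessage<T> {
        payload: T,
        ack: oneshot::Sender<()>,
    }

    /// Send a payload and wait until the receiver acknowledges processing it.
    async fn send_acked<T>(tx: &mpsc::Sender<AckedMessage<T>>, payload: T) {
        let (ack, acked) = oneshot::channel();
        if tx.send(AckedMessage { payload, ack }).await.is_err() {
            // The receiver hung up; nobody is left to acknowledge.
            return;
        }
        // Block until the receiving side signals that it processed the message.
        let _ = acked.await;
    }

    #[tokio::test]
    async fn acknowledgment_unblocks_sender() {
        let (tx, mut rx) = mpsc::channel::<AckedMessage<u32>>(1);
        let receiver = tokio::spawn(async move {
            let msg = rx.recv().await.expect("expected one message");
            assert_eq!(msg.payload, 7);
            let _ = msg.ack.send(());
        });
        send_acked(&tx, 7).await;
        receiver.await.unwrap();
    }
}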
| 32.016393 | 97 | 0.643625 |
8f32df50a56737999c2c4bb1e430800436d31540
| 166 |
use core::alloc::Alloc;
pub unsafe trait StaticAlloc: Alloc + Sync {
unsafe fn static_ref() -> &'static Self;
unsafe fn static_mut() -> &'static mut Self;
}
| 23.714286 | 48 | 0.662651 |
2fda7acb6f4616c76cb1ca79e5e1f4f01b28bc24
| 1,411 |
use libp2p::core::muxing::StreamMuxerBox;
use libp2p::core::transport::boxed::Boxed;
use libp2p::core::transport::upgrade::Version;
use libp2p::core::upgrade::SelectUpgrade;
use libp2p::dns::DnsConfig;
use libp2p::identity;
use libp2p::mplex::MplexConfig;
use libp2p::noise::{self, NoiseConfig};
use libp2p::tcp::TokioTcpConfig;
use libp2p::yamux::Config as YamuxConfig;
use libp2p::{PeerId, Transport};
use std::io::{self, Error, ErrorKind};
use std::time::Duration;
/// Transport type.
pub(crate) type TTransport = Boxed<(PeerId, StreamMuxerBox), Error>;
/// Builds the transport that serves as a common ground for all connections.
///
/// Sets up an encrypted TCP transport, multiplexed with either Yamux or Mplex.
pub fn build_transport(keypair: identity::Keypair) -> io::Result<TTransport> {
let xx_keypair = noise::Keypair::<noise::X25519Spec>::new()
.into_authentic(&keypair)
.unwrap();
let noise_config = NoiseConfig::xx(xx_keypair).into_authenticated();
Ok(DnsConfig::new(TokioTcpConfig::new().nodelay(true))?
.upgrade(Version::V1)
.authenticate(noise_config)
.multiplex(SelectUpgrade::new(
YamuxConfig::default(),
MplexConfig::new(),
))
.timeout(Duration::from_secs(20))
.map(|(peer_id, muxer), _| (peer_id, StreamMuxerBox::new(muxer)))
.map_err(|err| Error::new(ErrorKind::Other, err))
.boxed())
}
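// Hedged usage sketch (not part of the original file): build the transport from
// a freshly generated ed25519 identity. The helper name `default_transport` is
// ours; it assumes the same libp2p version and features as the imports above.
#[allow(dead_code)]
pub(crate) fn default_transport() -> io::Result<TTransport> {
    // An ephemeral identity; real nodes would load or persist their keypair.
    let keypair = identity::Keypair::generate_ed25519();
    build_transport(keypair)
}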
| 36.179487 | 78 | 0.681786 |
18f0cf1232581c236cff885f4693bd66aee28744
| 25,442 |
//! This module is in charge of laying out the snippets in the timeline.
//!
//! The rough idea is: we lay out the snippets from top to bottom. At each step, we "drape" the new
//! snippet around the "contours" of the existing snippets. More precisely, we use a skyline data
//! structure to represent the existing snippets.
use druid::kurbo::{BezPath, Point, Rect, Vec2};
use std::collections::HashMap;
use std::hash::Hash;
use scribl_curves::{DrawSnippet, DrawSnippetId, Time};
use crate::audio::{TalkSnippet, TalkSnippetId};
/// An element of the skyline. We don't store the beginning -- it's the end of the previous
/// building.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Building {
y: f64,
end_x: f64,
}
/// A bare-bones skyline implementation. We don't store the starting x coord -- for the skyline that
/// represents the whole timeline, we always start at zero. For the skyline representing just the
/// new snippet that we're adding, we just keep track of the start separately.
#[derive(Clone, Debug, Default, PartialEq)]
struct Skyline {
buildings: Vec<Building>,
}
/// A collection of rectangles describing the layout of a single snippet in the timeline. These
/// rectangles will be increasing in the `x` coordinate, and usually overlapping a bit (depending
/// on the `overlap` parameter in `Parameters`). For example, they might look like
///
/// ```
/// +--------------------+
/// +----------------------+ |
/// +---------------------+ | | |
/// | | | +--------------------+
/// | | | |
/// | +----------------------+
/// | |
/// +---------------------+
/// ```
#[derive(Clone, Debug)]
pub struct SnippetShape {
pub rects: Vec<Rect>,
}
/// This is an intermediate data-type that we use when converting a `SnippetShape` into a
/// nice-looking curved path. Basically, we replace each `Rect` with a collection of the
/// four "important" points after taking overlapping neighbors into account. In the example
/// below, the important points in the middle rect are marked with `o`:
///
/// ```
/// +--------------------+
/// o-------------------o--+ |
/// +---------------------+ | | |
/// | | | +--------------------+
/// | | | |
/// | +-o--------------------o
/// | |
/// +---------------------+
/// ```
#[derive(Debug)]
struct Quad {
top_left: Point,
bottom_left: Point,
top_right: Point,
bottom_right: Point,
}
impl Quad {
fn new(rect: &Rect) -> Quad {
Quad {
top_left: (rect.x0, rect.y0).into(),
bottom_left: (rect.x0, rect.y1).into(),
top_right: (rect.x1, rect.y0).into(),
bottom_right: (rect.x1, rect.y1).into(),
}
}
fn bottom_center(&self) -> Point {
self.bottom_left.midpoint(self.bottom_right)
}
fn bottom_width(&self) -> f64 {
self.bottom_right.x - self.bottom_left.x
}
fn top_center(&self) -> Point {
self.top_left.midpoint(self.top_right)
}
fn top_width(&self) -> f64 {
self.top_right.x - self.top_left.x
}
}
impl SnippetShape {
/// See the doc comment to `Quads` for an example of what this is doing.
fn to_quads(&self) -> Vec<Quad> {
let mut ret: Vec<_> = self.rects.iter().map(Quad::new).collect();
if ret.is_empty() {
return ret;
}
for i in 1..ret.len() {
if ret[i - 1].bottom_right.y > ret[i].bottom_left.y {
ret[i].bottom_left.x = ret[i - 1].bottom_right.x;
} else {
ret[i - 1].bottom_right.x = ret[i].bottom_left.x;
}
if ret[i - 1].top_right.y > ret[i].top_left.y {
ret[i - 1].top_right.x = ret[i].top_left.x;
} else {
ret[i].top_left.x = ret[i - 1].top_right.x;
}
}
ret
}
/// Converts this snippet into a nice-looking path with rounded corners.
pub fn to_path(&self, radius: f64) -> BezPath {
let mut ret = BezPath::new();
if self.rects.is_empty() {
return ret;
}
let quads = self.to_quads();
let dx = Vec2::new(radius, 0.0);
let first_pt = if quads[0].bottom_width() >= 2.0 * radius {
quads[0].bottom_left + dx
} else {
quads[0].bottom_center()
};
ret.move_to(first_pt);
// Left-to-right across the bottom.
let mut prev_ctrl = first_pt;
for qs in quads.windows(2) {
let q = &qs[0];
let next = &qs[1];
if q.bottom_width() >= 2.0 * radius {
prev_ctrl = q.bottom_right - dx;
ret.line_to(prev_ctrl);
}
let next_ctrl = if next.bottom_width() >= 2.0 * radius {
next.bottom_left + dx
} else {
next.bottom_center()
};
ret.curve_to(prev_ctrl + dx, next_ctrl - dx, next_ctrl);
prev_ctrl = next_ctrl;
}
let q = quads.last().unwrap();
let mut next_ctrl = q.top_center();
if q.bottom_width() >= 2.0 * radius {
prev_ctrl = q.bottom_right - dx;
next_ctrl = q.top_right - dx;
ret.line_to(prev_ctrl);
}
ret.curve_to(prev_ctrl + dx, next_ctrl + dx, next_ctrl);
prev_ctrl = next_ctrl;
// Now backwards across the top
for qs in quads.windows(2).rev() {
let q = &qs[1];
let next = &qs[0];
if q.top_width() >= 2.0 * radius {
prev_ctrl = q.top_left + dx;
ret.line_to(prev_ctrl);
}
let next_ctrl = if next.top_width() >= 2.0 * radius {
next.top_right - dx
} else {
next.top_center()
};
ret.curve_to(prev_ctrl - dx, next_ctrl + dx, next_ctrl);
prev_ctrl = next_ctrl;
}
if quads[0].top_width() >= 2.0 * radius {
prev_ctrl = quads[0].top_left + dx;
ret.line_to(prev_ctrl);
}
ret.curve_to(prev_ctrl - dx, first_pt - dx, first_pt);
ret.close_path();
ret
}
/// Reflects this shape vertically, so that `0.0` is mapped to `bottom`, `1.0` is mapped to
/// `bottom - 1.0`, etc.
pub fn reflect_y(&mut self, bottom: f64) {
for r in &mut self.rects {
let y1 = bottom - r.y0;
let y0 = bottom - r.y1;
r.y0 = y0;
r.y1 = y1;
}
}
}
impl Skyline {
fn new(end_x: f64) -> Skyline {
Skyline {
buildings: vec![Building { y: 0.0, end_x }],
}
}
/// Delete zero-width buildings, and merge adjacent buildings that have the same height.
fn delete_empty(&mut self) {
let mut start_x = 0.0;
self.buildings.retain(|b| {
if b.end_x <= start_x {
false
} else {
start_x = b.end_x;
true
}
});
let mut next_y = f64::INFINITY;
for b in self.buildings.iter_mut().rev() {
// There doesn't seem to be a "retain" that goes backwards, so we mark buildings as
// to-be-deleted by setting end_x to zero.
if b.y == next_y {
b.end_x = 0.0;
}
next_y = b.y;
}
self.buildings.retain(|b| b.end_x > 0.0);
}
/// Expand all the buildings in the skyline horizontally (both left and right) by the given
/// amount. The beginning of the first building is unchanged (because we don't store it), and
/// the end of the last building is also unchanged.
fn expand_horizontally(&mut self, padding: f64) {
for i in 1..self.buildings.len() {
if self.buildings[i - 1].y > self.buildings[i].y {
self.buildings[i - 1].end_x += padding;
} else {
self.buildings[i - 1].end_x -= padding;
}
}
self.delete_empty();
}
/// Expand the skyline to ensure that every building has a minimal width.
fn fill_gaps(&mut self, min_width: f64) {
let mut start_x = 0.0;
let mut prev_nonempty: Option<usize> = None;
// For every building that's too short, we have a choice:
// - extend it,
// - cover it with its left neighbor,
        //   - cover it with its right neighbor.
for i in 0..self.buildings.len() {
if self.buildings[i].end_x - start_x < min_width {
let cover_left = prev_nonempty
.map(|j| self.buildings[j].y > self.buildings[i].y)
.unwrap_or(false);
let cover_right =
i + 1 < self.buildings.len() && self.buildings[i + 1].y > self.buildings[i].y;
match (cover_left, cover_right) {
(false, false) => {
self.buildings[i].end_x = start_x + min_width;
prev_nonempty = Some(i);
}
(true, false) => {
self.buildings[prev_nonempty.unwrap()].end_x = self.buildings[i].end_x
}
(false, true) => self.buildings[i].end_x = start_x,
(true, true) => {
let prev = prev_nonempty.unwrap();
if self.buildings[prev].y <= self.buildings[i + 1].y {
self.buildings[i].end_x = start_x;
} else {
self.buildings[prev].end_x = self.buildings[i].end_x;
}
}
}
} else {
prev_nonempty = Some(i);
}
start_x = self.buildings[i].end_x;
}
self.delete_empty();
}
fn add_rect(
&self,
start_x: f64,
end_x: f64,
height: f64,
min_width: f64,
new_part: &mut Skyline,
) {
// Find the first building that ends strictly after `start_x`.
let start_idx = match self
.buildings
.binary_search_by(|b| b.end_x.partial_cmp(&start_x).unwrap())
{
Ok(idx) => idx + 1,
Err(idx) => idx,
};
assert!(start_idx == self.buildings.len() || self.buildings[start_idx].end_x > start_x);
let mut idx = start_idx;
let mut x = start_x;
while idx < self.buildings.len() {
let min_end = x + min_width;
let orig_idx = idx;
let mut y0 = self.buildings[idx].y;
while idx + 1 < self.buildings.len() && min_end >= self.buildings[idx].end_x {
idx += 1;
y0 = y0.max(self.buildings[idx].y);
}
let this_end_x = if orig_idx < idx {
min_end
} else {
self.buildings[idx].end_x.min(end_x).max(min_end)
};
new_part.buildings.push(Building {
y: y0 + height,
end_x: this_end_x,
});
x = this_end_x;
if end_x <= x {
break;
}
if this_end_x >= self.buildings[idx].end_x {
idx += 1;
}
}
}
fn update_skyline(&mut self, start_x: f64, other: &[Building]) {
let mut new = Vec::new();
let mut merged = false;
let mut i = 0;
while i < self.buildings.len() {
if !merged && start_x < self.buildings[i].end_x {
new.push(Building {
end_x: start_x,
y: self.buildings[i].y,
});
new.extend_from_slice(other);
let x = other.last().map(|b| b.end_x).unwrap_or(0.0);
while i < self.buildings.len() && self.buildings[i].end_x <= x {
i += 1;
}
merged = true;
if i == self.buildings.len() {
break;
}
}
new.push(self.buildings[i]);
i += 1;
}
self.buildings = new;
}
fn to_rects(&self, mut start_x: f64, thick_count: usize, params: &Parameters) -> Vec<Rect> {
let mut ret = Vec::new();
for w in self.buildings.windows(2) {
let end_x = if w[1].y > w[0].y {
w[0].end_x
} else {
w[0].end_x + params.overlap
};
let height = if ret.len() < thick_count {
params.thick_height
} else {
params.thin_height
};
let y1 = w[0].y;
let y0 = y1 - height;
ret.push(Rect {
x0: start_x,
x1: end_x,
y0,
y1,
});
start_x = if w[1].y > w[0].y {
w[0].end_x - params.overlap
} else {
w[0].end_x
};
}
if let Some(last) = self.buildings.last() {
let height = if ret.len() < thick_count {
params.thick_height
} else {
params.thin_height
};
let y1 = last.y;
let y0 = y1 - height;
ret.push(Rect {
x0: start_x,
x1: last.end_x,
y0,
y1,
});
}
ret
}
fn add_snippet<Id>(&mut self, b: &SnippetBounds<Id>, params: &Parameters) -> SnippetShape {
let mut snip = Skyline {
buildings: Vec::new(),
};
let p = |x: Time| x.as_micros() as f64 * params.pixels_per_usec;
let thick_end = b.thin.or(b.end).map(p).unwrap_or(params.end_x);
self.add_rect(
p(b.start),
thick_end,
params.thick_height + params.v_padding,
params.min_width,
&mut snip,
);
// Keep track of the number of thick segments, so that later we know which parts of `snip`
// are thin, and which parts are thick.
// TODO: maybe better for add_rect to produce Rects and then we convert to skyline later?
let thick_count = snip.buildings.len();
if let Some(thin) = b.thin {
let thin_end = b.end.map(p).unwrap_or(params.end_x);
let thin_start = snip.buildings.last().map(|b| b.end_x).unwrap_or(p(thin));
self.add_rect(
thin_start,
thin_end,
params.thin_height + params.v_padding,
params.min_width,
&mut snip,
);
}
let rects = snip.to_rects(p(b.start), thick_count, params);
snip.expand_horizontally(params.h_padding + params.overlap);
if let Some(last) = snip.buildings.last_mut() {
last.end_x = (last.end_x + params.h_padding).min(params.end_x);
}
self.update_skyline(p(b.start) - params.h_padding, &snip.buildings[..]);
self.fill_gaps(params.min_width);
SnippetShape { rects }
}
}
/// A collection of parameters describing how to turn a bunch of snippets into a
/// hopefully-visually-pleasing layout.
pub struct Parameters {
/// Snippets have thick parts and thin parts (the thick part is the time interval where the
/// drawing is happening; the thin part then lasts until the snippet disappears). This is
/// the thickness of the thick part.
pub thick_height: f64,
/// The thickness of the thin part.
pub thin_height: f64,
/// Horizontal padding that we add between snippets.
pub h_padding: f64,
/// Vertical padding that we add between snippets.
pub v_padding: f64,
/// The number of pixels per microsecond of timeline time.
pub pixels_per_usec: f64,
/// The minimum width of a rectangle in the timeline.
pub min_width: f64,
/// When the vertical position of a snippet changes, we overlap the rectangles by this much.
/// See the `SnippetShape` for a picture.
pub overlap: f64,
/// The largest `x` position (because logically we need to deal with infinite `x` positions
/// but in practice we need to truncate).
pub end_x: f64,
}
/// The result of laying out the snippets. The type parameter `T` is a snippet id (probably
/// `DrawSnippetId` or `TalkSnippetId`).
pub struct Layout<T> {
/// A map from the snippet's id to its shape.
pub positions: HashMap<T, SnippetShape>,
/// The maximum height of any snippet. This is redundant, in that it can be recomputed from
/// `positions`.
pub max_y: f64,
}
#[derive(Clone)]
pub struct SnippetBounds<T> {
/// The time at which this snippet starts.
start: Time,
/// The time at which this snippet changes from thick to thin (if it does).
thin: Option<Time>,
/// The time at which this snippet ends (if it does).
end: Option<Time>,
id: T,
}
impl From<(DrawSnippetId, &DrawSnippet)> for SnippetBounds<DrawSnippetId> {
fn from(data: (DrawSnippetId, &DrawSnippet)) -> SnippetBounds<DrawSnippetId> {
let last_draw = data.1.last_draw_time();
let thin = if let Some(end) = data.1.end_time() {
if end <= last_draw {
None
} else {
Some(last_draw)
}
} else {
Some(last_draw)
};
SnippetBounds {
start: data.1.start_time(),
thin,
end: data.1.end_time(),
id: data.0,
}
}
}
impl From<(TalkSnippetId, &TalkSnippet)> for SnippetBounds<TalkSnippetId> {
fn from(data: (TalkSnippetId, &TalkSnippet)) -> SnippetBounds<TalkSnippetId> {
SnippetBounds {
start: data.1.start_time(),
thin: None,
end: Some(data.1.end_time()),
id: data.0,
}
}
}
pub fn layout<Id: Copy + Hash + Eq + Ord, T: Into<SnippetBounds<Id>>, I: Iterator<Item = T>>(
iter: I,
params: &Parameters,
) -> Layout<Id> {
let mut sky = Skyline::new(params.end_x);
let mut ret = Layout {
positions: HashMap::new(),
max_y: 0.0,
};
for b in iter.map(|t| t.into()) {
let shape = sky.add_snippet(&b, params);
ret.max_y = ret.max_y.max(
shape
.rects
.iter()
.map(|r| r.y1)
.max_by(|x, y| x.partial_cmp(y).unwrap())
.unwrap_or(0.0),
);
ret.positions.insert(b.id, shape);
}
ret
}
#[cfg(test)]
mod tests {
use super::*;
// Creates a snippet that is empty, but has a starting and (possibly) an ending time.
fn snip(id: usize, start: Time, thin: Option<Time>, end: Option<Time>) -> SnippetBounds<usize> {
SnippetBounds {
start,
thin,
end,
id,
}
}
fn sky(arr: &[(f64, f64)]) -> Skyline {
Skyline {
buildings: arr
.iter()
.map(|&(end_x, y)| Building { end_x, y })
.collect(),
}
}
macro_rules! snips {
( $(($begin:expr, $thin:expr, $end:expr)),* ) => {
{
let mut ret = Vec::<SnippetBounds<usize>>::new();
let mut id = 0;
$(
id += 1;
ret.push(snip(id, Time::from_micros($begin), $thin.map(Time::from_micros), $end.map(Time::from_micros)));
)*
ret.into_iter()
}
}
}
const PARAMS: Parameters = Parameters {
thick_height: 2.0,
thin_height: 1.0,
h_padding: 0.0,
v_padding: 0.0,
min_width: 2.0,
overlap: 1.0,
pixels_per_usec: 1.0,
end_x: 100.0,
};
const PARAMS_PADDED: Parameters = Parameters {
thick_height: 2.0,
thin_height: 1.0,
h_padding: 1.0,
v_padding: 1.0,
min_width: 2.0,
overlap: 1.0,
pixels_per_usec: 1.0,
end_x: 100.0,
};
#[test]
fn layout_infinite() {
let snips = snips!((0, Some(30), None), (10, Some(50), None));
let layout = layout(snips, &PARAMS);
assert_eq!(
&layout.positions[&1].rects,
&[
Rect::new(0.0, 0.0, 31.0, 2.0),
Rect::new(30.0, 0.0, 100.0, 1.0)
]
);
assert_eq!(
&layout.positions[&2].rects,
&[
Rect::new(10.0, 2.0, 32.0, 4.0),
Rect::new(31.0, 1.0, 51.0, 3.0),
Rect::new(50.0, 1.0, 100.0, 2.0)
]
);
}
#[test]
fn layout_two() {
let snips = snips!((0, Some(20), Some(50)), (20, Some(30), Some(50)));
let layout = layout(snips, &PARAMS);
assert_eq!(
&layout.positions[&1].rects,
&[
Rect::new(0.0, 0.0, 21.0, 2.0),
Rect::new(20.0, 0.0, 50.0, 1.0)
]
);
assert_eq!(
&layout.positions[&2].rects,
&[
Rect::new(20.0, 2.0, 23.0, 4.0),
Rect::new(22.0, 1.0, 31.0, 3.0),
Rect::new(30.0, 1.0, 50.0, 2.0)
]
);
}
#[test]
fn layout_padded() {
let snips = snips!((0, Some(20), Some(50)), (10, Some(30), Some(50)));
let layout = layout(snips, &PARAMS_PADDED);
assert_eq!(
&layout.positions[&1].rects,
&[
Rect::new(0.0, 1.0, 21.0, 3.0),
Rect::new(20.0, 1.0, 50.0, 2.0)
]
);
assert_eq!(
&layout.positions[&2].rects,
&[
Rect::new(10.0, 4.0, 23.0, 6.0),
Rect::new(22.0, 3.0, 31.0, 5.0),
Rect::new(30.0, 3.0, 50.0, 4.0)
]
);
}
#[test]
fn instant_draw() {
let snips = snips!((0, Some(0), Some(20)));
let layout = layout(snips, &PARAMS);
assert_eq!(
&layout.positions[&1].rects,
&[
Rect::new(0.0, 0.0, 3.0, 2.0),
Rect::new(2.0, 0.0, 20.0, 1.0),
]
);
let snips = snips!((0, None, Some(50)), (49, Some(49), Some(80)));
let layout = self::layout(snips, &PARAMS);
assert_eq!(
&layout.positions[&1].rects,
&[Rect::new(0.0, 0.0, 50.0, 2.0),]
);
assert_eq!(
&layout.positions[&2].rects,
&[
Rect::new(49.0, 2.0, 52.0, 4.0),
Rect::new(51.0, 0.0, 80.0, 1.0),
]
);
}
#[test]
fn fill_gaps() {
let min_width = 3.0;
let mut no_gaps = sky(&[(5.0, 1.0), (10.0, 2.0), (15.0, 1.0)]);
let clone = no_gaps.clone();
no_gaps.fill_gaps(min_width);
assert_eq!(no_gaps, clone);
let mut gap_start = sky(&[(1.0, 0.0), (3.0, 1.0)]);
gap_start.fill_gaps(min_width);
assert_eq!(gap_start, sky(&[(3.0, 1.0)]));
let mut gap_start = sky(&[(1.0, 1.0), (3.0, 0.0)]);
gap_start.fill_gaps(min_width);
assert_eq!(gap_start, sky(&[(3.0, 1.0)]));
let mut gap_mid = sky(&[(4.0, 2.0), (6.0, 1.0), (9.0, 3.0)]);
gap_mid.fill_gaps(min_width);
assert_eq!(gap_mid, sky(&[(4.0, 2.0), (9.0, 3.0)]));
let mut gap_mid = sky(&[(4.0, 3.0), (6.0, 1.0), (9.0, 2.0)]);
gap_mid.fill_gaps(min_width);
assert_eq!(gap_mid, sky(&[(6.0, 3.0), (9.0, 2.0)]));
let mut gap_end = sky(&[(5.0, 0.0), (6.0, 1.0)]);
gap_end.fill_gaps(min_width);
assert_eq!(gap_end, sky(&[(5.0, 0.0), (8.0, 1.0)]));
let mut gap_end = sky(&[(5.0, 1.0), (6.0, 0.0)]);
gap_end.fill_gaps(min_width);
assert_eq!(gap_end, sky(&[(6.0, 1.0)]));
let mut staircase = sky(&[(1.0, 1.0), (2.0, 2.0), (3.0, 3.0), (4.0, 4.0), (5.0, 5.0)]);
staircase.fill_gaps(min_width);
assert_eq!(staircase, sky(&[(3.0, 3.0), (6.0, 5.0)]));
// There's a bit of asymmetry here with the way that we process things greedily
// left-to-right.
let mut staircase = sky(&[(1.0, 5.0), (2.0, 4.0), (3.0, 3.0), (4.0, 2.0), (5.0, 1.0)]);
staircase.fill_gaps(min_width);
assert_eq!(staircase, sky(&[(5.0, 5.0)]));
}
#[test]
fn add_rect() {
let min_width = 3.0;
let mut s = sky(&[(100.0, 0.0)]);
let mut new_s = Skyline::default();
s.add_rect(10.0, 20.0, 1.0, min_width, &mut new_s);
assert_eq!(new_s, sky(&[(20.0, 1.0)]));
s.update_skyline(10.0, &new_s.buildings);
s.fill_gaps(min_width);
assert_eq!(s, sky(&[(10.0, 0.0), (20.0, 1.0), (100.0, 0.0)]));
new_s.buildings.clear();
s.add_rect(15.0, 25.0, 1.0, min_width, &mut new_s);
assert_eq!(new_s, sky(&[(20.0, 2.0), (25.0, 1.0)]));
}
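    // Additional sketches (not in the original source) exercising two of the
    // documented operations above: `reflect_y` and `to_path`.
    #[test]
    fn reflect_y_maps_rects_around_bottom() {
        let mut shape = SnippetShape {
            rects: vec![Rect::new(0.0, 0.0, 10.0, 2.0)],
        };
        // Reflecting around bottom = 5.0 maps y in [0, 2] to y in [3, 5].
        shape.reflect_y(5.0);
        assert_eq!(shape.rects, vec![Rect::new(0.0, 3.0, 10.0, 5.0)]);
    }
    #[test]
    fn to_path_single_rect_is_nonempty() {
        let shape = SnippetShape {
            rects: vec![Rect::new(0.0, 0.0, 10.0, 2.0)],
        };
        // A single rectangle should turn into a closed, non-empty Bezier path.
        let path = shape.to_path(1.0);
        assert!(!path.elements().is_empty());
    }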
}
| 32.368957 | 125 | 0.484946 |
c1f679bdecc5c2db43109091eedbdedf81b9abaf
| 993 |
#![no_std]
#![doc = include_str!("../README.md")]
/// Extension trait offering PrimitivePromotion type.
///
/// * `<u8 as PrimitivePromotionExt>::PrimitivePromotion == u16`;
/// * `<i16 as PrimitivePromotionExt>::PrimitivePromotion == i32`;
/// * ...
pub trait PrimitivePromotionExt {
type PrimitivePromotion;
}
impl PrimitivePromotionExt for u8 {
type PrimitivePromotion = u16;
}
impl PrimitivePromotionExt for u16 {
type PrimitivePromotion = u32;
}
impl PrimitivePromotionExt for u32 {
type PrimitivePromotion = u64;
}
impl PrimitivePromotionExt for u64 {
type PrimitivePromotion = u128;
}
impl PrimitivePromotionExt for i8 {
type PrimitivePromotion = i16;
}
impl PrimitivePromotionExt for i16 {
type PrimitivePromotion = i32;
}
impl PrimitivePromotionExt for i32 {
type PrimitivePromotion = i64;
}
impl PrimitivePromotionExt for i64 {
type PrimitivePromotion = i128;
}
impl PrimitivePromotionExt for f32 {
type PrimitivePromotion = f64;
}
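// Hedged usage sketch (not part of the original crate): widen `u8` operands to
// their promotion type before multiplying, so the product cannot overflow. The
// module and helper names below are hypothetical.
#[cfg(test)]
mod promotion_sketch {
    use super::PrimitivePromotionExt;

    fn widening_mul(a: u8, b: u8) -> <u8 as PrimitivePromotionExt>::PrimitivePromotion {
        // The promotion of `u8` is `u16`, which can hold any product of two u8 values.
        a as u16 * b as u16
    }

    #[test]
    fn product_of_two_u8_fits_in_promotion() {
        assert_eq!(widening_mul(200, 200), 40_000u16);
    }
}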
| 20.6875 | 66 | 0.725076 |
16cade377f3330294e13e4b17ecf59d18b7b9874
| 838 |
use bevy_ecs::prelude::*;
macro_rules! create_entities {
($world:ident; $( $variants:ident ),*) => {
$(
#[derive(Component)]
struct $variants(f32);
for _ in 0..20 {
$world.spawn().insert_bundle(($variants(0.0), Data(1.0)));
}
)*
};
}
#[derive(Component)]
struct Data(f32);
pub struct Benchmark<'w>(World, QueryState<&'w mut Data>);
impl<'w> Benchmark<'w> {
pub fn new() -> Self {
let mut world = World::new();
create_entities!(world; A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z);
let query = world.query::<&mut Data>();
Self(world, query)
}
pub fn run(&mut self) {
self.1.for_each_mut(&mut self.0, |mut data| {
data.0 *= 2.0;
});
}
}
| 23.277778 | 110 | 0.479714 |
0367736ff3cebf566b37fdfb3e215cf13c3354ee
| 3,432 |
// Generated from definition io.k8s.api.core.v1.LocalVolumeSource
/// Local represents directly-attached storage with node affinity (Beta feature)
#[derive(Clone, Debug, Default, PartialEq)]
pub struct LocalVolumeSource {
/// The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...). Directories can be represented only by PersistentVolume with VolumeMode=Filesystem. Block devices can be represented only by VolumeMode=Block, which also requires the BlockVolume alpha feature gate to be enabled.
pub path: String,
}
impl<'de> crate::serde::Deserialize<'de> for LocalVolumeSource {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_path,
Other,
}
impl<'de> crate::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
Ok(match v {
"path" => Field::Key_path,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = LocalVolumeSource;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("LocalVolumeSource")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
let mut value_path: Option<String> = None;
while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_path => value_path = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(LocalVolumeSource {
path: value_path.ok_or_else(|| crate::serde::de::Error::missing_field("path"))?,
})
}
}
deserializer.deserialize_struct(
"LocalVolumeSource",
&[
"path",
],
Visitor,
)
}
}
impl crate::serde::Serialize for LocalVolumeSource {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
let mut state = serializer.serialize_struct(
"LocalVolumeSource",
1,
)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "path", &self.path)?;
crate::serde::ser::SerializeStruct::end(state)
}
}
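// Hedged usage sketch (not part of the generated file): round-trip the type
// through JSON. Assumes `serde_json` is available to this crate; the module
// name below is ours.
#[cfg(test)]
mod local_volume_source_sketch {
    use super::LocalVolumeSource;

    #[test]
    fn json_round_trip() {
        let source = LocalVolumeSource { path: "/mnt/disks/ssd1".to_string() };
        // The custom Serialize impl above emits a single "path" field.
        let json = serde_json::to_string(&source).unwrap();
        assert_eq!(json, r#"{"path":"/mnt/disks/ssd1"}"#);
        // The custom Deserialize impl above requires "path" and ignores unknown keys.
        let back: LocalVolumeSource = serde_json::from_str(&json).unwrap();
        assert_eq!(back, source);
    }
}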
| 39.906977 | 330 | 0.540793 |
d901c9e2c06b0cba144e5d2525337672c024a3c3
| 23,786 |
pub type c_char = u8;
pub type __u64 = ::c_ulonglong;
pub type wchar_t = u32;
pub type nlink_t = u32;
pub type blksize_t = ::c_int;
s! {
pub struct stat {
pub st_dev: ::dev_t,
pub st_ino: ::ino_t,
pub st_mode: ::mode_t,
pub st_nlink: ::nlink_t,
pub st_uid: ::uid_t,
pub st_gid: ::gid_t,
pub st_rdev: ::dev_t,
__pad0: ::c_ulong,
pub st_size: ::off_t,
pub st_blksize: ::blksize_t,
__pad1: ::c_int,
pub st_blocks: ::blkcnt_t,
pub st_atime: ::time_t,
pub st_atime_nsec: ::c_long,
pub st_mtime: ::time_t,
pub st_mtime_nsec: ::c_long,
pub st_ctime: ::time_t,
pub st_ctime_nsec: ::c_long,
__unused: [::c_uint; 2],
}
pub struct stat64 {
pub st_dev: ::dev_t,
pub st_ino: ::ino_t,
pub st_mode: ::mode_t,
pub st_nlink: ::nlink_t,
pub st_uid: ::uid_t,
pub st_gid: ::gid_t,
pub st_rdev: ::dev_t,
__pad0: ::c_ulong,
pub st_size: ::off_t,
pub st_blksize: ::blksize_t,
__pad1: ::c_int,
pub st_blocks: ::blkcnt_t,
pub st_atime: ::time_t,
pub st_atime_nsec: ::c_long,
pub st_mtime: ::time_t,
pub st_mtime_nsec: ::c_long,
pub st_ctime: ::time_t,
pub st_ctime_nsec: ::c_long,
__unused: [::c_uint; 2],
}
pub struct user_regs_struct {
pub regs: [::c_ulonglong; 31],
pub sp: ::c_ulonglong,
pub pc: ::c_ulonglong,
pub pstate: ::c_ulonglong,
}
pub struct ipc_perm {
pub __ipc_perm_key: ::key_t,
pub uid: ::uid_t,
pub gid: ::gid_t,
pub cuid: ::uid_t,
pub cgid: ::gid_t,
pub mode: ::mode_t,
pub __seq: ::c_ushort,
__unused1: ::c_ulong,
__unused2: ::c_ulong,
}
}
pub const O_APPEND: ::c_int = 1024;
pub const O_DIRECT: ::c_int = 0x10000;
pub const O_DIRECTORY: ::c_int = 0x4000;
pub const O_LARGEFILE: ::c_int = 0x20000;
pub const O_NOFOLLOW: ::c_int = 0x8000;
pub const O_CREAT: ::c_int = 64;
pub const O_EXCL: ::c_int = 128;
pub const O_NOCTTY: ::c_int = 256;
pub const O_NONBLOCK: ::c_int = 2048;
pub const O_SYNC: ::c_int = 1052672;
pub const O_RSYNC: ::c_int = 1052672;
pub const O_DSYNC: ::c_int = 4096;
pub const O_ASYNC: ::c_int = 0x2000;
pub const ENAMETOOLONG: ::c_int = 36;
pub const ENOLCK: ::c_int = 37;
pub const ENOSYS: ::c_int = 38;
pub const ENOTEMPTY: ::c_int = 39;
pub const ELOOP: ::c_int = 40;
pub const ENOMSG: ::c_int = 42;
pub const EIDRM: ::c_int = 43;
pub const ECHRNG: ::c_int = 44;
pub const EL2NSYNC: ::c_int = 45;
pub const EL3HLT: ::c_int = 46;
pub const EL3RST: ::c_int = 47;
pub const ELNRNG: ::c_int = 48;
pub const EUNATCH: ::c_int = 49;
pub const ENOCSI: ::c_int = 50;
pub const EL2HLT: ::c_int = 51;
pub const EBADE: ::c_int = 52;
pub const EBADR: ::c_int = 53;
pub const EXFULL: ::c_int = 54;
pub const ENOANO: ::c_int = 55;
pub const EBADRQC: ::c_int = 56;
pub const EBADSLT: ::c_int = 57;
pub const EMULTIHOP: ::c_int = 72;
pub const EBADMSG: ::c_int = 74;
pub const EOVERFLOW: ::c_int = 75;
pub const ENOTUNIQ: ::c_int = 76;
pub const EBADFD: ::c_int = 77;
pub const EREMCHG: ::c_int = 78;
pub const ELIBACC: ::c_int = 79;
pub const ELIBBAD: ::c_int = 80;
pub const ELIBSCN: ::c_int = 81;
pub const ELIBMAX: ::c_int = 82;
pub const ELIBEXEC: ::c_int = 83;
pub const EILSEQ: ::c_int = 84;
pub const ERESTART: ::c_int = 85;
pub const ESTRPIPE: ::c_int = 86;
pub const EUSERS: ::c_int = 87;
pub const ENOTSOCK: ::c_int = 88;
pub const EDESTADDRREQ: ::c_int = 89;
pub const EMSGSIZE: ::c_int = 90;
pub const EPROTOTYPE: ::c_int = 91;
pub const ENOPROTOOPT: ::c_int = 92;
pub const EPROTONOSUPPORT: ::c_int = 93;
pub const ESOCKTNOSUPPORT: ::c_int = 94;
pub const EOPNOTSUPP: ::c_int = 95;
pub const ENOTSUP: ::c_int = EOPNOTSUPP;
pub const EPFNOSUPPORT: ::c_int = 96;
pub const EAFNOSUPPORT: ::c_int = 97;
pub const EADDRINUSE: ::c_int = 98;
pub const EADDRNOTAVAIL: ::c_int = 99;
pub const ENETDOWN: ::c_int = 100;
pub const ENETUNREACH: ::c_int = 101;
pub const ENETRESET: ::c_int = 102;
pub const ECONNABORTED: ::c_int = 103;
pub const ECONNRESET: ::c_int = 104;
pub const ENOBUFS: ::c_int = 105;
pub const EISCONN: ::c_int = 106;
pub const ENOTCONN: ::c_int = 107;
pub const ESHUTDOWN: ::c_int = 108;
pub const ETOOMANYREFS: ::c_int = 109;
pub const ETIMEDOUT: ::c_int = 110;
pub const ECONNREFUSED: ::c_int = 111;
pub const EHOSTDOWN: ::c_int = 112;
pub const EHOSTUNREACH: ::c_int = 113;
pub const EALREADY: ::c_int = 114;
pub const EINPROGRESS: ::c_int = 115;
pub const ESTALE: ::c_int = 116;
pub const EUCLEAN: ::c_int = 117;
pub const ENOTNAM: ::c_int = 118;
pub const ENAVAIL: ::c_int = 119;
pub const EISNAM: ::c_int = 120;
pub const EREMOTEIO: ::c_int = 121;
pub const EDQUOT: ::c_int = 122;
pub const ENOMEDIUM: ::c_int = 123;
pub const EMEDIUMTYPE: ::c_int = 124;
pub const ECANCELED: ::c_int = 125;
pub const ENOKEY: ::c_int = 126;
pub const EKEYEXPIRED: ::c_int = 127;
pub const EKEYREVOKED: ::c_int = 128;
pub const EKEYREJECTED: ::c_int = 129;
pub const EOWNERDEAD: ::c_int = 130;
pub const ENOTRECOVERABLE: ::c_int = 131;
pub const ERFKILL: ::c_int = 132;
pub const EHWPOISON: ::c_int = 133;
// bits/hwcap.h
pub const HWCAP_FP: ::c_ulong = 1 << 0;
pub const HWCAP_ASIMD: ::c_ulong = 1 << 1;
pub const HWCAP_EVTSTRM: ::c_ulong = 1 << 2;
pub const HWCAP_AES: ::c_ulong = 1 << 3;
pub const HWCAP_PMULL: ::c_ulong = 1 << 4;
pub const HWCAP_SHA1: ::c_ulong = 1 << 5;
pub const HWCAP_SHA2: ::c_ulong = 1 << 6;
pub const HWCAP_CRC32: ::c_ulong = 1 << 7;
pub const HWCAP_ATOMICS: ::c_ulong = 1 << 8;
pub const HWCAP_FPHP: ::c_ulong = 1 << 9;
pub const HWCAP_ASIMDHP: ::c_ulong = 1 << 10;
pub const HWCAP_CPUID: ::c_ulong = 1 << 11;
pub const HWCAP_ASIMDRDM: ::c_ulong = 1 << 12;
pub const HWCAP_JSCVT: ::c_ulong = 1 << 13;
pub const HWCAP_FCMA: ::c_ulong = 1 << 14;
pub const HWCAP_LRCPC: ::c_ulong = 1 << 15;
pub const HWCAP_DCPOP: ::c_ulong = 1 << 16;
pub const HWCAP_SHA3: ::c_ulong = 1 << 17;
pub const HWCAP_SM3: ::c_ulong = 1 << 18;
pub const HWCAP_SM4: ::c_ulong = 1 << 19;
pub const HWCAP_ASIMDDP: ::c_ulong = 1 << 20;
pub const HWCAP_SHA512: ::c_ulong = 1 << 21;
pub const HWCAP_SVE: ::c_ulong = 1 << 22;
pub const HWCAP_ASIMDFHM: ::c_ulong = 1 << 23;
pub const HWCAP_DIT: ::c_ulong = 1 << 24;
pub const HWCAP_USCAT: ::c_ulong = 1 << 25;
pub const HWCAP_ILRCPC: ::c_ulong = 1 << 26;
pub const HWCAP_FLAGM: ::c_ulong = 1 << 27;
pub const HWCAP_SSBS: ::c_ulong = 1 << 28;
pub const HWCAP_SB: ::c_ulong = 1 << 29;
pub const HWCAP_PACA: ::c_ulong = 1 << 30;
pub const HWCAP_PACG: ::c_ulong = 1 << 31;
pub const MAP_ANON: ::c_int = 0x0020;
pub const MAP_GROWSDOWN: ::c_int = 0x0100;
pub const MAP_DENYWRITE: ::c_int = 0x0800;
pub const MAP_EXECUTABLE: ::c_int = 0x01000;
pub const MAP_LOCKED: ::c_int = 0x02000;
pub const MAP_NORESERVE: ::c_int = 0x04000;
pub const MAP_POPULATE: ::c_int = 0x08000;
pub const MAP_NONBLOCK: ::c_int = 0x010000;
pub const MAP_STACK: ::c_int = 0x020000;
pub const MAP_HUGETLB: ::c_int = 0x040000;
pub const MAP_SYNC: ::c_int = 0x080000;
pub const SOCK_STREAM: ::c_int = 1;
pub const SOCK_DGRAM: ::c_int = 2;
pub const SA_ONSTACK: ::c_int = 0x08000000;
pub const SA_SIGINFO: ::c_int = 0x00000004;
pub const SA_NOCLDWAIT: ::c_int = 0x00000002;
pub const SIGCHLD: ::c_int = 17;
pub const SIGBUS: ::c_int = 7;
pub const SIGTTIN: ::c_int = 21;
pub const SIGTTOU: ::c_int = 22;
pub const SIGXCPU: ::c_int = 24;
pub const SIGXFSZ: ::c_int = 25;
pub const SIGVTALRM: ::c_int = 26;
pub const SIGPROF: ::c_int = 27;
pub const SIGWINCH: ::c_int = 28;
pub const SIGUSR1: ::c_int = 10;
pub const SIGUSR2: ::c_int = 12;
pub const SIGCONT: ::c_int = 18;
pub const SIGSTOP: ::c_int = 19;
pub const SIGTSTP: ::c_int = 20;
pub const SIGURG: ::c_int = 23;
pub const SIGIO: ::c_int = 29;
pub const SIGSYS: ::c_int = 31;
pub const SIGSTKFLT: ::c_int = 16;
pub const SIGPOLL: ::c_int = 29;
pub const SIGPWR: ::c_int = 30;
pub const SIG_SETMASK: ::c_int = 2;
pub const SIG_BLOCK: ::c_int = 0x000000;
pub const SIG_UNBLOCK: ::c_int = 0x01;
pub const F_GETLK: ::c_int = 5;
pub const F_GETOWN: ::c_int = 9;
pub const F_SETLK: ::c_int = 6;
pub const F_SETLKW: ::c_int = 7;
pub const F_SETOWN: ::c_int = 8;
pub const F_OFD_GETLK: ::c_int = 36;
pub const F_OFD_SETLK: ::c_int = 37;
pub const F_OFD_SETLKW: ::c_int = 38;
pub const VEOF: usize = 4;
pub const POLLWRNORM: ::c_short = 0x100;
pub const POLLWRBAND: ::c_short = 0x200;
pub const MINSIGSTKSZ: ::size_t = 6144;
pub const SIGSTKSZ: ::size_t = 12288;
pub const MADV_SOFT_OFFLINE: ::c_int = 101;
pub const SYS_io_setup: ::c_long = 0;
pub const SYS_io_destroy: ::c_long = 1;
pub const SYS_io_submit: ::c_long = 2;
pub const SYS_io_cancel: ::c_long = 3;
pub const SYS_io_getevents: ::c_long = 4;
pub const SYS_setxattr: ::c_long = 5;
pub const SYS_lsetxattr: ::c_long = 6;
pub const SYS_fsetxattr: ::c_long = 7;
pub const SYS_getxattr: ::c_long = 8;
pub const SYS_lgetxattr: ::c_long = 9;
pub const SYS_fgetxattr: ::c_long = 10;
pub const SYS_listxattr: ::c_long = 11;
pub const SYS_llistxattr: ::c_long = 12;
pub const SYS_flistxattr: ::c_long = 13;
pub const SYS_removexattr: ::c_long = 14;
pub const SYS_lremovexattr: ::c_long = 15;
pub const SYS_fremovexattr: ::c_long = 16;
pub const SYS_getcwd: ::c_long = 17;
pub const SYS_lookup_dcookie: ::c_long = 18;
pub const SYS_eventfd2: ::c_long = 19;
pub const SYS_epoll_create1: ::c_long = 20;
pub const SYS_epoll_ctl: ::c_long = 21;
pub const SYS_epoll_pwait: ::c_long = 22;
pub const SYS_dup: ::c_long = 23;
pub const SYS_dup3: ::c_long = 24;
pub const SYS_fcntl: ::c_long = 25;
pub const SYS_inotify_init1: ::c_long = 26;
pub const SYS_inotify_add_watch: ::c_long = 27;
pub const SYS_inotify_rm_watch: ::c_long = 28;
pub const SYS_ioctl: ::c_long = 29;
pub const SYS_ioprio_set: ::c_long = 30;
pub const SYS_ioprio_get: ::c_long = 31;
pub const SYS_flock: ::c_long = 32;
pub const SYS_mknodat: ::c_long = 33;
pub const SYS_mkdirat: ::c_long = 34;
pub const SYS_unlinkat: ::c_long = 35;
pub const SYS_symlinkat: ::c_long = 36;
pub const SYS_linkat: ::c_long = 37;
pub const SYS_renameat: ::c_long = 38;
pub const SYS_umount2: ::c_long = 39;
pub const SYS_mount: ::c_long = 40;
pub const SYS_pivot_root: ::c_long = 41;
pub const SYS_nfsservctl: ::c_long = 42;
pub const SYS_statfs: ::c_long = 43;
pub const SYS_fstatfs: ::c_long = 44;
pub const SYS_truncate: ::c_long = 45;
pub const SYS_ftruncate: ::c_long = 46;
pub const SYS_fallocate: ::c_long = 47;
pub const SYS_faccessat: ::c_long = 48;
pub const SYS_chdir: ::c_long = 49;
pub const SYS_fchdir: ::c_long = 50;
pub const SYS_chroot: ::c_long = 51;
pub const SYS_fchmod: ::c_long = 52;
pub const SYS_fchmodat: ::c_long = 53;
pub const SYS_fchownat: ::c_long = 54;
pub const SYS_fchown: ::c_long = 55;
pub const SYS_openat: ::c_long = 56;
pub const SYS_close: ::c_long = 57;
pub const SYS_vhangup: ::c_long = 58;
pub const SYS_pipe2: ::c_long = 59;
pub const SYS_quotactl: ::c_long = 60;
pub const SYS_getdents64: ::c_long = 61;
pub const SYS_lseek: ::c_long = 62;
pub const SYS_read: ::c_long = 63;
pub const SYS_write: ::c_long = 64;
pub const SYS_readv: ::c_long = 65;
pub const SYS_writev: ::c_long = 66;
pub const SYS_pread64: ::c_long = 67;
pub const SYS_pwrite64: ::c_long = 68;
pub const SYS_preadv: ::c_long = 69;
pub const SYS_pwritev: ::c_long = 70;
pub const SYS_pselect6: ::c_long = 72;
pub const SYS_ppoll: ::c_long = 73;
pub const SYS_signalfd4: ::c_long = 74;
pub const SYS_vmsplice: ::c_long = 75;
pub const SYS_splice: ::c_long = 76;
pub const SYS_tee: ::c_long = 77;
pub const SYS_readlinkat: ::c_long = 78;
pub const SYS_newfstatat: ::c_long = 79;
pub const SYS_fstat: ::c_long = 80;
pub const SYS_sync: ::c_long = 81;
pub const SYS_fsync: ::c_long = 82;
pub const SYS_fdatasync: ::c_long = 83;
pub const SYS_sync_file_range: ::c_long = 84;
pub const SYS_timerfd_create: ::c_long = 85;
pub const SYS_timerfd_settime: ::c_long = 86;
pub const SYS_timerfd_gettime: ::c_long = 87;
pub const SYS_utimensat: ::c_long = 88;
pub const SYS_acct: ::c_long = 89;
pub const SYS_capget: ::c_long = 90;
pub const SYS_capset: ::c_long = 91;
pub const SYS_personality: ::c_long = 92;
pub const SYS_exit: ::c_long = 93;
pub const SYS_exit_group: ::c_long = 94;
pub const SYS_waitid: ::c_long = 95;
pub const SYS_set_tid_address: ::c_long = 96;
pub const SYS_unshare: ::c_long = 97;
pub const SYS_futex: ::c_long = 98;
pub const SYS_set_robust_list: ::c_long = 99;
pub const SYS_get_robust_list: ::c_long = 100;
pub const SYS_nanosleep: ::c_long = 101;
pub const SYS_getitimer: ::c_long = 102;
pub const SYS_setitimer: ::c_long = 103;
pub const SYS_kexec_load: ::c_long = 104;
pub const SYS_init_module: ::c_long = 105;
pub const SYS_delete_module: ::c_long = 106;
pub const SYS_timer_create: ::c_long = 107;
pub const SYS_timer_gettime: ::c_long = 108;
pub const SYS_timer_getoverrun: ::c_long = 109;
pub const SYS_timer_settime: ::c_long = 110;
pub const SYS_timer_delete: ::c_long = 111;
pub const SYS_clock_settime: ::c_long = 112;
pub const SYS_clock_gettime: ::c_long = 113;
pub const SYS_clock_getres: ::c_long = 114;
pub const SYS_clock_nanosleep: ::c_long = 115;
pub const SYS_syslog: ::c_long = 116;
pub const SYS_ptrace: ::c_long = 117;
pub const SYS_sched_setparam: ::c_long = 118;
pub const SYS_sched_setscheduler: ::c_long = 119;
pub const SYS_sched_getscheduler: ::c_long = 120;
pub const SYS_sched_getparam: ::c_long = 121;
pub const SYS_sched_setaffinity: ::c_long = 122;
pub const SYS_sched_getaffinity: ::c_long = 123;
pub const SYS_sched_yield: ::c_long = 124;
pub const SYS_sched_get_priority_max: ::c_long = 125;
pub const SYS_sched_get_priority_min: ::c_long = 126;
pub const SYS_sched_rr_get_interval: ::c_long = 127;
pub const SYS_restart_syscall: ::c_long = 128;
pub const SYS_kill: ::c_long = 129;
pub const SYS_tkill: ::c_long = 130;
pub const SYS_tgkill: ::c_long = 131;
pub const SYS_sigaltstack: ::c_long = 132;
pub const SYS_rt_sigsuspend: ::c_long = 133;
pub const SYS_rt_sigaction: ::c_long = 134;
pub const SYS_rt_sigprocmask: ::c_long = 135;
pub const SYS_rt_sigpending: ::c_long = 136;
pub const SYS_rt_sigtimedwait: ::c_long = 137;
pub const SYS_rt_sigqueueinfo: ::c_long = 138;
pub const SYS_rt_sigreturn: ::c_long = 139;
pub const SYS_setpriority: ::c_long = 140;
pub const SYS_getpriority: ::c_long = 141;
pub const SYS_reboot: ::c_long = 142;
pub const SYS_setregid: ::c_long = 143;
pub const SYS_setgid: ::c_long = 144;
pub const SYS_setreuid: ::c_long = 145;
pub const SYS_setuid: ::c_long = 146;
pub const SYS_setresuid: ::c_long = 147;
pub const SYS_getresuid: ::c_long = 148;
pub const SYS_setresgid: ::c_long = 149;
pub const SYS_getresgid: ::c_long = 150;
pub const SYS_setfsuid: ::c_long = 151;
pub const SYS_setfsgid: ::c_long = 152;
pub const SYS_times: ::c_long = 153;
pub const SYS_setpgid: ::c_long = 154;
pub const SYS_getpgid: ::c_long = 155;
pub const SYS_getsid: ::c_long = 156;
pub const SYS_setsid: ::c_long = 157;
pub const SYS_getgroups: ::c_long = 158;
pub const SYS_setgroups: ::c_long = 159;
pub const SYS_uname: ::c_long = 160;
pub const SYS_sethostname: ::c_long = 161;
pub const SYS_setdomainname: ::c_long = 162;
pub const SYS_getrlimit: ::c_long = 163;
pub const SYS_setrlimit: ::c_long = 164;
pub const SYS_getrusage: ::c_long = 165;
pub const SYS_umask: ::c_long = 166;
pub const SYS_prctl: ::c_long = 167;
pub const SYS_getcpu: ::c_long = 168;
pub const SYS_gettimeofday: ::c_long = 169;
pub const SYS_settimeofday: ::c_long = 170;
pub const SYS_adjtimex: ::c_long = 171;
pub const SYS_getpid: ::c_long = 172;
pub const SYS_getppid: ::c_long = 173;
pub const SYS_getuid: ::c_long = 174;
pub const SYS_geteuid: ::c_long = 175;
pub const SYS_getgid: ::c_long = 176;
pub const SYS_getegid: ::c_long = 177;
pub const SYS_gettid: ::c_long = 178;
pub const SYS_sysinfo: ::c_long = 179;
pub const SYS_mq_open: ::c_long = 180;
pub const SYS_mq_unlink: ::c_long = 181;
pub const SYS_mq_timedsend: ::c_long = 182;
pub const SYS_mq_timedreceive: ::c_long = 183;
pub const SYS_mq_notify: ::c_long = 184;
pub const SYS_mq_getsetattr: ::c_long = 185;
pub const SYS_msgget: ::c_long = 186;
pub const SYS_msgctl: ::c_long = 187;
pub const SYS_msgrcv: ::c_long = 188;
pub const SYS_msgsnd: ::c_long = 189;
pub const SYS_semget: ::c_long = 190;
pub const SYS_semctl: ::c_long = 191;
pub const SYS_semtimedop: ::c_long = 192;
pub const SYS_semop: ::c_long = 193;
pub const SYS_shmget: ::c_long = 194;
pub const SYS_shmctl: ::c_long = 195;
pub const SYS_shmat: ::c_long = 196;
pub const SYS_shmdt: ::c_long = 197;
pub const SYS_socket: ::c_long = 198;
pub const SYS_socketpair: ::c_long = 199;
pub const SYS_bind: ::c_long = 200;
pub const SYS_listen: ::c_long = 201;
pub const SYS_accept: ::c_long = 202;
pub const SYS_connect: ::c_long = 203;
pub const SYS_getsockname: ::c_long = 204;
pub const SYS_getpeername: ::c_long = 205;
pub const SYS_sendto: ::c_long = 206;
pub const SYS_recvfrom: ::c_long = 207;
pub const SYS_setsockopt: ::c_long = 208;
pub const SYS_getsockopt: ::c_long = 209;
pub const SYS_shutdown: ::c_long = 210;
pub const SYS_sendmsg: ::c_long = 211;
pub const SYS_recvmsg: ::c_long = 212;
pub const SYS_readahead: ::c_long = 213;
pub const SYS_brk: ::c_long = 214;
pub const SYS_munmap: ::c_long = 215;
pub const SYS_mremap: ::c_long = 216;
pub const SYS_add_key: ::c_long = 217;
pub const SYS_request_key: ::c_long = 218;
pub const SYS_keyctl: ::c_long = 219;
pub const SYS_clone: ::c_long = 220;
pub const SYS_execve: ::c_long = 221;
pub const SYS_mmap: ::c_long = 222;
pub const SYS_swapon: ::c_long = 224;
pub const SYS_swapoff: ::c_long = 225;
pub const SYS_mprotect: ::c_long = 226;
pub const SYS_msync: ::c_long = 227;
pub const SYS_mlock: ::c_long = 228;
pub const SYS_munlock: ::c_long = 229;
pub const SYS_mlockall: ::c_long = 230;
pub const SYS_munlockall: ::c_long = 231;
pub const SYS_mincore: ::c_long = 232;
pub const SYS_madvise: ::c_long = 233;
pub const SYS_remap_file_pages: ::c_long = 234;
pub const SYS_mbind: ::c_long = 235;
pub const SYS_get_mempolicy: ::c_long = 236;
pub const SYS_set_mempolicy: ::c_long = 237;
pub const SYS_migrate_pages: ::c_long = 238;
pub const SYS_move_pages: ::c_long = 239;
pub const SYS_rt_tgsigqueueinfo: ::c_long = 240;
pub const SYS_perf_event_open: ::c_long = 241;
pub const SYS_accept4: ::c_long = 242;
pub const SYS_recvmmsg: ::c_long = 243;
pub const SYS_wait4: ::c_long = 260;
pub const SYS_prlimit64: ::c_long = 261;
pub const SYS_fanotify_init: ::c_long = 262;
pub const SYS_fanotify_mark: ::c_long = 263;
pub const SYS_name_to_handle_at: ::c_long = 264;
pub const SYS_open_by_handle_at: ::c_long = 265;
pub const SYS_clock_adjtime: ::c_long = 266;
pub const SYS_syncfs: ::c_long = 267;
pub const SYS_setns: ::c_long = 268;
pub const SYS_sendmmsg: ::c_long = 269;
pub const SYS_process_vm_readv: ::c_long = 270;
pub const SYS_process_vm_writev: ::c_long = 271;
pub const SYS_kcmp: ::c_long = 272;
pub const SYS_finit_module: ::c_long = 273;
pub const SYS_sched_setattr: ::c_long = 274;
pub const SYS_sched_getattr: ::c_long = 275;
pub const SYS_renameat2: ::c_long = 276;
pub const SYS_seccomp: ::c_long = 277;
pub const SYS_getrandom: ::c_long = 278;
pub const SYS_memfd_create: ::c_long = 279;
pub const SYS_bpf: ::c_long = 280;
pub const SYS_execveat: ::c_long = 281;
pub const SYS_userfaultfd: ::c_long = 282;
pub const SYS_membarrier: ::c_long = 283;
pub const SYS_mlock2: ::c_long = 284;
pub const SYS_copy_file_range: ::c_long = 285;
pub const SYS_preadv2: ::c_long = 286;
pub const SYS_pwritev2: ::c_long = 287;
pub const SYS_pkey_mprotect: ::c_long = 288;
pub const SYS_pkey_alloc: ::c_long = 289;
pub const SYS_pkey_free: ::c_long = 290;
pub const SYS_statx: ::c_long = 291;
pub const SYS_pidfd_send_signal: ::c_long = 424;
pub const SYS_io_uring_setup: ::c_long = 425;
pub const SYS_io_uring_enter: ::c_long = 426;
pub const SYS_io_uring_register: ::c_long = 427;
pub const SYS_open_tree: ::c_long = 428;
pub const SYS_move_mount: ::c_long = 429;
pub const SYS_fsopen: ::c_long = 430;
pub const SYS_fsconfig: ::c_long = 431;
pub const SYS_fsmount: ::c_long = 432;
pub const SYS_fspick: ::c_long = 433;
pub const SYS_pidfd_open: ::c_long = 434;
pub const SYS_clone3: ::c_long = 435;
pub const SYS_close_range: ::c_long = 436;
pub const SYS_openat2: ::c_long = 437;
pub const SYS_pidfd_getfd: ::c_long = 438;
pub const SYS_faccessat2: ::c_long = 439;
pub const SYS_process_madvise: ::c_long = 440;
pub const SYS_epoll_pwait2: ::c_long = 441;
pub const SYS_mount_setattr: ::c_long = 442;
pub const RLIMIT_NLIMITS: ::c_int = 15;
pub const RLIM_NLIMITS: ::c_int = RLIMIT_NLIMITS;
pub const MCL_CURRENT: ::c_int = 0x0001;
pub const MCL_FUTURE: ::c_int = 0x0002;
pub const CBAUD: ::tcflag_t = 0o0010017;
pub const TAB1: ::c_int = 0x00000800;
pub const TAB2: ::c_int = 0x00001000;
pub const TAB3: ::c_int = 0x00001800;
pub const CR1: ::c_int = 0x00000200;
pub const CR2: ::c_int = 0x00000400;
pub const CR3: ::c_int = 0x00000600;
pub const FF1: ::c_int = 0x00008000;
pub const BS1: ::c_int = 0x00002000;
pub const VT1: ::c_int = 0x00004000;
pub const VWERASE: usize = 14;
pub const VREPRINT: usize = 12;
pub const VSUSP: usize = 10;
pub const VSTART: usize = 8;
pub const VSTOP: usize = 9;
pub const VDISCARD: usize = 13;
pub const VTIME: usize = 5;
pub const IXON: ::tcflag_t = 0x00000400;
pub const IXOFF: ::tcflag_t = 0x00001000;
pub const ONLCR: ::tcflag_t = 0x4;
pub const CSIZE: ::tcflag_t = 0x00000030;
pub const CS6: ::tcflag_t = 0x00000010;
pub const CS7: ::tcflag_t = 0x00000020;
pub const CS8: ::tcflag_t = 0x00000030;
pub const CSTOPB: ::tcflag_t = 0x00000040;
pub const CREAD: ::tcflag_t = 0x00000080;
pub const PARENB: ::tcflag_t = 0x00000100;
pub const PARODD: ::tcflag_t = 0x00000200;
pub const HUPCL: ::tcflag_t = 0x00000400;
pub const CLOCAL: ::tcflag_t = 0x00000800;
pub const ECHOKE: ::tcflag_t = 0x00000800;
pub const ECHOE: ::tcflag_t = 0x00000010;
pub const ECHOK: ::tcflag_t = 0x00000020;
pub const ECHONL: ::tcflag_t = 0x00000040;
pub const ECHOPRT: ::tcflag_t = 0x00000400;
pub const ECHOCTL: ::tcflag_t = 0x00000200;
pub const ISIG: ::tcflag_t = 0x00000001;
pub const ICANON: ::tcflag_t = 0x00000002;
pub const PENDIN: ::tcflag_t = 0x00004000;
pub const NOFLSH: ::tcflag_t = 0x00000080;
pub const CIBAUD: ::tcflag_t = 0o02003600000;
pub const CBAUDEX: ::tcflag_t = 0o010000;
pub const VSWTC: usize = 7;
pub const OLCUC: ::tcflag_t = 0o000002;
pub const NLDLY: ::tcflag_t = 0o000400;
pub const CRDLY: ::tcflag_t = 0o003000;
pub const TABDLY: ::tcflag_t = 0o014000;
pub const BSDLY: ::tcflag_t = 0o020000;
pub const FFDLY: ::tcflag_t = 0o100000;
pub const VTDLY: ::tcflag_t = 0o040000;
pub const XTABS: ::tcflag_t = 0o014000;
pub const B57600: ::speed_t = 0o010001;
pub const B115200: ::speed_t = 0o010002;
pub const B230400: ::speed_t = 0o010003;
pub const B460800: ::speed_t = 0o010004;
pub const B500000: ::speed_t = 0o010005;
pub const B576000: ::speed_t = 0o010006;
pub const B921600: ::speed_t = 0o010007;
pub const B1000000: ::speed_t = 0o010010;
pub const B1152000: ::speed_t = 0o010011;
pub const B1500000: ::speed_t = 0o010012;
pub const B2000000: ::speed_t = 0o010013;
pub const B2500000: ::speed_t = 0o010014;
pub const B3000000: ::speed_t = 0o010015;
pub const B3500000: ::speed_t = 0o010016;
pub const B4000000: ::speed_t = 0o010017;
pub const EDEADLK: ::c_int = 35;
pub const EDEADLOCK: ::c_int = EDEADLK;
pub const EXTPROC: ::tcflag_t = 0x00010000;
pub const VEOL: usize = 11;
pub const VEOL2: usize = 16;
pub const VMIN: usize = 6;
pub const IEXTEN: ::tcflag_t = 0x00008000;
pub const TOSTOP: ::tcflag_t = 0x00000100;
pub const FLUSHO: ::tcflag_t = 0x00001000;
cfg_if! {
if #[cfg(libc_align)] {
mod align;
pub use self::align::*;
}
}
| 36.763524 | 53 | 0.699823 |
dbc2aa7ce3b91449c06f554500bddc09b96e3440
| 14,762 |
use crate::core::types::{GameCfg, HedgehogInfo, ServerVar, TeamInfo, VoteType};
use std::{convert::From, iter::once, ops};
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum HwProtocolMessage {
// common messages
Ping,
Pong,
Quit(Option<String>),
Global(String),
Watch(u32),
ToggleServerRegisteredOnly,
SuperPower,
Info(String),
// anteroom messages
Nick(String),
Proto(u16),
Password(String, String),
Checker(u16, String, String),
// lobby messages
List,
Chat(String),
CreateRoom(String, Option<String>),
JoinRoom(String, Option<String>),
Follow(String),
Rnd(Vec<String>),
Kick(String),
Ban(String, String, u32),
BanIp(String, String, u32),
BanNick(String, String, u32),
BanList,
Unban(String),
SetServerVar(ServerVar),
GetServerVar,
RestartServer,
Stats,
// room messages
Part(Option<String>),
Cfg(GameCfg),
AddTeam(Box<TeamInfo>),
RemoveTeam(String),
SetHedgehogsNumber(String, u8),
SetTeamColor(String, u8),
ToggleReady,
StartGame,
EngineMessage(String),
RoundFinished,
ToggleRestrictJoin,
ToggleRestrictTeams,
ToggleRegisteredOnly,
RoomName(String),
Delegate(String),
TeamChat(String),
MaxTeams(u8),
Fix,
Unfix,
Greeting(Option<String>),
CallVote(Option<VoteType>),
Vote(bool),
ForceVote(bool),
Save(String, String),
Delete(String),
SaveRoom(String),
LoadRoom(String),
}
#[derive(Debug, Clone, Copy)]
pub enum ProtocolFlags {
InRoom,
RoomMaster,
Ready,
InGame,
Registered,
Admin,
Contributor,
}
impl ProtocolFlags {
#[inline]
fn flag_char(&self) -> char {
match self {
ProtocolFlags::InRoom => 'i',
ProtocolFlags::RoomMaster => 'h',
ProtocolFlags::Ready => 'r',
ProtocolFlags::InGame => 'g',
ProtocolFlags::Registered => 'u',
ProtocolFlags::Admin => 'a',
ProtocolFlags::Contributor => 'c',
}
}
#[inline]
fn format(prefix: char, flags: &[ProtocolFlags]) -> String {
once(prefix)
.chain(flags.iter().map(|f| f.flag_char()))
.collect()
}
}
#[inline]
pub fn add_flags(flags: &[ProtocolFlags]) -> String {
ProtocolFlags::format('+', flags)
}
#[inline]
pub fn remove_flags(flags: &[ProtocolFlags]) -> String {
ProtocolFlags::format('-', flags)
}
#[derive(Debug)]
pub enum HwServerMessage {
Connected(u32),
Redirect(u16),
Ping,
Pong,
Bye(String),
Nick(String),
Proto(u16),
AskPassword(String),
ServerAuth(String),
LobbyLeft(String, String),
LobbyJoined(Vec<String>),
ChatMsg { nick: String, msg: String },
ClientFlags(String, Vec<String>),
Rooms(Vec<String>),
RoomAdd(Vec<String>),
RoomJoined(Vec<String>),
RoomLeft(String, String),
RoomRemove(String),
RoomUpdated(String, Vec<String>),
Joining(String),
TeamAdd(Vec<String>),
TeamRemove(String),
TeamAccepted(String),
TeamColor(String, u8),
HedgehogsNumber(String, u8),
ConfigEntry(String, Vec<String>),
Kicked,
RunGame,
ForwardEngineMessage(Vec<String>),
RoundFinished,
ReplayStart,
Info(Vec<String>),
ServerMessage(String),
ServerVars(Vec<String>),
Notice(String),
Warning(String),
Error(String),
Unreachable,
//Deprecated messages
LegacyReady(bool, Vec<String>),
}
fn special_chat(nick: &str, msg: String) -> HwServerMessage {
HwServerMessage::ChatMsg {
nick: nick.to_string(),
msg,
}
}
pub fn server_chat(msg: String) -> HwServerMessage {
special_chat("[server]", msg)
}
pub fn global_chat(msg: String) -> HwServerMessage {
special_chat("(global notice)", msg)
}
impl ServerVar {
pub fn to_protocol(&self) -> Vec<String> {
use ServerVar::*;
match self {
MOTDNew(s) => vec!["MOTD_NEW".to_string(), s.clone()],
MOTDOld(s) => vec!["MOTD_OLD".to_string(), s.clone()],
LatestProto(n) => vec!["LATEST_PROTO".to_string(), n.to_string()],
}
}
}
impl VoteType {
pub fn to_protocol(&self) -> Vec<String> {
use VoteType::*;
match self {
Kick(nick) => vec!["KICK".to_string(), nick.clone()],
Map(None) => vec!["MAP".to_string()],
Map(Some(name)) => vec!["MAP".to_string(), name.clone()],
Pause => vec!["PAUSE".to_string()],
NewSeed => vec!["NEWSEED".to_string()],
HedgehogsPerTeam(count) => vec!["HEDGEHOGS".to_string(), count.to_string()],
}
}
}
impl GameCfg {
pub fn to_protocol(&self) -> (String, Vec<String>) {
use GameCfg::*;
match self {
FeatureSize(s) => ("FEATURE_SIZE".to_string(), vec![s.to_string()]),
MapType(t) => ("MAP".to_string(), vec![t.to_string()]),
MapGenerator(g) => ("MAPGEN".to_string(), vec![g.to_string()]),
MazeSize(s) => ("MAZE_SIZE".to_string(), vec![s.to_string()]),
Seed(s) => ("SEED".to_string(), vec![s.to_string()]),
Template(t) => ("TEMPLATE".to_string(), vec![t.to_string()]),
Ammo(n, None) => ("AMMO".to_string(), vec![n.to_string()]),
Ammo(n, Some(s)) => ("AMMO".to_string(), vec![n.to_string(), s.to_string()]),
Scheme(n, s) if s.is_empty() => ("SCHEME".to_string(), vec![n.to_string()]),
Scheme(n, s) => ("SCHEME".to_string(), {
let mut v = vec![n.to_string()];
v.extend(s.clone().into_iter());
v
}),
Script(s) => ("SCRIPT".to_string(), vec![s.to_string()]),
Theme(t) => ("THEME".to_string(), vec![t.to_string()]),
DrawnMap(m) => ("DRAWNMAP".to_string(), vec![m.to_string()]),
}
}
pub fn to_server_msg(&self) -> HwServerMessage {
        use self::HwServerMessage::ConfigEntry;
        let (name, args) = self.to_protocol();
        ConfigEntry(name, args)
}
}
impl TeamInfo {
pub fn to_protocol(&self) -> Vec<String> {
let mut info = vec![
self.name.clone(),
self.grave.clone(),
self.fort.clone(),
self.voice_pack.clone(),
self.flag.clone(),
self.owner.clone(),
self.difficulty.to_string(),
];
let hogs = self
.hedgehogs
.iter()
.flat_map(|h| once(h.name.clone()).chain(once(h.hat.clone())));
info.extend(hogs);
info
}
}
macro_rules! const_braces {
($e: expr) => {
"{}\n"
};
}
macro_rules! msg {
[$($part: expr),*] => {
        format!(concat!($(const_braces!($part)),*, "\n"), $($part),*)
};
}
#[cfg(test)]
macro_rules! several {
[$part: expr] => { once($part) };
[$part: expr, $($other: expr),*] => { once($part).chain(several![$($other),*]) };
}
impl HwProtocolMessage {
/** Converts the message to a raw `String`, which can be sent over the network.
*
* This is the inverse of the `message` parser.
*/
#[cfg(test)]
pub(crate) fn to_raw_protocol(&self) -> String {
use self::HwProtocolMessage::*;
match self {
Ping => msg!["PING"],
Pong => msg!["PONG"],
Quit(None) => msg!["QUIT"],
Quit(Some(msg)) => msg!["QUIT", msg],
Global(msg) => msg!["CMD", format!("GLOBAL {}", msg)],
Watch(name) => msg!["CMD", format!("WATCH {}", name)],
ToggleServerRegisteredOnly => msg!["CMD", "REGISTERED_ONLY"],
SuperPower => msg!["CMD", "SUPER_POWER"],
Info(info) => msg!["CMD", format!("INFO {}", info)],
            Nick(nick) => msg!["NICK", nick],
Proto(version) => msg!["PROTO", version],
Password(p, s) => msg!["PASSWORD", p, s],
Checker(i, n, p) => msg!["CHECKER", i, n, p],
List => msg!["LIST"],
Chat(msg) => msg!["CHAT", msg],
CreateRoom(name, None) => msg!["CREATE_ROOM", name],
CreateRoom(name, Some(password)) => msg!["CREATE_ROOM", name, password],
JoinRoom(name, None) => msg!["JOIN_ROOM", name],
JoinRoom(name, Some(password)) => msg!["JOIN_ROOM", name, password],
Follow(name) => msg!["FOLLOW", name],
Rnd(args) => {
if args.is_empty() {
msg!["CMD", "RND"]
} else {
msg!["CMD", format!("RND {}", args.join(" "))]
}
}
Kick(name) => msg!["KICK", name],
Ban(name, reason, time) => msg!["BAN", name, reason, time],
BanIp(ip, reason, time) => msg!["BAN_IP", ip, reason, time],
            BanNick(nick, reason, time) => msg!["BAN_NICK", nick, reason, time],
BanList => msg!["BANLIST"],
Unban(name) => msg!["UNBAN", name],
SetServerVar(var) => construct_message(&["SET_SERVER_VAR"], &var.to_protocol()),
GetServerVar => msg!["GET_SERVER_VAR"],
RestartServer => msg!["CMD", "RESTART_SERVER YES"],
Stats => msg!["CMD", "STATS"],
Part(None) => msg!["PART"],
Part(Some(msg)) => msg!["PART", msg],
Cfg(config) => {
let (name, args) = config.to_protocol();
msg!["CFG", name, args.join("\n")]
}
AddTeam(info) => msg![
"ADD_TEAM",
info.name,
info.color,
info.grave,
info.fort,
info.voice_pack,
info.flag,
info.difficulty,
info.hedgehogs
.iter()
.flat_map(|h| several![&h.name[..], &h.hat[..]])
.collect::<Vec<_>>()
.join("\n")
],
RemoveTeam(name) => msg!["REMOVE_TEAM", name],
SetHedgehogsNumber(team, number) => msg!["HH_NUM", team, number],
SetTeamColor(team, color) => msg!["TEAM_COLOR", team, color],
ToggleReady => msg!["TOGGLE_READY"],
StartGame => msg!["START_GAME"],
EngineMessage(msg) => msg!["EM", msg],
RoundFinished => msg!["ROUNDFINISHED"],
ToggleRestrictJoin => msg!["TOGGLE_RESTRICT_JOINS"],
ToggleRestrictTeams => msg!["TOGGLE_RESTRICT_TEAMS"],
ToggleRegisteredOnly => msg!["TOGGLE_REGISTERED_ONLY"],
RoomName(name) => msg!["ROOM_NAME", name],
Delegate(name) => msg!["CMD", format!("DELEGATE {}", name)],
TeamChat(msg) => msg!["TEAMCHAT", msg],
MaxTeams(count) => msg!["CMD", format!("MAXTEAMS {}", count)],
Fix => msg!["CMD", "FIX"],
Unfix => msg!["CMD", "UNFIX"],
Greeting(None) => msg!["CMD", "GREETING"],
Greeting(Some(msg)) => msg!["CMD", format!("GREETING {}", msg)],
CallVote(None) => msg!["CMD", "CALLVOTE"],
CallVote(Some(vote)) => {
msg!["CMD", format!("CALLVOTE {}", &vote.to_protocol().join(" "))]
}
Vote(msg) => msg!["CMD", format!("VOTE {}", if *msg { "YES" } else { "NO" })],
ForceVote(msg) => msg!["CMD", format!("FORCE {}", if *msg { "YES" } else { "NO" })],
Save(name, location) => msg!["CMD", format!("SAVE {} {}", name, location)],
Delete(name) => msg!["CMD", format!("DELETE {}", name)],
SaveRoom(name) => msg!["CMD", format!("SAVEROOM {}", name)],
LoadRoom(name) => msg!["CMD", format!("LOADROOM {}", name)],
_ => panic!("Protocol message not yet implemented"),
}
}
}
fn construct_message(header: &[&str], msg: &[String]) -> String {
let mut v: Vec<_> = header.iter().cloned().collect();
v.extend(msg.iter().map(|s| &s[..]));
v.push("\n");
v.join("\n")
}
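// Wire-format note (added for clarity): `construct_message` joins the header tokens and
// payload strings with '\n' and appends a trailing "\n", so for example
// `construct_message(&["CFG", "SCHEME"], &values)` yields
// "CFG\nSCHEME\n<values...>\n\n" -- newline-separated fields terminated by a blank line,
// matching the framing produced by the `msg!` macro above.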
impl HwServerMessage {
pub fn to_raw_protocol(&self) -> String {
use self::HwServerMessage::*;
match self {
Ping => msg!["PING"],
Pong => msg!["PONG"],
Connected(protocol_version) => msg![
"CONNECTED",
"Hedgewars server https://www.hedgewars.org/",
protocol_version
],
Redirect(port) => msg!["REDIRECT", port],
Bye(msg) => msg!["BYE", msg],
Nick(nick) => msg!["NICK", nick],
Proto(proto) => msg!["PROTO", proto],
AskPassword(salt) => msg!["ASKPASSWORD", salt],
ServerAuth(hash) => msg!["SERVER_AUTH", hash],
LobbyLeft(nick, msg) => msg!["LOBBY:LEFT", nick, msg],
LobbyJoined(nicks) => construct_message(&["LOBBY:JOINED"], &nicks),
ClientFlags(flags, nicks) => construct_message(&["CLIENT_FLAGS", flags], &nicks),
Rooms(info) => construct_message(&["ROOMS"], &info),
RoomAdd(info) => construct_message(&["ROOM", "ADD"], &info),
RoomJoined(nicks) => construct_message(&["JOINED"], &nicks),
RoomLeft(nick, msg) => msg!["LEFT", nick, msg],
RoomRemove(name) => msg!["ROOM", "DEL", name],
RoomUpdated(name, info) => construct_message(&["ROOM", "UPD", name], &info),
Joining(name) => msg!["JOINING", name],
TeamAdd(info) => construct_message(&["ADD_TEAM"], &info),
TeamRemove(name) => msg!["REMOVE_TEAM", name],
TeamAccepted(name) => msg!["TEAM_ACCEPTED", name],
TeamColor(name, color) => msg!["TEAM_COLOR", name, color],
HedgehogsNumber(name, number) => msg!["HH_NUM", name, number],
ConfigEntry(name, values) => construct_message(&["CFG", name], &values),
Kicked => msg!["KICKED"],
RunGame => msg!["RUN_GAME"],
ForwardEngineMessage(em) => construct_message(&["EM"], &em),
RoundFinished => msg!["ROUND_FINISHED"],
ChatMsg { nick, msg } => msg!["CHAT", nick, msg],
Info(info) => construct_message(&["INFO"], &info),
ServerMessage(msg) => msg!["SERVER_MESSAGE", msg],
ServerVars(vars) => construct_message(&["SERVER_VARS"], &vars),
Notice(msg) => msg!["NOTICE", msg],
Warning(msg) => msg!["WARNING", msg],
Error(msg) => msg!["ERROR", msg],
ReplayStart => msg!["REPLAY_START"],
LegacyReady(is_ready, nicks) => {
construct_message(&[if *is_ready { "READY" } else { "NOT_READY" }], &nicks)
}
_ => msg!["ERROR", "UNIMPLEMENTED"],
}
}
}
| 34.330233 | 96 | 0.522964 |
e8308714841ca498013a797e93d6fd86db1e815a
| 5,325 |
#![feature(test, array_methods)]
extern crate test;
use std::collections::HashMap;
use test::{black_box, Bencher};
#[derive(Debug, Clone, PartialEq)]
pub enum Json {
Null,
Bool(bool),
Str(String),
Num(f64),
Array(Vec<Json>),
Object(HashMap<String, Json>),
}
static JSON: &'static [u8] = include_bytes!("sample.json");
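// The two benchmarks below parse the same bundled JSON sample with two different
// parser-combinator crates (chumsky and pom) so their throughput can be compared
// like-for-like.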
#[bench]
fn chumsky(b: &mut Bencher) {
use ::chumsky::prelude::*;
let json = chumsky::json();
b.iter(|| black_box(json.parse(JSON).unwrap()));
}
#[bench]
fn pom(b: &mut Bencher) {
let json = pom::json();
b.iter(|| black_box(json.parse(JSON).unwrap()));
}
mod chumsky {
use chumsky::{error::Cheap, prelude::*};
use super::Json;
use std::{collections::HashMap, str};
pub fn json() -> impl Parser<u8, Json, Error = Cheap<u8>> {
recursive(|value| {
let frac = just(b'.').chain(text::digits(10));
let exp = just(b'e')
.or(just(b'E'))
.ignore_then(just(b'+').or(just(b'-')).or_not())
.chain(text::digits(10));
let number = just(b'-')
.or_not()
.chain(text::int(10))
.chain(frac.or_not().flatten())
.chain::<u8, _, _>(exp.or_not().flatten())
.map(|bytes| str::from_utf8(&bytes.as_slice()).unwrap().parse().unwrap());
let escape = just(b'\\').ignore_then(
just(b'\\')
.or(just(b'/'))
.or(just(b'"'))
.or(just(b'b').to(b'\x08'))
.or(just(b'f').to(b'\x0C'))
.or(just(b'n').to(b'\n'))
.or(just(b'r').to(b'\r'))
.or(just(b't').to(b'\t')),
);
let string = just(b'"')
.ignore_then(filter(|c| *c != b'\\' && *c != b'"').or(escape).repeated())
.then_ignore(just(b'"'))
.map(|bytes| String::from_utf8(bytes).unwrap());
let array = value
.clone()
.chain(just(b',').ignore_then(value.clone()).repeated())
.or_not()
.flatten()
.delimited_by(b'[', b']')
.map(Json::Array);
let member = string.then_ignore(just(b':').padded()).then(value);
let object = member
.clone()
.chain(just(b',').padded().ignore_then(member).repeated())
.or_not()
.flatten()
.padded()
.delimited_by(b'{', b'}')
.collect::<HashMap<String, Json>>()
.map(Json::Object);
just(b"null")
.to(Json::Null)
.or(just(b"true").to(Json::Bool(true)))
.or(just(b"false").to(Json::Bool(false)))
.or(number.map(Json::Num))
.or(string.map(Json::Str))
.or(array)
.or(object)
.padded()
})
.then_ignore(end())
}
}
mod pom {
use pom::parser::*;
use pom::Parser;
use super::Json;
use std::{
collections::HashMap,
str::{self, FromStr},
};
fn space() -> Parser<u8, ()> {
one_of(b" \t\r\n").repeat(0..).discard()
}
fn number() -> Parser<u8, f64> {
let integer = one_of(b"123456789") - one_of(b"0123456789").repeat(0..) | sym(b'0');
let frac = sym(b'.') + one_of(b"0123456789").repeat(1..);
let exp = one_of(b"eE") + one_of(b"+-").opt() + one_of(b"0123456789").repeat(1..);
let number = sym(b'-').opt() + integer + frac.opt() + exp.opt();
number
.collect()
.convert(str::from_utf8)
.convert(|s| f64::from_str(&s))
}
fn string() -> Parser<u8, String> {
let special_char = sym(b'\\')
| sym(b'/')
| sym(b'"')
| sym(b'b').map(|_| b'\x08')
| sym(b'f').map(|_| b'\x0C')
| sym(b'n').map(|_| b'\n')
| sym(b'r').map(|_| b'\r')
| sym(b't').map(|_| b'\t');
let escape_sequence = sym(b'\\') * special_char;
let string = sym(b'"') * (none_of(b"\\\"") | escape_sequence).repeat(0..) - sym(b'"');
string.convert(String::from_utf8)
}
fn array() -> Parser<u8, Vec<Json>> {
let elems = list(call(value), sym(b',') * space());
sym(b'[') * space() * elems - sym(b']')
}
fn object() -> Parser<u8, HashMap<String, Json>> {
let member = string() - space() - sym(b':') - space() + call(value);
let members = list(member, sym(b',') * space());
let obj = sym(b'{') * space() * members - sym(b'}');
obj.map(|members| members.into_iter().collect::<HashMap<_, _>>())
}
fn value() -> Parser<u8, Json> {
(seq(b"null").map(|_| Json::Null)
| seq(b"true").map(|_| Json::Bool(true))
| seq(b"false").map(|_| Json::Bool(false))
| number().map(|num| Json::Num(num))
| string().map(|text| Json::Str(text))
| array().map(|arr| Json::Array(arr))
| object().map(|obj| Json::Object(obj)))
- space()
}
pub fn json() -> Parser<u8, Json> {
space() * value() - end()
}
}
| 31.140351 | 94 | 0.452394 |
ebd29730211de15710fd3c93b99f011d526fe5ba
| 1,426 |
#![cfg(feature = "parser")]
use std::str::FromStr;
use chrono::*;
use icalendar::{
parser::{read_calendar, unfold},
Calendar, Class, Component, Event, Property, Todo,
};
fn main() {
let event = Event::new()
.summary("test event")
.description("here I have something really important to do")
.starts(Utc::now())
.class(Class::Confidential)
.ends(Utc::now() + Duration::days(1))
.append_property(
Property::new("TEST", "FOOBAR")
.add_parameter("IMPORTANCE", "very")
.add_parameter("DUE", "tomorrow")
.done(),
)
.uid("my.own.id")
.done();
let todo = Todo::new().summary("Buy some milk").done();
let mut built_calendar = Calendar::new();
built_calendar.push(event);
built_calendar.push(todo);
    // let's write this as **rfc5545**
let ical = built_calendar.to_string();
    // and now let's parse it again
let from_parsed = Calendar::from_str(&ical).unwrap();
println!("{}", &ical); // print what we built
println!("{}", from_parsed); // print what parsed
println!("{:#?}", built_calendar); // inner representation of what we built
println!("{:#?}", from_parsed); // inner representation of what we built and then parsed
println!("{:#?}", read_calendar(&unfold(&ical)).unwrap()); // inner presentation of the parser's data structure
}
| 32.409091 | 115 | 0.593268 |
09b4067b1d017eea505bb87ed7caf2f47eb940e4
| 3,588 |
use std::{
convert::TryFrom,
fmt::{Debug, Display, Error, Formatter},
str::Utf8Error
};
use arrayvec::ArrayVec;
use ash::vk::{
MemoryHeap,
MemoryType,
PhysicalDeviceLimits,
PhysicalDeviceSparseProperties,
PhysicalDeviceType
};
use crate::util::{fmt::VkVersion, string::VkSmallString};
vk_result_error! {
#[derive(Debug)]
pub enum EnumerateError {
vk {
ERROR_OUT_OF_HOST_MEMORY,
ERROR_OUT_OF_DEVICE_MEMORY
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct DeviceExtensionProperties {
pub extension_name: VkSmallString,
pub spec_version: VkVersion
}
impl TryFrom<ash::vk::ExtensionProperties> for DeviceExtensionProperties {
type Error = std::str::Utf8Error;
fn try_from(value: ash::vk::ExtensionProperties) -> Result<Self, Self::Error> {
Ok(DeviceExtensionProperties {
extension_name: VkSmallString::try_from(value.extension_name)?,
spec_version: VkVersion(value.spec_version)
})
}
}
impl Display for DeviceExtensionProperties {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{} {}", self.extension_name, self.spec_version)
}
}
vk_result_error! {
#[derive(Debug)]
pub enum ImageFormatPropertiesError {
vk {
ERROR_OUT_OF_HOST_MEMORY,
ERROR_OUT_OF_DEVICE_MEMORY,
ERROR_FORMAT_NOT_SUPPORTED
}
}
}
#[derive(Debug, Clone)] // TODO: arrayvec isn't copy
pub struct PhysicalDeviceMemoryProperties {
pub memory_types: ArrayVec<[MemoryType; 32]>,
pub memory_heaps: ArrayVec<[MemoryHeap; 16]>
}
impl From<ash::vk::PhysicalDeviceMemoryProperties> for PhysicalDeviceMemoryProperties {
fn from(value: ash::vk::PhysicalDeviceMemoryProperties) -> Self {
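		// Vulkan reports fixed-size arrays alongside counts of valid entries; copy the
		// full arrays and then shrink each ArrayVec to its reported count so callers
		// only ever observe initialized elements.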
let mut memory_types = ArrayVec::from(value.memory_types);
unsafe {
memory_types.set_len(value.memory_type_count as usize);
}
let mut memory_heaps = ArrayVec::from(value.memory_heaps);
unsafe {
memory_heaps.set_len(value.memory_heap_count as usize);
}
PhysicalDeviceMemoryProperties {
memory_types,
memory_heaps
}
}
}
pub struct PhysicalDeviceProperties {
pub api_version: VkVersion,
pub driver_version: VkVersion,
pub vendor_id: u32,
pub device_id: u32,
pub device_type: PhysicalDeviceType,
pub device_name: VkSmallString,
pub pipeline_cache_uuid: [u8; 16],
pub limits: PhysicalDeviceLimits,
pub sparse_properties: PhysicalDeviceSparseProperties
}
impl TryFrom<ash::vk::PhysicalDeviceProperties> for PhysicalDeviceProperties {
type Error = Utf8Error;
fn try_from(value: ash::vk::PhysicalDeviceProperties) -> Result<Self, Self::Error> {
Ok(PhysicalDeviceProperties {
api_version: VkVersion(value.api_version),
driver_version: VkVersion(value.driver_version),
vendor_id: value.vendor_id,
device_id: value.device_id,
device_type: value.device_type,
device_name: VkSmallString::try_from(value.device_name)?,
pipeline_cache_uuid: value.pipeline_cache_uuid,
limits: value.limits,
sparse_properties: value.sparse_properties
})
}
}
impl Debug for PhysicalDeviceProperties {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_struct("PhysicalDeviceProperties")
.field("api_version", &self.api_version)
.field("driver_version", &self.driver_version)
.field("vendor_id", &format_args!("0x{:x}", self.vendor_id))
.field("device_id", &format_args!("0x{:x}", self.device_id))
.field("device_type", &self.device_type)
.field("device_name", &self.device_name)
.field(
"pipeline_cache_uuid",
&crate::util::fmt::format_uuid(self.pipeline_cache_uuid)
)
.field("limits", &self.limits)
.field("sparse_properties", &self.sparse_properties)
.finish()
}
}
| 27.389313 | 87 | 0.744705 |
4ab7b2ca9e790080393d165a009b6a634eb46fc6
| 1,201 |
#[derive(Serialize, Deserialize, Default, Clone, Copy, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct CommitmentConfig {
pub commitment: CommitmentLevel,
}
impl CommitmentConfig {
pub fn recent() -> Self {
Self {
commitment: CommitmentLevel::Recent,
}
}
pub fn max() -> Self {
Self {
commitment: CommitmentLevel::Max,
}
}
pub fn root() -> Self {
Self {
commitment: CommitmentLevel::Root,
}
}
pub fn single() -> Self {
Self {
commitment: CommitmentLevel::Single,
}
}
pub fn single_gossip() -> Self {
Self {
commitment: CommitmentLevel::SingleGossip,
}
}
pub fn ok(self) -> Option<Self> {
if self == Self::default() {
None
} else {
Some(self)
}
}
}
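// Illustrative note (not in the original source): `ok()` collapses the default
// configuration to `None`, e.g. `CommitmentConfig::default().ok()` is `None` while
// `CommitmentConfig::recent().ok()` is `Some(..)`, which lets callers omit the default
// value when passing a commitment as an optional parameter.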
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[serde(rename_all = "camelCase")]
pub enum CommitmentLevel {
Max,
Recent,
Root,
Single,
SingleGossip,
}
impl Default for CommitmentLevel {
fn default() -> Self {
Self::Max
}
}
| 19.370968 | 74 | 0.533722 |
bb51db305f0a8a19860e80ae67554d1b01dc77e0
| 1,067 |
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
macro_rules! color_text {
($text:expr, $color:expr) => {{
format_args!("\x1b[{}m{}\x1b[0m", $color, $text)
}};
}
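// ANSI SGR reference for the codes used below: 31-37 select the regular foreground
// colors and 90-97 the bright ones; 4, 7 and 9 enable underline, reverse video and
// strikethrough (24, 27 and 29 disable them); 47 sets a white background and 0 resets
// all attributes.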
#[no_mangle]
pub fn main() -> i32 {
println!(
"{}{}{}{}{} {}{}{}{} {}{}{}{}{}{}",
color_text!("H", 31),
color_text!("e", 32),
color_text!("l", 33),
color_text!("l", 34),
color_text!("o", 35),
color_text!("R", 36),
color_text!("u", 37),
color_text!("s", 90),
color_text!("t", 91),
color_text!("u", 92),
color_text!("C", 93),
color_text!("o", 94),
color_text!("r", 95),
color_text!("e", 96),
color_text!("!", 97),
);
let text =
"reguler \x1b[4munderline\x1b[24m \x1b[7mreverse\x1b[27m \x1b[9mstrikethrough\x1b[29m";
println!("\x1b[47m{}\x1b[0m", color_text!(text, 30));
for i in 31..38 {
println!("{}", color_text!(text, i));
}
for i in 90..98 {
println!("{}", color_text!(text, i));
}
0
}
| 24.25 | 95 | 0.47329 |
f43b07db12592abe2eea93286d5a8b8a6ddab20b
| 9,566 |
use {
crate::blockstore::Blockstore,
crossbeam_channel::{bounded, unbounded},
log::*,
solana_measure::measure::Measure,
solana_sdk::clock::Slot,
std::{
cmp::{max, min},
collections::HashSet,
result::Result,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{Duration, Instant},
},
};
#[derive(Clone)]
pub struct ConfirmedBlockUploadConfig {
pub force_reupload: bool,
pub max_num_slots_to_check: usize,
pub num_blocks_to_upload_in_parallel: usize,
pub block_read_ahead_depth: usize, // should always be >= `num_blocks_to_upload_in_parallel`
}
impl Default for ConfirmedBlockUploadConfig {
fn default() -> Self {
let num_blocks_to_upload_in_parallel = num_cpus::get() / 2;
ConfirmedBlockUploadConfig {
force_reupload: false,
max_num_slots_to_check: num_blocks_to_upload_in_parallel * 4,
num_blocks_to_upload_in_parallel,
block_read_ahead_depth: num_blocks_to_upload_in_parallel * 2,
}
}
}
struct BlockstoreLoadStats {
pub num_blocks_read: usize,
pub elapsed: Duration,
}
pub async fn upload_confirmed_blocks(
blockstore: Arc<Blockstore>,
bigtable: solana_storage_bigtable::LedgerStorage,
starting_slot: Slot,
ending_slot: Slot,
config: ConfirmedBlockUploadConfig,
exit: Arc<AtomicBool>,
) -> Result<Slot, Box<dyn std::error::Error>> {
let mut measure = Measure::start("entire upload");
info!("Loading ledger slots starting at {}...", starting_slot);
let blockstore_slots: Vec<_> = blockstore
.rooted_slot_iterator(starting_slot)
.map_err(|err| {
format!(
"Failed to load entries starting from slot {}: {:?}",
starting_slot, err
)
})?
.map_while(|slot| (slot <= ending_slot).then(|| slot))
.collect();
if blockstore_slots.is_empty() {
return Err(format!(
"Ledger has no slots from {} to {:?}",
starting_slot, ending_slot
)
.into());
}
let first_blockstore_slot = blockstore_slots.first().unwrap();
let last_blockstore_slot = blockstore_slots.last().unwrap();
info!(
"Found {} slots in the range ({}, {})",
blockstore_slots.len(),
first_blockstore_slot,
last_blockstore_slot,
);
// Gather the blocks that are already present in bigtable, by slot
let bigtable_slots = if !config.force_reupload {
let mut bigtable_slots = vec![];
info!(
"Loading list of bigtable blocks between slots {} and {}...",
first_blockstore_slot, last_blockstore_slot
);
let mut start_slot = *first_blockstore_slot;
while start_slot <= *last_blockstore_slot {
let mut next_bigtable_slots = loop {
let num_bigtable_blocks = min(1000, config.max_num_slots_to_check * 2);
match bigtable
.get_confirmed_blocks(start_slot, num_bigtable_blocks)
.await
{
Ok(slots) => break slots,
Err(err) => {
error!("get_confirmed_blocks for {} failed: {:?}", start_slot, err);
// Consider exponential backoff...
tokio::time::sleep(Duration::from_secs(2)).await;
}
}
};
if next_bigtable_slots.is_empty() {
break;
}
bigtable_slots.append(&mut next_bigtable_slots);
start_slot = bigtable_slots.last().unwrap() + 1;
}
bigtable_slots
.into_iter()
.filter(|slot| slot <= last_blockstore_slot)
.collect::<Vec<_>>()
} else {
Vec::new()
};
// The blocks that still need to be uploaded is the difference between what's already in the
// bigtable and what's in blockstore...
let blocks_to_upload = {
let blockstore_slots = blockstore_slots.iter().cloned().collect::<HashSet<_>>();
let bigtable_slots = bigtable_slots.into_iter().collect::<HashSet<_>>();
let mut blocks_to_upload = blockstore_slots
.difference(&bigtable_slots)
.cloned()
.collect::<Vec<_>>();
blocks_to_upload.sort_unstable();
blocks_to_upload.truncate(config.max_num_slots_to_check);
blocks_to_upload
};
if blocks_to_upload.is_empty() {
info!("No blocks need to be uploaded to bigtable");
return Ok(*last_blockstore_slot);
}
let last_slot = *blocks_to_upload.last().unwrap();
info!(
"{} blocks to be uploaded to the bucket in the range ({}, {})",
blocks_to_upload.len(),
blocks_to_upload.first().unwrap(),
last_slot
);
// Distribute the blockstore reading across a few background threads to speed up the bigtable uploading
let (loader_threads, receiver): (Vec<_>, _) = {
let exit = exit.clone();
let (sender, receiver) = bounded(config.block_read_ahead_depth);
let (slot_sender, slot_receiver) = unbounded();
blocks_to_upload
.into_iter()
.for_each(|b| slot_sender.send(b).unwrap());
drop(slot_sender);
(
(0..config.num_blocks_to_upload_in_parallel)
.map(|_| {
let blockstore = blockstore.clone();
let sender = sender.clone();
let slot_receiver = slot_receiver.clone();
let exit = exit.clone();
std::thread::spawn(move || {
let start = Instant::now();
let mut num_blocks_read = 0;
while let Ok(slot) = slot_receiver.recv() {
if exit.load(Ordering::Relaxed) {
break;
}
let _ = match blockstore.get_rooted_block(slot, true) {
Ok(confirmed_block) => {
num_blocks_read += 1;
sender.send((slot, Some(confirmed_block)))
}
Err(err) => {
warn!(
"Failed to get load confirmed block from slot {}: {:?}",
slot, err
);
sender.send((slot, None))
}
};
}
BlockstoreLoadStats {
num_blocks_read,
elapsed: start.elapsed(),
}
})
})
.collect(),
receiver,
)
};
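    // Drain the bounded channel in chunks, uploading each chunk of blocks to bigtable
    // concurrently and counting any upload failures.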
let mut failures = 0;
use futures::stream::StreamExt;
let mut stream =
tokio_stream::iter(receiver.into_iter()).chunks(config.num_blocks_to_upload_in_parallel);
while let Some(blocks) = stream.next().await {
if exit.load(Ordering::Relaxed) {
break;
}
let mut measure_upload = Measure::start("Upload");
let mut num_blocks = blocks.len();
info!("Preparing the next {} blocks for upload", num_blocks);
let uploads = blocks.into_iter().filter_map(|(slot, block)| match block {
None => {
num_blocks -= 1;
None
}
Some(confirmed_block) => {
let bt = bigtable.clone();
Some(tokio::spawn(async move {
bt.upload_confirmed_block(slot, confirmed_block).await
}))
}
});
for result in futures::future::join_all(uploads).await {
if let Err(err) = result {
error!("upload_confirmed_block() join failed: {:?}", err);
failures += 1;
} else if let Err(err) = result.unwrap() {
error!("upload_confirmed_block() upload failed: {:?}", err);
failures += 1;
}
}
measure_upload.stop();
info!("{} for {} blocks", measure_upload, num_blocks);
}
measure.stop();
info!("{}", measure);
let blockstore_results = loader_threads.into_iter().map(|t| t.join());
let mut blockstore_num_blocks_read = 0;
let mut blockstore_load_wallclock = Duration::default();
let mut blockstore_errors = 0;
for r in blockstore_results {
match r {
Ok(stats) => {
blockstore_num_blocks_read += stats.num_blocks_read;
blockstore_load_wallclock = max(stats.elapsed, blockstore_load_wallclock);
}
Err(e) => {
error!("error joining blockstore thread: {:?}", e);
blockstore_errors += 1;
}
}
}
info!(
"blockstore upload took {:?} for {} blocks ({:.2} blocks/s) errors: {}",
blockstore_load_wallclock,
blockstore_num_blocks_read,
blockstore_num_blocks_read as f64 / blockstore_load_wallclock.as_secs_f64(),
blockstore_errors
);
if failures > 0 {
Err(format!("Incomplete upload, {} operations failed", failures).into())
} else {
Ok(last_slot)
}
}
| 34.164286 | 107 | 0.529688 |
d541c907abc6f8e16d740b565491067632f6dc70
| 5,594 |
extern crate genet_sdk;
use genet_sdk::{cast, decoder::*, prelude::*};
use std::collections::{BTreeMap, HashMap};
#[derive(Debug)]
struct Stream {
pub id: u64,
pub seq: i64,
pub len: usize,
offset: usize,
slices: BTreeMap<usize, ByteSlice>,
}
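// Reassembly bookkeeping (descriptive note added for clarity): `offset` is the number of
// contiguous bytes already emitted via `fetch`, and `slices` buffers segments keyed by
// their absolute offset in the reassembled stream, so out-of-order TCP payloads wait
// until the gap in front of them is filled.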
impl Stream {
fn new(id: u64) -> Stream {
        Stream {
            id,
            seq: -1,
            len: 0,
            offset: 0,
            slices: BTreeMap::new(),
        }
}
fn put(&mut self, start: usize, data: ByteSlice) {
if data.len() > 0 {
self.slices.insert(start, data);
let mut end = self.offset;
for (start, slice) in self.slices.iter_mut() {
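                // NOTE: overlap between buffered segments is not trimmed here; `end`
                // simply advances past the end of each stored slice.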
if *start < end {}
end = *start + slice.len();
}
}
}
fn fetch(&mut self) -> impl Iterator<Item = ByteSlice> {
let mut slices = Vec::new();
loop {
let pos;
if let Some((key, value)) = self.slices.iter().next() {
pos = *key;
if *key == self.offset {
self.offset += value.len();
slices.push(value.clone());
} else {
break;
}
} else {
break;
}
self.slices.remove(&pos);
}
slices.into_iter()
}
}
struct TcpStreamWorker {
map: HashMap<(ByteSlice, ByteSlice, u32, u32), Stream>,
}
impl TcpStreamWorker {
fn new() -> TcpStreamWorker {
TcpStreamWorker {
map: HashMap::new(),
}
}
}
impl Worker for TcpStreamWorker {
fn decode(
&mut self,
_ctx: &mut Context,
stack: &LayerStack,
parent: &mut Parent,
) -> Result<Status> {
if parent.id() == token!("tcp") {
let slice: ByteSlice = parent
.payloads()
.iter()
.find(|p| p.id() == token!("@data:tcp"))
.unwrap()
.data();
let stream_id = {
let parent_src: ByteSlice = stack
.attr(token!("_.src"))
.unwrap()
.try_get(parent)?
.try_into()?;
let parent_dst: ByteSlice = stack
.attr(token!("_.dst"))
.unwrap()
.try_get(parent)?
.try_into()?;
let src: u32 = parent
.attr(token!("tcp.src"))
.unwrap()
.try_get(parent)?
.try_into()?;
let dst: u32 = parent
.attr(token!("tcp.dst"))
.unwrap()
.try_get(parent)?
.try_into()?;
(parent_src, parent_dst, src, dst)
};
let id = self.map.len();
let stream = self
.map
.entry(stream_id)
.or_insert_with(|| Stream::new(id as u64));
let seq: u32 = parent
.attr(token!("tcp.seq"))
.unwrap()
.try_get(parent)?
.try_into()?;
let window: u16 = parent
.attr(token!("tcp.window"))
.unwrap()
.try_get(parent)?
.try_into()?;
let flags: u8 = parent
.attr(token!("tcp.flags"))
.unwrap()
.try_get(parent)?
.try_into()?;
let syn = (flags & (0x1 << 1)) != 0;
if syn {
if stream.seq < 0 {
let offset = stream.len;
stream.seq = seq as i64;
stream.len += slice.len();
stream.put(offset, slice);
}
} else if stream.seq >= 0 {
if slice.len() > 0 {
if seq >= stream.seq as u32 {
let offset = stream.len + (seq - stream.seq as u32) as usize;
stream.seq = seq as i64;
stream.len += slice.len();
stream.put(offset, slice);
} else if stream.seq - seq as i64 > window as i64 {
let offset = stream.len
+ ((std::u32::MAX as u32 - stream.seq as u32) + seq) as usize;
stream.seq = seq as i64;
stream.len += slice.len();
stream.put(offset, slice);
}
} else if (stream.seq + 1) % std::u32::MAX as i64 == seq as i64 {
stream.seq = seq as i64;
}
}
let payloads = stream.fetch();
for payload in payloads {
parent.add_payload(Payload::new(payload, "@stream:tcp"));
}
parent.add_attr(attr!(&STREAM_ATTR));
Ok(Status::Done)
} else {
Ok(Status::Skip)
}
}
}
#[derive(Clone)]
struct TcpStreamDecoder {}
impl Decoder for TcpStreamDecoder {
fn new_worker(&self, _ctx: &Context) -> Box<Worker> {
Box::new(TcpStreamWorker::new())
}
fn metadata(&self) -> Metadata {
Metadata {
exec_type: ExecType::SerialSync,
..Metadata::default()
}
}
}
def_attr_class!(STREAM_ATTR, "tcp.stream",
typ: "@novalue",
cast: cast::UInt8().map(|v| v)
);
genet_decoders!(TcpStreamDecoder {});
| 28.984456 | 90 | 0.417054 |
1c4555b7471d1fcc75fbbe9588e99a206f3a6de4
| 8,579 |
use std::fmt;
use serde::{
de::{Error, MapAccess, SeqAccess, Visitor},
Deserialize, Deserializer,
};
use crate::{
de,
value::{Map, Number, Value},
};
impl std::str::FromStr for Value {
type Err = de::Error;
/// Creates a value from a string reference.
fn from_str(s: &str) -> de::Result<Self> {
let mut de = super::Deserializer::from_str(s)?;
let val = Value::deserialize(&mut de)?;
de.end()?;
Ok(val)
}
}
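// Example (illustrative, not part of the original source): any RON document can be
// parsed into a dynamically typed `Value` through `FromStr`:
//
//     let v: Value = "(width: 20, name: \"Room\")".parse().unwrap();
//     assert!(matches!(v, Value::Map(_)));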
impl<'de> Deserialize<'de> for Value {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_any(ValueVisitor)
}
}
struct ValueVisitor;
impl<'de> Visitor<'de> for ValueVisitor {
type Value = Value;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "a RON value")
}
fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Value::Bool(v))
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Value::Number(Number::new(v)))
}
#[cfg(feature = "integer128")]
fn visit_i128<E>(self, v: i128) -> Result<Self::Value, E>
where
E: Error,
{
self.visit_f64(v as f64)
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Value::Number(Number::new(v)))
}
#[cfg(feature = "integer128")]
fn visit_u128<E>(self, v: u128) -> Result<Self::Value, E>
where
E: Error,
{
self.visit_f64(v as f64)
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Value::Number(Number::new(v)))
}
fn visit_char<E>(self, v: char) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Value::Char(v))
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: Error,
{
self.visit_string(v.to_owned())
}
fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Value::String(v))
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: Error,
{
self.visit_byte_buf(v.to_vec())
}
fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
where
E: Error,
{
self.visit_string(String::from_utf8(v).map_err(|e| Error::custom(format!("{}", e)))?)
}
fn visit_none<E>(self) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Value::Option(None))
}
fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
where
D: Deserializer<'de>,
{
Ok(Value::Option(Some(Box::new(
deserializer.deserialize_any(ValueVisitor)?,
))))
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: Error,
{
Ok(Value::Unit)
}
fn visit_newtype_struct<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_any(ValueVisitor)
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut vec = Vec::new();
if let Some(cap) = seq.size_hint() {
vec.reserve_exact(cap);
}
while let Some(x) = seq.next_element()? {
vec.push(x);
}
Ok(Value::Seq(vec))
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut res: Map = Map::new();
while let Some(entry) = map.next_entry()? {
res.insert(entry.0, entry.1);
}
Ok(Value::Map(res))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::str::FromStr;
fn eval(s: &str) -> Value {
s.parse().expect("Failed to parse")
}
#[test]
fn test_none() {
assert_eq!(eval("None"), Value::Option(None));
}
#[test]
fn test_some() {
assert_eq!(eval("Some(())"), Value::Option(Some(Box::new(Value::Unit))));
assert_eq!(
eval("Some ( () )"),
Value::Option(Some(Box::new(Value::Unit)))
);
}
#[test]
fn test_tuples_basic() {
assert_eq!(
eval("(3, 4.0, 5.0)"),
Value::Seq(vec![
Value::Number(Number::new(3)),
Value::Number(Number::new(4.0)),
Value::Number(Number::new(5.0)),
],),
);
}
#[test]
fn test_tuples_ident() {
assert_eq!(
eval("(true, 3, 4, 5.0)"),
Value::Seq(vec![
Value::Bool(true),
Value::Number(Number::new(3)),
Value::Number(Number::new(4)),
Value::Number(Number::new(5.0)),
]),
);
}
#[test]
fn test_tuples_error() {
use crate::de::{Error, ErrorCode, Position};
assert_eq!(
Value::from_str("Foo:").unwrap_err(),
Error {
code: ErrorCode::TrailingCharacters,
position: Position { col: 4, line: 1 }
},
);
}
#[test]
fn test_floats() {
assert_eq!(
eval("(inf, -inf, NaN)"),
Value::Seq(vec![
Value::Number(Number::new(std::f64::INFINITY)),
Value::Number(Number::new(std::f64::NEG_INFINITY)),
Value::Number(Number::new(std::f64::NAN)),
]),
);
}
#[test]
fn test_complex() {
assert_eq!(
eval(
"Some([
Room ( width: 20, height: 5, name: \"The Room\" ),
(
width: 10.0,
height: 10.0,
name: \"Another room\",
enemy_levels: {
\"Enemy1\": 3,
\"Enemy2\": 5,
\"Enemy3\": 7,
},
),
])"
),
Value::Option(Some(Box::new(Value::Seq(vec![
Value::Map(
vec![
(
Value::String("width".to_owned()),
Value::Number(Number::new(20)),
),
(
Value::String("height".to_owned()),
Value::Number(Number::new(5)),
),
(
Value::String("name".to_owned()),
Value::String("The Room".to_owned()),
),
]
.into_iter()
.collect(),
),
Value::Map(
vec![
(
Value::String("width".to_owned()),
Value::Number(Number::new(10.0)),
),
(
Value::String("height".to_owned()),
Value::Number(Number::new(10.0)),
),
(
Value::String("name".to_owned()),
Value::String("Another room".to_owned()),
),
(
Value::String("enemy_levels".to_owned()),
Value::Map(
vec![
(
Value::String("Enemy1".to_owned()),
Value::Number(Number::new(3)),
),
(
Value::String("Enemy2".to_owned()),
Value::Number(Number::new(5)),
),
(
Value::String("Enemy3".to_owned()),
Value::Number(Number::new(7)),
),
]
.into_iter()
.collect(),
),
),
]
.into_iter()
.collect(),
),
]))))
);
}
}
| 25.381657 | 93 | 0.409721 |
edf9ba2c41a15ea46b86e526b05e0e4e67171cf6
| 972 |
// Ensure that a `#[track_caller]` function, returning `caller_location()`,
// which coerced (to a function pointer) and called, inside a `const fn`,
// in turn called, results in the same output irrespective of whether
// we're in a const or runtime context.
// run-pass
// compile-flags: -Z unleash-the-miri-inside-of-you
#![feature(core_intrinsics, const_caller_location, track_caller, const_fn)]
type L = &'static std::panic::Location<'static>;
#[track_caller]
const fn attributed() -> L {
std::intrinsics::caller_location()
}
const fn calling_attributed() -> L {
// We need `-Z unleash-the-miri-inside-of-you` for this as we don't have `const fn` pointers.
let ptr: fn() -> L = attributed;
ptr()
}
fn main() {
const CONSTANT: L = calling_attributed();
let runtime = calling_attributed();
assert_eq!(
(runtime.file(), runtime.line(), runtime.column()),
(CONSTANT.file(), CONSTANT.line(), CONSTANT.column()),
);
}
| 29.454545 | 97 | 0.670782 |
8f2714c2e401f4e4cfa4901e3c41ded66e668edf
| 4,341 |
use super::cli_types::{Address, LiveCell, LiveCellInfo, LiveCellInfoVec};
use super::util::handle_cmd;
use ckb_tool::ckb_types::{core::Capacity, packed::*};
use log::{debug, trace};
use std::collections::HashSet;
use std::process::Command;
pub struct Collector {
locked_cells: HashSet<OutPoint>,
ckb_cli_bin: String,
api_uri: String,
}
impl Collector {
pub fn new(api_uri: String, ckb_cli_bin: String) -> Self {
Collector {
locked_cells: HashSet::default(),
api_uri,
ckb_cli_bin,
}
}
pub fn lock_cell(&mut self, out_point: OutPoint) {
self.locked_cells.insert(out_point);
}
pub fn is_live_cell_locked(&self, live_cell: &LiveCell) -> bool {
self.locked_cells.contains(&live_cell.out_point())
}
pub fn collect_live_cells(&self, address: Address, capacity: Capacity) -> HashSet<LiveCell> {
const BLOCKS_IN_BATCH: u64 = 1000;
const LIMIT: u64 = 2000;
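        // Scan the chain in windows of BLOCKS_IN_BATCH blocks, keeping only plain cells
        // (no data, no type script) until the requested capacity is covered.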
let tip_number = self.get_tip_block_number();
debug!(
"collect live cells: target {} address {} tip_number {}",
capacity, address, tip_number
);
let mut live_cells = HashSet::new();
let mut collected_capacity = 0;
for i in 0.. {
let from = i * BLOCKS_IN_BATCH;
if from > tip_number {
panic!(
"can't find enough live cells, found {} CKB expected {} CKB",
collected_capacity, capacity
);
}
let to = (i + 1) * BLOCKS_IN_BATCH;
let cells = self.get_live_cells_by_lock_hash(address.clone(), from, to, LIMIT);
trace!("get cells: from {} to {} cells {:?}", from, to, cells.len());
if cells.is_empty() {
continue;
}
let iter = cells
.into_iter()
.filter(|cell| cell.data_bytes == 0 && cell.type_hashes.is_none());
for cell in iter {
let cell: LiveCell = cell.into();
// cell is in use, but not yet committed
if self.is_live_cell_locked(&cell) {
continue;
}
let cell_capacity = cell.capacity;
if !live_cells.insert(cell) {
// skip collected cell
continue;
}
collected_capacity += cell_capacity;
if collected_capacity > capacity.as_u64() {
break;
}
}
if collected_capacity > capacity.as_u64() {
break;
}
}
live_cells
}
fn get_tip_block_number(&self) -> u64 {
let output = handle_cmd(
Command::new(&self.ckb_cli_bin)
.arg("--url")
.arg(&self.api_uri)
.arg("rpc")
.arg("--wait-for-sync")
.arg("get_tip_block_number")
.arg("--output-format")
.arg("json")
.output()
.expect("run cmd"),
)
.expect("run cmd error");
let tip_block_number: u64 = serde_json::from_slice(&output).expect("parse resp");
tip_block_number
}
fn get_live_cells_by_lock_hash(
&self,
address: Address,
from: u64,
to: u64,
limit: u64,
) -> Vec<LiveCellInfo> {
let output = handle_cmd(
Command::new(&self.ckb_cli_bin)
.arg("--url")
.arg(&self.api_uri)
.arg("wallet")
.arg("--wait-for-sync")
.arg("get-live-cells")
.arg("--address")
.arg(address.display_with_network(address.network()))
.arg("--from")
.arg(format!("{}", from))
.arg("--to")
.arg(format!("{}", to))
.arg("--limit")
.arg(format!("{}", limit))
.arg("--output-format")
.arg("json")
.output()
.expect("run cmd"),
)
.expect("run cmd error");
let resp: LiveCellInfoVec = serde_json::from_slice(&output).expect("parse resp");
resp.live_cells
}
}
| 32.886364 | 97 | 0.489519 |
0a889a0999d65c8e4456cb158fd8755214780a20
| 1,535 |
// Copyright 2016 Jonathan Anderson <[email protected]>
//
// This software was developed by BAE Systems, the University of Cambridge
// Computer Laboratory, and Memorial University under DARPA/AFRL contract
// FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
// (TC) research program.
//
// Licensed under the Apache License, Version 2.0,
// <LICENSE-APACHE or http://apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate byteorder;
mod error;
mod header;
mod record;
pub use error::*;
pub use header::*;
pub use record::*;
use std::io;
use std::io::ErrorKind::UnexpectedEof;
type Result<T> = std::result::Result<T, self::Error>;
pub fn parse<E>(mut r: &mut io::Read) -> Result<Vec<(Header,Result<Record>)>>
where E: byteorder::ByteOrder
{
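    // Each record on the wire is a fixed 56-byte header followed by `header.length`
    // bytes of record data; keep reading until a clean EOF at a header boundary.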
let mut v = Vec::new();
loop {
let mut data = [0; 56];
match r.read_exact(&mut data) {
Err(ref e) if e.kind() == UnexpectedEof => {
break;
},
Err(e) => { return Err(Error::IO(e)); },
Ok(()) => {},
};
let header = try![Header::parse(&data)];
let mut data = vec![0; header.length];
try![r.read_exact(&mut data).map_err(Error::IO)];
let record = Record::parse::<E>(&data, &header.record_type);
v.push((header, record));
}
Ok(v)
}
| 26.929825 | 77 | 0.619544 |
21438f4814eeedf8b93818dbf5619729fc8616df
| 13,468 |
use combine::{
choice, error::ParseError, not_followed_by, parser::char::string, try, Parser, Stream,
};
use tokens::{ident_part, Token};
#[derive(Debug, PartialEq, Clone)]
/// A JS Keyword
///
/// #Standard
/// await
/// break
/// case
/// catch
/// class
/// const
/// continue
/// debugger
/// default
/// delete (10)
/// do
/// else
/// export
/// extends
/// finally
/// for
/// function
/// if
/// import
/// in (20)
/// instanceof
/// new
/// return
/// super
/// switch
/// this
/// throw
/// try
/// typeof
/// var (30)
/// void
/// while
/// with
/// yield
/// # Future Reserved
/// enum
/// # Strict Mode Future Reserved
/// implements
/// package
/// protected
/// interface
/// private (40)
/// public
pub enum Keyword {
Await,
Break,
Case,
Catch,
Class,
Const,
Continue,
Debugger,
Default,
Delete, //10
Do,
Else,
Enum,
Export,
Finally,
For,
Function,
If,
Implements,
Import,
In,
InstanceOf,
Interface,
Let,
New,
Package,
Private,
Protected,
Public,
Return,
Static,
Super,
Switch,
This,
Throw,
Try,
TypeOf,
Var,
Void,
While,
With,
Yield,
}
impl<'a> From<&'a str> for Keyword {
/// convert a &str into a Keyword
///
/// panics if invalid keyword
fn from(s: &'a str) -> Self {
match s {
"await" => Keyword::Await,
"break" => Keyword::Break,
"case" => Keyword::Case,
"catch" => Keyword::Catch,
"class" => Keyword::Class,
"const" => Keyword::Const,
"continue" => Keyword::Continue,
"debugger" => Keyword::Debugger,
"default" => Keyword::Default,
"delete" => Keyword::Delete,
"do" => Keyword::Do,
"else" => Keyword::Else,
"finally" => Keyword::Finally,
"for" => Keyword::For,
"function" => Keyword::Function,
"if" => Keyword::If,
"instanceof" => Keyword::InstanceOf,
"in" => Keyword::In,
"new" => Keyword::New,
"return" => Keyword::Return,
"switch" => Keyword::Switch,
"this" => Keyword::This,
"throw" => Keyword::Throw,
"try" => Keyword::Try,
"typeof" => Keyword::TypeOf,
"var" => Keyword::Var,
"void" => Keyword::Void,
"while" => Keyword::While,
"with" => Keyword::With,
"export" => Keyword::Export,
"import" => Keyword::Import,
"super" => Keyword::Super,
"enum" => Keyword::Enum,
"implements" => Keyword::Implements,
"interface" => Keyword::Interface,
"package" => Keyword::Package,
"private" => Keyword::Private,
"protected" => Keyword::Protected,
"public" => Keyword::Public,
"static" => Keyword::Static,
"yield" => Keyword::Yield,
"let" => Keyword::Let,
_ => panic!("Unknown Keyword, `{}`", s),
}
}
}
impl From<String> for Keyword {
/// converts from a String to a Keyword
///
/// panics if an invalid keyword
fn from(s: String) -> Self {
Self::from(s.as_str())
}
}
impl ::std::string::ToString for Keyword {
/// Convert a keyword into a string
fn to_string(&self) -> String {
match self {
Keyword::Await => "await",
Keyword::Break => "break",
Keyword::Case => "case",
Keyword::Catch => "catch",
Keyword::Class => "class",
Keyword::Const => "const",
Keyword::Continue => "continue",
Keyword::Debugger => "debugger",
Keyword::Default => "default",
Keyword::Import => "import",
Keyword::Delete => "delete",
Keyword::Do => "do",
Keyword::Else => "else",
Keyword::Enum => "enum",
Keyword::Export => "export",
Keyword::Finally => "finally",
Keyword::For => "for",
Keyword::Function => "function",
Keyword::If => "if",
Keyword::In => "in",
Keyword::Implements => "implements",
Keyword::InstanceOf => "instanceof",
Keyword::Interface => "interface",
Keyword::Let => "let",
Keyword::New => "new",
Keyword::Package => "package",
Keyword::Private => "private",
Keyword::Protected => "protected",
Keyword::Public => "public",
Keyword::Static => "static",
Keyword::Return => "return",
Keyword::Super => "super",
Keyword::Switch => "switch",
Keyword::This => "this",
Keyword::Throw => "throw",
Keyword::Try => "try",
Keyword::TypeOf => "typeof",
Keyword::Var => "var",
Keyword::Void => "void",
Keyword::While => "while",
Keyword::With => "with",
Keyword::Yield => "yield",
}.into()
}
}
impl Keyword {
/// Is this keyword one of the future reserved words
///
/// - enum
/// - export
/// - implements
/// - super
pub fn is_future_reserved(&self) -> bool {
match self {
Keyword::Enum => true,
Keyword::Export => true,
Keyword::Implements => true,
Keyword::Super => true,
_ => false,
}
}
/// Is this keyword a reserved word when the context
/// has a 'use strict' directive.
///
/// ## Keywords
/// - implements
/// - interface
/// - package
/// - private
/// - protected
/// - public
/// - static
/// - yield
/// - let
pub fn is_strict_reserved(&self) -> bool {
match self {
Keyword::Implements => true,
Keyword::Interface => true,
Keyword::Package => true,
Keyword::Private => true,
Keyword::Protected => true,
Keyword::Public => true,
Keyword::Static => true,
Keyword::Yield => true,
Keyword::Let => true,
_ => false,
}
}
/// Is this keyword a reserved word
///
/// ## Keywords
/// - break
/// - case
/// - catch
/// - continue
/// - debugger
/// - default
/// - delete
/// - do
/// - else
/// - for
/// - function
/// - if
/// - instanceof
/// - in
/// - new
/// - return
/// - switch
/// - this
/// - throw
/// - try
/// - typeof
/// - var
/// - void
/// - while
/// - with
pub fn is_reserved(&self) -> bool {
match self {
Keyword::Break => true,
Keyword::Case => true,
Keyword::Catch => true,
Keyword::Continue => true,
Keyword::Debugger => true,
Keyword::Default => true,
Keyword::Delete => true,
Keyword::Do => true,
Keyword::Else => true,
Keyword::Finally => true,
Keyword::For => true,
Keyword::Function => true,
Keyword::If => true,
Keyword::InstanceOf => true,
Keyword::In => true,
Keyword::New => true,
Keyword::Return => true,
Keyword::Switch => true,
Keyword::This => true,
Keyword::Throw => true,
Keyword::Try => true,
Keyword::TypeOf => true,
Keyword::Var => true,
Keyword::Void => true,
Keyword::While => true,
Keyword::With => true,
_ => false,
}
}
}
/// generate a parser that will return an instance of Token::Keyword on success
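///
/// The trailing `not_followed_by(ident_part())` guard stops the parser from matching
/// identifiers that merely begin with a keyword (e.g. `forEach` must not lex as `for`).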
pub(crate) fn literal<I>() -> impl Parser<Input = I, Output = Token>
where
I: Stream<Item = char>,
I::Error: ParseError<I::Item, I::Range, I::Position>,
{
choice((
try(future_reserved()),
try(strict_mode_reserved()),
try(reserved()),
)).skip(not_followed_by(ident_part()))
.map(|t| t)
}
/// generate a parser that will return a Token::Keyword when it finds
/// one of the reserved keywords
/// ## Keywords
/// - break
/// - case
/// - catch
/// - continue
/// - debugger
/// - default
/// - delete
/// - do
/// - else
/// - for
/// - function
/// - if
/// - instanceof
/// - in
/// - new
/// - return
/// - switch
/// - this
/// - throw
/// - try
/// - typeof
/// - var
/// - void
/// - while
/// - with
pub(crate) fn reserved<I>() -> impl Parser<Input = I, Output = Token>
where
I: Stream<Item = char>,
I::Error: ParseError<I::Item, I::Range, I::Position>,
{
choice([
try(string("await")),
try(string("break")),
try(string("case")),
try(string("catch")),
try(string("class")),
try(string("const")),
try(string("continue")),
try(string("debugger")),
try(string("default")),
try(string("delete")),
try(string("do")),
try(string("else")),
try(string("finally")),
try(string("for")),
try(string("function")),
try(string("if")),
try(string("instanceof")),
try(string("in")),
try(string("new")),
try(string("return")),
try(string("switch")),
try(string("this")),
try(string("throw")),
try(string("try")),
try(string("typeof")),
try(string("var")),
try(string("void")),
try(string("while")),
try(string("with")),
]).map(|t| Token::Keyword(Keyword::from(t.to_owned())))
}
/// Generate a parser that will return an instance of Token::Keyword when one of the
/// future reserved words are found
///
/// ## Keywords
/// - export
/// - import
/// - super
/// - enum
pub(crate) fn future_reserved<I>() -> impl Parser<Input = I, Output = Token>
where
I: Stream<Item = char>,
I::Error: ParseError<I::Item, I::Range, I::Position>,
{
choice((
try(string("export")),
try(string("import")),
try(string("super")),
try(string("enum")),
)).map(|t| Token::Keyword(Keyword::from(t)))
}
/// Generate a parser that will return an instance of Token::Keyword when a
/// strict mode reserved word is found
///
/// ##Keywords
/// - implements
/// - interface
/// - package
/// - private
/// - protected
/// - public
/// - static
/// - yield
/// - let
pub(crate) fn strict_mode_reserved<I>() -> impl Parser<Input = I, Output = Token>
where
I: Stream<Item = char>,
I::Error: ParseError<I::Item, I::Range, I::Position>,
{
choice((
try(string("implements")),
try(string("interface")),
try(string("package")),
try(string("private")),
try(string("protected")),
try(string("public")),
try(string("static")),
try(string("yield")),
try(string("let")),
)).map(|t| Token::Keyword(Keyword::from(t)))
}
#[cfg(test)]
mod test {
use super::*;
use tokens::{token, Token};
#[test]
fn future_reserved() {
let keywords = ["enum", "export", "import", "super"];
for keyword in keywords.iter() {
let k = token().parse(keyword.clone()).unwrap();
assert_eq!(k, (Token::keyword(*keyword), ""))
}
match super::future_reserved().parse("junk") {
Ok(k) => panic!("parsed junk as {:?}", k),
Err(_) => (),
}
}
#[test]
fn strict_mode_reserved() {
let keywords = [
"implements",
"interface",
"package",
"private",
"protected",
"public",
"static",
"yield",
"let",
];
for keyword in keywords.iter() {
let k = token().parse(keyword.clone()).unwrap();
assert_eq!(k, (Token::keyword(*keyword), ""));
}
match super::strict_mode_reserved().parse("junk") {
Ok(k) => panic!("parsed junk as {:?}", k),
Err(_) => (),
}
}
#[test]
fn reserved_keywords() {
let keys = vec![
"break",
"case",
"catch",
"continue",
"debugger",
"default",
"delete",
"do",
"else",
"finally",
"for",
"function",
"if",
"instanceof",
"in",
"new",
"return",
"switch",
"this",
"throw",
"try",
"typeof",
"var",
"void",
"while",
"with",
];
for key in keys {
let k = token().parse(key.clone()).unwrap();
assert_eq!(k, (Token::keyword(key), ""));
}
}
proptest! {
#[test]
fn keyword_prop(s in r#"await|break|case|catch|class|const|continue|debugger|default|import|delete|do|else|enum|export|finally|for|function|if|in|implements|instanceof|interface|let|new|package|private|protected|public|static|return|super|switch|this|throw|try|typeof|var|void|while|with|yield"#) {
let r = token().easy_parse(s.as_str()).unwrap();
assert!(r.0.is_keyword() && r.0.matches_keyword_str(&s));
}
}
}
| 25.604563 | 306 | 0.478022 |
0a13aeabe8452f3574d5f1f558898df8937ea64a
| 6,153 |
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Windows-specific extensions to the primitives in the `std::ffi` module.
//!
//! # Overview
//!
//! For historical reasons, the Windows API uses a form of potentially
//! ill-formed UTF-16 encoding for strings. Specifically, the 16-bit
//! code units in Windows strings may contain [isolated surrogate code
//! points which are not paired together][ill-formed-utf-16]. The
//! Unicode standard requires that surrogate code points (those in the
//! range U+D800 to U+DFFF) always be *paired*, because in the UTF-16
//! encoding a *surrogate code unit pair* is used to encode a single
//! character. For compatibility with code that does not enforce
//! these pairings, Windows does not enforce them, either.
//!
//! While it is not always possible to convert such a string losslessly into
//! a valid UTF-16 string (or even UTF-8), it is often desirable to be
//! able to round-trip such a string from and to Windows APIs
//! losslessly. For example, some Rust code may be "bridging" some
//! Windows APIs together, just passing `WCHAR` strings among those
//! APIs without ever really looking into the strings.
//!
//! If Rust code *does* need to look into those strings, it can
//! convert them to valid UTF-8, possibly lossily, by substituting
//! invalid sequences with [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], as is
//! conventionally done in other Rust APIs that deal with string
//! encodings.
//!
//! # `OsStringExt` and `OsStrExt`
//!
//! [`OsString`] is the Rust wrapper for owned strings in the
//! preferred representation of the operating system. On Windows,
//! this struct gets augmented with an implementation of the
//! [`OsStringExt`] trait, which has a [`from_wide`] method. This
//! lets you create an [`OsString`] from a `&[u16]` slice; presumably
//! you get such a slice out of a `WCHAR` Windows API.
//!
//! Similarly, [`OsStr`] is the Rust wrapper for borrowed strings from
//! the preferred representation of the operating system. On Windows, the
//! [`OsStrExt`] trait provides the [`encode_wide`] method, which
//! outputs an [`EncodeWide`] iterator. You can [`collect`] this
//! iterator, for example, to obtain a `Vec<u16>`; you can later get a
//! pointer to this vector's contents and feed it to Windows APIs.
//!
//! These traits, along with [`OsString`] and [`OsStr`], work in
//! conjunction so that it is possible to **round-trip** strings from
//! Windows and back, with no loss of data, even if the strings are
//! ill-formed UTF-16.
//!
//! [ill-formed-utf-16]: https://simonsapin.github.io/wtf-8/#ill-formed-utf-16
//! [`OsString`]: ../../../ffi/struct.OsString.html
//! [`OsStr`]: ../../../ffi/struct.OsStr.html
//! [`OsStringExt`]: trait.OsStringExt.html
//! [`OsStrExt`]: trait.OsStrExt.html
//! [`EncodeWide`]: struct.EncodeWide.html
//! [`from_wide`]: trait.OsStringExt.html#tymethod.from_wide
//! [`encode_wide`]: trait.OsStrExt.html#tymethod.encode_wide
//! [`collect`]: ../../../iter/trait.Iterator.html#method.collect
//! [U+FFFD]: ../../../char/constant.REPLACEMENT_CHARACTER.html
#![stable(feature = "rust1", since = "1.0.0")]
use ffi::{OsString, OsStr};
use sys::os_str::Buf;
use sys_common::wtf8::Wtf8Buf;
use sys_common::{FromInner, AsInner};
#[stable(feature = "rust1", since = "1.0.0")]
pub use sys_common::wtf8::EncodeWide;
/// Windows-specific extensions to [`OsString`].
///
/// [`OsString`]: ../../../../std/ffi/struct.OsString.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait OsStringExt {
/// Creates an `OsString` from a potentially ill-formed UTF-16 slice of
/// 16-bit code units.
///
/// This is lossless: calling [`encode_wide`] on the resulting string
/// will always return the original code units.
///
/// # Examples
///
/// ```
/// use std::ffi::OsString;
/// use std::os::windows::prelude::*;
///
/// // UTF-16 encoding for "Unicode".
/// let source = [0x0055, 0x006E, 0x0069, 0x0063, 0x006F, 0x0064, 0x0065];
///
/// let string = OsString::from_wide(&source[..]);
/// ```
///
/// [`encode_wide`]: ./trait.OsStrExt.html#tymethod.encode_wide
#[stable(feature = "rust1", since = "1.0.0")]
fn from_wide(wide: &[u16]) -> Self;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl OsStringExt for OsString {
fn from_wide(wide: &[u16]) -> OsString {
FromInner::from_inner(Buf { inner: Wtf8Buf::from_wide(wide) })
}
}
/// Windows-specific extensions to [`OsStr`].
///
/// [`OsStr`]: ../../../../std/ffi/struct.OsStr.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait OsStrExt {
/// Re-encodes an `OsStr` as a wide character sequence, i.e., potentially
/// ill-formed UTF-16.
///
/// This is lossless: calling [`OsString::from_wide`] and then
/// `encode_wide` on the result will yield the original code units.
/// Note that the encoding does not add a final null terminator.
///
/// # Examples
///
/// ```
/// use std::ffi::OsString;
/// use std::os::windows::prelude::*;
///
/// // UTF-16 encoding for "Unicode".
/// let source = [0x0055, 0x006E, 0x0069, 0x0063, 0x006F, 0x0064, 0x0065];
///
/// let string = OsString::from_wide(&source[..]);
///
/// let result: Vec<u16> = string.encode_wide().collect();
/// assert_eq!(&source[..], &result[..]);
/// ```
///
/// [`OsString::from_wide`]: ./trait.OsStringExt.html#tymethod.from_wide
#[stable(feature = "rust1", since = "1.0.0")]
fn encode_wide(&self) -> EncodeWide;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl OsStrExt for OsStr {
fn encode_wide(&self) -> EncodeWide {
self.as_inner().inner.encode_wide()
}
}
| 40.215686 | 78 | 0.656265 |
e8e4066de249937b005278cefce772104f7e61b9
| 33,654 |
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
extern crate anyhow;
extern crate arc_swap;
extern crate hypervisor;
extern crate option_parser;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate tempfile;
extern crate url;
extern crate vmm_sys_util;
#[cfg(test)]
#[macro_use]
extern crate credibility;
use crate::api::{ApiError, ApiRequest, ApiResponse, ApiResponsePayload, VmInfo, VmmPingResponse};
use crate::config::{
DeviceConfig, DiskConfig, FsConfig, NetConfig, PmemConfig, RestoreConfig, VmConfig, VsockConfig,
};
use crate::migration::{get_vm_snapshot, recv_vm_snapshot};
use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::vm::{Error as VmError, Vm, VmState};
use libc::EFD_NONBLOCK;
use seccomp::{SeccompAction, SeccompFilter};
use serde::ser::{Serialize, SerializeStruct, Serializer};
use std::fs::File;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::PathBuf;
use std::sync::mpsc::{Receiver, RecvError, SendError, Sender};
use std::sync::{Arc, Mutex};
use std::{result, thread};
use vm_migration::{Pausable, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd;
pub mod api;
pub mod config;
pub mod cpu;
pub mod device_manager;
pub mod device_tree;
pub mod interrupt;
pub mod memory_manager;
pub mod migration;
pub mod seccomp_filters;
pub mod vm;
#[cfg(feature = "acpi")]
mod acpi;
/// Errors associated with VMM management
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
pub enum Error {
/// API request receive error
ApiRequestRecv(RecvError),
/// API response send error
ApiResponseSend(SendError<ApiResponse>),
/// Cannot bind to the UNIX domain socket path
Bind(io::Error),
/// Cannot clone EventFd.
EventFdClone(io::Error),
/// Cannot create EventFd.
EventFdCreate(io::Error),
/// Cannot read from EventFd.
EventFdRead(io::Error),
/// Cannot create epoll context.
Epoll(io::Error),
/// Cannot create HTTP thread
HttpThreadSpawn(io::Error),
/// Cannot handle the VM STDIN stream
Stdin(VmError),
/// Cannot reboot the VM
VmReboot(VmError),
/// Cannot shut a VM down
VmShutdown(VmError),
/// Cannot create VMM thread
VmmThreadSpawn(io::Error),
/// Cannot shut the VMM down
VmmShutdown(VmError),
    /// Error following "exe" link
ExePathReadLink(io::Error),
/// Cannot create seccomp filter
CreateSeccompFilter(seccomp::SeccompError),
/// Cannot apply seccomp filter
ApplySeccompFilter(seccomp::Error),
}
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum EpollDispatch {
Exit,
Reset,
Stdin,
Api,
}
pub struct EpollContext {
epoll_file: File,
dispatch_table: Vec<Option<EpollDispatch>>,
}
impl EpollContext {
pub fn new() -> result::Result<EpollContext, io::Error> {
let epoll_fd = epoll::create(true)?;
// Use 'File' to enforce closing on 'epoll_fd'
let epoll_file = unsafe { File::from_raw_fd(epoll_fd) };
        // Initial capacity needs to be large enough to hold:
        // * 1 exit event
        // * 1 reset event
        // * 1 stdin event
        // * 1 API event
        // plus the placeholder entry pushed below, which keeps slot 0 unused so
        // that dispatch indices handed to epoll start at 1.
        let mut dispatch_table = Vec::with_capacity(5);
        dispatch_table.push(None);
Ok(EpollContext {
epoll_file,
dispatch_table,
})
}
pub fn add_stdin(&mut self) -> result::Result<(), io::Error> {
let dispatch_index = self.dispatch_table.len() as u64;
epoll::ctl(
self.epoll_file.as_raw_fd(),
epoll::ControlOptions::EPOLL_CTL_ADD,
libc::STDIN_FILENO,
epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
)?;
self.dispatch_table.push(Some(EpollDispatch::Stdin));
Ok(())
}
fn add_event<T>(&mut self, fd: &T, token: EpollDispatch) -> result::Result<(), io::Error>
where
T: AsRawFd,
{
let dispatch_index = self.dispatch_table.len() as u64;
epoll::ctl(
self.epoll_file.as_raw_fd(),
epoll::ControlOptions::EPOLL_CTL_ADD,
fd.as_raw_fd(),
epoll::Event::new(epoll::Events::EPOLLIN, dispatch_index),
)?;
self.dispatch_table.push(Some(token));
Ok(())
}
}
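// Note (explanatory comment, not in the original source): `add_stdin` and
// `add_event` register each fd with `event.data` set to the index the new entry
// will occupy in `dispatch_table`. When `epoll::wait` returns in the control
// loop further below, `event.data` can therefore be used directly as the lookup
// index for the corresponding `EpollDispatch` variant.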
impl AsRawFd for EpollContext {
fn as_raw_fd(&self) -> RawFd {
self.epoll_file.as_raw_fd()
}
}
pub struct PciDeviceInfo {
pub id: String,
pub bdf: u32,
}
impl Serialize for PciDeviceInfo {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
// Transform the PCI b/d/f into a standardized string.
let segment = (self.bdf >> 16) & 0xffff;
let bus = (self.bdf >> 8) & 0xff;
let device = (self.bdf >> 3) & 0x1f;
let function = self.bdf & 0x7;
let bdf_str = format!(
"{:04x}:{:02x}:{:02x}.{:01x}",
segment, bus, device, function
);
// Serialize the structure.
let mut state = serializer.serialize_struct("PciDeviceInfo", 2)?;
state.serialize_field("id", &self.id)?;
state.serialize_field("bdf", &bdf_str)?;
state.end()
}
}
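// Illustrative test sketch (not part of the original source) for the b/d/f
// formatting implemented above. It only relies on `serde_json`, which is
// already a dependency of this crate; the device id used here is made up.
#[cfg(test)]
mod pci_device_info_format_tests {
    use super::PciDeviceInfo;

    #[test]
    fn bdf_is_formatted_as_segment_bus_device_function() {
        // 0x0001_0803 decodes to segment 0x0001, bus 0x08, device 0x00, function 0x3.
        let info = PciDeviceInfo {
            id: "_net2".to_string(),
            bdf: 0x0001_0803,
        };
        let json = serde_json::to_string(&info).unwrap();
        assert_eq!(json, r#"{"id":"_net2","bdf":"0001:08:00.3"}"#);
    }
}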
pub fn start_vmm_thread(
vmm_version: String,
http_path: &str,
api_event: EventFd,
api_sender: Sender<ApiRequest>,
api_receiver: Receiver<ApiRequest>,
seccomp_action: &SeccompAction,
hypervisor: Arc<dyn hypervisor::Hypervisor>,
) -> Result<thread::JoinHandle<Result<()>>> {
let http_api_event = api_event.try_clone().map_err(Error::EventFdClone)?;
// Retrieve seccomp filter
let vmm_seccomp_filter =
get_seccomp_filter(seccomp_action, Thread::Vmm).map_err(Error::CreateSeccompFilter)?;
// Find the path that the "/proc/<pid>/exe" symlink points to. Must be done before spawning
// a thread as Rust does not put the child threads in the same thread group which prevents the
// link from being followed as per PTRACE_MODE_READ_FSCREDS (see proc(5) and ptrace(2)). The
// alternative is to run always with CAP_SYS_PTRACE but that is not a good idea.
let self_path = format!("/proc/{}/exe", std::process::id());
let vmm_path = std::fs::read_link(PathBuf::from(self_path)).map_err(Error::ExePathReadLink)?;
let vmm_seccomp_action = seccomp_action.clone();
let thread = thread::Builder::new()
.name("vmm".to_string())
.spawn(move || {
// Apply seccomp filter for VMM thread.
SeccompFilter::apply(vmm_seccomp_filter).map_err(Error::ApplySeccompFilter)?;
let mut vmm = Vmm::new(
vmm_version.to_string(),
api_event,
vmm_path,
vmm_seccomp_action,
hypervisor,
)?;
vmm.control_loop(Arc::new(api_receiver))
})
.map_err(Error::VmmThreadSpawn)?;
    // The VMM thread is started, so we can now start serving HTTP requests
api::start_http_thread(http_path, http_api_event, api_sender, seccomp_action)?;
Ok(thread)
}
pub struct Vmm {
epoll: EpollContext,
exit_evt: EventFd,
reset_evt: EventFd,
api_evt: EventFd,
version: String,
vm: Option<Vm>,
vm_config: Option<Arc<Mutex<VmConfig>>>,
vmm_path: PathBuf,
seccomp_action: SeccompAction,
hypervisor: Arc<dyn hypervisor::Hypervisor>,
}
impl Vmm {
fn new(
vmm_version: String,
api_evt: EventFd,
vmm_path: PathBuf,
seccomp_action: SeccompAction,
hypervisor: Arc<dyn hypervisor::Hypervisor>,
) -> Result<Self> {
let mut epoll = EpollContext::new().map_err(Error::Epoll)?;
let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
let reset_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
if unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0 {
epoll.add_stdin().map_err(Error::Epoll)?;
}
epoll
.add_event(&exit_evt, EpollDispatch::Exit)
.map_err(Error::Epoll)?;
epoll
.add_event(&reset_evt, EpollDispatch::Reset)
.map_err(Error::Epoll)?;
epoll
.add_event(&api_evt, EpollDispatch::Api)
.map_err(Error::Epoll)?;
Ok(Vmm {
epoll,
exit_evt,
reset_evt,
api_evt,
version: vmm_version,
vm: None,
vm_config: None,
vmm_path,
seccomp_action,
hypervisor,
})
}
fn vm_boot(&mut self) -> result::Result<(), VmError> {
        // Create a new VM if we don't have one yet.
if self.vm.is_none() {
let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
if let Some(ref vm_config) = self.vm_config {
let vm = Vm::new(
Arc::clone(vm_config),
exit_evt,
reset_evt,
self.vmm_path.clone(),
&self.seccomp_action,
self.hypervisor.clone(),
)?;
self.vm = Some(vm);
}
}
// Now we can boot the VM.
if let Some(ref mut vm) = self.vm {
vm.boot()
} else {
Err(VmError::VmNotCreated)
}
}
fn vm_pause(&mut self) -> result::Result<(), VmError> {
if let Some(ref mut vm) = self.vm {
vm.pause().map_err(VmError::Pause)
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_resume(&mut self) -> result::Result<(), VmError> {
if let Some(ref mut vm) = self.vm {
vm.resume().map_err(VmError::Resume)
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_snapshot(&mut self, destination_url: &str) -> result::Result<(), VmError> {
if let Some(ref mut vm) = self.vm {
vm.snapshot()
.map_err(VmError::Snapshot)
.and_then(|snapshot| {
vm.send(&snapshot, destination_url)
.map_err(VmError::SnapshotSend)
})
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_restore(&mut self, restore_cfg: RestoreConfig) -> result::Result<(), VmError> {
if self.vm.is_some() || self.vm_config.is_some() {
return Err(VmError::VmAlreadyCreated);
}
let source_url = restore_cfg.source_url.as_path().to_str();
if source_url.is_none() {
return Err(VmError::RestoreSourceUrlPathToStr);
}
// Safe to unwrap as we checked it was Some(&str).
let source_url = source_url.unwrap();
let snapshot = recv_vm_snapshot(source_url).map_err(VmError::Restore)?;
let vm_snapshot = get_vm_snapshot(&snapshot).map_err(VmError::Restore)?;
self.vm_config = Some(Arc::clone(&vm_snapshot.config));
let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
let vm = Vm::new_from_snapshot(
&snapshot,
exit_evt,
reset_evt,
self.vmm_path.clone(),
source_url,
restore_cfg.prefault,
&self.seccomp_action,
self.hypervisor.clone(),
)?;
self.vm = Some(vm);
// Now we can restore the rest of the VM.
if let Some(ref mut vm) = self.vm {
vm.restore(snapshot).map_err(VmError::Restore)
} else {
Err(VmError::VmNotCreated)
}
}
fn vm_shutdown(&mut self) -> result::Result<(), VmError> {
if let Some(ref mut vm) = self.vm.take() {
vm.shutdown()
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_reboot(&mut self) -> result::Result<(), VmError> {
// Without ACPI, a reset is equivalent to a shutdown
#[cfg(not(feature = "acpi"))]
{
if self.vm.is_some() {
self.exit_evt.write(1).unwrap();
return Ok(());
}
}
// First we stop the current VM and create a new one.
if let Some(ref mut vm) = self.vm {
let config = vm.get_config();
self.vm_shutdown()?;
let exit_evt = self.exit_evt.try_clone().map_err(VmError::EventFdClone)?;
let reset_evt = self.reset_evt.try_clone().map_err(VmError::EventFdClone)?;
// The Linux kernel fires off an i8042 reset after doing the ACPI reset so there may be
// an event sitting in the shared reset_evt. Without doing this we get very early reboots
// during the boot process.
if self.reset_evt.read().is_ok() {
warn!("Spurious second reset event received. Ignoring.");
}
self.vm = Some(Vm::new(
config,
exit_evt,
reset_evt,
self.vmm_path.clone(),
&self.seccomp_action,
self.hypervisor.clone(),
)?);
}
// Then we start the new VM.
if let Some(ref mut vm) = self.vm {
vm.boot()?;
} else {
return Err(VmError::VmNotCreated);
}
Ok(())
}
fn vm_info(&self) -> result::Result<VmInfo, VmError> {
match &self.vm_config {
Some(config) => {
let state = match &self.vm {
Some(vm) => vm.get_state()?,
None => VmState::Created,
};
Ok(VmInfo {
config: Arc::clone(config),
state,
})
}
None => Err(VmError::VmNotCreated),
}
}
fn vmm_ping(&self) -> result::Result<VmmPingResponse, ApiError> {
Ok(VmmPingResponse {
version: self.version.clone(),
})
}
fn vm_delete(&mut self) -> result::Result<(), VmError> {
if self.vm_config.is_none() {
return Ok(());
}
// If a VM is booted, we first try to shut it down.
if self.vm.is_some() {
self.vm_shutdown()?;
}
self.vm_config = None;
Ok(())
}
fn vmm_shutdown(&mut self) -> result::Result<(), VmError> {
self.vm_delete()
}
fn vm_resize(
&mut self,
desired_vcpus: Option<u8>,
desired_ram: Option<u64>,
desired_ram_w_balloon: Option<u64>,
) -> result::Result<(), VmError> {
if let Some(ref mut vm) = self.vm {
if let Err(e) = vm.resize(desired_vcpus, desired_ram, desired_ram_w_balloon) {
error!("Error when resizing VM: {:?}", e);
Err(e)
} else {
Ok(())
}
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_resize_zone(&mut self, id: String, desired_ram: u64) -> result::Result<(), VmError> {
if let Some(ref mut vm) = self.vm {
if let Err(e) = vm.resize_zone(id, desired_ram) {
error!("Error when resizing VM: {:?}", e);
Err(e)
} else {
Ok(())
}
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_add_device(&mut self, device_cfg: DeviceConfig) -> result::Result<Vec<u8>, VmError> {
if let Some(ref mut vm) = self.vm {
let info = vm.add_device(device_cfg).map_err(|e| {
error!("Error when adding new device to the VM: {:?}", e);
e
})?;
serde_json::to_vec(&info).map_err(VmError::SerializeJson)
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_remove_device(&mut self, id: String) -> result::Result<(), VmError> {
if let Some(ref mut vm) = self.vm {
if let Err(e) = vm.remove_device(id) {
error!("Error when removing new device to the VM: {:?}", e);
Err(e)
} else {
Ok(())
}
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_add_disk(&mut self, disk_cfg: DiskConfig) -> result::Result<Vec<u8>, VmError> {
if let Some(ref mut vm) = self.vm {
let info = vm.add_disk(disk_cfg).map_err(|e| {
error!("Error when adding new disk to the VM: {:?}", e);
e
})?;
serde_json::to_vec(&info).map_err(VmError::SerializeJson)
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_add_fs(&mut self, fs_cfg: FsConfig) -> result::Result<Vec<u8>, VmError> {
if let Some(ref mut vm) = self.vm {
let info = vm.add_fs(fs_cfg).map_err(|e| {
error!("Error when adding new fs to the VM: {:?}", e);
e
})?;
serde_json::to_vec(&info).map_err(VmError::SerializeJson)
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_add_pmem(&mut self, pmem_cfg: PmemConfig) -> result::Result<Vec<u8>, VmError> {
if let Some(ref mut vm) = self.vm {
let info = vm.add_pmem(pmem_cfg).map_err(|e| {
error!("Error when adding new pmem device to the VM: {:?}", e);
e
})?;
serde_json::to_vec(&info).map_err(VmError::SerializeJson)
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_add_net(&mut self, net_cfg: NetConfig) -> result::Result<Vec<u8>, VmError> {
if let Some(ref mut vm) = self.vm {
let info = vm.add_net(net_cfg).map_err(|e| {
error!("Error when adding new network device to the VM: {:?}", e);
e
})?;
serde_json::to_vec(&info).map_err(VmError::SerializeJson)
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_add_vsock(&mut self, vsock_cfg: VsockConfig) -> result::Result<Vec<u8>, VmError> {
if let Some(ref mut vm) = self.vm {
let info = vm.add_vsock(vsock_cfg).map_err(|e| {
error!("Error when adding new vsock device to the VM: {:?}", e);
e
})?;
serde_json::to_vec(&info).map_err(VmError::SerializeJson)
} else {
Err(VmError::VmNotRunning)
}
}
fn vm_counters(&mut self) -> result::Result<Vec<u8>, VmError> {
if let Some(ref mut vm) = self.vm {
let info = vm.counters().map_err(|e| {
error!("Error when getting counters from the VM: {:?}", e);
e
})?;
serde_json::to_vec(&info).map_err(VmError::SerializeJson)
} else {
Err(VmError::VmNotRunning)
}
}
fn control_loop(&mut self, api_receiver: Arc<Receiver<ApiRequest>>) -> Result<()> {
const EPOLL_EVENTS_LEN: usize = 100;
let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN];
let epoll_fd = self.epoll.as_raw_fd();
'outer: loop {
let num_events = match epoll::wait(epoll_fd, -1, &mut events[..]) {
Ok(res) => res,
Err(e) => {
if e.kind() == io::ErrorKind::Interrupted {
// It's well defined from the epoll_wait() syscall
// documentation that the epoll loop can be interrupted
// before any of the requested events occurred or the
// timeout expired. In both those cases, epoll_wait()
// returns an error of type EINTR, but this should not
// be considered as a regular error. Instead it is more
// appropriate to retry, by calling into epoll_wait().
continue;
}
return Err(Error::Epoll(e));
}
};
for event in events.iter().take(num_events) {
let dispatch_idx = event.data as usize;
if let Some(dispatch_type) = self.epoll.dispatch_table[dispatch_idx] {
match dispatch_type {
EpollDispatch::Exit => {
// Consume the event.
self.exit_evt.read().map_err(Error::EventFdRead)?;
self.vmm_shutdown().map_err(Error::VmmShutdown)?;
break 'outer;
}
EpollDispatch::Reset => {
// Consume the event.
self.reset_evt.read().map_err(Error::EventFdRead)?;
self.vm_reboot().map_err(Error::VmReboot)?;
}
EpollDispatch::Stdin => {
if let Some(ref vm) = self.vm {
vm.handle_stdin().map_err(Error::Stdin)?;
}
}
EpollDispatch::Api => {
// Consume the event.
self.api_evt.read().map_err(Error::EventFdRead)?;
// Read from the API receiver channel
let api_request = api_receiver.recv().map_err(Error::ApiRequestRecv)?;
match api_request {
ApiRequest::VmCreate(config, sender) => {
// We only store the passed VM config.
// The VM will be created when being asked to boot it.
let response = if self.vm_config.is_none() {
self.vm_config = Some(config);
Ok(ApiResponsePayload::Empty)
} else {
Err(ApiError::VmAlreadyCreated)
};
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmDelete(sender) => {
let response = self
.vm_delete()
.map_err(ApiError::VmDelete)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmBoot(sender) => {
// If we don't have a config, we can not boot a VM.
if self.vm_config.is_none() {
sender
.send(Err(ApiError::VmMissingConfig))
.map_err(Error::ApiResponseSend)?;
continue;
}
let response = self
.vm_boot()
.map_err(ApiError::VmBoot)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmShutdown(sender) => {
let response = self
.vm_shutdown()
.map_err(ApiError::VmShutdown)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmReboot(sender) => {
let response = self
.vm_reboot()
.map_err(ApiError::VmReboot)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmInfo(sender) => {
let response = self
.vm_info()
.map_err(ApiError::VmInfo)
.map(ApiResponsePayload::VmInfo);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmmPing(sender) => {
let response = self.vmm_ping().map(ApiResponsePayload::VmmPing);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmPause(sender) => {
let response = self
.vm_pause()
.map_err(ApiError::VmPause)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmResume(sender) => {
let response = self
.vm_resume()
.map_err(ApiError::VmResume)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmSnapshot(snapshot_data, sender) => {
let response = self
.vm_snapshot(&snapshot_data.destination_url)
.map_err(ApiError::VmSnapshot)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmRestore(restore_data, sender) => {
let response = self
.vm_restore(restore_data.as_ref().clone())
.map_err(ApiError::VmRestore)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmmShutdown(sender) => {
let response = self
.vmm_shutdown()
.map_err(ApiError::VmmShutdown)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
break 'outer;
}
ApiRequest::VmResize(resize_data, sender) => {
let response = self
.vm_resize(
resize_data.desired_vcpus,
resize_data.desired_ram,
resize_data.desired_ram_w_balloon,
)
.map_err(ApiError::VmResize)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmResizeZone(resize_zone_data, sender) => {
let response = self
.vm_resize_zone(
resize_zone_data.id.clone(),
resize_zone_data.desired_ram,
)
.map_err(ApiError::VmResizeZone)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmAddDevice(add_device_data, sender) => {
let response = self
.vm_add_device(add_device_data.as_ref().clone())
.map_err(ApiError::VmAddDevice)
.map(ApiResponsePayload::VmAction);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmRemoveDevice(remove_device_data, sender) => {
let response = self
.vm_remove_device(remove_device_data.id.clone())
.map_err(ApiError::VmRemoveDevice)
.map(|_| ApiResponsePayload::Empty);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmAddDisk(add_disk_data, sender) => {
let response = self
.vm_add_disk(add_disk_data.as_ref().clone())
.map_err(ApiError::VmAddDisk)
.map(ApiResponsePayload::VmAction);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmAddFs(add_fs_data, sender) => {
let response = self
.vm_add_fs(add_fs_data.as_ref().clone())
.map_err(ApiError::VmAddFs)
.map(ApiResponsePayload::VmAction);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmAddPmem(add_pmem_data, sender) => {
let response = self
.vm_add_pmem(add_pmem_data.as_ref().clone())
.map_err(ApiError::VmAddPmem)
.map(ApiResponsePayload::VmAction);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmAddNet(add_net_data, sender) => {
let response = self
.vm_add_net(add_net_data.as_ref().clone())
.map_err(ApiError::VmAddNet)
.map(ApiResponsePayload::VmAction);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmAddVsock(add_vsock_data, sender) => {
let response = self
.vm_add_vsock(add_vsock_data.as_ref().clone())
.map_err(ApiError::VmAddVsock)
.map(ApiResponsePayload::VmAction);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
ApiRequest::VmCounters(sender) => {
let response = self
.vm_counters()
.map_err(ApiError::VmInfo)
.map(ApiResponsePayload::VmAction);
sender.send(response).map_err(Error::ApiResponseSend)?;
}
}
}
}
}
}
}
Ok(())
}
}
const CPU_MANAGER_SNAPSHOT_ID: &str = "cpu-manager";
const MEMORY_MANAGER_SNAPSHOT_ID: &str = "memory-manager";
const DEVICE_MANAGER_SNAPSHOT_ID: &str = "device-manager";
| 38.11325 | 101 | 0.476437 |
21b4983a1af0270020f62aa12abde52f27a710e5
| 6,847 |
// Note: this lint is largely inspired by (and partly copied from) *rustc*'s
// [`missing_doc`] lint.
//
// [`missing_doc`]: https://github.com/rust-lang/rust/blob/cf9cf7c923eb01146971429044f216a3ca905e06/compiler/rustc_lint/src/builtin.rs#L415
//
use clippy_utils::diagnostics::span_lint;
use if_chain::if_chain;
use rustc_ast::ast::{self, MetaItem, MetaItemKind};
use rustc_ast::attr;
use rustc_hir as hir;
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::ty;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::source_map::Span;
use rustc_span::sym;
declare_clippy_lint! {
/// **What it does:** Warns if there is missing doc for any documentable item
/// (public or private).
///
/// **Why is this bad?** Doc is good. *rustc* has a `MISSING_DOCS`
/// allowed-by-default lint for
/// public members, but has no way to enforce documentation of private items.
/// This lint fixes that.
///
/// **Known problems:** None.
pub MISSING_DOCS_IN_PRIVATE_ITEMS,
restriction,
"detects missing documentation for public and private members"
}
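// Illustrative example of what this lint reports (a sketch, not part of the
// original declaration): an undocumented item triggers the lint whether it is
// public or private, while a documented one does not.
//
//     struct Undocumented;            // missing documentation for a struct
//
//     /// Adds one to its argument.
//     fn documented(x: u32) -> u32 {  // ok
//         x + 1
//     }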
pub struct MissingDoc {
/// Stack of whether #[doc(hidden)] is set
/// at each level which has lint attributes.
doc_hidden_stack: Vec<bool>,
}
impl Default for MissingDoc {
#[must_use]
fn default() -> Self {
Self::new()
}
}
impl MissingDoc {
#[must_use]
pub fn new() -> Self {
Self {
doc_hidden_stack: vec![false],
}
}
fn doc_hidden(&self) -> bool {
*self.doc_hidden_stack.last().expect("empty doc_hidden_stack")
}
fn has_include(meta: Option<MetaItem>) -> bool {
if_chain! {
if let Some(meta) = meta;
if let MetaItemKind::List(list) = meta.kind;
if let Some(meta) = list.get(0);
if let Some(name) = meta.ident();
then {
name.name == sym::include
} else {
false
}
}
}
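    // Note (explanatory sketch, not in the original source): `has_include`
    // recognises the old external-doc attribute form, e.g.
    //
    //     #[doc(include = "external_docs.md")]
    //     struct Documented;
    //
    // so such an item counts as documented even without a `///` comment.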
fn check_missing_docs_attrs(
&self,
cx: &LateContext<'_>,
attrs: &[ast::Attribute],
sp: Span,
article: &'static str,
desc: &'static str,
) {
// If we're building a test harness, then warning about
// documentation is probably not really relevant right now.
if cx.sess().opts.test {
return;
}
// `#[doc(hidden)]` disables missing_docs check.
if self.doc_hidden() {
return;
}
if sp.from_expansion() {
return;
}
let has_doc = attrs
.iter()
.any(|a| a.is_doc_comment() || a.doc_str().is_some() || a.value_str().is_some() || Self::has_include(a.meta()));
if !has_doc {
span_lint(
cx,
MISSING_DOCS_IN_PRIVATE_ITEMS,
sp,
&format!("missing documentation for {} {}", article, desc),
);
}
}
}
impl_lint_pass!(MissingDoc => [MISSING_DOCS_IN_PRIVATE_ITEMS]);
impl<'tcx> LateLintPass<'tcx> for MissingDoc {
fn enter_lint_attrs(&mut self, _: &LateContext<'tcx>, attrs: &'tcx [ast::Attribute]) {
let doc_hidden = self.doc_hidden()
|| attrs.iter().any(|attr| {
attr.has_name(sym::doc)
&& match attr.meta_item_list() {
None => false,
Some(l) => attr::list_contains_name(&l[..], sym::hidden),
}
});
self.doc_hidden_stack.push(doc_hidden);
}
fn exit_lint_attrs(&mut self, _: &LateContext<'tcx>, _: &'tcx [ast::Attribute]) {
self.doc_hidden_stack.pop().expect("empty doc_hidden_stack");
}
fn check_crate(&mut self, cx: &LateContext<'tcx>, krate: &'tcx hir::Crate<'_>) {
let attrs = cx.tcx.hir().attrs(hir::CRATE_HIR_ID);
self.check_missing_docs_attrs(cx, attrs, krate.item.inner, "the", "crate");
}
fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx hir::Item<'_>) {
match it.kind {
hir::ItemKind::Fn(..) => {
// ignore main()
if it.ident.name == sym::main {
let def_key = cx.tcx.hir().def_key(it.def_id);
if def_key.parent == Some(hir::def_id::CRATE_DEF_INDEX) {
return;
}
}
},
hir::ItemKind::Const(..)
| hir::ItemKind::Enum(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::Trait(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Union(..)
| hir::ItemKind::OpaqueTy(..) => {},
hir::ItemKind::ExternCrate(..)
| hir::ItemKind::ForeignMod { .. }
| hir::ItemKind::GlobalAsm(..)
| hir::ItemKind::Impl { .. }
| hir::ItemKind::Use(..) => return,
};
let (article, desc) = cx.tcx.article_and_description(it.def_id.to_def_id());
let attrs = cx.tcx.hir().attrs(it.hir_id());
self.check_missing_docs_attrs(cx, attrs, it.span, article, desc);
}
fn check_trait_item(&mut self, cx: &LateContext<'tcx>, trait_item: &'tcx hir::TraitItem<'_>) {
let (article, desc) = cx.tcx.article_and_description(trait_item.def_id.to_def_id());
let attrs = cx.tcx.hir().attrs(trait_item.hir_id());
self.check_missing_docs_attrs(cx, attrs, trait_item.span, article, desc);
}
fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx hir::ImplItem<'_>) {
// If the method is an impl for a trait, don't doc.
match cx.tcx.associated_item(impl_item.def_id).container {
ty::TraitContainer(_) => return,
ty::ImplContainer(cid) => {
if cx.tcx.impl_trait_ref(cid).is_some() {
return;
}
},
}
let (article, desc) = cx.tcx.article_and_description(impl_item.def_id.to_def_id());
let attrs = cx.tcx.hir().attrs(impl_item.hir_id());
self.check_missing_docs_attrs(cx, attrs, impl_item.span, article, desc);
}
fn check_field_def(&mut self, cx: &LateContext<'tcx>, sf: &'tcx hir::FieldDef<'_>) {
if !sf.is_positional() {
let attrs = cx.tcx.hir().attrs(sf.hir_id);
self.check_missing_docs_attrs(cx, attrs, sf.span, "a", "struct field");
}
}
fn check_variant(&mut self, cx: &LateContext<'tcx>, v: &'tcx hir::Variant<'_>) {
let attrs = cx.tcx.hir().attrs(v.id);
self.check_missing_docs_attrs(cx, attrs, v.span, "a", "variant");
}
}
| 33.729064 | 139 | 0.553819 |
f7eb3a7abe2f7adc327d785c8f53d85a2281662b
| 10,140 |
use crate::prelude::*;
use crate::{scalar, Color, Color4f, ColorSpace, Matrix, Point, Shader, TileMode};
use skia_bindings as sb;
use skia_bindings::SkShader;
impl RCHandle<SkShader> {
pub fn linear_gradient<'a>(
points: (impl Into<Point>, impl Into<Point>),
colors: impl Into<GradientShaderColors<'a>>,
pos: impl Into<Option<&'a [scalar]>>,
mode: TileMode,
flags: impl Into<Option<self::Flags>>,
local_matrix: impl Into<Option<&'a Matrix>>,
) -> Option<Self> {
linear(points, colors, pos, mode, flags, local_matrix)
}
pub fn radial_gradient<'a>(
center: impl Into<Point>,
radius: scalar,
colors: impl Into<GradientShaderColors<'a>>,
pos: impl Into<Option<&'a [scalar]>>,
mode: TileMode,
flags: impl Into<Option<self::Flags>>,
local_matrix: impl Into<Option<&'a Matrix>>,
) -> Option<Self> {
radial(center, radius, colors, pos, mode, flags, local_matrix)
}
#[allow(clippy::too_many_arguments)]
pub fn two_point_conical_gradient<'a>(
start: impl Into<Point>,
start_radius: scalar,
end: impl Into<Point>,
end_radius: scalar,
colors: impl Into<GradientShaderColors<'a>>,
pos: impl Into<Option<&'a [scalar]>>,
mode: TileMode,
flags: impl Into<Option<self::Flags>>,
local_matrix: impl Into<Option<&'a Matrix>>,
) -> Option<Self> {
two_point_conical(
start,
start_radius,
end,
end_radius,
colors,
pos,
mode,
flags,
local_matrix,
)
}
pub fn sweep_gradient<'a>(
center: impl Into<Point>,
colors: impl Into<GradientShaderColors<'a>>,
pos: impl Into<Option<&'a [scalar]>>,
mode: TileMode,
angles: impl Into<Option<(scalar, scalar)>>,
flags: impl Into<Option<self::Flags>>,
local_matrix: impl Into<Option<&'a Matrix>>,
) -> Option<Self> {
sweep(center, colors, pos, mode, angles, flags, local_matrix)
}
}
bitflags! {
pub struct Flags: u32 {
const INTERPOLATE_COLORS_IN_PREMUL = sb::SkGradientShader_Flags_kInterpolateColorsInPremul_Flag as _;
}
}
impl Default for self::Flags {
fn default() -> Self {
Self::empty()
}
}
pub fn linear<'a>(
points: (impl Into<Point>, impl Into<Point>),
colors: impl Into<GradientShaderColors<'a>>,
pos: impl Into<Option<&'a [scalar]>>,
mode: TileMode,
flags: impl Into<Option<self::Flags>>,
local_matrix: impl Into<Option<&'a Matrix>>,
) -> Option<Shader> {
let points = [points.0.into(), points.1.into()];
let colors = colors.into();
let pos = pos.into();
assert!(pos.is_none() || (pos.unwrap().len() == colors.len()));
let flags = flags.into().unwrap_or_default();
let local_matrix = local_matrix.into();
Shader::from_ptr(unsafe {
match colors {
GradientShaderColors::Colors(colors) => sb::C_SkGradientShader_MakeLinear(
points.native().as_ptr(),
colors.native().as_ptr(),
pos.as_ptr_or_null(),
colors.len().try_into().unwrap(),
mode,
flags.bits(),
local_matrix.native_ptr_or_null(),
),
GradientShaderColors::ColorsInSpace(colors, color_space) => {
sb::C_SkGradientShader_MakeLinear2(
points.native().as_ptr(),
colors.native().as_ptr(),
color_space.into_ptr(),
pos.as_ptr_or_null(),
colors.len().try_into().unwrap(),
mode,
flags.bits(),
local_matrix.native_ptr_or_null(),
)
}
}
})
}
pub fn radial<'a>(
center: impl Into<Point>,
radius: scalar,
colors: impl Into<GradientShaderColors<'a>>,
pos: impl Into<Option<&'a [scalar]>>,
mode: TileMode,
flags: impl Into<Option<self::Flags>>,
local_matrix: impl Into<Option<&'a Matrix>>,
) -> Option<Shader> {
let colors = colors.into();
let center = center.into();
let pos = pos.into();
assert!(pos.is_none() || (pos.unwrap().len() == colors.len()));
let flags = flags.into().unwrap_or_default();
let local_matrix = local_matrix.into();
Shader::from_ptr(unsafe {
match colors {
GradientShaderColors::Colors(colors) => sb::C_SkGradientShader_MakeRadial(
center.native(),
radius,
colors.native().as_ptr(),
pos.as_ptr_or_null(),
colors.len().try_into().unwrap(),
mode,
flags.bits(),
local_matrix.native_ptr_or_null(),
),
GradientShaderColors::ColorsInSpace(colors, color_space) => {
sb::C_SkGradientShader_MakeRadial2(
center.native(),
radius,
colors.native().as_ptr(),
color_space.into_ptr(),
pos.as_ptr_or_null(),
colors.len().try_into().unwrap(),
mode,
flags.bits(),
local_matrix.native_ptr_or_null(),
)
}
}
})
}
#[allow(clippy::too_many_arguments)]
pub fn two_point_conical<'a>(
start: impl Into<Point>,
start_radius: scalar,
end: impl Into<Point>,
end_radius: scalar,
colors: impl Into<GradientShaderColors<'a>>,
pos: impl Into<Option<&'a [scalar]>>,
mode: TileMode,
flags: impl Into<Option<self::Flags>>,
local_matrix: impl Into<Option<&'a Matrix>>,
) -> Option<Shader> {
let colors = colors.into();
let start = start.into();
let end = end.into();
let pos = pos.into();
assert!(pos.is_none() || (pos.unwrap().len() == colors.len()));
let flags = flags.into().unwrap_or_default();
let local_matrix = local_matrix.into();
Shader::from_ptr(unsafe {
match colors {
GradientShaderColors::Colors(colors) => sb::C_SkGradientShader_MakeTwoPointConical(
start.native(),
start_radius,
end.native(),
end_radius,
colors.native().as_ptr(),
pos.as_ptr_or_null(),
colors.len().try_into().unwrap(),
mode,
flags.bits(),
local_matrix.native_ptr_or_null(),
),
GradientShaderColors::ColorsInSpace(colors, color_space) => {
sb::C_SkGradientShader_MakeTwoPointConical2(
start.native(),
start_radius,
end.native(),
end_radius,
colors.native().as_ptr(),
color_space.into_ptr(),
pos.as_ptr_or_null(),
colors.len().try_into().unwrap(),
mode,
flags.bits(),
local_matrix.native_ptr_or_null(),
)
}
}
})
}
pub fn sweep<'a>(
center: impl Into<Point>,
colors: impl Into<GradientShaderColors<'a>>,
pos: impl Into<Option<&'a [scalar]>>,
mode: TileMode,
angles: impl Into<Option<(scalar, scalar)>>,
flags: impl Into<Option<self::Flags>>,
local_matrix: impl Into<Option<&'a Matrix>>,
) -> Option<Shader> {
let center = center.into();
let colors = colors.into();
let pos = pos.into();
assert!(pos.is_none() || (pos.unwrap().len() == colors.len()));
let angles = angles.into();
let flags = flags.into().unwrap_or_default();
let local_matrix = local_matrix.into();
let (start_angle, end_angle) = (
angles.map(|a| a.0).unwrap_or(0.0),
angles.map(|a| a.1).unwrap_or(360.0),
);
Shader::from_ptr(unsafe {
match colors {
GradientShaderColors::Colors(colors) => sb::C_SkGradientShader_MakeSweep(
center.x,
center.y,
colors.native().as_ptr(),
pos.as_ptr_or_null(),
colors.len().try_into().unwrap(),
mode,
start_angle,
end_angle,
flags.bits(),
local_matrix.native_ptr_or_null(),
),
GradientShaderColors::ColorsInSpace(colors, color_space) => {
sb::C_SkGradientShader_MakeSweep2(
center.x,
center.y,
colors.native().as_ptr(),
color_space.into_ptr(),
pos.as_ptr_or_null(),
colors.len().try_into().unwrap(),
mode,
start_angle,
end_angle,
flags.bits(),
local_matrix.native_ptr_or_null(),
)
}
}
})
}
/// Type that represents either a slice of Color, or a slice of Color4f and a color space.
/// Whenever this type is expected, it's possible to directly pass a &[Color], or
/// a tuple of type (&[Color4f], ColorSpace).
pub enum GradientShaderColors<'a> {
Colors(&'a [Color]),
ColorsInSpace(&'a [Color4f], ColorSpace),
}
impl<'a> GradientShaderColors<'a> {
pub fn len(&self) -> usize {
match self {
GradientShaderColors::Colors(colors) => colors.len(),
GradientShaderColors::ColorsInSpace(colors, _) => colors.len(),
}
}
// to keep clippy happy.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<'a> From<&'a [Color]> for GradientShaderColors<'a> {
fn from(colors: &'a [Color]) -> Self {
GradientShaderColors::<'a>::Colors(colors)
}
}
impl<'a> From<(&'a [Color4f], ColorSpace)> for GradientShaderColors<'a> {
fn from(c: (&'a [Color4f], ColorSpace)) -> Self {
GradientShaderColors::<'a>::ColorsInSpace(c.0, c.1)
}
}
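// Usage sketch (illustrative only, not part of the original source; it assumes
// this module is exposed as `skia_safe::gradient_shader` and that the
// `Color::RED` / `Color::BLUE` constants and `ColorSpace::new_srgb()` exist as
// in current skia-safe releases):
//
//     use skia_safe::{gradient_shader, Color, Color4f, ColorSpace, Point, TileMode};
//
//     // A &[Color] converts into GradientShaderColors::Colors.
//     let _shader = gradient_shader::linear(
//         (Point::new(0.0, 0.0), Point::new(256.0, 0.0)),
//         [Color::RED, Color::BLUE].as_ref(),
//         None,             // evenly spaced color stops
//         TileMode::Clamp,
//         None,             // default flags
//         None,             // no local matrix
//     );
//
//     // A (&[Color4f], ColorSpace) tuple converts into
//     // GradientShaderColors::ColorsInSpace.
//     let colors = [Color4f::new(1.0, 0.0, 0.0, 1.0), Color4f::new(0.0, 0.0, 1.0, 1.0)];
//     let _shader = gradient_shader::linear(
//         (Point::new(0.0, 0.0), Point::new(256.0, 0.0)),
//         (colors.as_ref(), ColorSpace::new_srgb()),
//         None,
//         TileMode::Clamp,
//         None,
//         None,
//     );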
| 32.5 | 109 | 0.534517 |
03051dc0034204a7dde4cc65c3882432c3070ef2
| 1,125 |
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::def_id::DefId;
use ty::TyCtxt;
use util::nodemap::DefIdMap;
pub struct DefPathHashes<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
data: DefIdMap<u64>,
}
impl<'a, 'tcx> DefPathHashes<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
DefPathHashes {
tcx: tcx,
data: DefIdMap()
}
}
pub fn hash(&mut self, def_id: DefId) -> u64 {
let tcx = self.tcx;
*self.data.entry(def_id)
.or_insert_with(|| {
let def_path = tcx.def_path(def_id);
def_path.deterministic_hash(tcx)
})
}
}
| 30.405405 | 69 | 0.601778 |
fe4db3760a743e585a3847bafb85f9dc0616c975
| 67,007 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// <p>Associate a virtual private cloud (VPC) subnet endpoint with your custom routing accelerator.</p>
/// <p>The listener port range must be large enough to support the number of IP addresses that can be
/// specified in your subnet. The number of ports required is: subnet size times the number
/// of ports per destination EC2 instances. For example, a subnet defined as /24 requires a listener
/// port range of at least 255 ports. </p>
/// <p>Note: You must have enough remaining listener ports available to
/// map to the subnet ports, or the call will fail with a LimitExceededException.</p>
/// <p>By default, all destinations in a subnet in a custom routing accelerator cannot receive traffic. To enable all
/// destinations to receive traffic, or to specify individual port mappings that can receive
/// traffic, see the <a href="https://docs.aws.amazon.com/global-accelerator/latest/api/API_AllowCustomRoutingTraffic.html">
/// AllowCustomRoutingTraffic</a> operation.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct AddCustomRoutingEndpoints {
_private: (),
}
impl AddCustomRoutingEndpoints {
/// Creates a new builder-style object to manufacture [`AddCustomRoutingEndpointsInput`](crate::input::AddCustomRoutingEndpointsInput)
pub fn builder() -> crate::input::add_custom_routing_endpoints_input::Builder {
crate::input::add_custom_routing_endpoints_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
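// Sizing sketch for the listener port-range rule described above (illustrative
// arithmetic only, applying the formula from the doc comment): with roughly 255
// addresses in a /24 subnet and 1 port per destination instance, the listener
// needs a range of at least 255 ports; with 2 ports per destination it would
// need at least 255 * 2 = 510 ports.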
impl smithy_http::response::ParseStrictResponse for AddCustomRoutingEndpoints {
type Output = std::result::Result<
crate::output::AddCustomRoutingEndpointsOutput,
crate::error::AddCustomRoutingEndpointsError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_add_custom_routing_endpoints_error(response)
} else {
crate::operation_deser::parse_add_custom_routing_endpoints_response(response)
}
}
}
/// <p>Advertises an IPv4 address range that is provisioned for use with your AWS resources
/// through bring your own IP addresses (BYOIP). It can take a few minutes before traffic to
/// the specified addresses starts routing to AWS because of propagation delays. </p>
/// <p>To stop advertising the BYOIP address range, use <a href="https://docs.aws.amazon.com/global-accelerator/latest/api/WithdrawByoipCidr.html">
/// WithdrawByoipCidr</a>.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html">Bring Your Own
/// IP Addresses (BYOIP)</a> in the <i>AWS Global Accelerator Developer Guide</i>.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct AdvertiseByoipCidr {
_private: (),
}
impl AdvertiseByoipCidr {
/// Creates a new builder-style object to manufacture [`AdvertiseByoipCidrInput`](crate::input::AdvertiseByoipCidrInput)
pub fn builder() -> crate::input::advertise_byoip_cidr_input::Builder {
crate::input::advertise_byoip_cidr_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for AdvertiseByoipCidr {
type Output = std::result::Result<
crate::output::AdvertiseByoipCidrOutput,
crate::error::AdvertiseByoipCidrError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_advertise_byoip_cidr_error(response)
} else {
crate::operation_deser::parse_advertise_byoip_cidr_response(response)
}
}
}
/// <p>Specify the Amazon EC2 instance (destination) IP addresses and ports for a VPC subnet endpoint that can receive traffic
/// for a custom routing accelerator. You can allow traffic to all destinations in the subnet endpoint, or allow traffic to a
/// specified list of destination IP addresses and ports in the subnet. Note that you cannot specify IP addresses or ports
/// outside of the range that you configured for the endpoint group.</p>
/// <p>After you make changes, you can verify that the updates are complete by checking the status of your
/// accelerator: the status changes from IN_PROGRESS to DEPLOYED.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct AllowCustomRoutingTraffic {
_private: (),
}
impl AllowCustomRoutingTraffic {
/// Creates a new builder-style object to manufacture [`AllowCustomRoutingTrafficInput`](crate::input::AllowCustomRoutingTrafficInput)
pub fn builder() -> crate::input::allow_custom_routing_traffic_input::Builder {
crate::input::allow_custom_routing_traffic_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for AllowCustomRoutingTraffic {
type Output = std::result::Result<
crate::output::AllowCustomRoutingTrafficOutput,
crate::error::AllowCustomRoutingTrafficError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_allow_custom_routing_traffic_error(response)
} else {
crate::operation_deser::parse_allow_custom_routing_traffic_response(response)
}
}
}
/// <p>Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic
/// to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. </p>
/// <important>
/// <p>Global Accelerator is a global service that supports endpoints in multiple AWS Regions but you must specify the
/// US West (Oregon) Region to create or update accelerators.</p>
/// </important>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateAccelerator {
_private: (),
}
impl CreateAccelerator {
/// Creates a new builder-style object to manufacture [`CreateAcceleratorInput`](crate::input::CreateAcceleratorInput)
pub fn builder() -> crate::input::create_accelerator_input::Builder {
crate::input::create_accelerator_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for CreateAccelerator {
type Output = std::result::Result<
crate::output::CreateAcceleratorOutput,
crate::error::CreateAcceleratorError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_create_accelerator_error(response)
} else {
crate::operation_deser::parse_create_accelerator_response(response)
}
}
}
/// <p>Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands
/// of Amazon EC2 instance destinations running in a single or multiple virtual private clouds (VPC) subnet endpoints.</p>
/// <p>Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive
/// traffic. To enable all destinations to receive traffic, or to specify individual port
/// mappings that can receive traffic, see the <a href="https://docs.aws.amazon.com/global-accelerator/latest/api/API_AllowCustomRoutingTraffic.html">
/// AllowCustomRoutingTraffic</a> operation.</p>
/// <important>
/// <p>Global Accelerator is a global service that supports endpoints in multiple AWS Regions but you must specify the
/// US West (Oregon) Region to create or update accelerators.</p>
/// </important>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateCustomRoutingAccelerator {
_private: (),
}
impl CreateCustomRoutingAccelerator {
/// Creates a new builder-style object to manufacture [`CreateCustomRoutingAcceleratorInput`](crate::input::CreateCustomRoutingAcceleratorInput)
pub fn builder() -> crate::input::create_custom_routing_accelerator_input::Builder {
crate::input::create_custom_routing_accelerator_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for CreateCustomRoutingAccelerator {
type Output = std::result::Result<
crate::output::CreateCustomRoutingAcceleratorOutput,
crate::error::CreateCustomRoutingAcceleratorError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_create_custom_routing_accelerator_error(response)
} else {
crate::operation_deser::parse_create_custom_routing_accelerator_response(response)
}
}
}
/// <p>Create an endpoint group for the specified listener for a custom routing accelerator.
/// An endpoint group is a collection of endpoints in one AWS
/// Region. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateCustomRoutingEndpointGroup {
_private: (),
}
impl CreateCustomRoutingEndpointGroup {
/// Creates a new builder-style object to manufacture [`CreateCustomRoutingEndpointGroupInput`](crate::input::CreateCustomRoutingEndpointGroupInput)
pub fn builder() -> crate::input::create_custom_routing_endpoint_group_input::Builder {
crate::input::create_custom_routing_endpoint_group_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for CreateCustomRoutingEndpointGroup {
type Output = std::result::Result<
crate::output::CreateCustomRoutingEndpointGroupOutput,
crate::error::CreateCustomRoutingEndpointGroupError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_create_custom_routing_endpoint_group_error(response)
} else {
crate::operation_deser::parse_create_custom_routing_endpoint_group_response(response)
}
}
}
/// <p>Create a listener to process inbound connections from clients to a custom routing accelerator.
/// Connections arrive to assigned static IP addresses on the port range that you specify. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateCustomRoutingListener {
_private: (),
}
impl CreateCustomRoutingListener {
/// Creates a new builder-style object to manufacture [`CreateCustomRoutingListenerInput`](crate::input::CreateCustomRoutingListenerInput)
pub fn builder() -> crate::input::create_custom_routing_listener_input::Builder {
crate::input::create_custom_routing_listener_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for CreateCustomRoutingListener {
type Output = std::result::Result<
crate::output::CreateCustomRoutingListenerOutput,
crate::error::CreateCustomRoutingListenerError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_create_custom_routing_listener_error(response)
} else {
crate::operation_deser::parse_create_custom_routing_listener_response(response)
}
}
}
/// <p>Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one AWS
/// Region. A resource must be valid and active when you add it as an endpoint.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateEndpointGroup {
_private: (),
}
impl CreateEndpointGroup {
/// Creates a new builder-style object to manufacture [`CreateEndpointGroupInput`](crate::input::CreateEndpointGroupInput)
pub fn builder() -> crate::input::create_endpoint_group_input::Builder {
crate::input::create_endpoint_group_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for CreateEndpointGroup {
type Output = std::result::Result<
crate::output::CreateEndpointGroupOutput,
crate::error::CreateEndpointGroupError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_create_endpoint_group_error(response)
} else {
crate::operation_deser::parse_create_endpoint_group_response(response)
}
}
}
/// <p>Create a listener to process inbound connections from clients to an accelerator. Connections arrive to assigned static
/// IP addresses on a port, port range, or list of port ranges that you specify. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateListener {
_private: (),
}
impl CreateListener {
/// Creates a new builder-style object to manufacture [`CreateListenerInput`](crate::input::CreateListenerInput)
pub fn builder() -> crate::input::create_listener_input::Builder {
crate::input::create_listener_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for CreateListener {
type Output =
std::result::Result<crate::output::CreateListenerOutput, crate::error::CreateListenerError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_create_listener_error(response)
} else {
crate::operation_deser::parse_create_listener_response(response)
}
}
}
/// <p>Delete an accelerator. Before you can delete an accelerator, you must disable it and remove all dependent resources
/// (listeners and endpoint groups). To disable the accelerator, update the accelerator to set <code>Enabled</code> to false.</p>
/// <important>
/// <p>When you create an accelerator, by default, Global Accelerator provides you with a set of two static IP addresses.
/// Alternatively, you can bring your own IP address ranges to Global Accelerator and assign IP addresses from those ranges.
/// </p>
/// <p>The IP addresses are assigned to your accelerator for as long as it exists, even if you disable the accelerator and
/// it no longer accepts or routes traffic. However, when you <i>delete</i> an accelerator, you lose the
/// static IP addresses that are assigned to the accelerator, so you can no longer route traffic by using them.
/// As a best practice, ensure that you have permissions in place to avoid inadvertently deleting accelerators. You
/// can use IAM policies with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information,
/// see <a href="https://docs.aws.amazon.com/global-accelerator/latest/dg/auth-and-access-control.html">Authentication and Access Control</a> in
/// the <i>AWS Global Accelerator Developer Guide</i>.</p>
/// </important>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteAccelerator {
_private: (),
}
impl DeleteAccelerator {
/// Creates a new builder-style object to manufacture [`DeleteAcceleratorInput`](crate::input::DeleteAcceleratorInput)
pub fn builder() -> crate::input::delete_accelerator_input::Builder {
crate::input::delete_accelerator_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DeleteAccelerator {
type Output = std::result::Result<
crate::output::DeleteAcceleratorOutput,
crate::error::DeleteAcceleratorError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_delete_accelerator_error(response)
} else {
crate::operation_deser::parse_delete_accelerator_response(response)
}
}
}
/// <p>Delete a custom routing accelerator. Before you can delete an accelerator, you must disable it and remove all dependent resources
/// (listeners and endpoint groups). To disable the accelerator, update the accelerator to set <code>Enabled</code> to false.</p>
/// <important>
/// <p>When you create a custom routing accelerator, by default, Global Accelerator provides you with a set of two static IP addresses.
/// </p>
/// <p>The IP
/// addresses are assigned to your accelerator for as long as it exists, even if you disable the accelerator and
/// it no longer accepts or routes traffic. However, when you <i>delete</i> an accelerator, you lose the
/// static IP addresses that are assigned to the accelerator, so you can no longer route traffic by using them.
/// As a best practice, ensure that you have permissions in place to avoid inadvertently deleting accelerators. You
/// can use IAM policies with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information,
/// see <a href="https://docs.aws.amazon.com/global-accelerator/latest/dg/auth-and-access-control.html">Authentication and Access Control</a> in
/// the <i>AWS Global Accelerator Developer Guide</i>.</p>
/// </important>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteCustomRoutingAccelerator {
_private: (),
}
impl DeleteCustomRoutingAccelerator {
/// Creates a new builder-style object to manufacture [`DeleteCustomRoutingAcceleratorInput`](crate::input::DeleteCustomRoutingAcceleratorInput)
pub fn builder() -> crate::input::delete_custom_routing_accelerator_input::Builder {
crate::input::delete_custom_routing_accelerator_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DeleteCustomRoutingAccelerator {
type Output = std::result::Result<
crate::output::DeleteCustomRoutingAcceleratorOutput,
crate::error::DeleteCustomRoutingAcceleratorError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_delete_custom_routing_accelerator_error(response)
} else {
crate::operation_deser::parse_delete_custom_routing_accelerator_response(response)
}
}
}
/// <p>Delete an endpoint group from a listener for a custom routing accelerator.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteCustomRoutingEndpointGroup {
_private: (),
}
impl DeleteCustomRoutingEndpointGroup {
/// Creates a new builder-style object to manufacture [`DeleteCustomRoutingEndpointGroupInput`](crate::input::DeleteCustomRoutingEndpointGroupInput)
pub fn builder() -> crate::input::delete_custom_routing_endpoint_group_input::Builder {
crate::input::delete_custom_routing_endpoint_group_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DeleteCustomRoutingEndpointGroup {
type Output = std::result::Result<
crate::output::DeleteCustomRoutingEndpointGroupOutput,
crate::error::DeleteCustomRoutingEndpointGroupError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_delete_custom_routing_endpoint_group_error(response)
} else {
crate::operation_deser::parse_delete_custom_routing_endpoint_group_response(response)
}
}
}
/// <p>Delete a listener for a custom routing accelerator.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteCustomRoutingListener {
_private: (),
}
impl DeleteCustomRoutingListener {
/// Creates a new builder-style object to manufacture [`DeleteCustomRoutingListenerInput`](crate::input::DeleteCustomRoutingListenerInput)
pub fn builder() -> crate::input::delete_custom_routing_listener_input::Builder {
crate::input::delete_custom_routing_listener_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DeleteCustomRoutingListener {
type Output = std::result::Result<
crate::output::DeleteCustomRoutingListenerOutput,
crate::error::DeleteCustomRoutingListenerError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_delete_custom_routing_listener_error(response)
} else {
crate::operation_deser::parse_delete_custom_routing_listener_response(response)
}
}
}
/// <p>Delete an endpoint group from a listener.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteEndpointGroup {
_private: (),
}
impl DeleteEndpointGroup {
/// Creates a new builder-style object to manufacture [`DeleteEndpointGroupInput`](crate::input::DeleteEndpointGroupInput)
pub fn builder() -> crate::input::delete_endpoint_group_input::Builder {
crate::input::delete_endpoint_group_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DeleteEndpointGroup {
type Output = std::result::Result<
crate::output::DeleteEndpointGroupOutput,
crate::error::DeleteEndpointGroupError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_delete_endpoint_group_error(response)
} else {
crate::operation_deser::parse_delete_endpoint_group_response(response)
}
}
}
/// <p>Delete a listener from an accelerator.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteListener {
_private: (),
}
impl DeleteListener {
/// Creates a new builder-style object to manufacture [`DeleteListenerInput`](crate::input::DeleteListenerInput)
pub fn builder() -> crate::input::delete_listener_input::Builder {
crate::input::delete_listener_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DeleteListener {
type Output =
std::result::Result<crate::output::DeleteListenerOutput, crate::error::DeleteListenerError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_delete_listener_error(response)
} else {
crate::operation_deser::parse_delete_listener_response(response)
}
}
}
/// <p>Specify the Amazon EC2 instance (destination) IP addresses and ports for a VPC subnet endpoint that cannot receive traffic
/// for a custom routing accelerator. You can deny traffic to all destinations in the VPC endpoint, or deny traffic to a
/// specified list of destination IP addresses and ports. Note that you cannot specify IP addresses
/// or ports outside of the range that you configured for the endpoint group.</p>
/// <p>After you make changes, you can verify that the updates are complete by checking the status of your
/// accelerator: the status changes from IN_PROGRESS to DEPLOYED.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DenyCustomRoutingTraffic {
_private: (),
}
impl DenyCustomRoutingTraffic {
/// Creates a new builder-style object to manufacture [`DenyCustomRoutingTrafficInput`](crate::input::DenyCustomRoutingTrafficInput)
pub fn builder() -> crate::input::deny_custom_routing_traffic_input::Builder {
crate::input::deny_custom_routing_traffic_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DenyCustomRoutingTraffic {
type Output = std::result::Result<
crate::output::DenyCustomRoutingTrafficOutput,
crate::error::DenyCustomRoutingTrafficError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_deny_custom_routing_traffic_error(response)
} else {
crate::operation_deser::parse_deny_custom_routing_traffic_response(response)
}
}
}
/// <p>Releases the specified address range that you provisioned to use with your AWS resources
/// through bring your own IP addresses (BYOIP) and deletes the corresponding address pool. </p>
/// <p>Before you can release an address range, you must stop advertising it by using <a href="https://docs.aws.amazon.com/global-accelerator/latest/api/WithdrawByoipCidr.html">WithdrawByoipCidr</a> and you must not have
/// any accelerators that are using static IP addresses allocated from its address range.
/// </p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html">Bring Your Own
/// IP Addresses (BYOIP)</a> in the <i>AWS Global Accelerator Developer Guide</i>.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeprovisionByoipCidr {
_private: (),
}
impl DeprovisionByoipCidr {
/// Creates a new builder-style object to manufacture [`DeprovisionByoipCidrInput`](crate::input::DeprovisionByoipCidrInput)
pub fn builder() -> crate::input::deprovision_byoip_cidr_input::Builder {
crate::input::deprovision_byoip_cidr_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DeprovisionByoipCidr {
type Output = std::result::Result<
crate::output::DeprovisionByoipCidrOutput,
crate::error::DeprovisionByoipCidrError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_deprovision_byoip_cidr_error(response)
} else {
crate::operation_deser::parse_deprovision_byoip_cidr_response(response)
}
}
}
/// <p>Describe an accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeAccelerator {
_private: (),
}
impl DescribeAccelerator {
/// Creates a new builder-style object to manufacture [`DescribeAcceleratorInput`](crate::input::DescribeAcceleratorInput)
pub fn builder() -> crate::input::describe_accelerator_input::Builder {
crate::input::describe_accelerator_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DescribeAccelerator {
type Output = std::result::Result<
crate::output::DescribeAcceleratorOutput,
crate::error::DescribeAcceleratorError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_accelerator_error(response)
} else {
crate::operation_deser::parse_describe_accelerator_response(response)
}
}
}
/// <p>Describe the attributes of an accelerator.
/// </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeAcceleratorAttributes {
_private: (),
}
impl DescribeAcceleratorAttributes {
/// Creates a new builder-style object to manufacture [`DescribeAcceleratorAttributesInput`](crate::input::DescribeAcceleratorAttributesInput)
pub fn builder() -> crate::input::describe_accelerator_attributes_input::Builder {
crate::input::describe_accelerator_attributes_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DescribeAcceleratorAttributes {
type Output = std::result::Result<
crate::output::DescribeAcceleratorAttributesOutput,
crate::error::DescribeAcceleratorAttributesError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_accelerator_attributes_error(response)
} else {
crate::operation_deser::parse_describe_accelerator_attributes_response(response)
}
}
}
/// <p>Describe a custom routing accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeCustomRoutingAccelerator {
_private: (),
}
impl DescribeCustomRoutingAccelerator {
/// Creates a new builder-style object to manufacture [`DescribeCustomRoutingAcceleratorInput`](crate::input::DescribeCustomRoutingAcceleratorInput)
pub fn builder() -> crate::input::describe_custom_routing_accelerator_input::Builder {
crate::input::describe_custom_routing_accelerator_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DescribeCustomRoutingAccelerator {
type Output = std::result::Result<
crate::output::DescribeCustomRoutingAcceleratorOutput,
crate::error::DescribeCustomRoutingAcceleratorError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_custom_routing_accelerator_error(response)
} else {
crate::operation_deser::parse_describe_custom_routing_accelerator_response(response)
}
}
}
/// <p>Describe the attributes of a custom routing accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeCustomRoutingAcceleratorAttributes {
_private: (),
}
impl DescribeCustomRoutingAcceleratorAttributes {
/// Creates a new builder-style object to manufacture [`DescribeCustomRoutingAcceleratorAttributesInput`](crate::input::DescribeCustomRoutingAcceleratorAttributesInput)
pub fn builder() -> crate::input::describe_custom_routing_accelerator_attributes_input::Builder
{
crate::input::describe_custom_routing_accelerator_attributes_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DescribeCustomRoutingAcceleratorAttributes {
type Output = std::result::Result<
crate::output::DescribeCustomRoutingAcceleratorAttributesOutput,
crate::error::DescribeCustomRoutingAcceleratorAttributesError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_custom_routing_accelerator_attributes_error(
response,
)
} else {
crate::operation_deser::parse_describe_custom_routing_accelerator_attributes_response(
response,
)
}
}
}
/// <p>Describe an endpoint group for a custom routing accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeCustomRoutingEndpointGroup {
_private: (),
}
impl DescribeCustomRoutingEndpointGroup {
/// Creates a new builder-style object to manufacture [`DescribeCustomRoutingEndpointGroupInput`](crate::input::DescribeCustomRoutingEndpointGroupInput)
pub fn builder() -> crate::input::describe_custom_routing_endpoint_group_input::Builder {
crate::input::describe_custom_routing_endpoint_group_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DescribeCustomRoutingEndpointGroup {
type Output = std::result::Result<
crate::output::DescribeCustomRoutingEndpointGroupOutput,
crate::error::DescribeCustomRoutingEndpointGroupError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_custom_routing_endpoint_group_error(response)
} else {
crate::operation_deser::parse_describe_custom_routing_endpoint_group_response(response)
}
}
}
/// <p>The description of a listener for a custom routing accelerator.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeCustomRoutingListener {
_private: (),
}
impl DescribeCustomRoutingListener {
/// Creates a new builder-style object to manufacture [`DescribeCustomRoutingListenerInput`](crate::input::DescribeCustomRoutingListenerInput)
pub fn builder() -> crate::input::describe_custom_routing_listener_input::Builder {
crate::input::describe_custom_routing_listener_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DescribeCustomRoutingListener {
type Output = std::result::Result<
crate::output::DescribeCustomRoutingListenerOutput,
crate::error::DescribeCustomRoutingListenerError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_custom_routing_listener_error(response)
} else {
crate::operation_deser::parse_describe_custom_routing_listener_response(response)
}
}
}
/// <p>Describe an endpoint group. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeEndpointGroup {
_private: (),
}
impl DescribeEndpointGroup {
/// Creates a new builder-style object to manufacture [`DescribeEndpointGroupInput`](crate::input::DescribeEndpointGroupInput)
pub fn builder() -> crate::input::describe_endpoint_group_input::Builder {
crate::input::describe_endpoint_group_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DescribeEndpointGroup {
type Output = std::result::Result<
crate::output::DescribeEndpointGroupOutput,
crate::error::DescribeEndpointGroupError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_endpoint_group_error(response)
} else {
crate::operation_deser::parse_describe_endpoint_group_response(response)
}
}
}
/// <p>Describe a listener. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DescribeListener {
_private: (),
}
impl DescribeListener {
/// Creates a new builder-style object to manufacture [`DescribeListenerInput`](crate::input::DescribeListenerInput)
pub fn builder() -> crate::input::describe_listener_input::Builder {
crate::input::describe_listener_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for DescribeListener {
type Output = std::result::Result<
crate::output::DescribeListenerOutput,
crate::error::DescribeListenerError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_describe_listener_error(response)
} else {
crate::operation_deser::parse_describe_listener_response(response)
}
}
}
/// <p>List the accelerators for an AWS account. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListAccelerators {
_private: (),
}
impl ListAccelerators {
/// Creates a new builder-style object to manufacture [`ListAcceleratorsInput`](crate::input::ListAcceleratorsInput)
pub fn builder() -> crate::input::list_accelerators_input::Builder {
crate::input::list_accelerators_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListAccelerators {
type Output = std::result::Result<
crate::output::ListAcceleratorsOutput,
crate::error::ListAcceleratorsError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_accelerators_error(response)
} else {
crate::operation_deser::parse_list_accelerators_response(response)
}
}
}
/// <p>Lists the IP address ranges that were specified in calls to <a href="https://docs.aws.amazon.com/global-accelerator/latest/api/ProvisionByoipCidr.html">ProvisionByoipCidr</a>, including
/// the current state and a history of state changes.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListByoipCidrs {
_private: (),
}
impl ListByoipCidrs {
/// Creates a new builder-style object to manufacture [`ListByoipCidrsInput`](crate::input::ListByoipCidrsInput)
pub fn builder() -> crate::input::list_byoip_cidrs_input::Builder {
crate::input::list_byoip_cidrs_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListByoipCidrs {
type Output =
std::result::Result<crate::output::ListByoipCidrsOutput, crate::error::ListByoipCidrsError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_byoip_cidrs_error(response)
} else {
crate::operation_deser::parse_list_byoip_cidrs_response(response)
}
}
}
/// <p>List the custom routing accelerators for an AWS account. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListCustomRoutingAccelerators {
_private: (),
}
impl ListCustomRoutingAccelerators {
/// Creates a new builder-style object to manufacture [`ListCustomRoutingAcceleratorsInput`](crate::input::ListCustomRoutingAcceleratorsInput)
pub fn builder() -> crate::input::list_custom_routing_accelerators_input::Builder {
crate::input::list_custom_routing_accelerators_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListCustomRoutingAccelerators {
type Output = std::result::Result<
crate::output::ListCustomRoutingAcceleratorsOutput,
crate::error::ListCustomRoutingAcceleratorsError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_custom_routing_accelerators_error(response)
} else {
crate::operation_deser::parse_list_custom_routing_accelerators_response(response)
}
}
}
/// <p>List the endpoint groups that are associated with a listener for a custom routing accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListCustomRoutingEndpointGroups {
_private: (),
}
impl ListCustomRoutingEndpointGroups {
/// Creates a new builder-style object to manufacture [`ListCustomRoutingEndpointGroupsInput`](crate::input::ListCustomRoutingEndpointGroupsInput)
pub fn builder() -> crate::input::list_custom_routing_endpoint_groups_input::Builder {
crate::input::list_custom_routing_endpoint_groups_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListCustomRoutingEndpointGroups {
type Output = std::result::Result<
crate::output::ListCustomRoutingEndpointGroupsOutput,
crate::error::ListCustomRoutingEndpointGroupsError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_custom_routing_endpoint_groups_error(response)
} else {
crate::operation_deser::parse_list_custom_routing_endpoint_groups_response(response)
}
}
}
/// <p>List the listeners for a custom routing accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListCustomRoutingListeners {
_private: (),
}
impl ListCustomRoutingListeners {
/// Creates a new builder-style object to manufacture [`ListCustomRoutingListenersInput`](crate::input::ListCustomRoutingListenersInput)
pub fn builder() -> crate::input::list_custom_routing_listeners_input::Builder {
crate::input::list_custom_routing_listeners_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListCustomRoutingListeners {
type Output = std::result::Result<
crate::output::ListCustomRoutingListenersOutput,
crate::error::ListCustomRoutingListenersError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_custom_routing_listeners_error(response)
} else {
crate::operation_deser::parse_list_custom_routing_listeners_response(response)
}
}
}
/// <p>Provides a complete mapping from the public accelerator IP address and port to destination EC2 instance
/// IP addresses and ports in the virtual public cloud (VPC) subnet endpoint for a custom routing accelerator.
/// For each subnet endpoint that you add, Global Accelerator creates a new static port mapping for the accelerator. The port
/// mappings don't change after Global Accelerator generates them, so you can retrieve and cache the full mapping on your servers. </p>
/// <p>If you remove a subnet from your accelerator, Global Accelerator removes (reclaims) the port mappings. If you add a subnet to
/// your accelerator, Global Accelerator creates new port mappings (the existing ones don't change). If you add or remove EC2 instances
/// in your subnet, the port mappings don't change, because the mappings are created when you add the subnet to Global Accelerator.</p>
/// <p>The mappings also include a flag for each destination denoting which destination IP addresses and
/// ports are allowed or denied traffic.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListCustomRoutingPortMappings {
_private: (),
}
impl ListCustomRoutingPortMappings {
/// Creates a new builder-style object to manufacture [`ListCustomRoutingPortMappingsInput`](crate::input::ListCustomRoutingPortMappingsInput)
pub fn builder() -> crate::input::list_custom_routing_port_mappings_input::Builder {
crate::input::list_custom_routing_port_mappings_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListCustomRoutingPortMappings {
type Output = std::result::Result<
crate::output::ListCustomRoutingPortMappingsOutput,
crate::error::ListCustomRoutingPortMappingsError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_custom_routing_port_mappings_error(response)
} else {
crate::operation_deser::parse_list_custom_routing_port_mappings_response(response)
}
}
}
/// <p>List the port mappings for a specific EC2 instance (destination) in a VPC subnet endpoint. The
/// response is the mappings for one destination IP address. This is useful when your subnet endpoint has mappings that
/// span multiple custom routing accelerators in your account, or for scenarios where you only want to
/// list the port mappings for a specific destination instance.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListCustomRoutingPortMappingsByDestination {
_private: (),
}
impl ListCustomRoutingPortMappingsByDestination {
/// Creates a new builder-style object to manufacture [`ListCustomRoutingPortMappingsByDestinationInput`](crate::input::ListCustomRoutingPortMappingsByDestinationInput)
pub fn builder() -> crate::input::list_custom_routing_port_mappings_by_destination_input::Builder
{
crate::input::list_custom_routing_port_mappings_by_destination_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListCustomRoutingPortMappingsByDestination {
type Output = std::result::Result<
crate::output::ListCustomRoutingPortMappingsByDestinationOutput,
crate::error::ListCustomRoutingPortMappingsByDestinationError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_custom_routing_port_mappings_by_destination_error(
response,
)
} else {
crate::operation_deser::parse_list_custom_routing_port_mappings_by_destination_response(
response,
)
}
}
}
/// <p>List the endpoint groups that are associated with a listener. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListEndpointGroups {
_private: (),
}
impl ListEndpointGroups {
/// Creates a new builder-style object to manufacture [`ListEndpointGroupsInput`](crate::input::ListEndpointGroupsInput)
pub fn builder() -> crate::input::list_endpoint_groups_input::Builder {
crate::input::list_endpoint_groups_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListEndpointGroups {
type Output = std::result::Result<
crate::output::ListEndpointGroupsOutput,
crate::error::ListEndpointGroupsError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_endpoint_groups_error(response)
} else {
crate::operation_deser::parse_list_endpoint_groups_response(response)
}
}
}
/// <p>List the listeners for an accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListListeners {
_private: (),
}
impl ListListeners {
/// Creates a new builder-style object to manufacture [`ListListenersInput`](crate::input::ListListenersInput)
pub fn builder() -> crate::input::list_listeners_input::Builder {
crate::input::list_listeners_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListListeners {
type Output =
std::result::Result<crate::output::ListListenersOutput, crate::error::ListListenersError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_listeners_error(response)
} else {
crate::operation_deser::parse_list_listeners_response(response)
}
}
}
/// <p>List all tags for an accelerator. </p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/global-accelerator/latest/dg/tagging-in-global-accelerator.html">Tagging
/// in AWS Global Accelerator</a> in the <i>AWS Global Accelerator Developer Guide</i>. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ListTagsForResource {
_private: (),
}
impl ListTagsForResource {
/// Creates a new builder-style object to manufacture [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput)
pub fn builder() -> crate::input::list_tags_for_resource_input::Builder {
crate::input::list_tags_for_resource_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ListTagsForResource {
type Output = std::result::Result<
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_list_tags_for_resource_error(response)
} else {
crate::operation_deser::parse_list_tags_for_resource_response(response)
}
}
}
/// <p>Provisions an IP address range to use with your AWS resources through bring your own IP
/// addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned,
/// it is ready to be advertised using <a href="https://docs.aws.amazon.com/global-accelerator/latest/api/AdvertiseByoipCidr.html">
/// AdvertiseByoipCidr</a>.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html">Bring Your Own
/// IP Addresses (BYOIP)</a> in the <i>AWS Global Accelerator Developer Guide</i>.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ProvisionByoipCidr {
_private: (),
}
impl ProvisionByoipCidr {
/// Creates a new builder-style object to manufacture [`ProvisionByoipCidrInput`](crate::input::ProvisionByoipCidrInput)
pub fn builder() -> crate::input::provision_byoip_cidr_input::Builder {
crate::input::provision_byoip_cidr_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for ProvisionByoipCidr {
type Output = std::result::Result<
crate::output::ProvisionByoipCidrOutput,
crate::error::ProvisionByoipCidrError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_provision_byoip_cidr_error(response)
} else {
crate::operation_deser::parse_provision_byoip_cidr_response(response)
}
}
}
/// <p>Remove endpoints from a custom routing accelerator.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct RemoveCustomRoutingEndpoints {
_private: (),
}
impl RemoveCustomRoutingEndpoints {
/// Creates a new builder-style object to manufacture [`RemoveCustomRoutingEndpointsInput`](crate::input::RemoveCustomRoutingEndpointsInput)
pub fn builder() -> crate::input::remove_custom_routing_endpoints_input::Builder {
crate::input::remove_custom_routing_endpoints_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for RemoveCustomRoutingEndpoints {
type Output = std::result::Result<
crate::output::RemoveCustomRoutingEndpointsOutput,
crate::error::RemoveCustomRoutingEndpointsError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_remove_custom_routing_endpoints_error(response)
} else {
crate::operation_deser::parse_remove_custom_routing_endpoints_response(response)
}
}
}
/// <p>Add tags to an accelerator resource. </p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/global-accelerator/latest/dg/tagging-in-global-accelerator.html">Tagging
/// in AWS Global Accelerator</a> in the <i>AWS Global Accelerator Developer Guide</i>. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct TagResource {
_private: (),
}
impl TagResource {
/// Creates a new builder-style object to manufacture [`TagResourceInput`](crate::input::TagResourceInput)
pub fn builder() -> crate::input::tag_resource_input::Builder {
crate::input::tag_resource_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for TagResource {
type Output =
std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_tag_resource_error(response)
} else {
crate::operation_deser::parse_tag_resource_response(response)
}
}
}
/// <p>Remove tags from a Global Accelerator resource. When you specify a tag key, the action removes both that key and its associated value.
/// The operation succeeds even if you attempt to remove tags from an accelerator that was already removed.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/global-accelerator/latest/dg/tagging-in-global-accelerator.html">Tagging
/// in AWS Global Accelerator</a> in the <i>AWS Global Accelerator Developer Guide</i>.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UntagResource {
_private: (),
}
impl UntagResource {
/// Creates a new builder-style object to manufacture [`UntagResourceInput`](crate::input::UntagResourceInput)
pub fn builder() -> crate::input::untag_resource_input::Builder {
crate::input::untag_resource_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for UntagResource {
type Output =
std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_untag_resource_error(response)
} else {
crate::operation_deser::parse_untag_resource_response(response)
}
}
}
/// <p>Update an accelerator. </p>
/// <important>
/// <p>Global Accelerator is a global service that supports endpoints in multiple AWS Regions but you must specify the
/// US West (Oregon) Region to create or update accelerators.</p>
/// </important>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateAccelerator {
_private: (),
}
impl UpdateAccelerator {
/// Creates a new builder-style object to manufacture [`UpdateAcceleratorInput`](crate::input::UpdateAcceleratorInput)
pub fn builder() -> crate::input::update_accelerator_input::Builder {
crate::input::update_accelerator_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for UpdateAccelerator {
type Output = std::result::Result<
crate::output::UpdateAcceleratorOutput,
crate::error::UpdateAcceleratorError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_update_accelerator_error(response)
} else {
crate::operation_deser::parse_update_accelerator_response(response)
}
}
}
/// <p>Update the attributes for an accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateAcceleratorAttributes {
_private: (),
}
impl UpdateAcceleratorAttributes {
/// Creates a new builder-style object to manufacture [`UpdateAcceleratorAttributesInput`](crate::input::UpdateAcceleratorAttributesInput)
pub fn builder() -> crate::input::update_accelerator_attributes_input::Builder {
crate::input::update_accelerator_attributes_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for UpdateAcceleratorAttributes {
type Output = std::result::Result<
crate::output::UpdateAcceleratorAttributesOutput,
crate::error::UpdateAcceleratorAttributesError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_update_accelerator_attributes_error(response)
} else {
crate::operation_deser::parse_update_accelerator_attributes_response(response)
}
}
}
/// <p>Update a custom routing accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateCustomRoutingAccelerator {
_private: (),
}
impl UpdateCustomRoutingAccelerator {
/// Creates a new builder-style object to manufacture [`UpdateCustomRoutingAcceleratorInput`](crate::input::UpdateCustomRoutingAcceleratorInput)
pub fn builder() -> crate::input::update_custom_routing_accelerator_input::Builder {
crate::input::update_custom_routing_accelerator_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for UpdateCustomRoutingAccelerator {
type Output = std::result::Result<
crate::output::UpdateCustomRoutingAcceleratorOutput,
crate::error::UpdateCustomRoutingAcceleratorError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_update_custom_routing_accelerator_error(response)
} else {
crate::operation_deser::parse_update_custom_routing_accelerator_response(response)
}
}
}
/// <p>Update the attributes for a custom routing accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateCustomRoutingAcceleratorAttributes {
_private: (),
}
impl UpdateCustomRoutingAcceleratorAttributes {
/// Creates a new builder-style object to manufacture [`UpdateCustomRoutingAcceleratorAttributesInput`](crate::input::UpdateCustomRoutingAcceleratorAttributesInput)
pub fn builder() -> crate::input::update_custom_routing_accelerator_attributes_input::Builder {
crate::input::update_custom_routing_accelerator_attributes_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for UpdateCustomRoutingAcceleratorAttributes {
type Output = std::result::Result<
crate::output::UpdateCustomRoutingAcceleratorAttributesOutput,
crate::error::UpdateCustomRoutingAcceleratorAttributesError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_update_custom_routing_accelerator_attributes_error(
response,
)
} else {
crate::operation_deser::parse_update_custom_routing_accelerator_attributes_response(
response,
)
}
}
}
/// <p>Update a listener for a custom routing accelerator. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateCustomRoutingListener {
_private: (),
}
impl UpdateCustomRoutingListener {
/// Creates a new builder-style object to manufacture [`UpdateCustomRoutingListenerInput`](crate::input::UpdateCustomRoutingListenerInput)
pub fn builder() -> crate::input::update_custom_routing_listener_input::Builder {
crate::input::update_custom_routing_listener_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for UpdateCustomRoutingListener {
type Output = std::result::Result<
crate::output::UpdateCustomRoutingListenerOutput,
crate::error::UpdateCustomRoutingListenerError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_update_custom_routing_listener_error(response)
} else {
crate::operation_deser::parse_update_custom_routing_listener_response(response)
}
}
}
/// <p>Update an endpoint group. A resource must be valid and active when you add it as an endpoint.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateEndpointGroup {
_private: (),
}
impl UpdateEndpointGroup {
/// Creates a new builder-style object to manufacture [`UpdateEndpointGroupInput`](crate::input::UpdateEndpointGroupInput)
pub fn builder() -> crate::input::update_endpoint_group_input::Builder {
crate::input::update_endpoint_group_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for UpdateEndpointGroup {
type Output = std::result::Result<
crate::output::UpdateEndpointGroupOutput,
crate::error::UpdateEndpointGroupError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_update_endpoint_group_error(response)
} else {
crate::operation_deser::parse_update_endpoint_group_response(response)
}
}
}
/// <p>Update a listener. </p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdateListener {
_private: (),
}
impl UpdateListener {
/// Creates a new builder-style object to manufacture [`UpdateListenerInput`](crate::input::UpdateListenerInput)
pub fn builder() -> crate::input::update_listener_input::Builder {
crate::input::update_listener_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for UpdateListener {
type Output =
std::result::Result<crate::output::UpdateListenerOutput, crate::error::UpdateListenerError>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_update_listener_error(response)
} else {
crate::operation_deser::parse_update_listener_response(response)
}
}
}
/// <p>Stops advertising an address range that is provisioned as an address pool.
/// You can perform this operation at most once every 10 seconds, even if you specify different address
/// ranges each time.</p>
/// <p>It can take a few minutes before traffic to the specified addresses stops routing to AWS because of
/// propagation delays.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html">Bring Your Own
/// IP Addresses (BYOIP)</a> in the <i>AWS Global Accelerator Developer Guide</i>.</p>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct WithdrawByoipCidr {
_private: (),
}
impl WithdrawByoipCidr {
/// Creates a new builder-style object to manufacture [`WithdrawByoipCidrInput`](crate::input::WithdrawByoipCidrInput)
pub fn builder() -> crate::input::withdraw_byoip_cidr_input::Builder {
crate::input::withdraw_byoip_cidr_input::Builder::default()
}
pub fn new() -> Self {
Self { _private: () }
}
}
impl smithy_http::response::ParseStrictResponse for WithdrawByoipCidr {
type Output = std::result::Result<
crate::output::WithdrawByoipCidrOutput,
crate::error::WithdrawByoipCidrError,
>;
fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
if !response.status().is_success() && response.status().as_u16() != 200 {
crate::operation_deser::parse_withdraw_byoip_cidr_error(response)
} else {
crate::operation_deser::parse_withdraw_byoip_cidr_response(response)
}
}
}
| 46.890833 | 220 | 0.703509 |
dd1125a3a0986a4f553ce85cf07e53e2ecebf8b5
| 564 |
//! A simple wrapper that can be inserted into a program to turn `exit` calls to `abort`, which `LibAFL` will be able to catch.
//! If you are on `MacOS`, use the env variables `DYLD_FORCE_FLAT_NAMESPACE=1 DYLD_INSERT_LIBRARIES="path/to/target/release/libdeexit.dylib" tool`
//! On Linux, use `LD_PRELOAD="path/to/target/release/libdeexit.so" tool`.
extern "C" {
fn abort();
}
/// Hooked `exit` function
#[no_mangle]
pub fn exit(status: i32) {
println!("DeExit: The target called exit with status code {}", status);
unsafe {
abort();
}
}
| 33.176471 | 146 | 0.684397 |
d9398a1c58cd9a65ca4f9dd8822d03f519889e4f
| 34,314 |
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Name resolution for lifetimes.
//!
//! Name resolution for lifetimes follows MUCH simpler rules than the
//! full resolve. For example, lifetime names are never exported or
//! used between functions, and they operate in a purely top-down
//! way. Therefore we break lifetime name resolution into a separate pass.
pub use self::DefRegion::*;
use self::ScopeChain::*;
use session::Session;
use middle::def::{self, DefMap};
use middle::region;
use middle::subst;
use middle::ty;
use std::fmt;
use std::mem::replace;
use syntax::ast;
use syntax::codemap::Span;
use syntax::parse::token::special_idents;
use util::nodemap::NodeMap;
use rustc_front::hir;
use rustc_front::print::pprust::lifetime_to_string;
use rustc_front::visit::{self, Visitor, FnKind};
#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
pub enum DefRegion {
DefStaticRegion,
DefEarlyBoundRegion(/* space */ subst::ParamSpace,
/* index */ u32,
/* lifetime decl */ ast::NodeId),
DefLateBoundRegion(ty::DebruijnIndex,
/* lifetime decl */ ast::NodeId),
DefFreeRegion(/* block scope */ region::DestructionScopeData,
/* lifetime decl */ ast::NodeId),
}
// Maps the id of each lifetime reference to the lifetime decl
// that it corresponds to.
pub type NamedRegionMap = NodeMap<DefRegion>;
struct LifetimeContext<'a> {
sess: &'a Session,
named_region_map: &'a mut NamedRegionMap,
scope: Scope<'a>,
def_map: &'a DefMap,
// Deep breath. Our representation for poly trait refs contains a single
// binder and thus we only allow a single level of quantification. However,
// the syntax of Rust permits quantification in two places, e.g., `T: for <'a> Foo<'a>`
// and `for <'a, 'b> &'b T: Foo<'a>`. In order to get the de Bruijn indices
// correct when representing these constraints, we should only introduce one
// scope. However, we want to support both locations for the quantifier and
// during lifetime resolution we want precise information (so we can't
// desugar in an earlier phase).
// SO, if we encounter a quantifier at the outer scope, we set
// trait_ref_hack to true (and introduce a scope), and then if we encounter
// a quantifier at the inner scope, we error. If trait_ref_hack is false,
// then we introduce the scope at the inner quantifier.
// I'm sorry.
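    // For exposition only, the two surface positions this flag distinguishes are:
    //
    //     where T: for<'a> Foo<'a>        // quantifier at the inner (bound) position
    //     where for<'a> &'a T: Foo<'a>    // quantifier at the outer (bounded-type) position
    //
    // Writing a quantifier in both positions for the same predicate is what the
    // "nested quantification of lifetimes" error (E0316) below rejects.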
trait_ref_hack: bool,
// List of labels in the function/method currently under analysis.
labels_in_fn: Vec<(ast::Name, Span)>,
}
enum ScopeChain<'a> {
/// EarlyScope(i, ['a, 'b, ...], s) extends s with early-bound
/// lifetimes, assigning indexes 'a => i, 'b => i+1, ... etc.
EarlyScope(subst::ParamSpace, &'a Vec<hir::LifetimeDef>, Scope<'a>),
/// LateScope(['a, 'b, ...], s) extends s with late-bound
/// lifetimes introduced by the declaration binder_id.
LateScope(&'a Vec<hir::LifetimeDef>, Scope<'a>),
/// lifetimes introduced by items within a code block are scoped
/// to that block.
BlockScope(region::DestructionScopeData, Scope<'a>),
RootScope
}
type Scope<'a> = &'a ScopeChain<'a>;
static ROOT_SCOPE: ScopeChain<'static> = RootScope;
pub fn krate(sess: &Session, krate: &hir::Crate, def_map: &DefMap) -> NamedRegionMap {
let mut named_region_map = NodeMap();
visit::walk_crate(&mut LifetimeContext {
sess: sess,
named_region_map: &mut named_region_map,
scope: &ROOT_SCOPE,
def_map: def_map,
trait_ref_hack: false,
labels_in_fn: vec![],
}, krate);
sess.abort_if_errors();
named_region_map
}
impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> {
fn visit_item(&mut self, item: &hir::Item) {
// Items save/restore the set of labels. This way inner items
// can freely reuse names, be they loop labels or lifetimes.
let saved = replace(&mut self.labels_in_fn, vec![]);
// Items always introduce a new root scope
self.with(RootScope, |_, this| {
match item.node {
hir::ItemFn(..) => {
// Fn lifetimes get added in visit_fn below:
visit::walk_item(this, item);
}
hir::ItemExternCrate(_) |
hir::ItemUse(_) |
hir::ItemMod(..) |
hir::ItemDefaultImpl(..) |
hir::ItemForeignMod(..) |
hir::ItemStatic(..) |
hir::ItemConst(..) => {
// These sorts of items have no lifetime parameters at all.
visit::walk_item(this, item);
}
hir::ItemTy(_, ref generics) |
hir::ItemEnum(_, ref generics) |
hir::ItemStruct(_, ref generics) |
hir::ItemTrait(_, ref generics, _, _) |
hir::ItemImpl(_, _, ref generics, _, _, _) => {
// These kinds of items have only early bound lifetime parameters.
let lifetimes = &generics.lifetimes;
let early_scope = EarlyScope(subst::TypeSpace, lifetimes, &ROOT_SCOPE);
this.with(early_scope, |old_scope, this| {
this.check_lifetime_defs(old_scope, lifetimes);
visit::walk_item(this, item);
});
}
}
});
// Done traversing the item; restore saved set of labels.
replace(&mut self.labels_in_fn, saved);
}
fn visit_foreign_item(&mut self, item: &hir::ForeignItem) {
// Items save/restore the set of labels. This way inner items
// can freely reuse names, be they loop labels or lifetimes.
let saved = replace(&mut self.labels_in_fn, vec![]);
// Items always introduce a new root scope
self.with(RootScope, |_, this| {
match item.node {
hir::ForeignItemFn(_, ref generics) => {
this.visit_early_late(subst::FnSpace, generics, |this| {
visit::walk_foreign_item(this, item);
})
}
hir::ForeignItemStatic(..) => {
visit::walk_foreign_item(this, item);
}
}
});
// Done traversing the item; restore saved set of labels.
replace(&mut self.labels_in_fn, saved);
}
fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl,
b: &'v hir::Block, s: Span, _: ast::NodeId) {
match fk {
FnKind::ItemFn(_, generics, _, _, _, _) => {
self.visit_early_late(subst::FnSpace, generics, |this| {
this.walk_fn(fk, fd, b, s)
})
}
FnKind::Method(_, sig, _) => {
self.visit_early_late(subst::FnSpace, &sig.generics, |this| {
this.walk_fn(fk, fd, b, s)
})
}
FnKind::Closure(..) => {
self.walk_fn(fk, fd, b, s)
}
}
}
fn visit_ty(&mut self, ty: &hir::Ty) {
match ty.node {
hir::TyBareFn(ref c) => {
self.with(LateScope(&c.lifetimes, self.scope), |old_scope, this| {
// a bare fn has no bounds, so everything
// contained within is scoped within its binder.
this.check_lifetime_defs(old_scope, &c.lifetimes);
visit::walk_ty(this, ty);
});
}
hir::TyPath(None, ref path) => {
// if this path references a trait, then this will resolve to
// a trait ref, which introduces a binding scope.
match self.def_map.borrow().get(&ty.id).map(|d| (d.base_def, d.depth)) {
Some((def::DefTrait(..), 0)) => {
self.with(LateScope(&Vec::new(), self.scope), |_, this| {
this.visit_path(path, ty.id);
});
}
_ => {
visit::walk_ty(self, ty);
}
}
}
_ => {
visit::walk_ty(self, ty)
}
}
}
fn visit_trait_item(&mut self, trait_item: &hir::TraitItem) {
// We reset the labels on every trait item, so that different
// methods in an impl can reuse label names.
let saved = replace(&mut self.labels_in_fn, vec![]);
if let hir::MethodTraitItem(ref sig, None) = trait_item.node {
self.visit_early_late(
subst::FnSpace, &sig.generics,
|this| visit::walk_trait_item(this, trait_item))
} else {
visit::walk_trait_item(self, trait_item);
}
replace(&mut self.labels_in_fn, saved);
}
fn visit_block(&mut self, b: &hir::Block) {
self.with(BlockScope(region::DestructionScopeData::new(b.id),
self.scope),
|_, this| visit::walk_block(this, b));
}
fn visit_lifetime(&mut self, lifetime_ref: &hir::Lifetime) {
if lifetime_ref.name == special_idents::static_lifetime.name {
self.insert_lifetime(lifetime_ref, DefStaticRegion);
return;
}
self.resolve_lifetime_ref(lifetime_ref);
}
fn visit_generics(&mut self, generics: &hir::Generics) {
for ty_param in generics.ty_params.iter() {
walk_list!(self, visit_ty_param_bound, &ty_param.bounds);
match ty_param.default {
Some(ref ty) => self.visit_ty(&**ty),
None => {}
}
}
for predicate in &generics.where_clause.predicates {
match predicate {
&hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ ref bounded_ty,
ref bounds,
ref bound_lifetimes,
.. }) => {
if !bound_lifetimes.is_empty() {
self.trait_ref_hack = true;
let result = self.with(LateScope(bound_lifetimes, self.scope),
|old_scope, this| {
this.check_lifetime_defs(old_scope, bound_lifetimes);
this.visit_ty(&**bounded_ty);
walk_list!(this, visit_ty_param_bound, bounds);
});
self.trait_ref_hack = false;
result
} else {
self.visit_ty(&**bounded_ty);
walk_list!(self, visit_ty_param_bound, bounds);
}
}
&hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime,
ref bounds,
.. }) => {
self.visit_lifetime(lifetime);
for bound in bounds {
self.visit_lifetime(bound);
}
}
&hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{ id,
ref path,
ref ty,
.. }) => {
self.visit_path(path, id);
self.visit_ty(&**ty);
}
}
}
}
fn visit_poly_trait_ref(&mut self,
trait_ref: &hir::PolyTraitRef,
_modifier: &hir::TraitBoundModifier) {
debug!("visit_poly_trait_ref trait_ref={:?}", trait_ref);
if !self.trait_ref_hack || !trait_ref.bound_lifetimes.is_empty() {
if self.trait_ref_hack {
println!("{:?}", trait_ref.span);
span_err!(self.sess, trait_ref.span, E0316,
"nested quantification of lifetimes");
}
self.with(LateScope(&trait_ref.bound_lifetimes, self.scope), |old_scope, this| {
this.check_lifetime_defs(old_scope, &trait_ref.bound_lifetimes);
for lifetime in &trait_ref.bound_lifetimes {
this.visit_lifetime_def(lifetime);
}
visit::walk_path(this, &trait_ref.trait_ref.path)
})
} else {
self.visit_trait_ref(&trait_ref.trait_ref)
}
}
}
#[derive(Copy, Clone, PartialEq)]
enum ShadowKind { Label, Lifetime }
struct Original { kind: ShadowKind, span: Span }
struct Shadower { kind: ShadowKind, span: Span }
fn original_label(span: Span) -> Original {
Original { kind: ShadowKind::Label, span: span }
}
fn shadower_label(span: Span) -> Shadower {
Shadower { kind: ShadowKind::Label, span: span }
}
fn original_lifetime(l: &hir::Lifetime) -> Original {
Original { kind: ShadowKind::Lifetime, span: l.span }
}
fn shadower_lifetime(l: &hir::Lifetime) -> Shadower {
Shadower { kind: ShadowKind::Lifetime, span: l.span }
}
impl ShadowKind {
fn desc(&self) -> &'static str {
match *self {
ShadowKind::Label => "label",
ShadowKind::Lifetime => "lifetime",
}
}
}
fn signal_shadowing_problem(
sess: &Session, name: ast::Name, orig: Original, shadower: Shadower) {
if let (ShadowKind::Lifetime, ShadowKind::Lifetime) = (orig.kind, shadower.kind) {
// lifetime/lifetime shadowing is an error
span_err!(sess, shadower.span, E0496,
"{} name `{}` shadows a \
{} name that is already in scope",
shadower.kind.desc(), name, orig.kind.desc());
} else {
// shadowing involving a label is only a warning, due to issues with
// labels and lifetimes not being macro-hygienic.
sess.span_warn(shadower.span,
&format!("{} name `{}` shadows a \
{} name that is already in scope",
shadower.kind.desc(), name, orig.kind.desc()));
}
sess.span_note(orig.span,
&format!("shadowed {} `{}` declared here",
orig.kind.desc(), name));
}
// Adds all labels in `b` to `ctxt.labels_in_fn`, signalling a warning
// if one of the labels shadows a lifetime or another label.
fn extract_labels<'v, 'a>(ctxt: &mut LifetimeContext<'a>, b: &'v hir::Block) {
struct GatherLabels<'a> {
sess: &'a Session,
scope: Scope<'a>,
labels_in_fn: &'a mut Vec<(ast::Name, Span)>,
}
let mut gather = GatherLabels {
sess: ctxt.sess,
scope: ctxt.scope,
labels_in_fn: &mut ctxt.labels_in_fn,
};
gather.visit_block(b);
return;
impl<'v, 'a> Visitor<'v> for GatherLabels<'a> {
fn visit_expr(&mut self, ex: &'v hir::Expr) {
// do not recurse into closures defined in the block
// since they are treated as separate fns from the POV of
// labels_in_fn
if let hir::ExprClosure(..) = ex.node {
return
}
if let Some(label) = expression_label(ex) {
for &(prior, prior_span) in &self.labels_in_fn[..] {
// FIXME (#24278): non-hygienic comparison
if label == prior {
signal_shadowing_problem(self.sess,
label,
original_label(prior_span),
shadower_label(ex.span));
}
}
check_if_label_shadows_lifetime(self.sess,
self.scope,
label,
ex.span);
self.labels_in_fn.push((label, ex.span));
}
visit::walk_expr(self, ex)
}
fn visit_item(&mut self, _: &hir::Item) {
// do not recurse into items defined in the block
}
}
fn expression_label(ex: &hir::Expr) -> Option<ast::Name> {
match ex.node {
hir::ExprWhile(_, _, Some(label)) |
hir::ExprLoop(_, Some(label)) => Some(label.name),
_ => None,
}
}
fn check_if_label_shadows_lifetime<'a>(sess: &'a Session,
mut scope: Scope<'a>,
label: ast::Name,
label_span: Span) {
loop {
match *scope {
BlockScope(_, s) => { scope = s; }
RootScope => { return; }
EarlyScope(_, lifetimes, s) |
LateScope(lifetimes, s) => {
for lifetime_def in lifetimes {
// FIXME (#24278): non-hygienic comparison
if label == lifetime_def.lifetime.name {
signal_shadowing_problem(
sess,
label,
original_lifetime(&lifetime_def.lifetime),
shadower_label(label_span));
return;
}
}
scope = s;
}
}
}
}
}
impl<'a> LifetimeContext<'a> {
// This is just like visit::walk_fn, except that it extracts the
// labels of the function body and swaps them in before visiting
// the function body itself.
fn walk_fn<'b>(&mut self,
fk: FnKind,
fd: &hir::FnDecl,
fb: &'b hir::Block,
_span: Span) {
match fk {
FnKind::ItemFn(_, generics, _, _, _, _) => {
visit::walk_fn_decl(self, fd);
self.visit_generics(generics);
}
FnKind::Method(_, sig, _) => {
visit::walk_fn_decl(self, fd);
self.visit_generics(&sig.generics);
self.visit_explicit_self(&sig.explicit_self);
}
FnKind::Closure(..) => {
visit::walk_fn_decl(self, fd);
}
}
        // After inspecting the decl, add all labels from the body to
// `self.labels_in_fn`.
extract_labels(self, fb);
self.visit_block(fb);
}
fn with<F>(&mut self, wrap_scope: ScopeChain, f: F) where
F: FnOnce(Scope, &mut LifetimeContext),
{
let LifetimeContext {sess, ref mut named_region_map, ..} = *self;
let mut this = LifetimeContext {
sess: sess,
named_region_map: *named_region_map,
scope: &wrap_scope,
def_map: self.def_map,
trait_ref_hack: self.trait_ref_hack,
labels_in_fn: self.labels_in_fn.clone(),
};
debug!("entering scope {:?}", this.scope);
f(self.scope, &mut this);
debug!("exiting scope {:?}", this.scope);
}
/// Visits self by adding a scope and handling recursive walk over the contents with `walk`.
///
/// Handles visiting fns and methods. These are a bit complicated because we must distinguish
/// early- vs late-bound lifetime parameters. We do this by checking which lifetimes appear
/// within type bounds; those are early bound lifetimes, and the rest are late bound.
///
/// For example:
///
/// fn foo<'a,'b,'c,T:Trait<'b>>(...)
///
/// Here `'a` and `'c` are late bound but `'b` is early bound. Note that early- and late-bound
/// lifetimes may be interspersed together.
///
/// If early bound lifetimes are present, we separate them into their own list (and likewise
/// for late bound). They will be numbered sequentially, starting from the lowest index that is
/// already in scope (for a fn item, that will be 0, but for a method it might not be). Late
/// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the
/// ordering is not important there.
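    ///
    /// As a hedged illustration of the partition this pass computes (names are for
    /// exposition only, not compiled):
    ///
    /// ```ignore
    /// // fn foo<'a, 'b, 'c, T: Trait<'b>>(...)
    /// //   'b appears in a type bound                  => early bound (EarlyScope)
    /// //   'a, 'c appear only elsewhere in the sig     => late bound  (LateScope)
    /// ```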
fn visit_early_late<F>(&mut self,
early_space: subst::ParamSpace,
generics: &hir::Generics,
walk: F) where
F: FnOnce(&mut LifetimeContext),
{
let referenced_idents = early_bound_lifetime_names(generics);
debug!("visit_early_late: referenced_idents={:?}",
referenced_idents);
let (early, late): (Vec<_>, _) = generics.lifetimes.iter().cloned().partition(
|l| referenced_idents.iter().any(|&i| i == l.lifetime.name));
self.with(EarlyScope(early_space, &early, self.scope), move |old_scope, this| {
this.with(LateScope(&late, this.scope), move |_, this| {
this.check_lifetime_defs(old_scope, &generics.lifetimes);
walk(this);
});
});
}
fn resolve_lifetime_ref(&mut self, lifetime_ref: &hir::Lifetime) {
// Walk up the scope chain, tracking the number of fn scopes
// that we pass through, until we find a lifetime with the
// given name or we run out of scopes. If we encounter a code
// block, then the lifetime is not bound but free, so switch
// over to `resolve_free_lifetime_ref()` to complete the
// search.
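        // Exposition only: for a reference to `'a` written as
        // `for<'a> fn(for<'b> fn(&'a u8))`, the walk crosses `'b`'s LateScope first
        // (bumping `late_depth` to 1) and then finds `'a`, so the stored region gets
        // DebruijnIndex::new(2), i.e. it counts binders out to the declaring `for<'a>`.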
let mut late_depth = 0;
let mut scope = self.scope;
loop {
match *scope {
BlockScope(blk_scope, s) => {
return self.resolve_free_lifetime_ref(blk_scope, lifetime_ref, s);
}
RootScope => {
break;
}
EarlyScope(space, lifetimes, s) => {
match search_lifetimes(lifetimes, lifetime_ref) {
Some((index, lifetime_def)) => {
let decl_id = lifetime_def.id;
let def = DefEarlyBoundRegion(space, index, decl_id);
self.insert_lifetime(lifetime_ref, def);
return;
}
None => {
scope = s;
}
}
}
LateScope(lifetimes, s) => {
match search_lifetimes(lifetimes, lifetime_ref) {
Some((_index, lifetime_def)) => {
let decl_id = lifetime_def.id;
let debruijn = ty::DebruijnIndex::new(late_depth + 1);
let def = DefLateBoundRegion(debruijn, decl_id);
self.insert_lifetime(lifetime_ref, def);
return;
}
None => {
late_depth += 1;
scope = s;
}
}
}
}
}
self.unresolved_lifetime_ref(lifetime_ref);
}
fn resolve_free_lifetime_ref(&mut self,
scope_data: region::DestructionScopeData,
lifetime_ref: &hir::Lifetime,
scope: Scope) {
debug!("resolve_free_lifetime_ref \
scope_data: {:?} lifetime_ref: {:?} scope: {:?}",
scope_data, lifetime_ref, scope);
// Walk up the scope chain, tracking the outermost free scope,
// until we encounter a scope that contains the named lifetime
// or we run out of scopes.
let mut scope_data = scope_data;
let mut scope = scope;
let mut search_result = None;
loop {
debug!("resolve_free_lifetime_ref \
scope_data: {:?} scope: {:?} search_result: {:?}",
scope_data, scope, search_result);
match *scope {
BlockScope(blk_scope_data, s) => {
scope_data = blk_scope_data;
scope = s;
}
RootScope => {
break;
}
EarlyScope(_, lifetimes, s) |
LateScope(lifetimes, s) => {
search_result = search_lifetimes(lifetimes, lifetime_ref);
if search_result.is_some() {
break;
}
scope = s;
}
}
}
match search_result {
Some((_depth, lifetime)) => {
let def = DefFreeRegion(scope_data, lifetime.id);
self.insert_lifetime(lifetime_ref, def);
}
None => {
self.unresolved_lifetime_ref(lifetime_ref);
}
}
}
fn unresolved_lifetime_ref(&self, lifetime_ref: &hir::Lifetime) {
span_err!(self.sess, lifetime_ref.span, E0261,
"use of undeclared lifetime name `{}`",
lifetime_ref.name);
}
fn check_lifetime_defs(&mut self, old_scope: Scope, lifetimes: &Vec<hir::LifetimeDef>) {
for i in 0..lifetimes.len() {
let lifetime_i = &lifetimes[i];
let special_idents = [special_idents::static_lifetime];
for lifetime in lifetimes {
if special_idents.iter().any(|&i| i.name == lifetime.lifetime.name) {
span_err!(self.sess, lifetime.lifetime.span, E0262,
"invalid lifetime parameter name: `{}`", lifetime.lifetime.name);
}
}
// It is a hard error to shadow a lifetime within the same scope.
for j in i + 1..lifetimes.len() {
let lifetime_j = &lifetimes[j];
if lifetime_i.lifetime.name == lifetime_j.lifetime.name {
span_err!(self.sess, lifetime_j.lifetime.span, E0263,
"lifetime name `{}` declared twice in \
the same scope",
lifetime_j.lifetime.name);
}
}
// It is a soft error to shadow a lifetime within a parent scope.
self.check_lifetime_def_for_shadowing(old_scope, &lifetime_i.lifetime);
for bound in &lifetime_i.bounds {
self.resolve_lifetime_ref(bound);
}
}
}
fn check_lifetime_def_for_shadowing(&self,
mut old_scope: Scope,
lifetime: &hir::Lifetime)
{
for &(label, label_span) in &self.labels_in_fn {
// FIXME (#24278): non-hygienic comparison
if lifetime.name == label {
signal_shadowing_problem(self.sess,
lifetime.name,
original_label(label_span),
shadower_lifetime(&lifetime));
return;
}
}
loop {
match *old_scope {
BlockScope(_, s) => {
old_scope = s;
}
RootScope => {
return;
}
EarlyScope(_, lifetimes, s) |
LateScope(lifetimes, s) => {
if let Some((_, lifetime_def)) = search_lifetimes(lifetimes, lifetime) {
signal_shadowing_problem(
self.sess,
lifetime.name,
original_lifetime(&lifetime_def),
shadower_lifetime(&lifetime));
return;
}
old_scope = s;
}
}
}
}
fn insert_lifetime(&mut self,
lifetime_ref: &hir::Lifetime,
def: DefRegion) {
if lifetime_ref.id == ast::DUMMY_NODE_ID {
self.sess.span_bug(lifetime_ref.span,
"lifetime reference not renumbered, \
probably a bug in syntax::fold");
}
debug!("lifetime_ref={:?} id={:?} resolved to {:?}",
lifetime_to_string(lifetime_ref),
lifetime_ref.id,
def);
self.named_region_map.insert(lifetime_ref.id, def);
}
}
fn search_lifetimes<'a>(lifetimes: &'a Vec<hir::LifetimeDef>,
lifetime_ref: &hir::Lifetime)
-> Option<(u32, &'a hir::Lifetime)> {
for (i, lifetime_decl) in lifetimes.iter().enumerate() {
if lifetime_decl.lifetime.name == lifetime_ref.name {
return Some((i as u32, &lifetime_decl.lifetime));
}
}
return None;
}
///////////////////////////////////////////////////////////////////////////
pub fn early_bound_lifetimes<'a>(generics: &'a hir::Generics) -> Vec<hir::LifetimeDef> {
let referenced_idents = early_bound_lifetime_names(generics);
if referenced_idents.is_empty() {
return Vec::new();
}
generics.lifetimes.iter()
.filter(|l| referenced_idents.iter().any(|&i| i == l.lifetime.name))
.cloned()
.collect()
}
/// Given a set of generic declarations, returns a list of names containing all early bound
/// lifetime names for those generics. (In fact, this list may also contain other names.)
fn early_bound_lifetime_names(generics: &hir::Generics) -> Vec<ast::Name> {
// Create two lists, dividing the lifetimes into early/late bound.
// Initially, all of them are considered late, but we will move
// things from late into early as we go if we find references to
// them.
let mut early_bound = Vec::new();
let mut late_bound = generics.lifetimes.iter()
.map(|l| l.lifetime.name)
.collect();
// Any lifetime that appears in a type bound is early.
{
let mut collector =
FreeLifetimeCollector { early_bound: &mut early_bound,
late_bound: &mut late_bound };
for ty_param in generics.ty_params.iter() {
walk_list!(&mut collector, visit_ty_param_bound, &ty_param.bounds);
}
for predicate in &generics.where_clause.predicates {
match predicate {
&hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ref bounds,
ref bounded_ty,
..}) => {
collector.visit_ty(&**bounded_ty);
walk_list!(&mut collector, visit_ty_param_bound, bounds);
}
&hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime,
ref bounds,
..}) => {
collector.visit_lifetime(lifetime);
for bound in bounds {
collector.visit_lifetime(bound);
}
}
&hir::WherePredicate::EqPredicate(_) => unimplemented!()
}
}
}
// Any lifetime that either has a bound or is referenced by a
// bound is early.
for lifetime_def in &generics.lifetimes {
if !lifetime_def.bounds.is_empty() {
shuffle(&mut early_bound, &mut late_bound,
lifetime_def.lifetime.name);
for bound in &lifetime_def.bounds {
shuffle(&mut early_bound, &mut late_bound,
bound.name);
}
}
}
return early_bound;
struct FreeLifetimeCollector<'a> {
early_bound: &'a mut Vec<ast::Name>,
late_bound: &'a mut Vec<ast::Name>,
}
impl<'a, 'v> Visitor<'v> for FreeLifetimeCollector<'a> {
fn visit_lifetime(&mut self, lifetime_ref: &hir::Lifetime) {
shuffle(self.early_bound, self.late_bound,
lifetime_ref.name);
}
}
fn shuffle(early_bound: &mut Vec<ast::Name>,
late_bound: &mut Vec<ast::Name>,
name: ast::Name) {
match late_bound.iter().position(|n| *n == name) {
Some(index) => {
late_bound.swap_remove(index);
early_bound.push(name);
}
None => { }
}
}
}
impl<'a> fmt::Debug for ScopeChain<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
EarlyScope(space, defs, _) => write!(fmt, "EarlyScope({:?}, {:?})", space, defs),
LateScope(defs, _) => write!(fmt, "LateScope({:?})", defs),
BlockScope(id, _) => write!(fmt, "BlockScope({:?})", id),
RootScope => write!(fmt, "RootScope"),
}
}
}
| 39.26087 | 99 | 0.502448 |
11ae63c31ca1724a25f4684ba68586c153bc828c
| 4,792 |
//! Provides a set of implementations for HIR objects that allow getting back to their location in a source file.
use either::Either;
use hir_def::{
nameres::{ModuleOrigin, ModuleSource},
src::{HasChildSource, HasSource as _},
Lookup, VariantId,
};
use hir_expand::InFile;
use syntax::ast;
use crate::{
db::HirDatabase, Const, Enum, EnumVariant, Field, FieldSource, Function, ImplDef,
LifetimeParam, MacroDef, Module, Static, Struct, Trait, TypeAlias, TypeParam, Union,
};
pub trait HasSource {
type Ast;
fn source(self, db: &dyn HirDatabase) -> InFile<Self::Ast>;
}
/// NB: Module is !HasSource, because it has two source nodes at the same time:
/// definition and declaration.
impl Module {
/// Returns a node which defines this module. That is, a file or a `mod foo {}` with items.
pub fn definition_source(self, db: &dyn HirDatabase) -> InFile<ModuleSource> {
let def_map = db.crate_def_map(self.id.krate);
def_map[self.id.local_id].definition_source(db.upcast())
}
pub fn is_mod_rs(self, db: &dyn HirDatabase) -> bool {
let def_map = db.crate_def_map(self.id.krate);
match def_map[self.id.local_id].origin {
ModuleOrigin::File { is_mod_rs, .. } => is_mod_rs,
_ => false,
}
}
/// Returns a node which declares this module, either a `mod foo;` or a `mod foo {}`.
/// `None` for the crate root.
pub fn declaration_source(self, db: &dyn HirDatabase) -> Option<InFile<ast::Module>> {
let def_map = db.crate_def_map(self.id.krate);
def_map[self.id.local_id].declaration_source(db.upcast())
}
}
impl HasSource for Field {
type Ast = FieldSource;
fn source(self, db: &dyn HirDatabase) -> InFile<FieldSource> {
let var = VariantId::from(self.parent);
let src = var.child_source(db.upcast());
src.map(|it| match it[self.id].clone() {
Either::Left(it) => FieldSource::Pos(it),
Either::Right(it) => FieldSource::Named(it),
})
}
}
impl HasSource for Struct {
type Ast = ast::Struct;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Struct> {
self.id.lookup(db.upcast()).source(db.upcast())
}
}
impl HasSource for Union {
type Ast = ast::Union;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Union> {
self.id.lookup(db.upcast()).source(db.upcast())
}
}
impl HasSource for Enum {
type Ast = ast::Enum;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Enum> {
self.id.lookup(db.upcast()).source(db.upcast())
}
}
impl HasSource for EnumVariant {
type Ast = ast::Variant;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Variant> {
self.parent.id.child_source(db.upcast()).map(|map| map[self.id].clone())
}
}
impl HasSource for Function {
type Ast = ast::Fn;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Fn> {
self.id.lookup(db.upcast()).source(db.upcast())
}
}
impl HasSource for Const {
type Ast = ast::Const;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Const> {
self.id.lookup(db.upcast()).source(db.upcast())
}
}
impl HasSource for Static {
type Ast = ast::Static;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Static> {
self.id.lookup(db.upcast()).source(db.upcast())
}
}
impl HasSource for Trait {
type Ast = ast::Trait;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Trait> {
self.id.lookup(db.upcast()).source(db.upcast())
}
}
impl HasSource for TypeAlias {
type Ast = ast::TypeAlias;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::TypeAlias> {
self.id.lookup(db.upcast()).source(db.upcast())
}
}
impl HasSource for MacroDef {
type Ast = ast::Macro;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Macro> {
InFile {
file_id: self.id.ast_id.expect("MacroDef without ast_id").file_id,
value: self.id.ast_id.expect("MacroDef without ast_id").to_node(db.upcast()),
}
}
}
impl HasSource for ImplDef {
type Ast = ast::Impl;
fn source(self, db: &dyn HirDatabase) -> InFile<ast::Impl> {
self.id.lookup(db.upcast()).source(db.upcast())
}
}
impl HasSource for TypeParam {
type Ast = Either<ast::Trait, ast::TypeParam>;
fn source(self, db: &dyn HirDatabase) -> InFile<Self::Ast> {
let child_source = self.id.parent.child_source(db.upcast());
child_source.map(|it| it[self.id.local_id].clone())
}
}
impl HasSource for LifetimeParam {
type Ast = ast::LifetimeParam;
fn source(self, db: &dyn HirDatabase) -> InFile<Self::Ast> {
let child_source = self.id.parent.child_source(db.upcast());
child_source.map(|it| it[self.id.local_id].clone())
}
}
| 33.51049 | 95 | 0.630426 |
bf05b6bb46b6717bcc597e83d91499bfd72e6e9e
| 1,526 |
use std::path::Path;
use async_trait::async_trait;
use nix::errno::Errno;
use rand::Rng;
use tracing::{debug, trace};
use super::injector_config::FaultsConfig;
use super::{filter, Injector};
use crate::hookfs::{Error, Result};
#[derive(Debug)]
pub struct FaultInjector {
filter: filter::Filter,
errnos: Vec<(Errno, i32)>,
sum: i32,
}
#[async_trait]
impl Injector for FaultInjector {
async fn inject(&self, method: &filter::Method, path: &Path) -> Result<()> {
debug!("test filter");
if self.filter.filter(method, path) {
debug!("inject io fault");
let mut rng = rand::thread_rng();
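            // Weighted random choice: draw a value in [0, sum) and walk the
            // (errno, weight) list, subtracting weights until the value goes
            // negative; the errno reached at that point is the injected fault.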
let attempt: f64 = rng.gen();
let mut attempt = (attempt * (self.sum as f64)) as i32;
for (err, p) in self.errnos.iter() {
attempt -= p;
if attempt < 0 {
debug!("return with error {}", err);
return Err(Error::Sys(*err));
}
}
}
Ok(())
}
}
impl FaultInjector {
pub fn build(conf: FaultsConfig) -> anyhow::Result<Self> {
trace!("build fault injector");
let errnos: Vec<_> = conf
.faults
.iter()
.map(|item| (Errno::from_i32(item.errno), item.weight))
.collect();
let sum = errnos.iter().fold(0, |acc, w| acc + w.1);
Ok(Self {
filter: filter::Filter::build(conf.filter)?,
errnos,
sum,
})
}
}
| 24.222222 | 80 | 0.516383 |
ed43095177ce3234bcf53443a7c0d52bbdaef0c2
| 5,267 |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use bytes::Bytes;
use fbthrift::{serialize, CompactProtocol, Deserialize, Protocol, Serialize, ThriftEnum};
use indexmap::{IndexMap, IndexSet};
use interface::{
NonstandardCollectionTypes, TestBytesShared, TestEnum, TestEnumEmpty, TestSkipV1, TestSkipV2,
};
use smallvec::SmallVec;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use std::fmt::Debug;
use std::io::Cursor;
#[test]
fn test_nonstandard_collection_types() {
assert_round_trip(NonstandardCollectionTypes {
defaultset: make_btreeset(),
btreeset: make_btreeset(),
hashset: make_hashset(),
indexset_a: make_indexset(),
indexset_b: make_indexset(),
indexset_c: IndexSet::new(),
defaultmap: make_btreemap(),
btreemap: make_btreemap(),
hashmap: make_hashmap(),
indexmap_a: make_indexmap(),
indexmap_b: make_indexmap(),
indexmap_c: IndexMap::new(),
bin_smallvec: SmallVec::from(&b"smallvec"[..]),
bin_bytes: Bytes::from(&b"bytes"[..]),
..Default::default()
});
}
fn make_btreeset() -> BTreeSet<String> {
let mut set = BTreeSet::new();
set.insert("btreeset".to_owned());
set
}
fn make_hashset() -> HashSet<String> {
let mut set = HashSet::new();
set.insert("hashset".to_owned());
set
}
fn make_indexset() -> IndexSet<String> {
let mut set = IndexSet::new();
set.insert("indexset".to_owned());
set
}
fn make_btreemap() -> BTreeMap<String, String> {
let mut map = BTreeMap::new();
map.insert("btreemap".to_owned(), String::new());
map
}
fn make_hashmap() -> HashMap<String, String> {
let mut map = HashMap::new();
map.insert("hashmap".to_owned(), String::new());
map
}
fn make_indexmap() -> IndexMap<String, String> {
let mut map = IndexMap::new();
map.insert("indexmap".to_owned(), String::new());
map
}
fn assert_round_trip<T>(value: T)
where
T: Serialize<<CompactProtocol as Protocol>::Sizer>
+ Serialize<<CompactProtocol as Protocol>::Serializer>
+ Deserialize<<CompactProtocol as Protocol>::Deserializer>
+ PartialEq
+ Debug,
{
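    // Serialize with the compact protocol, deserialize the resulting bytes, and
    // require the round-tripped value to compare equal to the original.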
let bytes = serialize!(CompactProtocol, |w| Serialize::write(&value, w));
let mut deserializer = <CompactProtocol>::deserializer(Cursor::new(bytes));
let back = Deserialize::read(&mut deserializer).unwrap();
assert_eq!(value, back);
}
#[test]
fn test_variants_fn_enum() {
let expected: &'static [&'static str] = &["FOO", "BAR", "BAZ"];
assert_eq!(TestEnum::variants(), expected);
let expected_empty: &'static [&'static str] = &[];
assert_eq!(TestEnumEmpty::variants(), expected_empty);
}
#[test]
fn test_variant_values_fn_enum() {
let expected = &[TestEnum::FOO, TestEnum::BAR, TestEnum::BAZ];
assert_eq!(TestEnum::variant_values(), expected);
let expected_empty: &'static [TestEnumEmpty] = &[];
assert_eq!(TestEnumEmpty::variant_values(), expected_empty);
}
#[test]
fn test_enumerate_fn_enum() {
let expected = &[
(TestEnum::FOO, "FOO"),
(TestEnum::BAR, "BAR"),
(TestEnum::BAZ, "BAZ"),
];
assert_eq!(TestEnum::enumerate(), expected);
let expected_empty: &'static [(TestEnumEmpty, &str)] = &[];
assert_eq!(TestEnumEmpty::enumerate(), expected_empty);
}
#[test]
fn test_deserialize_skip_seq() {
let v2 = TestSkipV2::default();
let bytes = serialize!(CompactProtocol, |w| Serialize::write(&v2, w));
let mut deserializer = <CompactProtocol>::deserializer(Cursor::new(bytes));
let v1: TestSkipV1 = Deserialize::read(&mut deserializer).unwrap();
assert_eq!(v1, TestSkipV1::default());
}
#[test]
fn test_bytes_shared() {
// Test that when using `Bytes` for `binary` types and for the buffer the
// data is deserialized from, the deserialized structs share data with the
// buffer. Do this by deserializing the same `Bytes`-backed buffer twice
// and checking both deserialized copies have the same address. This
// should be somewhere within the serialized buffer.
let original = TestBytesShared {
b: Bytes::from(&b"data"[..]),
..Default::default()
};
let bytes = serialize!(CompactProtocol, |w| Serialize::write(&original, w));
let mut deserializer1 = <CompactProtocol>::deserializer(Cursor::new(bytes.clone()));
let shared1: TestBytesShared = Deserialize::read(&mut deserializer1).unwrap();
let mut deserializer2 = <CompactProtocol>::deserializer(Cursor::new(bytes));
let shared2: TestBytesShared = Deserialize::read(&mut deserializer2).unwrap();
assert_eq!(shared1.b.as_ptr() as usize, shared2.b.as_ptr() as usize);
}
| 34.201299 | 97 | 0.670211 |
fbefebbc116d91be76b21e021c2b52f44d796c3a
| 506 |
// Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
use std::sync::Arc;
use common_exception::Result;
use crate::datasources::local::LocalDatabase;
use crate::datasources::Database;
pub struct LocalFactory;
impl LocalFactory {
pub fn create() -> Self {
Self
}
pub fn load_databases(&self) -> Result<Vec<Arc<dyn Database>>> {
let databases: Vec<Arc<dyn Database>> = vec![Arc::new(LocalDatabase::create())];
Ok(databases)
}
}
| 21.083333 | 88 | 0.664032 |
de7f4a02082e7f2265e94e2213abdaa74683bacf
| 10,571 |
extern crate graph;
extern crate jsonrpc_http_server;
extern crate lazy_static;
extern crate serde;
use graph::prelude::futures03::channel::{mpsc, oneshot};
use graph::prelude::futures03::SinkExt;
use graph::prelude::serde_json;
use graph::prelude::{JsonRpcServer as JsonRpcServerTrait, *};
use jsonrpc_http_server::{
jsonrpc_core::{self, Compatibility, IoHandler, Params, Value},
RestApi, Server, ServerBuilder,
};
use lazy_static::lazy_static;
use std::collections::BTreeMap;
use std::env;
use std::io;
use std::net::{Ipv4Addr, SocketAddrV4};
lazy_static! {
static ref EXTERNAL_HTTP_BASE_URL: Option<String> = env::var_os("EXTERNAL_HTTP_BASE_URL")
.map(|s| s.into_string().expect("invalid external HTTP base URL"));
static ref EXTERNAL_WS_BASE_URL: Option<String> = env::var_os("EXTERNAL_WS_BASE_URL")
.map(|s| s.into_string().expect("invalid external WS base URL"));
}
const JSON_RPC_DEPLOY_ERROR: i64 = 0;
const JSON_RPC_REMOVE_ERROR: i64 = 1;
const JSON_RPC_CREATE_ERROR: i64 = 2;
const JSON_RPC_REASSIGN_ERROR: i64 = 3;
#[derive(Debug, Deserialize)]
struct SubgraphCreateParams {
name: SubgraphName,
}
#[derive(Debug, Deserialize)]
struct SubgraphDeployParams {
name: SubgraphName,
ipfs_hash: DeploymentHash,
node_id: Option<NodeId>,
}
#[derive(Debug, Deserialize)]
struct SubgraphRemoveParams {
name: SubgraphName,
}
#[derive(Debug, Deserialize)]
struct SubgraphReassignParams {
ipfs_hash: DeploymentHash,
node_id: NodeId,
}
pub struct JsonRpcServer<R> {
registrar: Arc<R>,
http_port: u16,
ws_port: u16,
node_id: NodeId,
logger: Logger,
}
impl<R: SubgraphRegistrar> JsonRpcServer<R> {
/// Handler for the `subgraph_create` endpoint.
async fn create_handler(
&self,
params: SubgraphCreateParams,
) -> Result<Value, jsonrpc_core::Error> {
info!(&self.logger, "Received subgraph_create request"; "params" => format!("{:?}", params));
match self.registrar.create_subgraph(params.name.clone()).await {
Ok(result) => {
Ok(serde_json::to_value(result).expect("invalid subgraph creation result"))
}
Err(e) => Err(json_rpc_error(
&self.logger,
"subgraph_create",
e,
JSON_RPC_CREATE_ERROR,
params,
)),
}
}
/// Handler for the `subgraph_deploy` endpoint.
async fn deploy_handler(
&self,
params: SubgraphDeployParams,
) -> Result<Value, jsonrpc_core::Error> {
info!(&self.logger, "Received subgraph_deploy request"; "params" => format!("{:?}", params));
let node_id = params.node_id.clone().unwrap_or(self.node_id.clone());
let routes = subgraph_routes(¶ms.name, self.http_port, self.ws_port);
match self
.registrar
.create_subgraph_version(params.name.clone(), params.ipfs_hash.clone(), node_id)
.await
{
Ok(_) => Ok(routes),
Err(e) => Err(json_rpc_error(
&self.logger,
"subgraph_deploy",
e,
JSON_RPC_DEPLOY_ERROR,
params,
)),
}
}
/// Handler for the `subgraph_remove` endpoint.
async fn remove_handler(
&self,
params: SubgraphRemoveParams,
) -> Result<Value, jsonrpc_core::Error> {
info!(&self.logger, "Received subgraph_remove request"; "params" => format!("{:?}", params));
match self.registrar.remove_subgraph(params.name.clone()).await {
Ok(_) => Ok(Value::Null),
Err(e) => Err(json_rpc_error(
&self.logger,
"subgraph_remove",
e,
JSON_RPC_REMOVE_ERROR,
params,
)),
}
}
/// Handler for the `subgraph_assign` endpoint.
async fn reassign_handler(
&self,
params: SubgraphReassignParams,
) -> Result<Value, jsonrpc_core::Error> {
let logger = self.logger.clone();
info!(logger, "Received subgraph_reassignment request"; "params" => format!("{:?}", params));
match self
.registrar
.reassign_subgraph(¶ms.ipfs_hash, ¶ms.node_id)
.await
{
Ok(_) => Ok(Value::Null),
Err(e) => Err(json_rpc_error(
&logger,
"subgraph_reassign",
e,
JSON_RPC_REASSIGN_ERROR,
params,
)),
}
}
}
impl<R> JsonRpcServerTrait<R> for JsonRpcServer<R>
where
R: SubgraphRegistrar,
{
type Server = Server;
fn serve(
port: u16,
http_port: u16,
ws_port: u16,
registrar: Arc<R>,
node_id: NodeId,
logger: Logger,
) -> Result<Self::Server, io::Error> {
let logger = logger.new(o!("component" => "JsonRpcServer"));
info!(
logger,
"Starting JSON-RPC admin server at: http://localhost:{}", port
);
let addr = SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port);
let mut handler = IoHandler::with_compatibility(Compatibility::Both);
let arc_self = Arc::new(JsonRpcServer {
registrar,
http_port,
ws_port,
node_id,
logger,
});
let (task_sender, task_receiver) =
mpsc::channel::<Box<dyn std::future::Future<Output = ()> + Send + Unpin>>(100);
graph::spawn(task_receiver.for_each(|f| {
async {
// Blocking due to store interactions. Won't be blocking after #905.
graph::spawn_blocking(f);
}
}));
// This is a hack required because the json-rpc crate is not updated to tokio 0.2.
// We should watch the `jsonrpsee` crate and switch to that once it's ready.
async fn tokio02_spawn<I: Send + 'static, ER: Send + 'static>(
mut task_sink: mpsc::Sender<Box<dyn std::future::Future<Output = ()> + Send + Unpin>>,
future: impl std::future::Future<Output = Result<I, ER>> + Send + Unpin + 'static,
) -> Result<I, ER>
where
I: Debug,
ER: Debug,
{
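            // Ship the future over the task channel (it is spawned via
            // `graph::spawn_blocking` above) and wait on a oneshot channel for its
            // result, so the jsonrpc handler ends up with an ordinary `Result`.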
let (return_sender, return_receiver) = oneshot::channel();
task_sink
.send(Box::new(future.map(move |res| {
return_sender.send(res).expect("`return_receiver` dropped");
})))
.await
.expect("task receiver dropped");
return_receiver.await.expect("`return_sender` dropped")
}
let me = arc_self.clone();
let sender = task_sender.clone();
handler.add_method("subgraph_create", move |params: Params| {
let me = me.clone();
Box::pin(tokio02_spawn(
sender.clone(),
async move {
let params = params.parse()?;
me.create_handler(params).await
}
.boxed(),
))
.compat()
});
let me = arc_self.clone();
let sender = task_sender.clone();
handler.add_method("subgraph_deploy", move |params: Params| {
let me = me.clone();
Box::pin(tokio02_spawn(
sender.clone(),
async move {
let params = params.parse()?;
me.deploy_handler(params).await
}
.boxed(),
))
.compat()
});
let me = arc_self.clone();
let sender = task_sender.clone();
handler.add_method("subgraph_remove", move |params: Params| {
let me = me.clone();
Box::pin(tokio02_spawn(
sender.clone(),
async move {
let params = params.parse()?;
me.remove_handler(params).await
}
.boxed(),
))
.compat()
});
let me = arc_self.clone();
let sender = task_sender.clone();
handler.add_method("subgraph_reassign", move |params: Params| {
let me = me.clone();
Box::pin(tokio02_spawn(
sender.clone(),
async move {
let params = params.parse()?;
me.reassign_handler(params).await
}
.boxed(),
))
.compat()
});
ServerBuilder::new(handler)
// Enable REST API:
// POST /<method>/<param1>/<param2>
.rest_api(RestApi::Secure)
.start_http(&addr.into())
}
}
fn json_rpc_error(
logger: &Logger,
operation: &str,
e: SubgraphRegistrarError,
code: i64,
params: impl std::fmt::Debug,
) -> jsonrpc_core::Error {
error!(logger, "{} failed", operation;
"error" => format!("{:?}", e),
"params" => format!("{:?}", params));
let message = if let SubgraphRegistrarError::Unknown(_) = e {
"internal error".to_owned()
} else {
e.to_string()
};
jsonrpc_core::Error {
code: jsonrpc_core::ErrorCode::ServerError(code),
message,
data: None,
}
}
pub fn parse_response(response: Value) -> Result<(), jsonrpc_core::Error> {
// serde deserialization of the `id` field to an `Id` struct is somehow
// incompatible with the `arbitrary-precision` feature which we use, so we
// need custom parsing logic.
let object = response.as_object().unwrap();
if let Some(error) = object.get("error") {
Err(serde_json::from_value(error.clone()).unwrap())
} else {
Ok(())
}
}
fn subgraph_routes(name: &SubgraphName, http_port: u16, ws_port: u16) -> Value {
let http_base_url = EXTERNAL_HTTP_BASE_URL
.clone()
.unwrap_or_else(|| format!(":{}", http_port));
let ws_base_url = EXTERNAL_WS_BASE_URL
.clone()
.unwrap_or_else(|| format!(":{}", ws_port));
let mut map = BTreeMap::new();
map.insert(
"playground",
format!("{}/subgraphs/name/{}/graphql", http_base_url, name),
);
map.insert(
"queries",
format!("{}/subgraphs/name/{}", http_base_url, name),
);
map.insert(
"subscriptions",
format!("{}/subgraphs/name/{}", ws_base_url, name),
);
jsonrpc_core::to_value(map).unwrap()
}
| 30.552023 | 101 | 0.549333 |
644a44f1989dbd5d6e75b2628cf80dfd20116f3f
| 9,996 |
// Inline assembly support.
//
use State::*;
use rustc_data_structures::thin_vec::ThinVec;
use errors::DiagnosticBuilder;
use syntax::ast;
use syntax::ext::base::{self, *};
use syntax::parse;
use syntax::parse::token::{self, Token};
use syntax::ptr::P;
use syntax::symbol::{kw, sym, Symbol};
use syntax::ast::AsmDialect;
use syntax_pos::Span;
use syntax::tokenstream;
use syntax::{span_err, struct_span_err};
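// Parsing proceeds through the colon-separated sections of `asm!` in a fixed
// order: the template string, then outputs, inputs, clobbers and options. The
// `State` enum below tracks which section is currently being parsed.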
enum State {
Asm,
Outputs,
Inputs,
Clobbers,
Options,
StateNone,
}
impl State {
fn next(&self) -> State {
match *self {
Asm => Outputs,
Outputs => Inputs,
Inputs => Clobbers,
Clobbers => Options,
Options => StateNone,
StateNone => StateNone,
}
}
}
const OPTIONS: &[Symbol] = &[sym::volatile, sym::alignstack, sym::intel];
pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt<'_>,
sp: Span,
tts: &[tokenstream::TokenTree])
-> Box<dyn base::MacResult + 'cx> {
let mut inline_asm = match parse_inline_asm(cx, sp, tts) {
Ok(Some(inline_asm)) => inline_asm,
Ok(None) => return DummyResult::any(sp),
Err(mut err) => {
err.emit();
return DummyResult::any(sp);
}
};
// If there are no outputs, the inline assembly is executed just for its side effects,
// so ensure that it is volatile
if inline_asm.outputs.is_empty() {
inline_asm.volatile = true;
}
MacEager::expr(P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprKind::InlineAsm(P(inline_asm)),
span: sp.with_ctxt(cx.backtrace()),
attrs: ThinVec::new(),
}))
}
fn parse_inline_asm<'a>(
cx: &mut ExtCtxt<'a>,
sp: Span,
tts: &[tokenstream::TokenTree],
) -> Result<Option<ast::InlineAsm>, DiagnosticBuilder<'a>> {
// Split the tts before the first colon, to avoid `asm!("x": y)` being
// parsed as `asm!(z)` with `z = "x": y` which is type ascription.
let first_colon = tts.iter()
.position(|tt| {
match *tt {
tokenstream::TokenTree::Token(Token { kind: token::Colon, .. }) |
tokenstream::TokenTree::Token(Token { kind: token::ModSep, .. }) => true,
_ => false,
}
})
.unwrap_or(tts.len());
let mut p = cx.new_parser_from_tts(&tts[first_colon..]);
let mut asm = kw::Invalid;
let mut asm_str_style = None;
let mut outputs = Vec::new();
let mut inputs = Vec::new();
let mut clobs = Vec::new();
let mut volatile = false;
let mut alignstack = false;
let mut dialect = AsmDialect::Att;
let mut state = Asm;
'statement: loop {
match state {
Asm => {
if asm_str_style.is_some() {
// If we already have a string with instructions,
// ending up in Asm state again is an error.
return Err(struct_span_err!(
cx.parse_sess.span_diagnostic,
sp,
E0660,
"malformed inline assembly"
));
}
// Nested parser, stop before the first colon (see above).
let mut p2 = cx.new_parser_from_tts(&tts[..first_colon]);
if p2.token == token::Eof {
let mut err =
cx.struct_span_err(sp, "macro requires a string literal as an argument");
err.span_label(sp, "string literal required");
return Err(err);
}
let expr = p2.parse_expr()?;
let (s, style) =
match expr_to_string(cx, expr, "inline assembly must be a string literal") {
Some((s, st)) => (s, st),
None => return Ok(None),
};
// This is most likely malformed.
if p2.token != token::Eof {
let mut extra_tts = p2.parse_all_token_trees()?;
extra_tts.extend(tts[first_colon..].iter().cloned());
p = parse::stream_to_parser(
cx.parse_sess,
extra_tts.into_iter().collect(),
Some("inline assembly"),
);
}
asm = s;
asm_str_style = Some(style);
}
Outputs => {
while p.token != token::Eof && p.token != token::Colon && p.token != token::ModSep {
if !outputs.is_empty() {
p.eat(&token::Comma);
}
let (constraint, _) = p.parse_str()?;
let span = p.prev_span;
p.expect(&token::OpenDelim(token::Paren))?;
let expr = p.parse_expr()?;
p.expect(&token::CloseDelim(token::Paren))?;
// Expands a read+write operand into two operands.
//
// Use '+' modifier when you want the same expression
// to be both an input and an output at the same time.
// It's the opposite of '=&' which means that the memory
// cannot be shared with any other operand (usually when
// a register is clobbered early.)
let constraint_str = constraint.as_str();
let mut ch = constraint_str.chars();
let output = match ch.next() {
Some('=') => None,
Some('+') => {
Some(Symbol::intern(&format!("={}", ch.as_str())))
}
_ => {
span_err!(cx, span, E0661,
"output operand constraint lacks '=' or '+'");
None
}
};
let is_rw = output.is_some();
let is_indirect = constraint_str.contains("*");
outputs.push(ast::InlineAsmOutput {
constraint: output.unwrap_or(constraint),
expr,
is_rw,
is_indirect,
});
}
}
Inputs => {
while p.token != token::Eof && p.token != token::Colon && p.token != token::ModSep {
if !inputs.is_empty() {
p.eat(&token::Comma);
}
let (constraint, _) = p.parse_str()?;
if constraint.as_str().starts_with("=") {
span_err!(cx, p.prev_span, E0662,
"input operand constraint contains '='");
} else if constraint.as_str().starts_with("+") {
span_err!(cx, p.prev_span, E0663,
"input operand constraint contains '+'");
}
p.expect(&token::OpenDelim(token::Paren))?;
let input = p.parse_expr()?;
p.expect(&token::CloseDelim(token::Paren))?;
inputs.push((constraint, input));
}
}
Clobbers => {
while p.token != token::Eof && p.token != token::Colon && p.token != token::ModSep {
if !clobs.is_empty() {
p.eat(&token::Comma);
}
let (s, _) = p.parse_str()?;
if OPTIONS.iter().any(|&opt| s == opt) {
cx.span_warn(p.prev_span, "expected a clobber, found an option");
} else if s.as_str().starts_with("{") || s.as_str().ends_with("}") {
span_err!(cx, p.prev_span, E0664,
"clobber should not be surrounded by braces");
}
clobs.push(s);
}
}
Options => {
let (option, _) = p.parse_str()?;
if option == sym::volatile {
// Indicates that the inline assembly has side effects
// and must not be optimized out along with its outputs.
volatile = true;
} else if option == sym::alignstack {
alignstack = true;
} else if option == sym::intel {
dialect = AsmDialect::Intel;
} else {
cx.span_warn(p.prev_span, "unrecognized option");
}
if p.token == token::Comma {
p.eat(&token::Comma);
}
}
StateNone => (),
}
loop {
// MOD_SEP is a double colon '::' without space in between.
// When encountered, the state must be advanced twice.
match (&p.token.kind, state.next(), state.next().next()) {
(&token::Colon, StateNone, _) |
(&token::ModSep, _, StateNone) => {
p.bump();
break 'statement;
}
(&token::Colon, st, _) |
(&token::ModSep, _, st) => {
p.bump();
state = st;
}
(&token::Eof, ..) => break 'statement,
_ => break,
}
}
}
Ok(Some(ast::InlineAsm {
asm,
asm_str_style: asm_str_style.unwrap(),
outputs,
inputs,
clobbers: clobs,
volatile,
alignstack,
dialect,
}))
}
| 35.446809 | 100 | 0.439976 |
268a73002d2064dd607578080f5f73ba50fd27a2
| 1,933 |
mod ascii;
mod binary;
mod binary_packet;
use client::Stats;
use enum_dispatch::enum_dispatch;
use error::MemcacheError;
pub(crate) use protocol::ascii::AsciiProtocol;
pub(crate) use protocol::binary::BinaryProtocol;
use std::collections::HashMap;
use stream::Stream;
use value::{FromMemcacheValue, ToMemcacheValue};
#[enum_dispatch]
pub enum Protocol {
Ascii(AsciiProtocol<Stream>),
Binary(BinaryProtocol),
}
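// The `enum_dispatch` attribute pairs the `Protocol` enum with `ProtocolTrait`
// below, generating a forwarding impl so each call dispatches to the Ascii or
// Binary variant without going through a trait object.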
#[enum_dispatch(Protocol)]
pub trait ProtocolTrait {
fn auth(&mut self, username: &str, password: &str) -> Result<(), MemcacheError>;
fn version(&mut self) -> Result<String, MemcacheError>;
fn flush(&mut self) -> Result<(), MemcacheError>;
fn flush_with_delay(&mut self, delay: u32) -> Result<(), MemcacheError>;
fn get<V: FromMemcacheValue>(&mut self, key: &str) -> Result<Option<V>, MemcacheError>;
fn gets<V: FromMemcacheValue>(&mut self, keys: Vec<&str>) -> Result<HashMap<String, V>, MemcacheError>;
fn set<V: ToMemcacheValue<Stream>>(&mut self, key: &str, value: V, expiration: u32) -> Result<(), MemcacheError>;
fn add<V: ToMemcacheValue<Stream>>(&mut self, key: &str, value: V, expiration: u32) -> Result<(), MemcacheError>;
fn replace<V: ToMemcacheValue<Stream>>(
&mut self,
key: &str,
value: V,
expiration: u32,
) -> Result<(), MemcacheError>;
fn append<V: ToMemcacheValue<Stream>>(&mut self, key: &str, value: V) -> Result<(), MemcacheError>;
fn prepend<V: ToMemcacheValue<Stream>>(&mut self, key: &str, value: V) -> Result<(), MemcacheError>;
fn delete(&mut self, key: &str) -> Result<bool, MemcacheError>;
fn increment(&mut self, key: &str, amount: u64) -> Result<u64, MemcacheError>;
fn decrement(&mut self, key: &str, amount: u64) -> Result<u64, MemcacheError>;
fn touch(&mut self, key: &str, expiration: u32) -> Result<bool, MemcacheError>;
fn stats(&mut self) -> Result<Stats, MemcacheError>;
}
| 43.931818 | 117 | 0.675116 |
8aa6524f453df401d9d120e25851bb6da936034d
| 3,654 |
use std::collections::VecDeque;
type Component = (usize, usize);
// Component = Tuple[int,int]
fn connects(component: &Component, port: usize) -> bool {
component.0 == port || component.1 == port
}
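// Breadth-first search over all bridges that start from a 0-port component,
// carrying the open port, the accumulated strength and the components used so far.
// Part 1 keeps the strongest finished bridge; part 2 keeps the strongest among the
// longest ones (ties broken by strength via the (length, strength) tuple).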
fn solve(components: &Vec<Component>) -> (usize, usize) {
let starts: Vec<Component> = components
.iter()
.copied()
.filter(|component| component.0 == 0 || component.1 == 0)
.collect();
let mut queue: VecDeque<(usize, usize, Vec<Component>)> = VecDeque::new();
for component in starts {
queue.push_back((
if component.0 == 0 {
component.1
} else {
component.0
},
0,
vec![component],
));
}
let mut longest_strongest_1 = (0, 0);
let mut longest_strongest_2 = (0, 0);
while let Some((last_port, strength, used)) = queue.pop_front() {
let mut continued = false;
for component in components {
if connects(component, last_port) && !used.contains(component) {
continued = true;
let next_port = if component.0 == last_port {
component.1
} else {
component.0
};
let mut new_used = used.clone();
new_used.push(*component);
queue.push_back((next_port, strength + last_port * 2, new_used));
}
}
if !continued {
longest_strongest_1 = longest_strongest_1.max((0, strength + last_port));
longest_strongest_2 = longest_strongest_2.max((used.len(), strength + last_port));
}
}
(longest_strongest_1.1, longest_strongest_2.1)
}
// def solve(components: List[Component]) -> Tuple[int,int]:
// starts = [ component for component in components if 0 in component ]
// queue: List[Tuple[int,int,List[Component]]] = [ (next(port for port in start if port != 0), 0, [start])
// for start in starts ]
// longestStrongest1 = (0, 0)
// longestStrongest2 = (0, 0)
// while queue:
// lastPort, strength, used = queue.pop(0)
// continued = False
// for component in components:
// if lastPort in component and component not in used:
// continued = True
// nextPort = lastPort if component[0] == component[1] else next(port for port in component if port != lastPort)
// newUsed = used[:]
// newUsed.append(component)
// queue.append((nextPort, strength + lastPort * 2, newUsed))
// if not continued:
// longestStrongest1 = max(longestStrongest1, (0, strength + lastPort))
// longestStrongest2 = max(longestStrongest2, (len(used), strength + lastPort))
// return longestStrongest1[1], longestStrongest2[1]
fn get_input(file_path: &String) -> Vec<Component> {
std::fs::read_to_string(file_path)
.expect("Error reading input file!")
.lines()
.map(|line| {
let splits: Vec<&str> = line.split("/").collect();
(splits[0].parse().unwrap(), splits[1].parse().unwrap())
})
.collect()
}
fn main() {
let args: Vec<String> = std::env::args().collect();
if args.len() != 2 {
panic!("Please, add input file path as parameter");
}
let now = std::time::Instant::now();
let (part1_result, part2_result) = solve(&get_input(&args[1]));
let end = now.elapsed().as_secs_f32();
println!("P1: {}", part1_result);
println!("P2: {}", part2_result);
println!();
println!("Time: {:.7}", end);
}
| 37.670103 | 128 | 0.556103 |
226800a8278f5a9b6f7ec6829646a0819e0889dc
| 46,954 |
#[doc = "Reader of register PADREGR"]
pub type R = crate::R<u32, super::PADREGR>;
#[doc = "Writer for register PADREGR"]
pub type W = crate::W<u32, super::PADREGR>;
#[doc = "Register PADREGR `reset()`'s with value 0x1818_1818"]
impl crate::ResetValue for super::PADREGR {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x1818_1818
}
}
#[doc = "Pad 71 function select\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PAD71FNCSEL_A {
#[doc = "0: Configure as the SWO output"]
SWO = 0,
#[doc = "1: IOM/MSPI nCE group 71"]
NCE71 = 1,
#[doc = "2: CTIMER connection 21"]
CT21 = 2,
#[doc = "3: Configure as GPIO71"]
GPIO71 = 3,
#[doc = "4: Configure as the UART0 TX output"]
UART0TX = 4,
#[doc = "5: Configure as the UART0 RX input"]
UART0RX = 5,
#[doc = "6: Configure as the UART1 TX output"]
UART1TX = 6,
#[doc = "7: Configure as the UART1 RX input"]
UART1RX = 7,
}
impl From<PAD71FNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: PAD71FNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `PAD71FNCSEL`"]
pub type PAD71FNCSEL_R = crate::R<u8, PAD71FNCSEL_A>;
impl PAD71FNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD71FNCSEL_A {
match self.bits {
0 => PAD71FNCSEL_A::SWO,
1 => PAD71FNCSEL_A::NCE71,
2 => PAD71FNCSEL_A::CT21,
3 => PAD71FNCSEL_A::GPIO71,
4 => PAD71FNCSEL_A::UART0TX,
5 => PAD71FNCSEL_A::UART0RX,
6 => PAD71FNCSEL_A::UART1TX,
7 => PAD71FNCSEL_A::UART1RX,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `SWO`"]
#[inline(always)]
pub fn is_swo(&self) -> bool {
*self == PAD71FNCSEL_A::SWO
}
#[doc = "Checks if the value of the field is `NCE71`"]
#[inline(always)]
pub fn is_nce71(&self) -> bool {
*self == PAD71FNCSEL_A::NCE71
}
#[doc = "Checks if the value of the field is `CT21`"]
#[inline(always)]
pub fn is_ct21(&self) -> bool {
*self == PAD71FNCSEL_A::CT21
}
#[doc = "Checks if the value of the field is `GPIO71`"]
#[inline(always)]
pub fn is_gpio71(&self) -> bool {
*self == PAD71FNCSEL_A::GPIO71
}
#[doc = "Checks if the value of the field is `UART0TX`"]
#[inline(always)]
pub fn is_uart0tx(&self) -> bool {
*self == PAD71FNCSEL_A::UART0TX
}
#[doc = "Checks if the value of the field is `UART0RX`"]
#[inline(always)]
pub fn is_uart0rx(&self) -> bool {
*self == PAD71FNCSEL_A::UART0RX
}
#[doc = "Checks if the value of the field is `UART1TX`"]
#[inline(always)]
pub fn is_uart1tx(&self) -> bool {
*self == PAD71FNCSEL_A::UART1TX
}
#[doc = "Checks if the value of the field is `UART1RX`"]
#[inline(always)]
pub fn is_uart1rx(&self) -> bool {
*self == PAD71FNCSEL_A::UART1RX
}
}
#[doc = "Write proxy for field `PAD71FNCSEL`"]
pub struct PAD71FNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD71FNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD71FNCSEL_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Configure as the SWO output"]
#[inline(always)]
pub fn swo(self) -> &'a mut W {
self.variant(PAD71FNCSEL_A::SWO)
}
#[doc = "IOM/MSPI nCE group 71"]
#[inline(always)]
pub fn nce71(self) -> &'a mut W {
self.variant(PAD71FNCSEL_A::NCE71)
}
#[doc = "CTIMER connection 21"]
#[inline(always)]
pub fn ct21(self) -> &'a mut W {
self.variant(PAD71FNCSEL_A::CT21)
}
#[doc = "Configure as GPIO71"]
#[inline(always)]
pub fn gpio71(self) -> &'a mut W {
self.variant(PAD71FNCSEL_A::GPIO71)
}
#[doc = "Configure as the UART0 TX output"]
#[inline(always)]
pub fn uart0tx(self) -> &'a mut W {
self.variant(PAD71FNCSEL_A::UART0TX)
}
#[doc = "Configure as the UART0 RX input"]
#[inline(always)]
pub fn uart0rx(self) -> &'a mut W {
self.variant(PAD71FNCSEL_A::UART0RX)
}
#[doc = "Configure as the UART1 TX output"]
#[inline(always)]
pub fn uart1tx(self) -> &'a mut W {
self.variant(PAD71FNCSEL_A::UART1TX)
}
#[doc = "Configure as the UART1 RX input"]
#[inline(always)]
pub fn uart1rx(self) -> &'a mut W {
self.variant(PAD71FNCSEL_A::UART1RX)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 27)) | (((value as u32) & 0x07) << 27);
self.w
}
}
#[doc = "Pad 71 drive strength\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD71STRNG_A {
#[doc = "0: Low drive strength"]
LOW = 0,
#[doc = "1: High drive strength"]
HIGH = 1,
}
impl From<PAD71STRNG_A> for bool {
#[inline(always)]
fn from(variant: PAD71STRNG_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD71STRNG`"]
pub type PAD71STRNG_R = crate::R<bool, PAD71STRNG_A>;
impl PAD71STRNG_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD71STRNG_A {
match self.bits {
false => PAD71STRNG_A::LOW,
true => PAD71STRNG_A::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == PAD71STRNG_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == PAD71STRNG_A::HIGH
}
}
#[doc = "Write proxy for field `PAD71STRNG`"]
pub struct PAD71STRNG_W<'a> {
w: &'a mut W,
}
impl<'a> PAD71STRNG_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD71STRNG_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Low drive strength"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(PAD71STRNG_A::LOW)
}
#[doc = "High drive strength"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(PAD71STRNG_A::HIGH)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26);
self.w
}
}
#[doc = "Pad 71 input enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD71INPEN_A {
#[doc = "0: Pad input disabled"]
DIS = 0,
#[doc = "1: Pad input enabled"]
EN = 1,
}
impl From<PAD71INPEN_A> for bool {
#[inline(always)]
fn from(variant: PAD71INPEN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD71INPEN`"]
pub type PAD71INPEN_R = crate::R<bool, PAD71INPEN_A>;
impl PAD71INPEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD71INPEN_A {
match self.bits {
false => PAD71INPEN_A::DIS,
true => PAD71INPEN_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD71INPEN_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD71INPEN_A::EN
}
}
#[doc = "Write proxy for field `PAD71INPEN`"]
pub struct PAD71INPEN_W<'a> {
w: &'a mut W,
}
impl<'a> PAD71INPEN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD71INPEN_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pad input disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD71INPEN_A::DIS)
}
#[doc = "Pad input enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD71INPEN_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25);
self.w
}
}
#[doc = "Pad 71 pullup enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD71PULL_A {
#[doc = "0: Pullup disabled"]
DIS = 0,
#[doc = "1: Pullup enabled"]
EN = 1,
}
impl From<PAD71PULL_A> for bool {
#[inline(always)]
fn from(variant: PAD71PULL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD71PULL`"]
pub type PAD71PULL_R = crate::R<bool, PAD71PULL_A>;
impl PAD71PULL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD71PULL_A {
match self.bits {
false => PAD71PULL_A::DIS,
true => PAD71PULL_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD71PULL_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD71PULL_A::EN
}
}
#[doc = "Write proxy for field `PAD71PULL`"]
pub struct PAD71PULL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD71PULL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD71PULL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pullup disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD71PULL_A::DIS)
}
#[doc = "Pullup enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD71PULL_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
self.w
}
}
#[doc = "Pad 70 function select\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PAD70FNCSEL_A {
#[doc = "0: Configure as the SWO output"]
SWO = 0,
#[doc = "1: IOM/MSPI nCE group 70"]
NCE70 = 1,
#[doc = "2: CTIMER connection 20"]
CT20 = 2,
#[doc = "3: Configure as GPIO70"]
GPIO70 = 3,
#[doc = "4: Configure as the UART0 TX output"]
UART0TX = 4,
#[doc = "5: Configure as the UART0 RX input"]
UART0RX = 5,
#[doc = "6: Configure as the UART1 TX output"]
UART1TX = 6,
#[doc = "7: Configure as the UART1 RX input"]
UART1RX = 7,
}
impl From<PAD70FNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: PAD70FNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `PAD70FNCSEL`"]
pub type PAD70FNCSEL_R = crate::R<u8, PAD70FNCSEL_A>;
impl PAD70FNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD70FNCSEL_A {
match self.bits {
0 => PAD70FNCSEL_A::SWO,
1 => PAD70FNCSEL_A::NCE70,
2 => PAD70FNCSEL_A::CT20,
3 => PAD70FNCSEL_A::GPIO70,
4 => PAD70FNCSEL_A::UART0TX,
5 => PAD70FNCSEL_A::UART0RX,
6 => PAD70FNCSEL_A::UART1TX,
7 => PAD70FNCSEL_A::UART1RX,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `SWO`"]
#[inline(always)]
pub fn is_swo(&self) -> bool {
*self == PAD70FNCSEL_A::SWO
}
#[doc = "Checks if the value of the field is `NCE70`"]
#[inline(always)]
pub fn is_nce70(&self) -> bool {
*self == PAD70FNCSEL_A::NCE70
}
#[doc = "Checks if the value of the field is `CT20`"]
#[inline(always)]
pub fn is_ct20(&self) -> bool {
*self == PAD70FNCSEL_A::CT20
}
#[doc = "Checks if the value of the field is `GPIO70`"]
#[inline(always)]
pub fn is_gpio70(&self) -> bool {
*self == PAD70FNCSEL_A::GPIO70
}
#[doc = "Checks if the value of the field is `UART0TX`"]
#[inline(always)]
pub fn is_uart0tx(&self) -> bool {
*self == PAD70FNCSEL_A::UART0TX
}
#[doc = "Checks if the value of the field is `UART0RX`"]
#[inline(always)]
pub fn is_uart0rx(&self) -> bool {
*self == PAD70FNCSEL_A::UART0RX
}
#[doc = "Checks if the value of the field is `UART1TX`"]
#[inline(always)]
pub fn is_uart1tx(&self) -> bool {
*self == PAD70FNCSEL_A::UART1TX
}
#[doc = "Checks if the value of the field is `UART1RX`"]
#[inline(always)]
pub fn is_uart1rx(&self) -> bool {
*self == PAD70FNCSEL_A::UART1RX
}
}
#[doc = "Write proxy for field `PAD70FNCSEL`"]
pub struct PAD70FNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD70FNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD70FNCSEL_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Configure as the SWO output"]
#[inline(always)]
pub fn swo(self) -> &'a mut W {
self.variant(PAD70FNCSEL_A::SWO)
}
#[doc = "IOM/MSPI nCE group 70"]
#[inline(always)]
pub fn nce70(self) -> &'a mut W {
self.variant(PAD70FNCSEL_A::NCE70)
}
#[doc = "CTIMER connection 20"]
#[inline(always)]
pub fn ct20(self) -> &'a mut W {
self.variant(PAD70FNCSEL_A::CT20)
}
#[doc = "Configure as GPIO70"]
#[inline(always)]
pub fn gpio70(self) -> &'a mut W {
self.variant(PAD70FNCSEL_A::GPIO70)
}
#[doc = "Configure as the UART0 TX output"]
#[inline(always)]
pub fn uart0tx(self) -> &'a mut W {
self.variant(PAD70FNCSEL_A::UART0TX)
}
#[doc = "Configure as the UART0 RX input"]
#[inline(always)]
pub fn uart0rx(self) -> &'a mut W {
self.variant(PAD70FNCSEL_A::UART0RX)
}
#[doc = "Configure as the UART1 TX output"]
#[inline(always)]
pub fn uart1tx(self) -> &'a mut W {
self.variant(PAD70FNCSEL_A::UART1TX)
}
#[doc = "Configure as the UART1 RX input"]
#[inline(always)]
pub fn uart1rx(self) -> &'a mut W {
self.variant(PAD70FNCSEL_A::UART1RX)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 19)) | (((value as u32) & 0x07) << 19);
self.w
}
}
#[doc = "Pad 70 drive strength\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD70STRNG_A {
#[doc = "0: Low drive strength"]
LOW = 0,
#[doc = "1: High drive strength"]
HIGH = 1,
}
impl From<PAD70STRNG_A> for bool {
#[inline(always)]
fn from(variant: PAD70STRNG_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD70STRNG`"]
pub type PAD70STRNG_R = crate::R<bool, PAD70STRNG_A>;
impl PAD70STRNG_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD70STRNG_A {
match self.bits {
false => PAD70STRNG_A::LOW,
true => PAD70STRNG_A::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == PAD70STRNG_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == PAD70STRNG_A::HIGH
}
}
#[doc = "Write proxy for field `PAD70STRNG`"]
pub struct PAD70STRNG_W<'a> {
w: &'a mut W,
}
impl<'a> PAD70STRNG_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD70STRNG_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Low drive strength"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(PAD70STRNG_A::LOW)
}
#[doc = "High drive strength"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(PAD70STRNG_A::HIGH)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
#[doc = "Pad 70 input enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD70INPEN_A {
#[doc = "0: Pad input disabled"]
DIS = 0,
#[doc = "1: Pad input enabled"]
EN = 1,
}
impl From<PAD70INPEN_A> for bool {
#[inline(always)]
fn from(variant: PAD70INPEN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD70INPEN`"]
pub type PAD70INPEN_R = crate::R<bool, PAD70INPEN_A>;
impl PAD70INPEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD70INPEN_A {
match self.bits {
false => PAD70INPEN_A::DIS,
true => PAD70INPEN_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD70INPEN_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD70INPEN_A::EN
}
}
#[doc = "Write proxy for field `PAD70INPEN`"]
pub struct PAD70INPEN_W<'a> {
w: &'a mut W,
}
impl<'a> PAD70INPEN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD70INPEN_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pad input disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD70INPEN_A::DIS)
}
#[doc = "Pad input enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD70INPEN_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
self.w
}
}
#[doc = "Pad 70 pullup enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD70PULL_A {
#[doc = "0: Pullup disabled"]
DIS = 0,
#[doc = "1: Pullup enabled"]
EN = 1,
}
impl From<PAD70PULL_A> for bool {
#[inline(always)]
fn from(variant: PAD70PULL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD70PULL`"]
pub type PAD70PULL_R = crate::R<bool, PAD70PULL_A>;
impl PAD70PULL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD70PULL_A {
match self.bits {
false => PAD70PULL_A::DIS,
true => PAD70PULL_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD70PULL_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD70PULL_A::EN
}
}
#[doc = "Write proxy for field `PAD70PULL`"]
pub struct PAD70PULL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD70PULL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD70PULL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pullup disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD70PULL_A::DIS)
}
#[doc = "Pullup enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD70PULL_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Pad 69 function select\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PAD69FNCSEL_A {
#[doc = "0: Configure as the SWO output"]
SWO = 0,
#[doc = "1: IOM/MSPI nCE group 69"]
NCE69 = 1,
#[doc = "2: CTIMER connection 19"]
CT19 = 2,
#[doc = "3: Configure as GPIO69"]
GPIO69 = 3,
#[doc = "4: Configure as the UART0 TX output"]
UART0TX = 4,
#[doc = "5: Configure as the UART0 RX input"]
UART0RX = 5,
#[doc = "6: Configure as the UART1 TX output"]
UART1TX = 6,
#[doc = "7: Configure as the UART1 RX input"]
UART1RX = 7,
}
impl From<PAD69FNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: PAD69FNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `PAD69FNCSEL`"]
pub type PAD69FNCSEL_R = crate::R<u8, PAD69FNCSEL_A>;
impl PAD69FNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD69FNCSEL_A {
match self.bits {
0 => PAD69FNCSEL_A::SWO,
1 => PAD69FNCSEL_A::NCE69,
2 => PAD69FNCSEL_A::CT19,
3 => PAD69FNCSEL_A::GPIO69,
4 => PAD69FNCSEL_A::UART0TX,
5 => PAD69FNCSEL_A::UART0RX,
6 => PAD69FNCSEL_A::UART1TX,
7 => PAD69FNCSEL_A::UART1RX,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `SWO`"]
#[inline(always)]
pub fn is_swo(&self) -> bool {
*self == PAD69FNCSEL_A::SWO
}
#[doc = "Checks if the value of the field is `NCE69`"]
#[inline(always)]
pub fn is_nce69(&self) -> bool {
*self == PAD69FNCSEL_A::NCE69
}
#[doc = "Checks if the value of the field is `CT19`"]
#[inline(always)]
pub fn is_ct19(&self) -> bool {
*self == PAD69FNCSEL_A::CT19
}
#[doc = "Checks if the value of the field is `GPIO69`"]
#[inline(always)]
pub fn is_gpio69(&self) -> bool {
*self == PAD69FNCSEL_A::GPIO69
}
#[doc = "Checks if the value of the field is `UART0TX`"]
#[inline(always)]
pub fn is_uart0tx(&self) -> bool {
*self == PAD69FNCSEL_A::UART0TX
}
#[doc = "Checks if the value of the field is `UART0RX`"]
#[inline(always)]
pub fn is_uart0rx(&self) -> bool {
*self == PAD69FNCSEL_A::UART0RX
}
#[doc = "Checks if the value of the field is `UART1TX`"]
#[inline(always)]
pub fn is_uart1tx(&self) -> bool {
*self == PAD69FNCSEL_A::UART1TX
}
#[doc = "Checks if the value of the field is `UART1RX`"]
#[inline(always)]
pub fn is_uart1rx(&self) -> bool {
*self == PAD69FNCSEL_A::UART1RX
}
}
#[doc = "Write proxy for field `PAD69FNCSEL`"]
pub struct PAD69FNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD69FNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD69FNCSEL_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "Configure as the SWO output"]
#[inline(always)]
pub fn swo(self) -> &'a mut W {
self.variant(PAD69FNCSEL_A::SWO)
}
#[doc = "IOM/MSPI nCE group 69"]
#[inline(always)]
pub fn nce69(self) -> &'a mut W {
self.variant(PAD69FNCSEL_A::NCE69)
}
#[doc = "CTIMER connection 19"]
#[inline(always)]
pub fn ct19(self) -> &'a mut W {
self.variant(PAD69FNCSEL_A::CT19)
}
#[doc = "Configure as GPIO69"]
#[inline(always)]
pub fn gpio69(self) -> &'a mut W {
self.variant(PAD69FNCSEL_A::GPIO69)
}
#[doc = "Configure as the UART0 TX output"]
#[inline(always)]
pub fn uart0tx(self) -> &'a mut W {
self.variant(PAD69FNCSEL_A::UART0TX)
}
#[doc = "Configure as the UART0 RX input"]
#[inline(always)]
pub fn uart0rx(self) -> &'a mut W {
self.variant(PAD69FNCSEL_A::UART0RX)
}
#[doc = "Configure as the UART1 TX output"]
#[inline(always)]
pub fn uart1tx(self) -> &'a mut W {
self.variant(PAD69FNCSEL_A::UART1TX)
}
#[doc = "Configure as the UART1 RX input"]
#[inline(always)]
pub fn uart1rx(self) -> &'a mut W {
self.variant(PAD69FNCSEL_A::UART1RX)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 11)) | (((value as u32) & 0x07) << 11);
self.w
}
}
#[doc = "Pad 69 drive strength\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD69STRNG_A {
#[doc = "0: Low drive strength"]
LOW = 0,
#[doc = "1: High drive strength"]
HIGH = 1,
}
impl From<PAD69STRNG_A> for bool {
#[inline(always)]
fn from(variant: PAD69STRNG_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD69STRNG`"]
pub type PAD69STRNG_R = crate::R<bool, PAD69STRNG_A>;
impl PAD69STRNG_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD69STRNG_A {
match self.bits {
false => PAD69STRNG_A::LOW,
true => PAD69STRNG_A::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == PAD69STRNG_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == PAD69STRNG_A::HIGH
}
}
#[doc = "Write proxy for field `PAD69STRNG`"]
pub struct PAD69STRNG_W<'a> {
w: &'a mut W,
}
impl<'a> PAD69STRNG_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD69STRNG_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Low drive strength"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(PAD69STRNG_A::LOW)
}
#[doc = "High drive strength"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(PAD69STRNG_A::HIGH)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Pad 69 input enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD69INPEN_A {
#[doc = "0: Pad input disabled"]
DIS = 0,
#[doc = "1: Pad input enabled"]
EN = 1,
}
impl From<PAD69INPEN_A> for bool {
#[inline(always)]
fn from(variant: PAD69INPEN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD69INPEN`"]
pub type PAD69INPEN_R = crate::R<bool, PAD69INPEN_A>;
impl PAD69INPEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD69INPEN_A {
match self.bits {
false => PAD69INPEN_A::DIS,
true => PAD69INPEN_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD69INPEN_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD69INPEN_A::EN
}
}
#[doc = "Write proxy for field `PAD69INPEN`"]
pub struct PAD69INPEN_W<'a> {
w: &'a mut W,
}
impl<'a> PAD69INPEN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD69INPEN_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pad input disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD69INPEN_A::DIS)
}
#[doc = "Pad input enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD69INPEN_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Pad 69 pullup enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD69PULL_A {
#[doc = "0: Pullup disabled"]
DIS = 0,
#[doc = "1: Pullup enabled"]
EN = 1,
}
impl From<PAD69PULL_A> for bool {
#[inline(always)]
fn from(variant: PAD69PULL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD69PULL`"]
pub type PAD69PULL_R = crate::R<bool, PAD69PULL_A>;
impl PAD69PULL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD69PULL_A {
match self.bits {
false => PAD69PULL_A::DIS,
true => PAD69PULL_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD69PULL_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD69PULL_A::EN
}
}
#[doc = "Write proxy for field `PAD69PULL`"]
pub struct PAD69PULL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD69PULL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD69PULL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pullup disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD69PULL_A::DIS)
}
#[doc = "Pullup enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD69PULL_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Pad 68 function select\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PAD68FNCSEL_A {
#[doc = "0: Configure as the MSPI2 4 signal"]
MSPI2_4 = 0,
#[doc = "1: IOM/MSPI nCE group 68"]
NCE68 = 1,
#[doc = "2: CTIMER connection 18"]
CT18 = 2,
#[doc = "3: Configure as GPIO68"]
GPIO68 = 3,
}
impl From<PAD68FNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: PAD68FNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `PAD68FNCSEL`"]
pub type PAD68FNCSEL_R = crate::R<u8, PAD68FNCSEL_A>;
impl PAD68FNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, PAD68FNCSEL_A> {
use crate::Variant::*;
match self.bits {
0 => Val(PAD68FNCSEL_A::MSPI2_4),
1 => Val(PAD68FNCSEL_A::NCE68),
2 => Val(PAD68FNCSEL_A::CT18),
3 => Val(PAD68FNCSEL_A::GPIO68),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `MSPI2_4`"]
#[inline(always)]
pub fn is_mspi2_4(&self) -> bool {
*self == PAD68FNCSEL_A::MSPI2_4
}
#[doc = "Checks if the value of the field is `NCE68`"]
#[inline(always)]
pub fn is_nce68(&self) -> bool {
*self == PAD68FNCSEL_A::NCE68
}
#[doc = "Checks if the value of the field is `CT18`"]
#[inline(always)]
pub fn is_ct18(&self) -> bool {
*self == PAD68FNCSEL_A::CT18
}
#[doc = "Checks if the value of the field is `GPIO68`"]
#[inline(always)]
pub fn is_gpio68(&self) -> bool {
*self == PAD68FNCSEL_A::GPIO68
}
}
#[doc = "Write proxy for field `PAD68FNCSEL`"]
pub struct PAD68FNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD68FNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD68FNCSEL_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "Configure as the MSPI2 4 signal"]
#[inline(always)]
pub fn mspi2_4(self) -> &'a mut W {
self.variant(PAD68FNCSEL_A::MSPI2_4)
}
#[doc = "IOM/MSPI nCE group 68"]
#[inline(always)]
pub fn nce68(self) -> &'a mut W {
self.variant(PAD68FNCSEL_A::NCE68)
}
#[doc = "CTIMER connection 18"]
#[inline(always)]
pub fn ct18(self) -> &'a mut W {
self.variant(PAD68FNCSEL_A::CT18)
}
#[doc = "Configure as GPIO68"]
#[inline(always)]
pub fn gpio68(self) -> &'a mut W {
self.variant(PAD68FNCSEL_A::GPIO68)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 3)) | (((value as u32) & 0x07) << 3);
self.w
}
}
#[doc = "Pad 68 drive strength\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD68STRNG_A {
#[doc = "0: Low drive strength"]
LOW = 0,
#[doc = "1: High drive strength"]
HIGH = 1,
}
impl From<PAD68STRNG_A> for bool {
#[inline(always)]
fn from(variant: PAD68STRNG_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD68STRNG`"]
pub type PAD68STRNG_R = crate::R<bool, PAD68STRNG_A>;
impl PAD68STRNG_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD68STRNG_A {
match self.bits {
false => PAD68STRNG_A::LOW,
true => PAD68STRNG_A::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == PAD68STRNG_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == PAD68STRNG_A::HIGH
}
}
#[doc = "Write proxy for field `PAD68STRNG`"]
pub struct PAD68STRNG_W<'a> {
w: &'a mut W,
}
impl<'a> PAD68STRNG_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD68STRNG_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Low drive strength"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(PAD68STRNG_A::LOW)
}
#[doc = "High drive strength"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(PAD68STRNG_A::HIGH)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Pad 68 input enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD68INPEN_A {
#[doc = "0: Pad input disabled"]
DIS = 0,
#[doc = "1: Pad input enabled"]
EN = 1,
}
impl From<PAD68INPEN_A> for bool {
#[inline(always)]
fn from(variant: PAD68INPEN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD68INPEN`"]
pub type PAD68INPEN_R = crate::R<bool, PAD68INPEN_A>;
impl PAD68INPEN_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD68INPEN_A {
match self.bits {
false => PAD68INPEN_A::DIS,
true => PAD68INPEN_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD68INPEN_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD68INPEN_A::EN
}
}
#[doc = "Write proxy for field `PAD68INPEN`"]
pub struct PAD68INPEN_W<'a> {
w: &'a mut W,
}
impl<'a> PAD68INPEN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD68INPEN_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pad input disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD68INPEN_A::DIS)
}
#[doc = "Pad input enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD68INPEN_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Pad 68 pullup enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD68PULL_A {
#[doc = "0: Pullup disabled"]
DIS = 0,
#[doc = "1: Pullup enabled"]
EN = 1,
}
impl From<PAD68PULL_A> for bool {
#[inline(always)]
fn from(variant: PAD68PULL_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `PAD68PULL`"]
pub type PAD68PULL_R = crate::R<bool, PAD68PULL_A>;
impl PAD68PULL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> PAD68PULL_A {
match self.bits {
false => PAD68PULL_A::DIS,
true => PAD68PULL_A::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline(always)]
pub fn is_dis(&self) -> bool {
*self == PAD68PULL_A::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline(always)]
pub fn is_en(&self) -> bool {
*self == PAD68PULL_A::EN
}
}
#[doc = "Write proxy for field `PAD68PULL`"]
pub struct PAD68PULL_W<'a> {
w: &'a mut W,
}
impl<'a> PAD68PULL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: PAD68PULL_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Pullup disabled"]
#[inline(always)]
pub fn dis(self) -> &'a mut W {
self.variant(PAD68PULL_A::DIS)
}
#[doc = "Pullup enabled"]
#[inline(always)]
pub fn en(self) -> &'a mut W {
self.variant(PAD68PULL_A::EN)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bits 27:29 - Pad 71 function select"]
#[inline(always)]
pub fn pad71fncsel(&self) -> PAD71FNCSEL_R {
PAD71FNCSEL_R::new(((self.bits >> 27) & 0x07) as u8)
}
#[doc = "Bit 26 - Pad 71 drive strength"]
#[inline(always)]
pub fn pad71strng(&self) -> PAD71STRNG_R {
PAD71STRNG_R::new(((self.bits >> 26) & 0x01) != 0)
}
#[doc = "Bit 25 - Pad 71 input enable"]
#[inline(always)]
pub fn pad71inpen(&self) -> PAD71INPEN_R {
PAD71INPEN_R::new(((self.bits >> 25) & 0x01) != 0)
}
#[doc = "Bit 24 - Pad 71 pullup enable"]
#[inline(always)]
pub fn pad71pull(&self) -> PAD71PULL_R {
PAD71PULL_R::new(((self.bits >> 24) & 0x01) != 0)
}
#[doc = "Bits 19:21 - Pad 70 function select"]
#[inline(always)]
pub fn pad70fncsel(&self) -> PAD70FNCSEL_R {
PAD70FNCSEL_R::new(((self.bits >> 19) & 0x07) as u8)
}
#[doc = "Bit 18 - Pad 70 drive strength"]
#[inline(always)]
pub fn pad70strng(&self) -> PAD70STRNG_R {
PAD70STRNG_R::new(((self.bits >> 18) & 0x01) != 0)
}
#[doc = "Bit 17 - Pad 70 input enable"]
#[inline(always)]
pub fn pad70inpen(&self) -> PAD70INPEN_R {
PAD70INPEN_R::new(((self.bits >> 17) & 0x01) != 0)
}
#[doc = "Bit 16 - Pad 70 pullup enable"]
#[inline(always)]
pub fn pad70pull(&self) -> PAD70PULL_R {
PAD70PULL_R::new(((self.bits >> 16) & 0x01) != 0)
}
#[doc = "Bits 11:13 - Pad 69 function select"]
#[inline(always)]
pub fn pad69fncsel(&self) -> PAD69FNCSEL_R {
PAD69FNCSEL_R::new(((self.bits >> 11) & 0x07) as u8)
}
#[doc = "Bit 10 - Pad 69 drive strength"]
#[inline(always)]
pub fn pad69strng(&self) -> PAD69STRNG_R {
PAD69STRNG_R::new(((self.bits >> 10) & 0x01) != 0)
}
#[doc = "Bit 9 - Pad 69 input enable"]
#[inline(always)]
pub fn pad69inpen(&self) -> PAD69INPEN_R {
PAD69INPEN_R::new(((self.bits >> 9) & 0x01) != 0)
}
#[doc = "Bit 8 - Pad 69 pullup enable"]
#[inline(always)]
pub fn pad69pull(&self) -> PAD69PULL_R {
PAD69PULL_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bits 3:5 - Pad 68 function select"]
#[inline(always)]
pub fn pad68fncsel(&self) -> PAD68FNCSEL_R {
PAD68FNCSEL_R::new(((self.bits >> 3) & 0x07) as u8)
}
#[doc = "Bit 2 - Pad 68 drive strength"]
#[inline(always)]
pub fn pad68strng(&self) -> PAD68STRNG_R {
PAD68STRNG_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 1 - Pad 68 input enable"]
#[inline(always)]
pub fn pad68inpen(&self) -> PAD68INPEN_R {
PAD68INPEN_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 0 - Pad 68 pullup enable"]
#[inline(always)]
pub fn pad68pull(&self) -> PAD68PULL_R {
PAD68PULL_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 27:29 - Pad 71 function select"]
#[inline(always)]
pub fn pad71fncsel(&mut self) -> PAD71FNCSEL_W {
PAD71FNCSEL_W { w: self }
}
#[doc = "Bit 26 - Pad 71 drive strength"]
#[inline(always)]
pub fn pad71strng(&mut self) -> PAD71STRNG_W {
PAD71STRNG_W { w: self }
}
#[doc = "Bit 25 - Pad 71 input enable"]
#[inline(always)]
pub fn pad71inpen(&mut self) -> PAD71INPEN_W {
PAD71INPEN_W { w: self }
}
#[doc = "Bit 24 - Pad 71 pullup enable"]
#[inline(always)]
pub fn pad71pull(&mut self) -> PAD71PULL_W {
PAD71PULL_W { w: self }
}
#[doc = "Bits 19:21 - Pad 70 function select"]
#[inline(always)]
pub fn pad70fncsel(&mut self) -> PAD70FNCSEL_W {
PAD70FNCSEL_W { w: self }
}
#[doc = "Bit 18 - Pad 70 drive strength"]
#[inline(always)]
pub fn pad70strng(&mut self) -> PAD70STRNG_W {
PAD70STRNG_W { w: self }
}
#[doc = "Bit 17 - Pad 70 input enable"]
#[inline(always)]
pub fn pad70inpen(&mut self) -> PAD70INPEN_W {
PAD70INPEN_W { w: self }
}
#[doc = "Bit 16 - Pad 70 pullup enable"]
#[inline(always)]
pub fn pad70pull(&mut self) -> PAD70PULL_W {
PAD70PULL_W { w: self }
}
#[doc = "Bits 11:13 - Pad 69 function select"]
#[inline(always)]
pub fn pad69fncsel(&mut self) -> PAD69FNCSEL_W {
PAD69FNCSEL_W { w: self }
}
#[doc = "Bit 10 - Pad 69 drive strength"]
#[inline(always)]
pub fn pad69strng(&mut self) -> PAD69STRNG_W {
PAD69STRNG_W { w: self }
}
#[doc = "Bit 9 - Pad 69 input enable"]
#[inline(always)]
pub fn pad69inpen(&mut self) -> PAD69INPEN_W {
PAD69INPEN_W { w: self }
}
#[doc = "Bit 8 - Pad 69 pullup enable"]
#[inline(always)]
pub fn pad69pull(&mut self) -> PAD69PULL_W {
PAD69PULL_W { w: self }
}
#[doc = "Bits 3:5 - Pad 68 function select"]
#[inline(always)]
pub fn pad68fncsel(&mut self) -> PAD68FNCSEL_W {
PAD68FNCSEL_W { w: self }
}
#[doc = "Bit 2 - Pad 68 drive strength"]
#[inline(always)]
pub fn pad68strng(&mut self) -> PAD68STRNG_W {
PAD68STRNG_W { w: self }
}
#[doc = "Bit 1 - Pad 68 input enable"]
#[inline(always)]
pub fn pad68inpen(&mut self) -> PAD68INPEN_W {
PAD68INPEN_W { w: self }
}
#[doc = "Bit 0 - Pad 68 pullup enable"]
#[inline(always)]
pub fn pad68pull(&mut self) -> PAD68PULL_W {
PAD68PULL_W { w: self }
}
}
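// Illustrative usage sketch (not part of the generated code): with an
// svd2rust-style PAC, these field write proxies are normally reached through
// the owning register's `modify`/`write` closure. The peripheral and register
// names below are assumptions; only the field methods come from this file.
//
//     gpio.padregr.modify(|_, w| {
//         w.pad69fncsel().uart0tx().pad69strng().high().pad69inpen().en()
//     });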
| 29.273067 | 86 | 0.553286 |
039f2e86cc313f3b3cf400d4291f2a1475653c59
| 1,533 |
use common::util::*;
#[cfg(target_os = "linux")]
#[test]
fn test_count() {
for opt in vec!["-q", "--count"] {
new_ucmd!().arg(opt).run().stdout_is(expected_result(opt));
}
}
#[cfg(target_os = "linux")]
#[test]
fn test_boot() {
for opt in vec!["-b", "--boot"] {
new_ucmd!().arg(opt).run().stdout_is(expected_result(opt));
}
}
#[cfg(target_os = "linux")]
#[test]
fn test_heading() {
for opt in vec!["-H"] {
new_ucmd!().arg(opt).run().stdout_is(expected_result(opt));
}
}
#[cfg(target_os = "linux")]
#[test]
fn test_short() {
for opt in vec!["-s", "--short"] {
new_ucmd!().arg(opt).run().stdout_is(expected_result(opt));
}
}
#[cfg(target_os = "linux")]
#[test]
fn test_login() {
for opt in vec!["-l", "--login"] {
new_ucmd!().arg(opt).run().stdout_is(expected_result(opt));
}
}
#[cfg(target_os = "linux")]
#[test]
fn test_m() {
for opt in vec!["-m"] {
new_ucmd!().arg(opt).run().stdout_is(expected_result(opt));
}
}
#[cfg(target_os = "linux")]
#[test]
fn test_dead() {
for opt in vec!["-d", "--dead"] {
new_ucmd!().arg(opt).run().stdout_is(expected_result(opt));
}
}
#[cfg(target_os = "linux")]
#[test]
fn test_all() {
for opt in vec!["-a", "--all"] {
new_ucmd!().arg(opt).run().stdout_is(expected_result(opt));
}
}
#[cfg(target_os = "linux")]
fn expected_result(arg: &str) -> String {
TestScenario::new(util_name!()).cmd_keepenv(util_name!()).env("LANGUAGE", "C").args(&[arg]).run().stdout
}
| 21 | 108 | 0.562948 |
7668b8bf20282dc8422fdb0db9e1645b9765b23f
| 18,081 |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::def_id::DefId;
use infer::InferCtxt;
use ty::subst::{Subst, Substs};
use ty::{self, Ty, TyCtxt, ToPredicate, ToPolyTraitRef};
use syntax::codemap::Span;
use util::common::ErrorReported;
use util::nodemap::FnvHashSet;
use super::{Obligation, ObligationCause, PredicateObligation, SelectionContext, Normalized};
struct PredicateSet<'a,'tcx:'a> {
tcx: &'a TyCtxt<'tcx>,
set: FnvHashSet<ty::Predicate<'tcx>>,
}
impl<'a,'tcx> PredicateSet<'a,'tcx> {
fn new(tcx: &'a TyCtxt<'tcx>) -> PredicateSet<'a,'tcx> {
PredicateSet { tcx: tcx, set: FnvHashSet() }
}
fn insert(&mut self, pred: &ty::Predicate<'tcx>) -> bool {
// We have to be careful here because we want
//
// for<'a> Foo<&'a int>
//
// and
//
// for<'b> Foo<&'b int>
//
// to be considered equivalent. So normalize all late-bound
// regions before we throw things into the underlying set.
let normalized_pred = match *pred {
ty::Predicate::Trait(ref data) =>
ty::Predicate::Trait(self.tcx.anonymize_late_bound_regions(data)),
ty::Predicate::Equate(ref data) =>
ty::Predicate::Equate(self.tcx.anonymize_late_bound_regions(data)),
ty::Predicate::RegionOutlives(ref data) =>
ty::Predicate::RegionOutlives(self.tcx.anonymize_late_bound_regions(data)),
ty::Predicate::TypeOutlives(ref data) =>
ty::Predicate::TypeOutlives(self.tcx.anonymize_late_bound_regions(data)),
ty::Predicate::Projection(ref data) =>
ty::Predicate::Projection(self.tcx.anonymize_late_bound_regions(data)),
ty::Predicate::WellFormed(data) =>
ty::Predicate::WellFormed(data),
ty::Predicate::ObjectSafe(data) =>
ty::Predicate::ObjectSafe(data),
};
self.set.insert(normalized_pred)
}
}
///////////////////////////////////////////////////////////////////////////
// `Elaboration` iterator
///////////////////////////////////////////////////////////////////////////
/// "Elaboration" is the process of identifying all the predicates that
/// are implied by a source predicate. Currently this basically means
/// walking the "supertraits" and other similar assumptions. For
/// example, if we know that `T : Ord`, the elaborator would deduce
/// that `T : PartialOrd` holds as well. Similarly, if we have `trait
/// Foo : 'static`, and we know that `T : Foo`, then we know that `T :
/// 'static`.
pub struct Elaborator<'cx, 'tcx:'cx> {
tcx: &'cx TyCtxt<'tcx>,
stack: Vec<ty::Predicate<'tcx>>,
visited: PredicateSet<'cx,'tcx>,
}
pub fn elaborate_trait_ref<'cx, 'tcx>(
tcx: &'cx TyCtxt<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>)
-> Elaborator<'cx, 'tcx>
{
elaborate_predicates(tcx, vec![trait_ref.to_predicate()])
}
pub fn elaborate_trait_refs<'cx, 'tcx>(
tcx: &'cx TyCtxt<'tcx>,
trait_refs: &[ty::PolyTraitRef<'tcx>])
-> Elaborator<'cx, 'tcx>
{
let predicates = trait_refs.iter()
.map(|trait_ref| trait_ref.to_predicate())
.collect();
elaborate_predicates(tcx, predicates)
}
pub fn elaborate_predicates<'cx, 'tcx>(
tcx: &'cx TyCtxt<'tcx>,
mut predicates: Vec<ty::Predicate<'tcx>>)
-> Elaborator<'cx, 'tcx>
{
let mut visited = PredicateSet::new(tcx);
predicates.retain(|pred| visited.insert(pred));
Elaborator { tcx: tcx, stack: predicates, visited: visited }
}
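// Illustrative helper (not in the original module): elaboration is consumed by
// draining the iterator, so collecting every predicate implied by a set of
// bounds is a single `collect`. `tcx` and `bounds` are assumed to be supplied
// by the caller.
#[allow(dead_code)]
fn example_implied_predicates<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
                                         bounds: &[ty::PolyTraitRef<'tcx>])
                                         -> Vec<ty::Predicate<'tcx>> {
    elaborate_trait_refs(tcx, bounds).collect()
}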
impl<'cx, 'tcx> Elaborator<'cx, 'tcx> {
pub fn filter_to_traits(self) -> FilterToTraits<Elaborator<'cx, 'tcx>> {
FilterToTraits::new(self)
}
fn push(&mut self, predicate: &ty::Predicate<'tcx>) {
match *predicate {
ty::Predicate::Trait(ref data) => {
// Predicates declared on the trait.
let predicates = self.tcx.lookup_super_predicates(data.def_id());
let mut predicates: Vec<_> =
predicates.predicates
.iter()
.map(|p| p.subst_supertrait(self.tcx, &data.to_poly_trait_ref()))
.collect();
debug!("super_predicates: data={:?} predicates={:?}",
data, predicates);
// Only keep those bounds that we haven't already
// seen. This is necessary to prevent infinite
// recursion in some cases. One common case is when
// people define `trait Sized: Sized { }` rather than `trait
// Sized { }`.
predicates.retain(|r| self.visited.insert(r));
self.stack.extend(predicates);
}
ty::Predicate::WellFormed(..) => {
// Currently, we do not elaborate WF predicates,
// although we easily could.
}
ty::Predicate::ObjectSafe(..) => {
// Currently, we do not elaborate object-safe
// predicates.
}
ty::Predicate::Equate(..) => {
// Currently, we do not "elaborate" predicates like
// `X == Y`, though conceivably we might. For example,
// `&X == &Y` implies that `X == Y`.
}
ty::Predicate::Projection(..) => {
// Nothing to elaborate in a projection predicate.
}
ty::Predicate::RegionOutlives(..) |
ty::Predicate::TypeOutlives(..) => {
// Currently, we do not "elaborate" predicates like
// `'a : 'b` or `T : 'a`. We could conceivably do
// more here. For example,
//
// &'a int : 'b
//
// implies that
//
// 'a : 'b
//
// and we could get even more if we took WF
// constraints into account. For example,
//
// &'a &'b int : 'c
//
// implies that
//
// 'b : 'a
// 'a : 'c
}
}
}
}
impl<'cx, 'tcx> Iterator for Elaborator<'cx, 'tcx> {
type Item = ty::Predicate<'tcx>;
fn next(&mut self) -> Option<ty::Predicate<'tcx>> {
// Extract next item from top-most stack frame, if any.
let next_predicate = match self.stack.pop() {
Some(predicate) => predicate,
None => {
// No more stack frames. Done.
return None;
}
};
self.push(&next_predicate);
return Some(next_predicate);
}
}
///////////////////////////////////////////////////////////////////////////
// Supertrait iterator
///////////////////////////////////////////////////////////////////////////
pub type Supertraits<'cx, 'tcx> = FilterToTraits<Elaborator<'cx, 'tcx>>;
pub fn supertraits<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>)
-> Supertraits<'cx, 'tcx>
{
elaborate_trait_ref(tcx, trait_ref).filter_to_traits()
}
pub fn transitive_bounds<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
bounds: &[ty::PolyTraitRef<'tcx>])
-> Supertraits<'cx, 'tcx>
{
elaborate_trait_refs(tcx, bounds).filter_to_traits()
}
///////////////////////////////////////////////////////////////////////////
// Iterator over def-ids of supertraits
pub struct SupertraitDefIds<'cx, 'tcx:'cx> {
tcx: &'cx TyCtxt<'tcx>,
stack: Vec<DefId>,
visited: FnvHashSet<DefId>,
}
pub fn supertrait_def_ids<'cx, 'tcx>(tcx: &'cx TyCtxt<'tcx>,
trait_def_id: DefId)
-> SupertraitDefIds<'cx, 'tcx>
{
SupertraitDefIds {
tcx: tcx,
stack: vec![trait_def_id],
visited: Some(trait_def_id).into_iter().collect(),
}
}
impl<'cx, 'tcx> Iterator for SupertraitDefIds<'cx, 'tcx> {
type Item = DefId;
fn next(&mut self) -> Option<DefId> {
let def_id = match self.stack.pop() {
Some(def_id) => def_id,
None => { return None; }
};
let predicates = self.tcx.lookup_super_predicates(def_id);
let visited = &mut self.visited;
self.stack.extend(
predicates.predicates
.iter()
.filter_map(|p| p.to_opt_poly_trait_ref())
.map(|t| t.def_id())
.filter(|&super_def_id| visited.insert(super_def_id)));
Some(def_id)
}
}
///////////////////////////////////////////////////////////////////////////
// Other
///////////////////////////////////////////////////////////////////////////
/// A filter around an iterator of predicates that makes it yield up
/// just trait references.
pub struct FilterToTraits<I> {
base_iterator: I
}
impl<I> FilterToTraits<I> {
fn new(base: I) -> FilterToTraits<I> {
FilterToTraits { base_iterator: base }
}
}
impl<'tcx,I:Iterator<Item=ty::Predicate<'tcx>>> Iterator for FilterToTraits<I> {
type Item = ty::PolyTraitRef<'tcx>;
fn next(&mut self) -> Option<ty::PolyTraitRef<'tcx>> {
loop {
match self.base_iterator.next() {
None => {
return None;
}
Some(ty::Predicate::Trait(data)) => {
return Some(data.to_poly_trait_ref());
}
Some(_) => {
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////
// Other
///////////////////////////////////////////////////////////////////////////
/// Instantiate all bound parameters of the impl with the given substs,
/// returning the resulting trait ref and all obligations that arise.
/// The obligations are closed under normalization.
pub fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
impl_def_id: DefId,
impl_substs: &Substs<'tcx>)
-> (ty::TraitRef<'tcx>,
Vec<PredicateObligation<'tcx>>)
{
let impl_trait_ref =
selcx.tcx().impl_trait_ref(impl_def_id).unwrap();
let impl_trait_ref =
impl_trait_ref.subst(selcx.tcx(), impl_substs);
let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } =
super::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref);
let predicates = selcx.tcx().lookup_predicates(impl_def_id);
let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
let Normalized { value: predicates, obligations: normalization_obligations2 } =
super::normalize(selcx, ObligationCause::dummy(), &predicates);
let impl_obligations =
predicates_for_generics(ObligationCause::dummy(), 0, &predicates);
let impl_obligations: Vec<_> =
impl_obligations.into_iter()
.chain(normalization_obligations1)
.chain(normalization_obligations2)
.collect();
(impl_trait_ref, impl_obligations)
}
// Determine the `self` type, using fresh variables for all variables
// declared on the impl declaration, e.g., `impl<A,B> for Box<[(A,B)]>`
// would return ($0, $1) where $0 and $1 are freshly instantiated type
// variables.
pub fn fresh_type_vars_for_impl<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
span: Span,
impl_def_id: DefId)
-> Substs<'tcx>
{
let tcx = infcx.tcx;
let impl_generics = tcx.lookup_item_type(impl_def_id).generics;
infcx.fresh_substs_for_generics(span, &impl_generics)
}
/// See `super::obligations_for_generics`
pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>,
recursion_depth: usize,
generic_bounds: &ty::InstantiatedPredicates<'tcx>)
-> Vec<PredicateObligation<'tcx>>
{
debug!("predicates_for_generics(generic_bounds={:?})",
generic_bounds);
generic_bounds.predicates.iter().map(|predicate| {
Obligation { cause: cause.clone(),
recursion_depth: recursion_depth,
predicate: predicate.clone() }
}).collect()
}
pub fn trait_ref_for_builtin_bound<'tcx>(
tcx: &TyCtxt<'tcx>,
builtin_bound: ty::BuiltinBound,
param_ty: Ty<'tcx>)
-> Result<ty::TraitRef<'tcx>, ErrorReported>
{
match tcx.lang_items.from_builtin_kind(builtin_bound) {
Ok(def_id) => {
Ok(ty::TraitRef {
def_id: def_id,
substs: tcx.mk_substs(Substs::empty().with_self_ty(param_ty))
})
}
Err(e) => {
tcx.sess.err(&e);
Err(ErrorReported)
}
}
}
pub fn predicate_for_trait_ref<'tcx>(
cause: ObligationCause<'tcx>,
trait_ref: ty::TraitRef<'tcx>,
recursion_depth: usize)
-> PredicateObligation<'tcx>
{
Obligation {
cause: cause,
recursion_depth: recursion_depth,
predicate: trait_ref.to_predicate(),
}
}
pub fn predicate_for_trait_def<'tcx>(
tcx: &TyCtxt<'tcx>,
cause: ObligationCause<'tcx>,
trait_def_id: DefId,
recursion_depth: usize,
param_ty: Ty<'tcx>,
ty_params: Vec<Ty<'tcx>>)
-> PredicateObligation<'tcx>
{
let trait_ref = ty::TraitRef {
def_id: trait_def_id,
substs: tcx.mk_substs(Substs::new_trait(ty_params, vec![], param_ty))
};
predicate_for_trait_ref(cause, trait_ref, recursion_depth)
}
pub fn predicate_for_builtin_bound<'tcx>(
tcx: &TyCtxt<'tcx>,
cause: ObligationCause<'tcx>,
builtin_bound: ty::BuiltinBound,
recursion_depth: usize,
param_ty: Ty<'tcx>)
-> Result<PredicateObligation<'tcx>, ErrorReported>
{
let trait_ref = trait_ref_for_builtin_bound(tcx, builtin_bound, param_ty)?;
Ok(predicate_for_trait_ref(cause, trait_ref, recursion_depth))
}
/// Cast a trait reference into a reference to one of its super
/// traits; returns `None` if `target_trait_def_id` is not a
/// supertrait.
pub fn upcast<'tcx>(tcx: &TyCtxt<'tcx>,
source_trait_ref: ty::PolyTraitRef<'tcx>,
target_trait_def_id: DefId)
-> Vec<ty::PolyTraitRef<'tcx>>
{
if source_trait_ref.def_id() == target_trait_def_id {
        return vec![source_trait_ref]; // shortcut the most common case
}
supertraits(tcx, source_trait_ref)
.filter(|r| r.def_id() == target_trait_def_id)
.collect()
}
/// Given a trait `trait_ref`, returns the number of vtable entries
/// that come from `trait_ref`, excluding its supertraits. Used in
/// computing the vtable base for an upcast trait of a trait object.
pub fn count_own_vtable_entries<'tcx>(tcx: &TyCtxt<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>)
-> usize {
let mut entries = 0;
// Count number of methods and add them to the total offset.
// Skip over associated types and constants.
for trait_item in &tcx.trait_items(trait_ref.def_id())[..] {
if let ty::MethodTraitItem(_) = *trait_item {
entries += 1;
}
}
entries
}
/// Given an upcast trait object described by `object`, returns the
/// index of the method `method_def_id` (which should be part of
/// `object.upcast_trait_ref`) within the vtable for `object`.
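///
/// As an illustrative sketch (not part of the original docs): if the upcast
/// trait declares methods `f` and `g`, in that order, and `object.vtable_base`
/// is `b`, then `f` resolves to index `b` and `g` to index `b + 1`; associated
/// types and constants do not occupy slots in this count.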
pub fn get_vtable_index_of_object_method<'tcx>(tcx: &TyCtxt<'tcx>,
object: &super::VtableObjectData<'tcx>,
method_def_id: DefId) -> usize {
// Count number of methods preceding the one we are selecting and
// add them to the total offset.
// Skip over associated types and constants.
let mut entries = object.vtable_base;
for trait_item in &tcx.trait_items(object.upcast_trait_ref.def_id())[..] {
if trait_item.def_id() == method_def_id {
// The item with the ID we were given really ought to be a method.
assert!(match *trait_item {
ty::MethodTraitItem(_) => true,
_ => false
});
return entries;
}
if let ty::MethodTraitItem(_) = *trait_item {
entries += 1;
}
}
bug!("get_vtable_index_of_object_method: {:?} was not found",
method_def_id);
}
pub enum TupleArgumentsFlag { Yes, No }
pub fn closure_trait_ref_and_return_type<'tcx>(
tcx: &TyCtxt<'tcx>,
fn_trait_def_id: DefId,
self_ty: Ty<'tcx>,
sig: &ty::PolyFnSig<'tcx>,
tuple_arguments: TupleArgumentsFlag)
-> ty::Binder<(ty::TraitRef<'tcx>, Ty<'tcx>)>
{
let arguments_tuple = match tuple_arguments {
TupleArgumentsFlag::No => sig.0.inputs[0],
TupleArgumentsFlag::Yes => tcx.mk_tup(sig.0.inputs.to_vec()),
};
let trait_substs = Substs::new_trait(vec![arguments_tuple], vec![], self_ty);
let trait_ref = ty::TraitRef {
def_id: fn_trait_def_id,
substs: tcx.mk_substs(trait_substs),
};
ty::Binder((trait_ref, sig.0.output.unwrap_or(tcx.mk_nil())))
}
| 35.522593 | 95 | 0.548974 |
9bba364370b8d125d3c404a9b8dd694acbca6b3d
| 407 |
use std::io::{self, BufWriter};
fn main() -> Result<(), Box<dyn std::error::Error>> {
let reader = io::stdin();
let reader = reader.lock();
let writer = BufWriter::new(io::stdout());
let deserializer = serde_yaml::Deserializer::from_reader(reader);
let mut serializer = serde_json::Serializer::new(writer);
serde_transcode::transcode(deserializer, &mut serializer)?;
Ok(())
}
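// Usage sketch (the binary name is an assumption): YAML is read from stdin and
// JSON is streamed to stdout, e.g. `yaml2json < config.yaml > config.json`.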
| 29.071429 | 69 | 0.653563 |
16c7176a68db46034057b16a6260ce89917a8bc1
| 716 |
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(stmt_expr_attributes)]
#![deny(unused_parens)]
// Tests that lint attributes on statements/expressions are
// correctly applied to non-builtin early (AST) lints
fn main() {
#[allow(unused_parens)]
{
let _ = (9);
}
}
| 31.130435 | 68 | 0.713687 |
9b6c3ba3cc33ce409b86d8398aaeea0895d3acdd
| 11,955 |
// ---------------------------------------------------------------------------
// Copyright: (c) 2021 ff. Michael Amrhein ([email protected])
// License: This program is part of a larger application. For license
// details please read the file LICENSE.TXT provided together
// with the application.
// ---------------------------------------------------------------------------
// $Source$
// $Revision$
#![doc = include_str ! ("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(dead_code)]
extern crate alloc;
extern crate core;
use alloc::{format, string::String};
use core::{
cmp::Ordering,
fmt,
ops::{Add, Div, Mul, Sub},
};
#[cfg(feature = "fpdec")]
pub use amnt_dec::{AmountT, Dec, Decimal, AMNT_ONE, AMNT_ZERO};
#[cfg(all(not(feature = "fpdec"), target_pointer_width = "32"))]
pub use amnt_f32::{AmountT, AMNT_ONE, AMNT_ZERO};
#[cfg(all(not(feature = "fpdec"), target_pointer_width = "64"))]
pub use amnt_f64::{AmountT, AMNT_ONE, AMNT_ZERO};
pub use converter::{ConversionTable, Converter};
pub use rate::Rate;
pub use si_prefixes::SIPrefix;
mod converter;
pub mod prelude;
mod rate;
mod si_prefixes;
#[cfg(feature = "fpdec")]
#[doc(hidden)]
pub mod amnt_dec;
#[cfg(all(not(feature = "fpdec"), target_pointer_width = "32"))]
#[doc(hidden)]
pub mod amnt_f32;
#[cfg(all(not(feature = "fpdec"), target_pointer_width = "64"))]
#[doc(hidden)]
pub mod amnt_f64;
#[cfg(feature = "acceleration")]
pub mod acceleration;
#[cfg(feature = "area")]
pub mod area;
#[cfg(feature = "datathroughput")]
pub mod datathroughput;
#[cfg(feature = "datavolume")]
pub mod datavolume;
#[cfg(feature = "duration")]
pub mod duration;
#[cfg(feature = "energy")]
pub mod energy;
#[cfg(feature = "force")]
pub mod force;
#[cfg(feature = "frequency")]
pub mod frequency;
#[cfg(feature = "length")]
pub mod length;
#[cfg(feature = "mass")]
pub mod mass;
#[cfg(feature = "power")]
pub mod power;
#[cfg(feature = "speed")]
pub mod speed;
#[cfg(feature = "temperature")]
pub mod temperature;
#[cfg(feature = "volume")]
pub mod volume;
/// The abstract type of units used to define quantities.
pub trait Unit:
Copy + Eq + PartialEq + Sized + Mul<AmountT> + fmt::Display
{
/// Associated type of quantity
type QuantityType: Quantity<UnitType = Self>;
/// Returns an iterator over the variants of `Self`.
fn iter<'a>() -> core::slice::Iter<'a, Self>;
/// Returns `Some(unit)` where `unit.symbol()` == `symbol`, or `None` if
/// there is no such unit.
fn from_symbol(symbol: &str) -> Option<Self> {
for unit in Self::iter() {
if unit.symbol() == symbol {
return Some(*unit);
}
}
None
}
/// Returns the name of `self`.
fn name(&self) -> &'static str;
/// Returns the symbol used to represent `self`.
fn symbol(&self) -> &'static str;
    /// Returns the SI prefix of `self`, or `None` if `self` is not an SI unit.
fn si_prefix(&self) -> Option<SIPrefix>;
    /// Returns `1 * self`.
fn as_qty(&self) -> Self::QuantityType {
Self::QuantityType::new(AMNT_ONE, *self)
}
fn fmt(&self, form: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self.symbol(), form)
}
}
/// Type of units being linear scaled in terms of a reference unit.
pub trait LinearScaledUnit: Unit {
/// Unit used as reference for scaling the units.
const REF_UNIT: Self;
    /// Returns `Some(unit)` where `unit.scale()` == `amnt`, or `None` if
/// there is no such unit.
fn from_scale(amnt: AmountT) -> Option<Self> {
for unit in Self::iter() {
if unit.scale() == amnt {
return Some(*unit);
}
}
None
}
/// Returns `true` if `self` is the reference unit of its unit type.
#[inline(always)]
fn is_ref_unit(&self) -> bool {
*self == Self::REF_UNIT
}
    /// Returns `factor` so that `factor` * `Self::REF_UNIT` == 1 * `self`.
fn scale(&self) -> AmountT;
/// Returns `factor` so that `factor` * `other` == 1 * `self`.
#[inline(always)]
fn ratio(&self, other: &Self) -> AmountT {
self.scale() / other.scale()
}
}
/// The abstract type of quantities.
pub trait Quantity: Copy + Sized + Mul<AmountT> {
/// Associated type of unit
type UnitType: Unit<QuantityType = Self>;
/// Returns an iterator over the variants of `Self::UnitType`.
fn iter_units<'a>() -> core::slice::Iter<'a, Self::UnitType> {
Self::UnitType::iter()
}
/// Returns `Some(unit)` where `unit.symbol()` == `symbol`, or `None` if
/// there is no such unit.
fn unit_from_symbol(symbol: &str) -> Option<Self::UnitType> {
for unit in Self::iter_units() {
if unit.symbol() == symbol {
return Some(*unit);
}
}
None
}
/// Returns a new instance of the type implementing `Quantity`.
fn new(amount: AmountT, unit: Self::UnitType) -> Self;
/// Returns the amount of `self`.
fn amount(&self) -> AmountT;
/// Returns the unit of `self`.
fn unit(&self) -> Self::UnitType;
#[inline(always)]
fn eq(&self, other: &Self) -> bool {
self.unit() == other.unit() && self.amount() == other.amount()
}
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
if self.unit() == other.unit() {
PartialOrd::partial_cmp(&self.amount(), &other.amount())
} else {
None
}
}
fn add(self, rhs: Self) -> Self {
if self.unit() == rhs.unit() {
return Self::new(self.amount() + rhs.amount(), self.unit());
}
panic!(
"Can't add '{}' and '{}'.",
self.unit().symbol(),
rhs.unit().symbol()
);
}
fn sub(self, rhs: Self) -> Self {
if self.unit() == rhs.unit() {
return Self::new(self.amount() - rhs.amount(), self.unit());
}
panic!(
"Can't subtract '{}' and '{}'.",
self.unit().symbol(),
rhs.unit().symbol(),
);
}
fn div(self, rhs: Self) -> AmountT {
if self.unit() == rhs.unit() {
return self.amount() / rhs.amount();
}
panic!(
"Can't divide '{}' and '{}'.",
self.unit().symbol(),
rhs.unit().symbol()
);
}
fn fmt(&self, form: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.unit().symbol() {
"" => fmt::Display::fmt(&self.amount(), form),
_ => {
let tmp: String;
let amnt_non_neg = self.amount() >= AMNT_ZERO;
#[cfg(feature = "fpdec")]
let abs_amnt = self.amount().abs();
#[cfg(not(feature = "fpdec"))]
let abs_amnt = if amnt_non_neg {
self.amount()
} else {
-self.amount()
};
if let Some(prec) = form.precision() {
tmp = format!("{:.*} {}", prec, abs_amnt, self.unit())
} else {
tmp = format!("{} {}", abs_amnt, self.unit())
}
form.pad_integral(amnt_non_neg, "", &tmp)
}
}
}
}
/// Trait for quantities having a reference unit
pub trait HasRefUnit: Quantity + Add<Self> + Sub<Self> + Div<Self>
where
<Self as Quantity>::UnitType: LinearScaledUnit,
{
/// Unit used as reference for scaling the units of `Self::UnitType`.
const REF_UNIT: <Self as Quantity>::UnitType;
/// Returns `Some(unit)` where `unit.scale()` == `amnt`, or `None` if
/// there is no such unit.
fn unit_from_scale(amnt: AmountT) -> Option<Self::UnitType> {
for unit in Self::iter_units() {
if unit.scale() == amnt {
return Some(*unit);
}
}
None
}
/// Returns `factor` so that `factor` * `unit` == `self`.
#[inline(always)]
fn equiv_amount(&self, unit: Self::UnitType) -> AmountT {
if self.unit() == unit {
self.amount()
} else {
self.unit().ratio(&unit) * self.amount()
}
}
/// Returns `qty` where `qty` == `self` and `qty.unit()` is `to_unit`.
fn convert(&self, to_unit: Self::UnitType) -> Self {
Self::new(self.equiv_amount(to_unit), to_unit)
}
#[inline(always)]
fn eq(&self, other: &Self) -> bool {
self.amount() == other.equiv_amount(self.unit())
}
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
if self.unit() == other.unit() {
PartialOrd::partial_cmp(&self.amount(), &other.amount())
} else {
PartialOrd::partial_cmp(
&self.amount(),
&other.equiv_amount(self.unit()),
)
}
}
#[inline]
fn add(self, rhs: Self) -> Self {
Self::new(self.amount() + rhs.equiv_amount(self.unit()), self.unit())
}
#[inline]
fn sub(self, rhs: Self) -> Self {
Self::new(self.amount() - rhs.equiv_amount(self.unit()), self.unit())
}
#[inline]
fn div(self, rhs: Self) -> AmountT {
self.amount() / rhs.equiv_amount(self.unit())
}
#[doc(hidden)]
/// Returns a new instance of the type implementing `HasRefUnit`, equivalent
/// to `amount * Self::REF_UNIT`, converted to the unit with the greatest
/// scale less than or equal to `amount` or - if there is no such unit - to
/// the unit with the smallest scale greater than `amount`, in any case
    /// taking only SI units into account if `Self::REF_UNIT` is an SI unit.
fn _fit(amount: AmountT) -> Self {
let take_all = Self::REF_UNIT.si_prefix().is_none();
let mut it =
Self::iter_units().filter(|u| take_all || u.si_prefix().is_some());
        // `it` returns at least the reference unit, so it's safe to unwrap here
let first = it.next().unwrap();
let last = it
.filter(|u| u.scale() > first.scale() && u.scale() <= amount)
.last();
match last {
Some(unit) => Self::new(amount / unit.scale(), *unit),
None => Self::new(amount / first.scale(), *first),
}
}
}
/// The "unit" of the "unitless" quantity.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum One {
One,
}
impl One {
const VARIANTS: [Self; 1] = [ONE];
}
/// Special singleton used as "unit" for the "unitless" quantity.
pub const ONE: One = One::One;
impl Unit for One {
type QuantityType = AmountT;
fn iter<'a>() -> core::slice::Iter<'a, Self> {
Self::VARIANTS.iter()
}
fn name(&self) -> &'static str {
"One"
}
fn symbol(&self) -> &'static str {
""
}
fn si_prefix(&self) -> Option<SIPrefix> {
None
}
}
impl fmt::Display for One {
#[inline(always)]
fn fmt(&self, form: &mut fmt::Formatter<'_>) -> fmt::Result {
<Self as Unit>::fmt(self, form)
}
}
impl LinearScaledUnit for One {
const REF_UNIT: Self = ONE;
fn scale(&self) -> AmountT {
AMNT_ONE
}
}
impl Mul<One> for AmountT {
type Output = AmountT;
#[inline(always)]
fn mul(self, _rhs: One) -> Self::Output {
self
}
}
impl Mul<AmountT> for One {
type Output = AmountT;
#[inline(always)]
fn mul(self, rhs: AmountT) -> Self::Output {
rhs
}
}
impl Quantity for AmountT {
type UnitType = One;
#[inline(always)]
fn new(amount: AmountT, _unit: Self::UnitType) -> Self {
amount
}
#[inline(always)]
fn amount(&self) -> AmountT {
*self
}
#[inline(always)]
fn unit(&self) -> Self::UnitType {
ONE
}
}
impl HasRefUnit for AmountT {
const REF_UNIT: One = ONE;
#[inline(always)]
fn _fit(amount: AmountT) -> Self {
amount
}
}
| 28.195755 | 80 | 0.541865 |
dd5bfa9f81798ec71a54d5df17df00334fe0fa0c
| 99,802 |
mod builder;
pub use self::builder::ClientBuilder;
use crate::{
error::{Error, ErrorType},
ratelimiting::Ratelimiter,
request::{
application::{
command::{
create_global_command::CreateGlobalChatInputCommand,
create_guild_command::CreateGuildChatInputCommand, CreateGlobalCommand,
CreateGuildCommand, DeleteGlobalCommand, DeleteGuildCommand, GetCommandPermissions,
GetGlobalCommand, GetGlobalCommands, GetGuildCommand, GetGuildCommandPermissions,
GetGuildCommands, SetCommandPermissions, SetGlobalCommands, SetGuildCommands,
UpdateCommandPermissions, UpdateGlobalCommand, UpdateGuildCommand,
},
interaction::{
CreateFollowupMessage, DeleteFollowupMessage, DeleteOriginalResponse,
GetFollowupMessage, GetOriginalResponse, InteractionCallback,
UpdateFollowupMessage, UpdateOriginalResponse,
},
InteractionError, InteractionErrorType,
},
channel::{
reaction::delete_reaction::TargetUser,
stage::create_stage_instance::CreateStageInstanceError,
thread::{
AddThreadMember, CreateThread, CreateThreadFromMessage,
GetJoinedPrivateArchivedThreads, GetPrivateArchivedThreads,
GetPublicArchivedThreads, GetThreadMembers, JoinThread, LeaveThread,
RemoveThreadMember, ThreadValidationError, UpdateThread,
},
},
guild::{
create_guild::CreateGuildError,
create_guild_channel::CreateGuildChannelError,
sticker::{
CreateGuildSticker, DeleteGuildSticker, GetGuildSticker, GetGuildStickers,
StickerValidationError, UpdateGuildSticker,
},
update_guild_channel_positions::Position,
},
prelude::*,
sticker::{GetNitroStickerPacks, GetSticker},
GetUserApplicationInfo, Method, Request,
},
response::{future::InvalidToken, ResponseFuture},
API_VERSION,
};
use hyper::{
client::{Client as HyperClient, HttpConnector},
header::{HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, USER_AGENT},
Body,
};
use std::{
convert::TryFrom,
fmt::{Debug, Formatter, Result as FmtResult},
sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc,
},
time::Duration,
};
use tokio::time;
use twilight_model::{
application::{
callback::InteractionResponse,
command::{permissions::CommandPermissions, Command},
},
channel::{
message::{allowed_mentions::AllowedMentions, sticker::StickerId},
thread::AutoArchiveDuration,
ChannelType,
},
guild::Permissions,
id::{
ApplicationId, ChannelId, CommandId, EmojiId, GuildId, IntegrationId, InteractionId,
MessageId, RoleId, UserId, WebhookId,
},
};
#[cfg(feature = "hyper-rustls")]
type HttpsConnector<T> = hyper_rustls::HttpsConnector<T>;
#[cfg(all(feature = "hyper-tls", not(feature = "hyper-rustls")))]
type HttpsConnector<T> = hyper_tls::HttpsConnector<T>;
struct State {
http: HyperClient<HttpsConnector<HttpConnector>, Body>,
default_headers: Option<HeaderMap>,
proxy: Option<Box<str>>,
ratelimiter: Option<Ratelimiter>,
/// Whether to short-circuit when a 401 has been encountered with the client
/// authorization.
///
/// This relates to [`token_invalid`].
///
/// [`token_invalid`]: Self::token_invalid
remember_invalid_token: bool,
timeout: Duration,
token_invalid: Arc<AtomicBool>,
token: Option<Box<str>>,
use_http: bool,
pub(crate) application_id: AtomicU64,
pub(crate) default_allowed_mentions: Option<AllowedMentions>,
}
impl Debug for State {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
f.debug_struct("State")
.field("http", &self.http)
.field("default_headers", &self.default_headers)
.field("proxy", &self.proxy)
.field("ratelimiter", &self.ratelimiter)
.field("token", &self.token)
.field("use_http", &self.use_http)
.finish()
}
}
/// Twilight's HTTP client.
///
/// Almost all of the client methods require authentication, and as such, the client must be
/// supplied with a Discord Token. Get yours [here].
///
/// # OAuth
///
/// To use Bearer tokens, prefix the token with `"Bearer "`, including the space
/// at the end like so:
///
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::env;
/// use twilight_http::Client;
///
/// let bearer = env::var("BEARER_TOKEN")?;
/// let token = format!("Bearer {}", bearer);
///
/// let client = Client::new(token);
/// # Ok(()) }
/// ```
///
/// # Cloning
///
/// The client internally wraps its data within an Arc. This means that the
/// client can be cloned and passed around tasks and threads cheaply.
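///
/// For example, a clone can be moved into another task (a sketch using the
/// same `tokio` setup as the examples below):
///
/// ```rust,no_run
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use twilight_http::Client;
///
/// let client = Client::new("my token".to_owned());
/// let clone = client.clone();
///
/// tokio::spawn(async move {
///     // `clone` is usable here independently of `client`.
///     drop(clone);
/// });
/// # Ok(()) }
/// ```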
///
/// # Unauthorized behavior
///
/// When the client encounters an Unauthorized response it will take note that
/// the configured token is invalid. This may occur when the token has been
/// revoked or expired. When this happens, you must create a new client with the
/// new token. The client will no longer execute requests in order to
/// prevent API bans and will always return [`ErrorType::Unauthorized`].
///
/// # Examples
///
/// Create a client called `client`:
/// ```rust,no_run
/// use twilight_http::Client;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
/// # Ok(()) }
/// ```
///
/// Use [`ClientBuilder`] to create a client called `client`, with a shorter
/// timeout:
/// ```rust,no_run
/// use twilight_http::Client;
/// use std::time::Duration;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::builder()
/// .token("my token".to_owned())
/// .timeout(Duration::from_secs(5))
/// .build();
/// # Ok(()) }
/// ```
///
/// All the examples on this page assume you have already created a client, and have named it
/// `client`.
///
/// [here]: https://discord.com/developers/applications
#[derive(Clone, Debug)]
pub struct Client {
state: Arc<State>,
}
impl Client {
/// Create a new `hyper-rustls` or `hyper-tls` backed client with a token.
#[cfg_attr(docsrs, doc(cfg(any(feature = "hyper-rustls", feature = "hyper-tls"))))]
pub fn new(token: String) -> Self {
ClientBuilder::default().token(token).build()
}
/// Create a new builder to create a client.
///
/// Refer to its documentation for more information.
pub fn builder() -> ClientBuilder {
ClientBuilder::new()
}
/// Retrieve an immutable reference to the token used by the client.
///
    /// If the token provided on creation was not prefixed with `Bot `, the
    /// client will have added the prefix, and the value returned here reflects
    /// that.
pub fn token(&self) -> Option<&str> {
self.state.token.as_deref()
}
/// Retrieve the [`ApplicationId`] used by interaction methods.
pub fn application_id(&self) -> Option<ApplicationId> {
let id = self.state.application_id.load(Ordering::Relaxed);
if id != 0 {
return Some(ApplicationId(id));
}
None
}
/// Set a new [`ApplicationId`] after building the client.
///
/// Returns the previous ID, if there was one.
pub fn set_application_id(&self, application_id: ApplicationId) -> Option<ApplicationId> {
let prev = self
.state
.application_id
.swap(application_id.0, Ordering::Relaxed);
if prev != 0 {
return Some(ApplicationId(prev));
}
None
}
/// Get the default [`AllowedMentions`] for sent messages.
pub fn default_allowed_mentions(&self) -> Option<AllowedMentions> {
self.state.default_allowed_mentions.clone()
}
/// Get the Ratelimiter used by the client internally.
///
/// This will return `None` only if ratelimit handling
/// has been explicitly disabled in the [`ClientBuilder`].
pub fn ratelimiter(&self) -> Option<Ratelimiter> {
self.state.ratelimiter.clone()
}
/// Get the audit log for a guild.
///
/// # Examples
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::id::GuildId;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// let guild_id = GuildId(101);
    /// let audit_log = client
    ///     .audit_log(guild_id)
    ///     .exec()
    ///     .await?;
/// # Ok(()) }
/// ```
pub const fn audit_log(&self, guild_id: GuildId) -> GetAuditLog<'_> {
GetAuditLog::new(self, guild_id)
}
/// Retrieve the bans for a guild.
///
/// # Examples
///
/// Retrieve the bans for guild `1`:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::id::GuildId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let guild_id = GuildId(1);
///
/// let bans = client.bans(guild_id).exec().await?;
/// # Ok(()) }
/// ```
pub const fn bans(&self, guild_id: GuildId) -> GetBans<'_> {
GetBans::new(self, guild_id)
}
/// Get information about a ban of a guild.
///
/// Includes the user banned and the reason.
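    ///
    /// # Examples
    ///
    /// Get the ban of user `200` in guild `100` (a sketch in the same style as
    /// the other examples in this document):
    ///
    /// ```rust,no_run
    /// # use twilight_http::Client;
    /// use twilight_model::id::{GuildId, UserId};
    /// #
    /// # #[tokio::main]
    /// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client = Client::new("my token".to_owned());
    /// let ban = client.ban(GuildId(100), UserId(200)).exec().await?;
    /// # Ok(()) }
    /// ```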
pub const fn ban(&self, guild_id: GuildId, user_id: UserId) -> GetBan<'_> {
GetBan::new(self, guild_id, user_id)
}
/// Bans a user from a guild, optionally with the number of days' worth of
/// messages to delete and the reason.
///
/// # Examples
///
/// Ban user `200` from guild `100`, deleting
/// 1 day's worth of messages, for the reason `"memes"`:
///
/// ```rust,no_run
/// # use twilight_http::{request::AuditLogReason, Client};
/// use twilight_model::id::{GuildId, UserId};
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let guild_id = GuildId(100);
/// let user_id = UserId(200);
/// client.create_ban(guild_id, user_id)
/// .delete_message_days(1)?
/// .reason("memes")?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn create_ban(&self, guild_id: GuildId, user_id: UserId) -> CreateBan<'_> {
CreateBan::new(self, guild_id, user_id)
}
/// Remove a ban from a user in a guild.
///
/// # Examples
///
/// Unban user `200` from guild `100`:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::id::{GuildId, UserId};
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let guild_id = GuildId(100);
/// let user_id = UserId(200);
///
/// client.delete_ban(guild_id, user_id).exec().await?;
/// # Ok(()) }
/// ```
pub const fn delete_ban(&self, guild_id: GuildId, user_id: UserId) -> DeleteBan<'_> {
DeleteBan::new(self, guild_id, user_id)
}
/// Get a channel by its ID.
///
/// # Examples
///
/// Get channel `100`:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::ChannelId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let channel_id = ChannelId(100);
/// #
/// let channel = client.channel(channel_id).exec().await?;
/// # Ok(()) }
/// ```
pub const fn channel(&self, channel_id: ChannelId) -> GetChannel<'_> {
GetChannel::new(self, channel_id)
}
/// Delete a channel by ID.
pub const fn delete_channel(&self, channel_id: ChannelId) -> DeleteChannel<'_> {
DeleteChannel::new(self, channel_id)
}
/// Update a channel.
pub const fn update_channel(&self, channel_id: ChannelId) -> UpdateChannel<'_> {
UpdateChannel::new(self, channel_id)
}
/// Follows a news channel by [`ChannelId`].
///
/// The type returned is [`FollowedChannel`].
///
/// [`FollowedChannel`]: ::twilight_model::channel::FollowedChannel
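///
/// # Examples
///
/// A minimal sketch with placeholder channel IDs, following one channel's
/// announcements into another:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::ChannelId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// let news_channel_id = ChannelId(1);
/// let target_channel_id = ChannelId(2);
///
/// client
///     .follow_news_channel(news_channel_id, target_channel_id)
///     .exec()
///     .await?;
/// # Ok(()) }
/// ```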
pub const fn follow_news_channel(
&self,
channel_id: ChannelId,
webhook_channel_id: ChannelId,
) -> FollowNewsChannel<'_> {
FollowNewsChannel::new(self, channel_id, webhook_channel_id)
}
/// Get the invites for a guild channel.
///
/// Requires the [`MANAGE_CHANNELS`] permission. This method only works if
/// the channel is of type [`GuildChannel`].
///
/// [`MANAGE_CHANNELS`]: twilight_model::guild::Permissions::MANAGE_CHANNELS
/// [`GuildChannel`]: twilight_model::channel::GuildChannel
pub const fn channel_invites(&self, channel_id: ChannelId) -> GetChannelInvites<'_> {
GetChannelInvites::new(self, channel_id)
}
/// Get channel messages, by [`ChannelId`].
///
/// Only one of [`after`], [`around`], and [`before`] can be specified at a time.
/// Once these are specified, the type returned is [`GetChannelMessagesConfigured`].
///
/// If [`limit`] is unspecified, the default set by Discord is 50.
///
/// # Examples
///
/// ```rust,no_run
/// use twilight_http::Client;
/// use twilight_model::id::{ChannelId, MessageId};
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
/// let channel_id = ChannelId(123);
/// let message_id = MessageId(234);
/// let limit: u64 = 6;
///
/// let messages = client
/// .channel_messages(channel_id)
/// .before(message_id)
/// .limit(limit)?
/// .exec()
/// .await?;
///
/// # Ok(()) }
/// ```
///
/// # Errors
///
/// Returns a [`GetChannelMessagesErrorType::LimitInvalid`] error type if
/// the amount is less than 1 or greater than 100.
///
/// [`after`]: GetChannelMessages::after
/// [`around`]: GetChannelMessages::around
/// [`before`]: GetChannelMessages::before
/// [`GetChannelMessagesConfigured`]: crate::request::channel::message::GetChannelMessagesConfigured
/// [`limit`]: GetChannelMessages::limit
/// [`GetChannelMessagesErrorType::LimitInvalid`]: crate::request::channel::message::get_channel_messages::GetChannelMessagesErrorType::LimitInvalid
pub const fn channel_messages(&self, channel_id: ChannelId) -> GetChannelMessages<'_> {
GetChannelMessages::new(self, channel_id)
}
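/// Delete a permission overwrite for a role or user in a channel.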
pub const fn delete_channel_permission(
&self,
channel_id: ChannelId,
) -> DeleteChannelPermission<'_> {
DeleteChannelPermission::new(self, channel_id)
}
/// Update the permissions for a role or a user in a channel.
///
/// # Examples
///
/// Create permission overrides for a role to view the channel, but not send messages:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::guild::Permissions;
/// use twilight_model::id::{ChannelId, RoleId};
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
///
/// let channel_id = ChannelId(123);
/// let allow = Permissions::VIEW_CHANNEL;
/// let deny = Permissions::SEND_MESSAGES;
/// let role_id = RoleId(432);
///
/// client.update_channel_permission(channel_id, allow, deny)
/// .role(role_id)
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn update_channel_permission(
&self,
channel_id: ChannelId,
allow: Permissions,
deny: Permissions,
) -> UpdateChannelPermission<'_> {
UpdateChannelPermission::new(self, channel_id, allow, deny)
}
/// Get all the webhooks of a channel.
pub const fn channel_webhooks(&self, channel_id: ChannelId) -> GetChannelWebhooks<'_> {
GetChannelWebhooks::new(self, channel_id)
}
/// Get information about the current user.
pub const fn current_user(&self) -> GetCurrentUser<'_> {
GetCurrentUser::new(self)
}
/// Get information about the current bot application.
pub const fn current_user_application(&self) -> GetUserApplicationInfo<'_> {
GetUserApplicationInfo::new(self)
}
/// Update the current user.
///
/// All parameters are optional. If the username is changed, it may cause the discriminator to
/// be randomized.
pub const fn update_current_user(&self) -> UpdateCurrentUser<'_> {
UpdateCurrentUser::new(self)
}
/// Update the current user's voice state.
///
/// All parameters are optional.
///
/// # Caveats
///
/// - `channel_id` must currently point to a stage channel.
/// - Current user must have already joined `channel_id`.
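///
/// # Examples
///
/// A minimal sketch with placeholder IDs, assuming the channel is a stage
/// channel the current user has already joined:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::id::{ChannelId, GuildId};
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// let guild_id = GuildId(1);
/// let channel_id = ChannelId(2);
///
/// client
///     .update_current_user_voice_state(guild_id, channel_id)
///     .exec()
///     .await?;
/// # Ok(()) }
/// ```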
pub const fn update_current_user_voice_state(
&self,
guild_id: GuildId,
channel_id: ChannelId,
) -> UpdateCurrentUserVoiceState<'_> {
UpdateCurrentUserVoiceState::new(self, guild_id, channel_id)
}
/// Get the current user's connections.
///
/// Requires the `connections` `OAuth2` scope.
pub const fn current_user_connections(&self) -> GetCurrentUserConnections<'_> {
GetCurrentUserConnections::new(self)
}
/// Returns a list of guilds for the current user.
///
/// # Examples
///
/// Get the first 25 guilds with an ID after `300` and before
/// `400`:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::id::GuildId;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let after = GuildId(300);
/// let before = GuildId(400);
/// let guilds = client.current_user_guilds()
/// .after(after)
/// .before(before)
/// .limit(25)?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn current_user_guilds(&self) -> GetCurrentUserGuilds<'_> {
GetCurrentUserGuilds::new(self)
}
/// Changes the user's nickname in a guild.
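///
/// # Examples
///
/// A minimal sketch with a placeholder guild ID and nickname:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::GuildId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// client
///     .update_current_user_nick(GuildId(1), "twilight")
///     .exec()
///     .await?;
/// # Ok(()) }
/// ```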
pub const fn update_current_user_nick<'a>(
&'a self,
guild_id: GuildId,
nick: &'a str,
) -> UpdateCurrentUserNick<'a> {
UpdateCurrentUserNick::new(self, guild_id, nick)
}
/// Get the emojis for a guild, by the guild's id.
///
/// # Examples
///
/// Get the emojis for guild `100`:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::GuildId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let guild_id = GuildId(100);
///
/// client.emojis(guild_id).exec().await?;
/// # Ok(()) }
/// ```
pub const fn emojis(&self, guild_id: GuildId) -> GetEmojis<'_> {
GetEmojis::new(self, guild_id)
}
/// Get an emoji for a guild by the guild's ID and emoji's ID.
///
/// # Examples
///
/// Get emoji `100` from guild `50`:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::{EmojiId, GuildId};
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let guild_id = GuildId(50);
/// let emoji_id = EmojiId(100);
///
/// client.emoji(guild_id, emoji_id).exec().await?;
/// # Ok(()) }
/// ```
pub const fn emoji(&self, guild_id: GuildId, emoji_id: EmojiId) -> GetEmoji<'_> {
GetEmoji::new(self, guild_id, emoji_id)
}
/// Create an emoji in a guild.
///
/// The emoji must be a Data URI, in the form of `data:image/{type};base64,{data}` where
/// `{type}` is the image MIME type and `{data}` is the base64-encoded image. Refer to [the
/// discord docs] for more information about image data.
///
/// [the discord docs]: https://discord.com/developers/docs/reference#image-data
pub const fn create_emoji<'a>(
&'a self,
guild_id: GuildId,
name: &'a str,
image: &'a str,
) -> CreateEmoji<'a> {
CreateEmoji::new(self, guild_id, name, image)
}
/// Delete an emoji in a guild, by id.
pub const fn delete_emoji(&self, guild_id: GuildId, emoji_id: EmojiId) -> DeleteEmoji<'_> {
DeleteEmoji::new(self, guild_id, emoji_id)
}
/// Update an emoji in a guild, by id.
pub const fn update_emoji(&self, guild_id: GuildId, emoji_id: EmojiId) -> UpdateEmoji<'_> {
UpdateEmoji::new(self, guild_id, emoji_id)
}
/// Get information about the gateway, optionally with additional information detailing the
/// number of shards to use and sessions remaining.
///
/// # Examples
///
/// Get the gateway connection URL without bot information:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let info = client.gateway().exec().await?;
/// # Ok(()) }
/// ```
///
/// Get the gateway connection URL with additional shard and session information, which
/// requires specifying a bot token:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let info = client.gateway().authed().exec().await?.model().await?;
///
/// println!("URL: {}", info.url);
/// println!("Recommended shards to use: {}", info.shards);
/// # Ok(()) }
/// ```
pub const fn gateway(&self) -> GetGateway<'_> {
GetGateway::new(self)
}
/// Get information about a guild.
pub const fn guild(&self, guild_id: GuildId) -> GetGuild<'_> {
GetGuild::new(self, guild_id)
}
/// Create a new request to create a guild.
///
/// The minimum length of the name is 2 UTF-16 characters and the maximum is 100 UTF-16
/// characters. This endpoint can only be used by bots in fewer than 10 guilds.
///
/// # Errors
///
/// Returns a [`CreateGuildErrorType::NameInvalid`] error type if the name
/// length is too short or too long.
///
/// [`CreateGuildErrorType::NameInvalid`]: crate::request::guild::create_guild::CreateGuildErrorType::NameInvalid
pub fn create_guild(&self, name: String) -> Result<CreateGuild<'_>, CreateGuildError> {
CreateGuild::new(self, name)
}
/// Delete a guild permanently. The user must be the owner.
pub const fn delete_guild(&self, guild_id: GuildId) -> DeleteGuild<'_> {
DeleteGuild::new(self, guild_id)
}
/// Update a guild.
///
/// All fields are optional. Refer to [the discord docs] for more information.
///
/// [the discord docs]: https://discord.com/developers/docs/resources/guild#modify-guild
pub const fn update_guild(&self, guild_id: GuildId) -> UpdateGuild<'_> {
UpdateGuild::new(self, guild_id)
}
/// Leave a guild by id.
pub const fn leave_guild(&self, guild_id: GuildId) -> LeaveGuild<'_> {
LeaveGuild::new(self, guild_id)
}
/// Get the channels in a guild.
pub const fn guild_channels(&self, guild_id: GuildId) -> GetGuildChannels<'_> {
GetGuildChannels::new(self, guild_id)
}
/// Create a new request to create a guild channel.
///
/// All fields are optional except for name. The minimum length of the name
/// is 1 UTF-16 character and the maximum is 100 UTF-16 characters.
///
/// # Errors
///
/// Returns a [`CreateGuildChannelErrorType::NameInvalid`] error type when
/// the length of the name is either fewer than 1 UTF-16 character or more
/// than 100 UTF-16 characters.
///
/// Returns a [`CreateGuildChannelErrorType::RateLimitPerUserInvalid`] error
/// type when the seconds of the rate limit per user is more than 21600.
///
/// Returns a [`CreateGuildChannelErrorType::TopicInvalid`] error type when
/// the length of the topic is more than 1024 UTF-16 characters.
///
/// [`CreateGuildChannelErrorType::NameInvalid`]: crate::request::guild::create_guild_channel::CreateGuildChannelErrorType::NameInvalid
/// [`CreateGuildChannelErrorType::RateLimitPerUserInvalid`]: crate::request::guild::create_guild_channel::CreateGuildChannelErrorType::RateLimitPerUserInvalid
/// [`CreateGuildChannelErrorType::TopicInvalid`]: crate::request::guild::create_guild_channel::CreateGuildChannelErrorType::TopicInvalid
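///
/// # Examples
///
/// A minimal sketch creating a channel named `my-channel` in a placeholder
/// guild:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::GuildId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// client
///     .create_guild_channel(GuildId(1), "my-channel")?
///     .exec()
///     .await?;
/// # Ok(()) }
/// ```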
pub fn create_guild_channel<'a>(
&'a self,
guild_id: GuildId,
name: &'a str,
) -> Result<CreateGuildChannel<'a>, CreateGuildChannelError> {
CreateGuildChannel::new(self, guild_id, name)
}
/// Modify the positions of the channels.
///
/// The minimum number of channels to modify is two, i.e. a swap between two
/// channels.
///
/// This method accepts a slice of [`Position`]s, each of which pairs a channel
/// ID with its new position and may carry additional optional fields.
pub const fn update_guild_channel_positions<'a>(
&'a self,
guild_id: GuildId,
channel_positions: &'a [Position],
) -> UpdateGuildChannelPositions<'a> {
UpdateGuildChannelPositions::new(self, guild_id, channel_positions)
}
/// Get the guild widget.
///
/// Refer to [the discord docs] for more information.
///
/// [the discord docs]: https://discord.com/developers/docs/resources/guild#get-guild-widget
pub const fn guild_widget(&self, guild_id: GuildId) -> GetGuildWidget<'_> {
GetGuildWidget::new(self, guild_id)
}
/// Modify the guild widget.
pub const fn update_guild_widget(&self, guild_id: GuildId) -> UpdateGuildWidget<'_> {
UpdateGuildWidget::new(self, guild_id)
}
/// Get the guild's integrations.
pub const fn guild_integrations(&self, guild_id: GuildId) -> GetGuildIntegrations<'_> {
GetGuildIntegrations::new(self, guild_id)
}
/// Delete an integration for a guild, by the integration's id.
pub const fn delete_guild_integration(
&self,
guild_id: GuildId,
integration_id: IntegrationId,
) -> DeleteGuildIntegration<'_> {
DeleteGuildIntegration::new(self, guild_id, integration_id)
}
/// Get information about the invites of a guild.
///
/// Requires the [`MANAGE_GUILD`] permission.
///
/// [`MANAGE_GUILD`]: twilight_model::guild::Permissions::MANAGE_GUILD
pub const fn guild_invites(&self, guild_id: GuildId) -> GetGuildInvites<'_> {
GetGuildInvites::new(self, guild_id)
}
/// Get the members of a guild, by id.
///
/// The upper limit to this request is 1000. If more than 1000 members are needed, the requests
/// must be chained. Discord defaults the limit to 1.
///
/// # Examples
///
/// Get the first 500 members of guild `100` after user ID `3000`:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::id::{GuildId, UserId};
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let guild_id = GuildId(100);
/// let user_id = UserId(3000);
/// let members = client.guild_members(guild_id).after(user_id).exec().await?;
/// # Ok(()) }
/// ```
///
/// # Errors
///
/// Returns a [`GetGuildMembersErrorType::LimitInvalid`] error type if the
/// limit is invalid.
///
/// [`GetGuildMembersErrorType::LimitInvalid`]: crate::request::guild::member::get_guild_members::GetGuildMembersErrorType::LimitInvalid
pub const fn guild_members(&self, guild_id: GuildId) -> GetGuildMembers<'_> {
GetGuildMembers::new(self, guild_id)
}
/// Search the members of a specific guild by a query.
///
/// The upper limit to this request is 1000. Discord defaults the limit to 1.
///
/// # Examples
///
/// Get the first 10 members of guild `100` matching `Wumpus`:
///
/// ```rust,no_run
/// use twilight_http::Client;
/// use twilight_model::id::GuildId;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
///
/// let guild_id = GuildId(100);
/// let members = client.search_guild_members(guild_id, "Wumpus")
/// .limit(10)?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// # Errors
///
/// Returns a [`SearchGuildMembersErrorType::LimitInvalid`] error type if
/// the limit is invalid.
///
/// [`GUILD_MEMBERS`]: twilight_model::gateway::Intents::GUILD_MEMBERS
/// [`SearchGuildMembersErrorType::LimitInvalid`]: crate::request::guild::member::search_guild_members::SearchGuildMembersErrorType::LimitInvalid
pub const fn search_guild_members<'a>(
&'a self,
guild_id: GuildId,
query: &'a str,
) -> SearchGuildMembers<'a> {
SearchGuildMembers::new(self, guild_id, query)
}
/// Get a member of a guild, by their id.
pub const fn guild_member(&self, guild_id: GuildId, user_id: UserId) -> GetMember<'_> {
GetMember::new(self, guild_id, user_id)
}
/// Add a user to a guild.
///
/// An access token for the user with `guilds.join` scope is required. All
/// other fields are optional. Refer to [the discord docs] for more
/// information.
///
/// # Errors
///
/// Returns [`AddGuildMemberErrorType::NicknameInvalid`] if the nickname is
/// too short or too long.
///
/// [`AddGuildMemberErrorType::NicknameInvalid`]: crate::request::guild::member::add_guild_member::AddGuildMemberErrorType::NicknameInvalid
///
/// [the discord docs]: https://discord.com/developers/docs/resources/guild#add-guild-member
pub const fn add_guild_member<'a>(
&'a self,
guild_id: GuildId,
user_id: UserId,
access_token: &'a str,
) -> AddGuildMember<'a> {
AddGuildMember::new(self, guild_id, user_id, access_token)
}
/// Kick a member from a guild.
pub const fn remove_guild_member(
&self,
guild_id: GuildId,
user_id: UserId,
) -> RemoveMember<'_> {
RemoveMember::new(self, guild_id, user_id)
}
/// Update a guild member.
///
/// All fields are optional. Refer to [the discord docs] for more information.
///
/// # Examples
///
/// Update a member's nickname to "pinkie pie" and server mute them:
///
/// ```rust,no_run
/// use std::env;
/// use twilight_http::Client;
/// use twilight_model::id::{GuildId, UserId};
///
/// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new(env::var("DISCORD_TOKEN")?);
/// let member = client.update_guild_member(GuildId(1), UserId(2))
/// .mute(true)
/// .nick(Some("pinkie pie"))?
/// .exec()
/// .await?
/// .model()
/// .await?;
///
/// println!("user {} now has the nickname '{:?}'", member.user.id, member.nick);
/// # Ok(()) }
/// ```
///
/// # Errors
///
/// Returns [`UpdateGuildMemberErrorType::NicknameInvalid`] if the nickname length is too short or too
/// long.
///
/// [`UpdateGuildMemberErrorType::NicknameInvalid`]: crate::request::guild::member::update_guild_member::UpdateGuildMemberErrorType::NicknameInvalid
///
/// [the discord docs]: https://discord.com/developers/docs/resources/guild#modify-guild-member
pub const fn update_guild_member(
&self,
guild_id: GuildId,
user_id: UserId,
) -> UpdateGuildMember<'_> {
UpdateGuildMember::new(self, guild_id, user_id)
}
/// Add a role to a member in a guild.
///
/// # Examples
///
/// In guild `1`, add role `2` to user `3`, for the reason `"test"`:
///
/// ```rust,no_run
/// # use twilight_http::{request::AuditLogReason, Client};
/// use twilight_model::id::{GuildId, RoleId, UserId};
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let guild_id = GuildId(1);
/// let role_id = RoleId(2);
/// let user_id = UserId(3);
///
/// client.add_guild_member_role(guild_id, user_id, role_id)
/// .reason("test")?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn add_guild_member_role(
&self,
guild_id: GuildId,
user_id: UserId,
role_id: RoleId,
) -> AddRoleToMember<'_> {
AddRoleToMember::new(self, guild_id, user_id, role_id)
}
/// Remove a role from a member in a guild, by id.
pub const fn remove_guild_member_role(
&self,
guild_id: GuildId,
user_id: UserId,
role_id: RoleId,
) -> RemoveRoleFromMember<'_> {
RemoveRoleFromMember::new(self, guild_id, user_id, role_id)
}
/// For public guilds, get the guild preview.
///
/// This works even if the user is not in the guild.
pub const fn guild_preview(&self, guild_id: GuildId) -> GetGuildPreview<'_> {
GetGuildPreview::new(self, guild_id)
}
/// Get the counts of guild members to be pruned.
pub const fn guild_prune_count(&self, guild_id: GuildId) -> GetGuildPruneCount<'_> {
GetGuildPruneCount::new(self, guild_id)
}
/// Begin a guild prune.
///
/// Refer to [the discord docs] for more information.
///
/// [the discord docs]: https://discord.com/developers/docs/resources/guild#begin-guild-prune
pub const fn create_guild_prune(&self, guild_id: GuildId) -> CreateGuildPrune<'_> {
CreateGuildPrune::new(self, guild_id)
}
/// Get a guild's vanity url, if there is one.
pub const fn guild_vanity_url(&self, guild_id: GuildId) -> GetGuildVanityUrl<'_> {
GetGuildVanityUrl::new(self, guild_id)
}
/// Get voice region data for the guild.
///
/// Can return VIP servers if the guild is VIP-enabled.
pub const fn guild_voice_regions(&self, guild_id: GuildId) -> GetGuildVoiceRegions<'_> {
GetGuildVoiceRegions::new(self, guild_id)
}
/// Get the webhooks of a guild.
pub const fn guild_webhooks(&self, guild_id: GuildId) -> GetGuildWebhooks<'_> {
GetGuildWebhooks::new(self, guild_id)
}
/// Get the guild's welcome screen.
pub const fn guild_welcome_screen(&self, guild_id: GuildId) -> GetGuildWelcomeScreen<'_> {
GetGuildWelcomeScreen::new(self, guild_id)
}
/// Update the guild's welcome screen.
///
/// Requires the [`MANAGE_GUILD`] permission.
///
/// [`MANAGE_GUILD`]: twilight_model::guild::Permissions::MANAGE_GUILD
pub const fn update_guild_welcome_screen(
&self,
guild_id: GuildId,
) -> UpdateGuildWelcomeScreen<'_> {
UpdateGuildWelcomeScreen::new(self, guild_id)
}
/// Get information about an invite by its code.
///
/// If [`with_counts`] is called, the returned invite will contain
/// approximate member counts. If [`with_expiration`] is called, it will
/// contain the expiration date.
///
/// # Examples
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let invite = client
/// .invite("code")
/// .with_counts()
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// [`with_counts`]: crate::request::channel::invite::GetInvite::with_counts
/// [`with_expiration`]: crate::request::channel::invite::GetInvite::with_expiration
pub const fn invite<'a>(&'a self, code: &'a str) -> GetInvite<'a> {
GetInvite::new(self, code)
}
/// Create an invite, with options.
///
/// Requires the [`CREATE_INVITE`] permission.
///
/// # Examples
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::ChannelId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let channel_id = ChannelId(123);
/// let invite = client
/// .create_invite(channel_id)
/// .max_uses(3)?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// [`CREATE_INVITE`]: twilight_model::guild::Permissions::CREATE_INVITE
pub const fn create_invite(&self, channel_id: ChannelId) -> CreateInvite<'_> {
CreateInvite::new(self, channel_id)
}
/// Delete an invite by its code.
///
/// Requires the [`MANAGE_CHANNELS`] permission on the channel this invite
/// belongs to, or [`MANAGE_GUILD`] to remove any invite across the guild.
///
/// [`MANAGE_CHANNELS`]: twilight_model::guild::Permissions::MANAGE_CHANNELS
/// [`MANAGE_GUILD`]: twilight_model::guild::Permissions::MANAGE_GUILD
pub const fn delete_invite<'a>(&'a self, code: &'a str) -> DeleteInvite<'a> {
DeleteInvite::new(self, code)
}
/// Get a message by [`ChannelId`] and [`MessageId`].
pub const fn message(&self, channel_id: ChannelId, message_id: MessageId) -> GetMessage<'_> {
GetMessage::new(self, channel_id, message_id)
}
/// Send a message to a channel.
///
/// # Examples
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::ChannelId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let channel_id = ChannelId(123);
/// let message = client
/// .create_message(channel_id)
/// .content("Twilight is best pony")?
/// .tts(true)
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// # Errors
///
/// The method [`content`] returns
/// [`CreateMessageErrorType::ContentInvalid`] if the content is over 2000
/// UTF-16 characters.
///
/// The method [`embeds`] returns
/// [`CreateMessageErrorType::EmbedTooLarge`] if the length of the embed
/// is over 6000 characters.
///
/// [`content`]: crate::request::channel::message::create_message::CreateMessage::content
/// [`embeds`]: crate::request::channel::message::create_message::CreateMessage::embeds
/// [`CreateMessageErrorType::ContentInvalid`]:
/// crate::request::channel::message::create_message::CreateMessageErrorType::ContentInvalid
/// [`CreateMessageErrorType::EmbedTooLarge`]:
/// crate::request::channel::message::create_message::CreateMessageErrorType::EmbedTooLarge
pub const fn create_message(&self, channel_id: ChannelId) -> CreateMessage<'_> {
CreateMessage::new(self, channel_id)
}
/// Delete a message by [`ChannelId`] and [`MessageId`].
pub const fn delete_message(
&self,
channel_id: ChannelId,
message_id: MessageId,
) -> DeleteMessage<'_> {
DeleteMessage::new(self, channel_id, message_id)
}
/// Delete messages by [`ChannelId`] and a list of [`MessageId`]s.
///
/// The number of message IDs must be between 2 and 100. If the supplied [`MessageId`]s are invalid, they
/// still count towards the lower and upper limits. This method will not delete messages older
/// than two weeks. Refer to [the discord docs] for more information.
///
/// [the discord docs]: https://discord.com/developers/docs/resources/channel#bulk-delete-messages
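///
/// # Examples
///
/// A minimal sketch deleting two messages by ID (placeholder IDs):
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::id::{ChannelId, MessageId};
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// let channel_id = ChannelId(123);
/// let message_ids = [MessageId(1), MessageId(2)];
///
/// client.delete_messages(channel_id, &message_ids).exec().await?;
/// # Ok(()) }
/// ```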
pub const fn delete_messages<'a>(
&'a self,
channel_id: ChannelId,
message_ids: &'a [MessageId],
) -> DeleteMessages<'a> {
DeleteMessages::new(self, channel_id, message_ids)
}
/// Update a message by [`ChannelId`] and [`MessageId`].
///
/// You can pass `None` to any of the methods to remove the associated field.
/// For example, if you have a message with an embed you want to remove, you can
/// pass `None` to the embed-related method on the returned builder.
///
/// # Examples
///
/// Replace the content with `"test update"`:
///
/// ```rust,no_run
/// use twilight_http::Client;
/// use twilight_model::id::{ChannelId, MessageId};
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
/// client.update_message(ChannelId(1), MessageId(2))
/// .content(Some("test update"))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// Remove the message's content:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::{ChannelId, MessageId};
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// client.update_message(ChannelId(1), MessageId(2))
/// .content(None)?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn update_message(
&self,
channel_id: ChannelId,
message_id: MessageId,
) -> UpdateMessage<'_> {
UpdateMessage::new(self, channel_id, message_id)
}
/// Crosspost a message by [`ChannelId`] and [`MessageId`].
pub const fn crosspost_message(
&self,
channel_id: ChannelId,
message_id: MessageId,
) -> CrosspostMessage<'_> {
CrosspostMessage::new(self, channel_id, message_id)
}
/// Get the pins of a channel.
pub const fn pins(&self, channel_id: ChannelId) -> GetPins<'_> {
GetPins::new(self, channel_id)
}
/// Create a new pin in a channel, by ID.
pub const fn create_pin(&self, channel_id: ChannelId, message_id: MessageId) -> CreatePin<'_> {
CreatePin::new(self, channel_id, message_id)
}
/// Delete a pin in a channel, by ID.
pub const fn delete_pin(&self, channel_id: ChannelId, message_id: MessageId) -> DeletePin<'_> {
DeletePin::new(self, channel_id, message_id)
}
/// Get a list of users that reacted to a message with an `emoji`.
///
/// This endpoint is limited to 100 users maximum, so if a message has more than 100 reactions,
/// requests must be chained until all reactions are retrieved.
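///
/// # Examples
///
/// A minimal sketch fetching the users that reacted with a unicode emoji
/// (placeholder IDs):
///
/// ```rust,no_run
/// # use twilight_http::{Client, request::channel::reaction::RequestReactionType};
/// # use twilight_model::id::{ChannelId, MessageId};
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// let emoji = RequestReactionType::Unicode { name: "🌃" };
///
/// let users = client
///     .reactions(ChannelId(123), MessageId(456), &emoji)
///     .exec()
///     .await?;
/// # Ok(()) }
/// ```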
pub const fn reactions<'a>(
&'a self,
channel_id: ChannelId,
message_id: MessageId,
emoji: &'a RequestReactionType<'a>,
) -> GetReactions<'a> {
GetReactions::new(self, channel_id, message_id, emoji)
}
/// Create a reaction in a [`ChannelId`] on a [`MessageId`].
///
/// The reaction must be a variant of [`RequestReactionType`].
///
/// # Examples
///
/// ```rust,no_run
/// # use twilight_http::{Client, request::channel::reaction::RequestReactionType};
/// # use twilight_model::{
/// # id::{ChannelId, MessageId},
/// # };
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// #
/// let channel_id = ChannelId(123);
/// let message_id = MessageId(456);
/// let emoji = RequestReactionType::Unicode { name: "🌃" };
///
/// let reaction = client
/// .create_reaction(channel_id, message_id, &emoji)
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn create_reaction<'a>(
&'a self,
channel_id: ChannelId,
message_id: MessageId,
emoji: &'a RequestReactionType<'a>,
) -> CreateReaction<'a> {
CreateReaction::new(self, channel_id, message_id, emoji)
}
/// Delete the current user's (`@me`) reaction on a message.
pub const fn delete_current_user_reaction<'a>(
&'a self,
channel_id: ChannelId,
message_id: MessageId,
emoji: &'a RequestReactionType<'a>,
) -> DeleteReaction<'a> {
DeleteReaction::new(self, channel_id, message_id, emoji, TargetUser::Current)
}
/// Delete a reaction by a user on a message.
pub const fn delete_reaction<'a>(
&'a self,
channel_id: ChannelId,
message_id: MessageId,
emoji: &'a RequestReactionType<'a>,
user_id: UserId,
) -> DeleteReaction<'a> {
DeleteReaction::new(self, channel_id, message_id, emoji, TargetUser::Id(user_id))
}
/// Remove all reactions on a message of an emoji.
pub const fn delete_all_reaction<'a>(
&'a self,
channel_id: ChannelId,
message_id: MessageId,
emoji: &'a RequestReactionType<'a>,
) -> DeleteAllReaction<'a> {
DeleteAllReaction::new(self, channel_id, message_id, emoji)
}
/// Delete all reactions by all users on a message.
pub const fn delete_all_reactions(
&self,
channel_id: ChannelId,
message_id: MessageId,
) -> DeleteAllReactions<'_> {
DeleteAllReactions::new(self, channel_id, message_id)
}
/// Fire a Typing Start event in the channel.
pub const fn create_typing_trigger(&self, channel_id: ChannelId) -> CreateTypingTrigger<'_> {
CreateTypingTrigger::new(self, channel_id)
}
/// Create a private channel (direct message) with a user, by the recipient's ID.
pub const fn create_private_channel(&self, recipient_id: UserId) -> CreatePrivateChannel<'_> {
CreatePrivateChannel::new(self, recipient_id)
}
/// Get the roles of a guild.
pub const fn roles(&self, guild_id: GuildId) -> GetGuildRoles<'_> {
GetGuildRoles::new(self, guild_id)
}
/// Create a role in a guild.
///
/// # Examples
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::id::GuildId;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// let guild_id = GuildId(234);
///
/// client.create_role(guild_id)
/// .color(0xd90083)
/// .name("Bright Pink")
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn create_role(&self, guild_id: GuildId) -> CreateRole<'_> {
CreateRole::new(self, guild_id)
}
/// Delete a role in a guild, by id.
pub const fn delete_role(&self, guild_id: GuildId, role_id: RoleId) -> DeleteRole<'_> {
DeleteRole::new(self, guild_id, role_id)
}
/// Update a role by guild id and its id.
pub const fn update_role(&self, guild_id: GuildId, role_id: RoleId) -> UpdateRole<'_> {
UpdateRole::new(self, guild_id, role_id)
}
/// Modify the position of the roles.
///
/// The minimum number of roles to modify is two, i.e. a swap between two roles.
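///
/// # Examples
///
/// A minimal sketch swapping the positions of two roles (placeholder IDs):
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::id::{GuildId, RoleId};
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// let guild_id = GuildId(1);
/// let roles: &[(RoleId, u64)] = &[(RoleId(2), 1), (RoleId(3), 2)];
///
/// client.update_role_positions(guild_id, roles).exec().await?;
/// # Ok(()) }
/// ```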
pub const fn update_role_positions<'a>(
&'a self,
guild_id: GuildId,
roles: &'a [(RoleId, u64)],
) -> UpdateRolePositions<'a> {
UpdateRolePositions::new(self, guild_id, roles)
}
/// Create a new stage instance associated with a stage channel.
///
/// Requires the user to be a moderator of the stage channel.
///
/// # Errors
///
/// Returns a [`CreateStageInstanceError`] of type [`InvalidTopic`] when the
/// topic is not between 1 and 120 characters in length.
///
/// [`InvalidTopic`]: crate::request::channel::stage::create_stage_instance::CreateStageInstanceErrorType::InvalidTopic
pub fn create_stage_instance<'a>(
&'a self,
channel_id: ChannelId,
topic: &'a str,
) -> Result<CreateStageInstance<'a>, CreateStageInstanceError> {
CreateStageInstance::new(self, channel_id, topic)
}
/// Gets the stage instance associated with a stage channel, if it exists.
pub const fn stage_instance(&self, channel_id: ChannelId) -> GetStageInstance<'_> {
GetStageInstance::new(self, channel_id)
}
/// Update fields of an existing stage instance.
///
/// Requires the user to be a moderator of the stage channel.
pub const fn update_stage_instance(&self, channel_id: ChannelId) -> UpdateStageInstance<'_> {
UpdateStageInstance::new(self, channel_id)
}
/// Delete the stage instance of a stage channel.
///
/// Requires the user to be a moderator of the stage channel.
pub const fn delete_stage_instance(&self, channel_id: ChannelId) -> DeleteStageInstance<'_> {
DeleteStageInstance::new(self, channel_id)
}
/// Create a new guild based on a template.
///
/// This endpoint can only be used by bots in fewer than 10 guilds.
///
/// # Errors
///
/// Returns a [`CreateGuildFromTemplateErrorType::NameInvalid`] error type
/// if the name is invalid.
///
/// [`CreateGuildFromTemplateErrorType::NameInvalid`]: crate::request::template::create_guild_from_template::CreateGuildFromTemplateErrorType::NameInvalid
pub fn create_guild_from_template<'a>(
&'a self,
template_code: &'a str,
name: &'a str,
) -> Result<CreateGuildFromTemplate<'a>, CreateGuildFromTemplateError> {
CreateGuildFromTemplate::new(self, template_code, name)
}
/// Create a template from the current state of the guild.
///
/// Requires the `MANAGE_GUILD` permission. The name must be at least 1 and
/// at most 100 characters in length.
///
/// # Errors
///
/// Returns a [`CreateTemplateErrorType::NameInvalid`] error type if the
/// name is invalid.
///
/// [`CreateTemplateErrorType::NameInvalid`]: crate::request::template::create_template::CreateTemplateErrorType::NameInvalid
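///
/// # Examples
///
/// A minimal sketch with a placeholder guild ID and template name:
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::GuildId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// client
///     .create_template(GuildId(1), "server setup")?
///     .exec()
///     .await?;
/// # Ok(()) }
/// ```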
pub fn create_template<'a>(
&'a self,
guild_id: GuildId,
name: &'a str,
) -> Result<CreateTemplate<'a>, CreateTemplateError> {
CreateTemplate::new(self, guild_id, name)
}
/// Delete a template by ID and code.
pub const fn delete_template<'a>(
&'a self,
guild_id: GuildId,
template_code: &'a str,
) -> DeleteTemplate<'a> {
DeleteTemplate::new(self, guild_id, template_code)
}
/// Get a template by its code.
pub const fn get_template<'a>(&'a self, template_code: &'a str) -> GetTemplate<'a> {
GetTemplate::new(self, template_code)
}
/// Get a list of templates in a guild, by ID.
pub const fn get_templates(&self, guild_id: GuildId) -> GetTemplates<'_> {
GetTemplates::new(self, guild_id)
}
/// Sync a template to the current state of the guild, by ID and code.
pub const fn sync_template<'a>(
&'a self,
guild_id: GuildId,
template_code: &'a str,
) -> SyncTemplate<'a> {
SyncTemplate::new(self, guild_id, template_code)
}
/// Update the template's metadata, by ID and code.
pub const fn update_template<'a>(
&'a self,
guild_id: GuildId,
template_code: &'a str,
) -> UpdateTemplate<'a> {
UpdateTemplate::new(self, guild_id, template_code)
}
/// Returns all active threads in the channel.
///
/// Includes public and private threads. Threads are ordered by their ID in
/// descending order.
pub const fn active_threads(&self, guild_id: GuildId) -> GetActiveThreads<'_> {
GetActiveThreads::new(self, guild_id)
}
/// Add another member to a thread.
///
/// Requires the ability to send messages in the thread, and that the thread
/// is not archived.
pub const fn add_thread_member(
&self,
channel_id: ChannelId,
user_id: UserId,
) -> AddThreadMember<'_> {
AddThreadMember::new(self, channel_id, user_id)
}
/// Start a thread that is not connected to a message.
///
/// Values of [`ThreeDays`] and [`Week`] require the guild to be boosted.
/// The guild's features will indicate if a guild is able to use these
/// settings.
///
/// To make a [`GuildPrivateThread`], the guild must also have the
/// `PRIVATE_THREADS` feature.
///
/// [`GuildPrivateThread`]: twilight_model::channel::ChannelType::GuildPrivateThread
/// [`ThreeDays`]: twilight_model::channel::thread::AutoArchiveDuration::ThreeDays
/// [`Week`]: twilight_model::channel::thread::AutoArchiveDuration::Week
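///
/// # Examples
///
/// A minimal sketch creating a public thread with a week-long auto-archive
/// duration (placeholder channel ID; the guild must support this duration):
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// use twilight_model::channel::{thread::AutoArchiveDuration, ChannelType};
/// use twilight_model::id::ChannelId;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// client
///     .create_thread(
///         ChannelId(123),
///         "discussion",
///         AutoArchiveDuration::Week,
///         ChannelType::GuildPublicThread,
///     )?
///     .exec()
///     .await?;
/// # Ok(()) }
/// ```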
pub fn create_thread<'a>(
&'a self,
channel_id: ChannelId,
name: &'a str,
auto_archive_duration: AutoArchiveDuration,
kind: ChannelType,
) -> Result<CreateThread<'_>, ThreadValidationError> {
CreateThread::new(self, channel_id, name, auto_archive_duration, kind)
}
/// Create a new thread from an existing message.
///
/// When called on a [`GuildText`] channel, this creates a
/// [`GuildPublicThread`].
///
/// When called on a [`GuildNews`] channel, this creates a
/// [`GuildNewsThread`].
///
/// Values of [`ThreeDays`] and [`Week`] require the guild to be boosted.
/// The guild's features will indicate if a guild is able to use these
/// settings.
///
/// The thread's ID will be the same as its parent message's ID. This ensures
/// only one thread can be created per message.
///
/// [`GuildNewsThread`]: twilight_model::channel::ChannelType::GuildNewsThread
/// [`GuildNews`]: twilight_model::channel::ChannelType::GuildNews
/// [`GuildPublicThread`]: twilight_model::channel::ChannelType::GuildPublicThread
/// [`GuildText`]: twilight_model::channel::ChannelType::GuildText
/// [`ThreeDays`]: twilight_model::channel::thread::AutoArchiveDuration::ThreeDays
/// [`Week`]: twilight_model::channel::thread::AutoArchiveDuration::Week
pub fn create_thread_from_message<'a>(
&'a self,
channel_id: ChannelId,
message_id: MessageId,
name: &'a str,
auto_archive_duration: AutoArchiveDuration,
) -> Result<CreateThreadFromMessage<'_>, ThreadValidationError> {
CreateThreadFromMessage::new(self, channel_id, message_id, name, auto_archive_duration)
}
/// Add the current user to a thread.
pub const fn join_thread(&self, channel_id: ChannelId) -> JoinThread<'_> {
JoinThread::new(self, channel_id)
}
/// Returns archived private threads in the channel that the current user
/// has joined.
///
/// Threads are ordered by their ID in descending order.
pub const fn joined_private_archived_threads(
&self,
channel_id: ChannelId,
) -> GetJoinedPrivateArchivedThreads<'_> {
GetJoinedPrivateArchivedThreads::new(self, channel_id)
}
/// Remove the current user from a thread.
///
/// Requires that the thread is not archived.
pub const fn leave_thread(&self, channel_id: ChannelId) -> LeaveThread<'_> {
LeaveThread::new(self, channel_id)
}
/// Returns archived private threads in the channel.
///
/// Requires both [`READ_MESSAGE_HISTORY`] and [`MANAGE_THREADS`].
///
/// [`MANAGE_THREADS`]: twilight_model::guild::Permissions::MANAGE_THREADS
/// [`READ_MESSAGE_HISTORY`]: twilight_model::guild::Permissions::READ_MESSAGE_HISTORY
pub const fn private_archived_threads(
&self,
channel_id: ChannelId,
) -> GetPrivateArchivedThreads<'_> {
GetPrivateArchivedThreads::new(self, channel_id)
}
/// Returns archived public threads in the channel.
///
/// Requires the [`READ_MESSAGE_HISTORY`] permission.
///
/// Threads are ordered by [`archive_timestamp`] in descending order.
///
/// When called in a [`GuildText`] channel, returns [`GuildPublicThread`]s.
///
/// When called in a [`GuildNews`] channel, returns [`GuildNewsThread`]s.
///
/// [`archive_timestamp`]: twilight_model::channel::thread::ThreadMetadata::archive_timestamp
/// [`GuildNews`]: twilight_model::channel::ChannelType::GuildNews
/// [`GuildNewsThread`]: twilight_model::channel::ChannelType::GuildNewsThread
/// [`GuildPublicThread`]: twilight_model::channel::ChannelType::GuildPublicThread
/// [`GuildText`]: twilight_model::channel::ChannelType::GuildText
/// [`READ_MESSAGE_HISTORY`]: twilight_model::guild::Permissions::READ_MESSAGE_HISTORY
pub const fn public_archived_threads(
&self,
channel_id: ChannelId,
) -> GetPublicArchivedThreads<'_> {
GetPublicArchivedThreads::new(self, channel_id)
}
/// Remove another member from a thread.
///
/// Requires that the thread is not archived.
///
/// Requires the [`MANAGE_THREADS`] permission, unless both the thread is a
/// [`GuildPrivateThread`], and the current user is the creator of the
/// thread.
///
/// [`GuildPrivateThread`]: twilight_model::channel::ChannelType::GuildPrivateThread
/// [`MANAGE_THREADS`]: twilight_model::guild::Permissions::MANAGE_THREADS
pub const fn remove_thread_member(
&self,
channel_id: ChannelId,
user_id: UserId,
) -> RemoveThreadMember<'_> {
RemoveThreadMember::new(self, channel_id, user_id)
}
/// Returns the [`ThreadMember`]s of the thread.
///
/// [`ThreadMember`]: twilight_model::channel::thread::ThreadMember
pub const fn thread_members(&self, channel_id: ChannelId) -> GetThreadMembers<'_> {
GetThreadMembers::new(self, channel_id)
}
/// Update a thread.
///
/// All fields are optional. The minimum length of the name is 1 UTF-16
/// character and the maximum is 100 UTF-16 characters.
pub const fn update_thread(&self, channel_id: ChannelId) -> UpdateThread<'_> {
UpdateThread::new(self, channel_id)
}
/// Get a user's information by id.
pub const fn user(&self, user_id: UserId) -> GetUser<'_> {
GetUser::new(self, user_id)
}
/// Update another user's voice state.
///
/// # Caveats
///
/// - `channel_id` must currently point to a stage channel.
/// - User must already have joined `channel_id`.
pub const fn update_user_voice_state(
&self,
guild_id: GuildId,
user_id: UserId,
channel_id: ChannelId,
) -> UpdateUserVoiceState<'_> {
UpdateUserVoiceState::new(self, guild_id, user_id, channel_id)
}
/// Get a list of voice regions that can be used when creating a guild.
pub const fn voice_regions(&self) -> GetVoiceRegions<'_> {
GetVoiceRegions::new(self)
}
/// Get a webhook by ID.
pub const fn webhook(&self, id: WebhookId) -> GetWebhook<'_> {
GetWebhook::new(self, id)
}
/// Create a webhook in a channel.
///
/// # Examples
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::ChannelId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// let channel_id = ChannelId(123);
///
/// let webhook = client
/// .create_webhook(channel_id, "Twily Bot")
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn create_webhook<'a>(
&'a self,
channel_id: ChannelId,
name: &'a str,
) -> CreateWebhook<'a> {
CreateWebhook::new(self, channel_id, name)
}
/// Delete a webhook by its ID.
pub const fn delete_webhook(&self, id: WebhookId) -> DeleteWebhook<'_> {
DeleteWebhook::new(self, id)
}
/// Update a webhook by ID.
pub const fn update_webhook(&self, webhook_id: WebhookId) -> UpdateWebhook<'_> {
UpdateWebhook::new(self, webhook_id)
}
/// Update a webhook, with a token, by ID.
pub const fn update_webhook_with_token<'a>(
&'a self,
webhook_id: WebhookId,
token: &'a str,
) -> UpdateWebhookWithToken<'a> {
UpdateWebhookWithToken::new(self, webhook_id, token)
}
/// Executes a webhook, sending a message to its channel.
///
/// You can only specify one of [`content`], [`embeds`], or [`files`].
///
/// # Examples
///
/// ```rust,no_run
/// # use twilight_http::Client;
/// # use twilight_model::id::WebhookId;
/// #
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("my token".to_owned());
/// let id = WebhookId(432);
/// #
/// let webhook = client
/// .execute_webhook(id, "webhook token")
/// .content("Pinkie...")
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
///
/// [`content`]: crate::request::channel::webhook::ExecuteWebhook::content
/// [`embeds`]: crate::request::channel::webhook::ExecuteWebhook::embeds
/// [`files`]: crate::request::channel::webhook::ExecuteWebhook::files
pub const fn execute_webhook<'a>(
&'a self,
webhook_id: WebhookId,
token: &'a str,
) -> ExecuteWebhook<'a> {
ExecuteWebhook::new(self, webhook_id, token)
}
/// Get a webhook message by [`WebhookId`], token, and [`MessageId`].
///
/// [`WebhookId`]: twilight_model::id::WebhookId
/// [`MessageId`]: twilight_model::id::MessageId
pub const fn webhook_message<'a>(
&'a self,
webhook_id: WebhookId,
token: &'a str,
message_id: MessageId,
) -> GetWebhookMessage<'a> {
GetWebhookMessage::new(self, webhook_id, token, message_id)
}
/// Update a message executed by a webhook.
///
/// # Examples
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_model::id::{MessageId, WebhookId};
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// client.update_webhook_message(WebhookId(1), "token here", MessageId(2))
/// .content(Some("new message content"))?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn update_webhook_message<'a>(
&'a self,
webhook_id: WebhookId,
token: &'a str,
message_id: MessageId,
) -> UpdateWebhookMessage<'a> {
UpdateWebhookMessage::new(self, webhook_id, token, message_id)
}
/// Delete a message executed by a webhook.
///
/// # Examples
///
/// ```no_run
/// # use twilight_http::Client;
/// use twilight_model::id::{MessageId, WebhookId};
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Client::new("token".to_owned());
/// client
/// .delete_webhook_message(WebhookId(1), "token here", MessageId(2))
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn delete_webhook_message<'a>(
&'a self,
webhook_id: WebhookId,
token: &'a str,
message_id: MessageId,
) -> DeleteWebhookMessage<'a> {
DeleteWebhookMessage::new(self, webhook_id, token, message_id)
}
/// Respond to an interaction, by ID and token.
///
/// For variants of [`InteractionResponse`] that contain a [`CallbackData`],
/// there is an [associated builder] in the [`twilight-util`] crate.
///
/// [`CallbackData`]: twilight_model::application::callback::CallbackData
/// [`twilight-util`]: https://docs.rs/twilight-util/latest/index.html
/// [associated builder]: https://docs.rs/twilight-util/latest/builder/struct.CallbackDataBuilder.html
pub const fn interaction_callback<'a>(
&'a self,
interaction_id: InteractionId,
interaction_token: &'a str,
response: &'a InteractionResponse,
) -> InteractionCallback<'a> {
InteractionCallback::new(self, interaction_id, interaction_token, response)
}
/// Get the original message, by its token.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn get_interaction_original<'a>(
&'a self,
interaction_token: &'a str,
) -> Result<GetOriginalResponse<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(GetOriginalResponse::new(
self,
application_id,
interaction_token,
))
}
/// Edit the original message, by its token.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn update_interaction_original<'a>(
&'a self,
interaction_token: &'a str,
) -> Result<UpdateOriginalResponse<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(UpdateOriginalResponse::new(
self,
application_id,
interaction_token,
))
}
/// Get a followup message of an interaction.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn followup_message<'a>(
&'a self,
interaction_token: &'a str,
message_id: MessageId,
) -> Result<GetFollowupMessage<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(GetFollowupMessage::new(
self,
application_id,
interaction_token,
message_id,
))
}
/// Delete the original message, by its token.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn delete_interaction_original<'a>(
&'a self,
interaction_token: &'a str,
) -> Result<DeleteOriginalResponse<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(DeleteOriginalResponse::new(
self,
application_id,
interaction_token,
))
}
/// Create a followup message, by an interaction token.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn create_followup_message<'a>(
&'a self,
interaction_token: &'a str,
) -> Result<CreateFollowupMessage<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(CreateFollowupMessage::new(
self,
application_id,
interaction_token,
))
}
/// Edit a followup message, by an interaction token.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn update_followup_message<'a>(
&'a self,
interaction_token: &'a str,
message_id: MessageId,
) -> Result<UpdateFollowupMessage<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(UpdateFollowupMessage::new(
self,
application_id,
interaction_token,
message_id,
))
}
/// Delete a followup message by interaction token and the message's ID.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn delete_followup_message<'a>(
&'a self,
interaction_token: &'a str,
message_id: MessageId,
) -> Result<DeleteFollowupMessage<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(DeleteFollowupMessage::new(
self,
application_id,
interaction_token,
message_id,
))
}
/// Create a new chat input command in a guild.
///
/// The name must be between 1 and 32 characters in length. Creating a
/// guild command with the same name as an already-existing guild command in
/// the same guild will overwrite the old command. See [the discord docs]
/// for more information.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
///
/// Returns an [`InteractionErrorType::CommandNameValidationFailed`]
/// error type if the command name is not between 1 and 32 characters.
///
/// [the discord docs]: https://discord.com/developers/docs/interactions/application-commands#create-guild-application-command
#[deprecated(
note = "use `new_create_guild_command`, which does not require a description",
since = "0.6.4"
)]
pub fn create_guild_command<'a>(
&'a self,
guild_id: GuildId,
name: &'a str,
description: &'a str,
) -> Result<CreateGuildChatInputCommand<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
CreateGuildCommand::new(self, application_id, guild_id, name)?.chat_input(description)
}
/// Create a new command in a guild.
///
/// The name must be between 1 and 32 characters in length. Creating a
/// guild command with the same name as an already-existing guild command in
/// the same guild will overwrite the old command. See [the discord docs]
/// for more information.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
///
/// Returns an [`InteractionErrorType::CommandNameValidationFailed`]
/// error type if the command name is not between 1 and 32 characters.
///
/// [the discord docs]: https://discord.com/developers/docs/interactions/application-commands#create-guild-application-command
pub fn new_create_guild_command<'a>(
&'a self,
guild_id: GuildId,
name: &'a str,
) -> Result<CreateGuildCommand<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
CreateGuildCommand::new(self, application_id, guild_id, name)
}
/// Fetch a guild command for your application.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn get_guild_command(
&self,
guild_id: GuildId,
command_id: CommandId,
) -> Result<GetGuildCommand<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(GetGuildCommand::new(
self,
application_id,
guild_id,
command_id,
))
}
/// Fetch all commands for a guild, by ID.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn get_guild_commands(
&self,
guild_id: GuildId,
) -> Result<GetGuildCommands<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(GetGuildCommands::new(self, application_id, guild_id))
}
/// Edit a command in a guild, by ID.
///
/// You must specify a name and description. See [the discord docs] for more
/// information.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
///
/// [the discord docs]: https://discord.com/developers/docs/interactions/application-commands#edit-guild-application-command
pub fn update_guild_command(
&self,
guild_id: GuildId,
command_id: CommandId,
) -> Result<UpdateGuildCommand<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(UpdateGuildCommand::new(
self,
application_id,
guild_id,
command_id,
))
}
/// Delete a command in a guild, by ID.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn delete_guild_command(
&self,
guild_id: GuildId,
command_id: CommandId,
) -> Result<DeleteGuildCommand<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(DeleteGuildCommand::new(
self,
application_id,
guild_id,
command_id,
))
}
/// Set a guild's commands.
///
/// This method is idempotent: it can be used on every start, without being
/// ratelimited if there aren't changes to the commands.
///
/// The [`Command`] struct has an [associated builder] in the
/// [`twilight-util`] crate.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
///
/// [`twilight-util`]: https://docs.rs/twilight-util/latest/index.html
/// [associated builder]: https://docs.rs/twilight-util/latest/builder/command/struct.CommandBuilder.html
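/// # Examples
///
/// A minimal, non-authoritative sketch that clears a guild's commands by
/// sending an empty slice; the token, application ID, and guild ID below
/// are placeholders:
///
/// ```no_run
/// use twilight_http::Client;
/// use twilight_model::id::{ApplicationId, GuildId};
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
/// client.set_application_id(ApplicationId(1));
///
/// client.set_guild_commands(GuildId(1), &[])?.exec().await?;
/// # Ok(()) }
/// ```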
pub fn set_guild_commands<'a>(
&'a self,
guild_id: GuildId,
commands: &'a [Command],
) -> Result<SetGuildCommands<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(SetGuildCommands::new(
self,
application_id,
guild_id,
commands,
))
}
/// Create a new chat input global command.
///
/// The name must be between 1 and 32 characters in length. The description
/// must be between 1 and 100 characters in length. Creating a command with
/// the same name as an already-existing global command will overwrite the
/// old command. See [the discord docs] for more information.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
///
/// Returns an [`InteractionErrorType::CommandNameValidationFailed`]
/// error type if the command name is not between 1 and 32 characters.
///
/// Returns an [`InteractionErrorType::CommandDescriptionValidationFailed`]
/// error type if the command description is not between 1 and 100
/// characters.
///
/// [the discord docs]: https://discord.com/developers/docs/interactions/application-commands#create-global-application-command
#[deprecated(
note = "use `new_create_global_command`, which does not require a description",
since = "0.6.4"
)]
pub fn create_global_command<'a>(
&'a self,
name: &'a str,
description: &'a str,
) -> Result<CreateGlobalChatInputCommand<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
CreateGlobalCommand::new(self, application_id, name)?.chat_input(description)
}
/// Create a new global command.
///
/// The name must be between 1 and 32 characters in length. Creating a
/// command with the same name as an already-existing global command will
/// overwrite the old command. See [the discord docs] for more information.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
///
/// Returns an [`InteractionErrorType::CommandNameValidationFailed`]
/// error type if the command name is not between 1 and 32 characters.
///
/// [the discord docs]: https://discord.com/developers/docs/interactions/application-commands#create-global-application-command
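/// # Examples
///
/// A minimal sketch that builds a chat input command named "ping"; the
/// token, application ID, and description are placeholders:
///
/// ```no_run
/// use twilight_http::Client;
/// use twilight_model::id::ApplicationId;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
/// client.set_application_id(ApplicationId(1));
///
/// client
/// .new_create_global_command("ping")?
/// .chat_input("Replies with pong")?
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```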
pub fn new_create_global_command<'a>(
&'a self,
name: &'a str,
) -> Result<CreateGlobalCommand<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
CreateGlobalCommand::new(self, application_id, name)
}
/// Fetch a global command for your application.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn get_global_command(
&self,
command_id: CommandId,
) -> Result<GetGlobalCommand<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(GetGlobalCommand::new(self, application_id, command_id))
}
/// Fetch all global commands for your application.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
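/// # Examples
///
/// A minimal sketch that lists every global command; the token and
/// application ID are placeholders:
///
/// ```no_run
/// use twilight_http::Client;
/// use twilight_model::id::ApplicationId;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
/// client.set_application_id(ApplicationId(1));
///
/// let commands = client.get_global_commands()?.exec().await?.models().await?;
///
/// println!("{} global commands", commands.len());
/// # Ok(()) }
/// ```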
pub fn get_global_commands(&self) -> Result<GetGlobalCommands<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(GetGlobalCommands::new(self, application_id))
}
/// Edit a global command, by ID.
///
/// You must specify a name and description. See [the discord docs] for more
/// information.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
///
/// [the discord docs]: https://discord.com/developers/docs/interactions/application-commands#edit-global-application-command
pub fn update_global_command(
&self,
command_id: CommandId,
) -> Result<UpdateGlobalCommand<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(UpdateGlobalCommand::new(self, application_id, command_id))
}
/// Delete a global command, by ID.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn delete_global_command(
&self,
command_id: CommandId,
) -> Result<DeleteGlobalCommand<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(DeleteGlobalCommand::new(self, application_id, command_id))
}
/// Set global commands.
///
/// This method is idempotent: it can be used on every start, without being
/// ratelimited if there aren't changes to the commands.
///
/// The [`Command`] struct has an [associated builder] in the
/// [`twilight-util`] crate.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
///
/// [`twilight-util`]: https://docs.rs/twilight-util/latest/index.html
/// [associated builder]: https://docs.rs/twilight-util/latest/builder/command/struct.CommandBuilder.html
pub fn set_global_commands<'a>(
&'a self,
commands: &'a [Command],
) -> Result<SetGlobalCommands<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(SetGlobalCommands::new(self, application_id, commands))
}
/// Fetch command permissions for a command from the current application
/// in a guild.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn get_command_permissions(
&self,
guild_id: GuildId,
command_id: CommandId,
) -> Result<GetCommandPermissions<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(GetCommandPermissions::new(
self,
application_id,
guild_id,
command_id,
))
}
/// Fetch command permissions for all commands from the current
/// application in a guild.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn get_guild_command_permissions(
&self,
guild_id: GuildId,
) -> Result<GetGuildCommandPermissions<'_>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
Ok(GetGuildCommandPermissions::new(
self,
application_id,
guild_id,
))
}
/// Update command permissions for a single command in a guild.
///
/// This overwrites the command permissions so the full set of permissions
/// have to be sent every time.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
pub fn update_command_permissions<'a>(
&'a self,
guild_id: GuildId,
command_id: CommandId,
permissions: &'a [CommandPermissions],
) -> Result<UpdateCommandPermissions<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
UpdateCommandPermissions::new(self, application_id, guild_id, command_id, permissions)
}
/// Update command permissions for all commands in a guild.
///
/// This overwrites the command permissions so the full set of permissions
/// have to be sent every time.
///
/// # Errors
///
/// Returns an [`InteractionErrorType::ApplicationIdNotPresent`]
/// error type if an application ID has not been configured via
/// [`Client::set_application_id`].
///
/// Returns an [`InteractionErrorType::TooManyCommands`] error type if too
/// many commands have been provided. The maximum amount is defined by
/// [`InteractionError::GUILD_COMMAND_LIMIT`].
pub fn set_command_permissions<'a>(
&'a self,
guild_id: GuildId,
permissions: &'a [(CommandId, CommandPermissions)],
) -> Result<SetCommandPermissions<'a>, InteractionError> {
let application_id = self.application_id().ok_or(InteractionError {
kind: InteractionErrorType::ApplicationIdNotPresent,
})?;
SetCommandPermissions::new(self, application_id, guild_id, permissions)
}
/// Returns a single sticker by its ID.
///
/// # Examples
///
/// ```no_run
/// use twilight_http::Client;
/// use twilight_model::channel::message::sticker::StickerId;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
///
/// let id = StickerId(123);
/// let sticker = client.sticker(id).exec().await?.model().await?;
///
/// println!("{:#?}", sticker);
/// # Ok(()) }
/// ```
pub const fn sticker(&self, sticker_id: StickerId) -> GetSticker<'_> {
GetSticker::new(self, sticker_id)
}
/// Returns a list of sticker packs available to Nitro subscribers.
///
/// # Examples
///
/// ```no_run
/// use twilight_http::Client;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
///
/// let packs = client.nitro_sticker_packs().exec().await?.model().await?;
///
/// println!("{}", packs.sticker_packs.len());
/// # Ok(()) }
/// ```
pub const fn nitro_sticker_packs(&self) -> GetNitroStickerPacks<'_> {
GetNitroStickerPacks::new(self)
}
/// Returns a list of stickers in a guild.
///
/// # Examples
///
/// ```no_run
/// use twilight_http::Client;
/// use twilight_model::{
/// channel::message::sticker::StickerId,
/// id::GuildId,
/// };
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
///
/// let guild_id = GuildId(1);
/// let stickers = client
/// .guild_stickers(guild_id)
/// .exec()
/// .await?
/// .models()
/// .await?;
///
/// println!("{}", stickers.len());
/// # Ok(()) }
/// ```
pub const fn guild_stickers(&self, guild_id: GuildId) -> GetGuildStickers<'_> {
GetGuildStickers::new(self, guild_id)
}
/// Returns a guild sticker by the guild's ID and the sticker's ID.
///
/// # Examples
///
/// ```no_run
/// use twilight_http::Client;
/// use twilight_model::{
/// channel::message::sticker::StickerId,
/// id::GuildId,
/// };
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
///
/// let guild_id = GuildId(1);
/// let sticker_id = StickerId(2);
/// let sticker = client
/// .guild_sticker(guild_id, sticker_id)
/// .exec()
/// .await?
/// .model()
/// .await?;
///
/// println!("{:#?}", sticker);
/// # Ok(()) }
/// ```
pub const fn guild_sticker(
&self,
guild_id: GuildId,
sticker_id: StickerId,
) -> GetGuildSticker<'_> {
GetGuildSticker::new(self, guild_id, sticker_id)
}
/// Creates a sticker in a guild, and returns the created sticker.
///
/// # Examples
///
/// ```no_run
/// use twilight_http::Client;
/// use twilight_model::{
/// channel::message::sticker::StickerId,
/// id::GuildId,
/// };
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
///
/// let guild_id = GuildId(1);
/// let sticker = client
/// .create_guild_sticker(
/// guild_id,
/// &"sticker name",
/// &"sticker description",
/// &"sticker,tags",
/// &[23,23,23,23]
/// )?
/// .exec()
/// .await?
/// .model()
/// .await?;
///
/// println!("{:#?}", sticker);
/// # Ok(()) }
/// ```
pub fn create_guild_sticker<'a>(
&'a self,
guild_id: GuildId,
name: &'a str,
description: &'a str,
tags: &'a str,
file: &'a [u8],
) -> Result<CreateGuildSticker<'_>, StickerValidationError> {
CreateGuildSticker::new(self, guild_id, name, description, tags, file)
}
/// Updates a sticker in a guild, and returns the updated sticker.
///
/// # Examples
///
/// ```no_run
/// use twilight_http::Client;
/// use twilight_model::{
/// channel::message::sticker::StickerId,
/// id::GuildId,
/// };
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
///
/// let guild_id = GuildId(1);
/// let sticker_id = StickerId(2);
/// let sticker = client
/// .update_guild_sticker(guild_id, sticker_id)
/// .description("new description")?
/// .exec()
/// .await?
/// .model()
/// .await?;
///
/// println!("{:#?}", sticker);
/// # Ok(()) }
/// ```
pub const fn update_guild_sticker(
&self,
guild_id: GuildId,
sticker_id: StickerId,
) -> UpdateGuildSticker<'_> {
UpdateGuildSticker::new(self, guild_id, sticker_id)
}
/// Deletes a guild sticker by the guild's ID and the sticker's ID.
///
/// # Examples
///
/// ```no_run
/// use twilight_http::Client;
/// use twilight_model::{
/// channel::message::sticker::StickerId,
/// id::GuildId,
/// };
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
///
/// let guild_id = GuildId(1);
/// let sticker_id = StickerId(2);
///
/// client
/// .delete_guild_sticker(guild_id, sticker_id)
/// .exec()
/// .await?;
/// # Ok(()) }
/// ```
pub const fn delete_guild_sticker(
&self,
guild_id: GuildId,
sticker_id: StickerId,
) -> DeleteGuildSticker<'_> {
DeleteGuildSticker::new(self, guild_id, sticker_id)
}
/// Execute a request, returning a future resolving to a [`Response`].
///
/// # Errors
///
/// Returns an [`ErrorType::Unauthorized`] error type if the configured
/// token has become invalid due to expiration, revocation, etc.
///
/// [`Response`]: super::response::Response
pub fn request<T>(&self, request: Request) -> ResponseFuture<T> {
match self.try_request::<T>(request) {
Ok(future) => future,
Err(source) => ResponseFuture::error(source),
}
}
#[allow(clippy::too_many_lines)]
fn try_request<T>(&self, request: Request) -> Result<ResponseFuture<T>, Error> {
if self.state.remember_invalid_token && self.state.token_invalid.load(Ordering::Relaxed) {
return Err(Error {
kind: ErrorType::Unauthorized,
source: None,
});
}
let Request {
body,
form,
headers: req_headers,
method,
path,
ratelimit_path,
use_authorization_token,
} = request;
let protocol = if self.state.use_http { "http" } else { "https" };
let host = self.state.proxy.as_deref().unwrap_or("discord.com");
let url = format!("{}://{}/api/v{}/{}", protocol, host, API_VERSION, path);
#[cfg(feature = "tracing")]
tracing::debug!("URL: {:?}", url);
let mut builder = hyper::Request::builder()
.method(method.into_hyper())
.uri(&url);
if use_authorization_token {
if let Some(ref token) = self.state.token {
let value = HeaderValue::from_str(token).map_err(|source| {
#[allow(clippy::borrow_interior_mutable_const)]
let name = AUTHORIZATION.to_string();
Error {
kind: ErrorType::CreatingHeader { name },
source: Some(Box::new(source)),
}
})?;
if let Some(headers) = builder.headers_mut() {
headers.insert(AUTHORIZATION, value);
}
}
}
let user_agent = HeaderValue::from_static(concat!(
"DiscordBot (",
env!("CARGO_PKG_HOMEPAGE"),
", ",
env!("CARGO_PKG_VERSION"),
") Twilight-rs",
));
if let Some(headers) = builder.headers_mut() {
if let Some(form) = &form {
if let Ok(content_type) = HeaderValue::try_from(form.content_type()) {
headers.insert(CONTENT_TYPE, content_type);
}
} else if let Some(bytes) = &body {
let len = bytes.len();
headers.insert(CONTENT_LENGTH, HeaderValue::from(len));
let content_type = HeaderValue::from_static("application/json");
headers.insert(CONTENT_TYPE, content_type);
}
#[cfg(feature = "decompression")]
headers.insert(
hyper::header::ACCEPT_ENCODING,
HeaderValue::from_static("br"),
);
headers.insert(USER_AGENT, user_agent);
if let Some(req_headers) = req_headers {
for (maybe_name, value) in req_headers {
if let Some(name) = maybe_name {
headers.insert(name, value);
}
}
}
if let Some(default_headers) = &self.state.default_headers {
for (name, value) in default_headers {
headers.insert(name, HeaderValue::from(value));
}
}
}
let req = if let Some(form) = form {
let form_bytes = form.build();
if let Some(headers) = builder.headers_mut() {
headers.insert(CONTENT_LENGTH, HeaderValue::from(form_bytes.len()));
};
builder
.body(Body::from(form_bytes))
.map_err(|source| Error {
kind: ErrorType::BuildingRequest,
source: Some(Box::new(source)),
})?
} else if let Some(bytes) = body {
builder.body(Body::from(bytes)).map_err(|source| Error {
kind: ErrorType::BuildingRequest,
source: Some(Box::new(source)),
})?
} else if method == Method::Put || method == Method::Post || method == Method::Patch {
if let Some(headers) = builder.headers_mut() {
headers.insert(CONTENT_LENGTH, HeaderValue::from(0));
}
builder.body(Body::empty()).map_err(|source| Error {
kind: ErrorType::BuildingRequest,
source: Some(Box::new(source)),
})?
} else {
builder.body(Body::empty()).map_err(|source| Error {
kind: ErrorType::BuildingRequest,
source: Some(Box::new(source)),
})?
};
let inner = self.state.http.request(req);
let invalid_token = if self.state.remember_invalid_token {
InvalidToken::Remember(Arc::clone(&self.state.token_invalid))
} else {
InvalidToken::Forget
};
// Clippy suggests bad code; an `Option::map_or_else` won't work here
// due to move semantics in both cases.
#[allow(clippy::option_if_let_else)]
if let Some(ratelimiter) = self.state.ratelimiter.as_ref() {
let rx = ratelimiter.ticket(ratelimit_path);
Ok(ResponseFuture::ratelimit(
None,
invalid_token,
rx,
self.state.timeout,
inner,
))
} else {
Ok(ResponseFuture::new(
invalid_token,
time::timeout(self.state.timeout, inner),
None,
))
}
}
}
| 34.40262 | 163 | 0.594357 |
4b77ed4011ee26ee49cba5c3b69f66cd5497ce1d
| 672 |
use std::io;
fn main() {
let (a, b, c) = {
let i = read::<usize>();
(i[0], i[1], i[2])
};
if a < b && b < c { println!("Yes"); }
else { println!("No"); }
}
#[allow(dead_code)]
fn read<T>() -> Vec<T>
where T:
std::str::FromStr,
T::Err: std::fmt::Debug {
let mut buf = String::new();
io::stdin().read_line(&mut buf).unwrap();
buf.split_whitespace()
.map(|s| s.trim().parse().unwrap())
.collect()
}
#[allow(dead_code)]
fn read_one<T>() -> T
where T:
std::str::FromStr,
T::Err: std::fmt::Debug {
let mut buf = String::new();
io::stdin().read_line(&mut buf).unwrap();
buf.trim().parse().unwrap()
}
| 18.162162 | 45 | 0.510417 |
01e0ad090ac101425368b25e0930705272a1d048
| 29,060 |
//! Defines WasmEdge Vm struct.
use super::wasmedge;
use crate::{
error::{check, VmError, WasmEdgeError, WasmEdgeResult},
instance::function::FuncType,
types::HostRegistration,
utils, Config, ImportObj, Module, Statistics, Store, Value,
};
use std::path::Path;
/// Struct of WasmEdge Vm.
///
/// A [`Vm`] defines a virtual environment for managing WebAssembly programs.
#[derive(Debug)]
pub struct Vm {
pub(crate) ctx: *mut wasmedge::WasmEdge_VMContext,
import_objects: Vec<ImportObj>,
}
impl Vm {
/// Creates a new [`Vm`] to be associated with the given [configuration](crate::Config) and [store](crate::Store).
///
/// # Arguments
///
/// - `config` specifies a configuration for the new [`Vm`].
///
/// - `store` specifies an external WASM [store](crate::Store) used by the new [`Vm`]. The instantiation and
/// execution of the new [`Vm`] will refer to this store context. If no store context is specified when creating
/// a [`Vm`], then the [`Vm`] itself will allocate and own a [`store`](crate::Store).
///
/// # Error
///
/// If fail to create, then an error is returned.
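/// # Example
///
/// A minimal sketch, mirroring the unit tests at the bottom of this file:
///
/// ```ignore
/// let config = Config::create()?.enable_bulkmemoryoperations(true);
/// let store = Store::create()?;
/// let vm = Vm::create(Some(&config), Some(&store))?;
/// ```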
pub fn create(config: Option<&Config>, store: Option<&Store>) -> WasmEdgeResult<Self> {
let conf = match config {
Some(conf) => conf.ctx,
None => std::ptr::null(),
};
let store = match store {
Some(store) => store.ctx,
None => std::ptr::null_mut(),
};
let vm = unsafe { wasmedge::WasmEdge_VMCreate(conf, store) };
match vm.is_null() {
true => Err(WasmEdgeError::Vm(VmError::Create)),
false => Ok(Self {
ctx: vm,
import_objects: vec![],
}),
}
}
/// Registers and instantiates a WASM module into the [store](crate::Store) of the [`Vm`] from a WASM file.
///
/// The workflow of the function can be summarized as the following steps:
///
/// - First, loads a WASM module from a given path, then
///
/// - Registers all exported instances in the WASM module into the [store](crate::Store) of the [`Vm`];
///
/// - Finally, instantiates the exported instances.
///
///
/// # Arguments
///
/// - `mod_name` specifies the name for the WASM module to be registered.
///
/// - `path` specifies the file path to the target WASM file.
///
/// # Error
///
/// If fail to register the target WASM, then an error is returned.
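/// # Example
///
/// A minimal sketch; the module name and file path are placeholders:
///
/// ```ignore
/// let vm = vm.register_wasm_from_file("extern", "module.wasm")?;
/// ```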
pub fn register_wasm_from_file(
self,
mod_name: impl AsRef<str>,
path: impl AsRef<Path>,
) -> WasmEdgeResult<Self> {
let path = utils::path_to_cstring(path.as_ref())?;
let raw_mod_name = mod_name.as_ref().into();
unsafe {
check(wasmedge::WasmEdge_VMRegisterModuleFromFile(
self.ctx,
raw_mod_name,
path.as_ptr(),
))?
};
Ok(self)
}
/// Registers and instantiates a WASM module into the [store](crate::Store) of the [`Vm`] from a given WasmEdge
/// [ImportObj](crate::ImportObj) module.
///
/// The workflow of the function can be summarized as the following steps:
///
/// - First, registers the exported instances in the [ImportObj](crate::ImportObj) module into the
/// [store](crate::Store) of the [`Vm`], then
///
/// - Instantiates the exported instances.
///
///
/// # Argument
///
/// - `import_obj` specifies the [ImportObj](crate::ImportObj) module to be registered.
///
/// # Error
///
/// If fail to register the WASM module, then an error is returned.
pub fn register_wasm_from_import(self, import_obj: &mut ImportObj) -> WasmEdgeResult<Self> {
unsafe {
check(wasmedge::WasmEdge_VMRegisterModuleFromImport(
self.ctx,
import_obj.ctx,
))?;
}
import_obj.ctx = std::ptr::null_mut();
import_obj.registered = true;
Ok(self)
}
/// Registers and instantiates a WASM module into the [store](crate::Store) of the [`Vm`] from a given WASM
/// binary buffer.
///
/// The workflow of the function can be summarized as the following steps:
///
/// - First, loads a WASM module from the given WASM binary buffer, then
///
/// - Registers all exported instances in the WASM module into the [store](crate::Store) of the [`Vm`];
///
/// - Finally, instantiates the exported instances.
///
/// # Arguments
///
/// - `mod_name` specifies the name of the WASM module to be registered.
///
/// - `buffer` specifies the buffer of a WASM binary.
///
/// # Error
///
/// If fail to register the WASM module, then an error is returned.
pub fn register_wasm_from_buffer(
self,
mod_name: impl AsRef<str>,
buffer: &[u8],
) -> WasmEdgeResult<Self> {
unsafe {
check(wasmedge::WasmEdge_VMRegisterModuleFromBuffer(
self.ctx,
mod_name.as_ref().into(),
buffer.as_ptr(),
buffer.len() as u32,
))?;
}
Ok(self)
}
/// Registers and instantiates a WASM module into the [store](crate::Store) of the [`Vm`] from a WasmEdge AST
/// [Module](crate::Module).
///
/// The workflow of the function can be summarized as the following steps:
///
/// - First, loads a WASM module from the given WasmEdge AST [Module](crate::Module), then
///
/// - Registers all exported instances in the WASM module into the [store](crate::Store) of the [`Vm`];
///
/// - Finally, instantiates the exported instances.
///
/// # Arguments
///
/// - `mod_name` specifies the name of the WASM module to be registered.
///
/// - `module` specifies the WasmEdge AST [Module](crate::Module) generated by [Loader](crate::Loader) or
/// [Compiler](crate::Compiler).
///
/// # Error
///
/// If fail to register the WASM module, then an error is returned.
pub fn register_wasm_from_module(
self,
mod_name: impl AsRef<str>,
module: &mut Module,
) -> WasmEdgeResult<Self> {
unsafe {
check(wasmedge::WasmEdge_VMRegisterModuleFromASTModule(
self.ctx,
mod_name.as_ref().into(),
module.ctx,
))?;
}
module.ctx = std::ptr::null_mut();
module.registered = true;
Ok(self)
}
/// Instantiates a WASM module from a WASM file and invokes a [function](crate::Function) by name.
///
/// The workflow of the function can be summarized as the following steps:
///
/// - First, loads and instantiates the WASM module from a given file, then
///
/// - Invokes a function by name and parameters.
///
/// # Arguments
///
/// - `path` specifies the file path to a WASM file.
///
/// - `func_name` specifies the name of the [function](crate::Function).
///
/// - `params` specifies the parameter values which are used by the [function](crate::Function).
///
/// # Error
///
/// If fail to run, then an error is returned.
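/// # Example
///
/// A minimal sketch; the file path and exported function name are
/// placeholders, and no arguments are passed:
///
/// ```ignore
/// let params: Vec<Value> = vec![];
/// let returns = vm.run_wasm_from_file("module.wasm", "start", params)?;
/// println!("got {} return values", returns.count());
/// ```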
pub fn run_wasm_from_file(
&self,
path: impl AsRef<Path>,
func_name: impl AsRef<str>,
params: impl IntoIterator<Item = Value>,
) -> WasmEdgeResult<impl Iterator<Item = Value>> {
let path = utils::path_to_cstring(path.as_ref())?;
// prepare parameters
let raw_params = params
.into_iter()
.map(wasmedge::WasmEdge_Value::from)
.collect::<Vec<_>>();
// prepare returns
let func_type = self.get_function_type(func_name.as_ref())?;
// get the info of the function return
let returns_len = unsafe { wasmedge::WasmEdge_FunctionTypeGetReturnsLength(func_type.ctx) };
let mut returns = Vec::with_capacity(returns_len as usize);
unsafe {
check(wasmedge::WasmEdge_VMRunWasmFromFile(
self.ctx,
path.as_ptr(),
func_name.as_ref().into(),
raw_params.as_ptr(),
raw_params.len() as u32,
returns.as_mut_ptr(),
returns_len,
))?;
returns.set_len(returns_len as usize);
}
Ok(returns.into_iter().map(Into::into))
}
/// Instantiates a WASM module from a buffer and invokes a function by name.
///
/// The workflow of the function can be summarized as the following steps:
///
/// - First, loads and instantiates the WASM module from a buffer, then
///
/// - Invokes a function by name and parameters.
///
/// # Arguments
///
/// - `buffer` specifies the buffer of a WASM binary.
///
/// - `func_name` specifies the name of the [function](crate::Function).
///
/// - `params` specifies the parameter values which are used by the [function](crate::Function).
///
/// # Error
///
/// If fail to run, then an error is returned.
pub fn run_wasm_from_buffer(
&self,
buffer: &[u8],
func_name: impl AsRef<str>,
params: impl IntoIterator<Item = Value>,
) -> WasmEdgeResult<impl Iterator<Item = Value>> {
// prepare parameters
let raw_params = params
.into_iter()
.map(wasmedge::WasmEdge_Value::from)
.collect::<Vec<_>>();
// prepare returns
let func_type = self.get_function_type(func_name.as_ref())?;
// get the info of the funtion return
let returns_len = unsafe { wasmedge::WasmEdge_FunctionTypeGetReturnsLength(func_type.ctx) };
let mut returns = Vec::with_capacity(returns_len as usize);
unsafe {
check(wasmedge::WasmEdge_VMRunWasmFromBuffer(
self.ctx,
buffer.as_ptr(),
buffer.len() as u32,
func_name.as_ref().into(),
raw_params.as_ptr(),
raw_params.len() as u32,
returns.as_mut_ptr(),
returns_len,
))?;
returns.set_len(returns_len as usize);
}
Ok(returns.into_iter().map(Into::into))
}
/// Instantiates a WASM module from a WasmEdge AST [Module](crate::Module) and invokes a function by name.
///
/// The workflow of the function can be summarized as the following steps:
///
/// - First, loads and instantiates the WASM module from a WasmEdge AST [Module](crate::Module), then
///
/// - Invokes a function by name and parameters.
///
/// # Arguments
///
/// - `module` specifies the WasmEdge AST [Module](crate::Module) generated by [Loader](crate::Loader)
/// or [Compiler](crate::Compiler).
///
/// - `func_name` specifies the name of the [function](crate::Function).
///
/// - `params` specifies the parameter values which are used by the [function](crate::Function).
///
/// # Error
///
/// If fail to run, then an error is returned.
pub fn run_wasm_from_module(
&self,
module: &mut Module,
func_name: impl AsRef<str>,
params: impl IntoIterator<Item = Value>,
) -> WasmEdgeResult<impl Iterator<Item = Value>> {
// prepare parameters
let raw_params = params
.into_iter()
.map(wasmedge::WasmEdge_Value::from)
.collect::<Vec<_>>();
// prepare returns
let func_type = self.get_function_type(func_name.as_ref())?;
// get the info of the function return
let returns_len = unsafe { wasmedge::WasmEdge_FunctionTypeGetReturnsLength(func_type.ctx) };
let mut returns = Vec::with_capacity(returns_len as usize);
unsafe {
check(wasmedge::WasmEdge_VMRunWasmFromASTModule(
self.ctx,
module.ctx,
func_name.as_ref().into(),
raw_params.as_ptr(),
raw_params.len() as u32,
returns.as_mut_ptr(),
returns_len,
))?;
module.ctx = std::ptr::null_mut();
module.registered = true;
returns.set_len(returns_len as usize);
}
Ok(returns.into_iter().map(Into::into))
}
/// Loads a WASM module from a WasmEdge AST [Module](crate::Module).
///
/// # Argument
///
/// - `module` specifies a WasmEdge AST [Module](crate::Module) generated by [Loader](crate::Loader) or
/// [Compiler](crate::Compiler).
///
/// # Error
///
/// If fail to load, then an error is returned.
pub fn load_wasm_from_module(self, module: &mut Module) -> WasmEdgeResult<Self> {
unsafe {
check(wasmedge::WasmEdge_VMLoadWasmFromASTModule(
self.ctx, module.ctx,
))?;
module.ctx = std::ptr::null_mut();
module.registered = true;
}
Ok(self)
}
/// Loads a WASM module from a WASM binary buffer.
///
/// # Argument
///
/// - `buffer` specifies the buffer of a WASM binary.
///
/// # Error
///
/// If fail to load, then an error is returned.
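/// # Example
///
/// A minimal sketch that reads the module bytes from disk first; the path
/// is a placeholder:
///
/// ```ignore
/// let buffer = std::fs::read("module.wasm")?;
/// let vm = vm.load_wasm_from_buffer(&buffer)?;
/// ```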
pub fn load_wasm_from_buffer(self, buffer: &[u8]) -> WasmEdgeResult<Self> {
unsafe {
check(wasmedge::WasmEdge_VMLoadWasmFromBuffer(
self.ctx,
buffer.as_ptr() as *const _,
buffer.len() as u32,
))?;
}
Ok(self)
}
/// Loads a WASM module from a WASM file.
///
/// # Argument
///
/// - `path` specifies the path to a WASM file.
///
/// # Error
///
/// If fail to load, then an error is returned.
pub fn load_wasm_from_file<P: AsRef<Path>>(self, path: P) -> WasmEdgeResult<Self> {
let path = utils::path_to_cstring(path.as_ref())?;
unsafe {
check(wasmedge::WasmEdge_VMLoadWasmFromFile(
self.ctx,
path.as_ptr(),
))?;
}
Ok(self)
}
/// Validates a WASM module loaded into the [`Vm`].
///
/// This is the second step to invoke a WASM function step by step. After loading a WASM module into the [`Vm`],
/// call this function to validate it. Note that only validated WASM modules can be instantiated in the [`Vm`].
///
/// # Error
///
/// If fail to validate, then an error is returned.
pub fn validate(self) -> WasmEdgeResult<Self> {
unsafe {
check(wasmedge::WasmEdge_VMValidate(self.ctx))?;
}
Ok(self)
}
/// Instantiates a validated WASM module in the [`Vm`].
///
/// This is the third step to invoke a WASM function step by step. After validating a WASM module in the [`Vm`],
/// call this function to instantiate it; then, call `execute` to invoke the exported function in this WASM module.
///
/// # Error
///
/// If fail to instantiate, then an error is returned.
pub fn instantiate(self) -> WasmEdgeResult<Self> {
unsafe {
check(wasmedge::WasmEdge_VMInstantiate(self.ctx))?;
}
Ok(self)
}
/// Runs an exported WASM function by name. The WASM function is hosted by the anonymous [module](crate::Module) in
/// the [store](crate::Store) of the [`Vm`].
///
/// This is the final step to invoke a WASM function step by step.
/// After instantiating a WASM module in the [`Vm`], the WASM module is registered into the [store](crate::Store)
/// of the [`Vm`] as an anonymous module. Then repeatedly call this function to invoke the exported WASM functions
/// by their names until the [`Vm`] is reset or a new WASM module is registered
/// or loaded.
///
/// # Arguments
///
/// - `func_name` specifies the name of the exported WASM function to run.
///
/// - `params` specifies the parameter values passed to the exported WASM function.
///
/// # Error
///
/// If fail to run the WASM function, then an error is returned.
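/// # Example
///
/// A sketch of the full load-validate-instantiate-run flow; the file path
/// and function name are placeholders, and no arguments are passed:
///
/// ```ignore
/// let mut vm = vm
/// .load_wasm_from_file("module.wasm")?
/// .validate()?
/// .instantiate()?;
/// let params: Vec<Value> = vec![];
/// let returns = vm.run_function("start", params)?;
/// ```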
pub fn run_function(
&mut self,
func_name: impl AsRef<str>,
params: impl IntoIterator<Item = Value>,
) -> WasmEdgeResult<impl Iterator<Item = Value>> {
// prepare parameters
let raw_params = params
.into_iter()
.map(wasmedge::WasmEdge_Value::from)
.collect::<Vec<_>>();
// prepare returns
let func_type = self.get_function_type(func_name.as_ref())?;
// get the info of the function return
let returns_len = unsafe { wasmedge::WasmEdge_FunctionTypeGetReturnsLength(func_type.ctx) };
let mut returns = Vec::with_capacity(returns_len as usize);
unsafe {
check(wasmedge::WasmEdge_VMExecute(
self.ctx,
func_name.as_ref().into(),
raw_params.as_ptr(),
raw_params.len() as u32,
returns.as_mut_ptr(),
returns_len,
))?;
returns.set_len(returns_len as usize);
}
Ok(returns.into_iter().map(Into::into))
}
/// Runs an exported WASM function by its name and the module's name in which the WASM function is hosted.
///
/// After registering a WASM module in the [`Vm`], repeatedly call this function to run exported WASM functions by
/// their function names and the module names until the [`Vm`] is reset.
///
/// # Arguments
///
/// - `mod_name` specifies the name of the WASM module registered into the [store](crate::Store) of the [`Vm`].
///
/// - `func_name` specifies the name of the exported WASM function to run.
///
/// - `params` specifies the parameter values passed to the exported WASM function.
///
/// # Error
///
/// If fail to run the WASM function, then an error is returned.
pub fn run_registered_function(
&self,
mod_name: impl AsRef<str>,
func_name: impl AsRef<str>,
params: impl IntoIterator<Item = Value>,
) -> WasmEdgeResult<impl Iterator<Item = Value>> {
// prepare parameters
let raw_params = params
.into_iter()
.map(wasmedge::WasmEdge_Value::from)
.collect::<Vec<_>>();
// prepare returns
let func_type = self.get_registered_function_type(mod_name.as_ref(), func_name.as_ref())?;
// get the info of the function return
let returns_len = unsafe { wasmedge::WasmEdge_FunctionTypeGetReturnsLength(func_type.ctx) };
let mut returns = Vec::with_capacity(returns_len as usize);
unsafe {
check(wasmedge::WasmEdge_VMExecuteRegistered(
self.ctx,
mod_name.as_ref().into(),
func_name.as_ref().into(),
raw_params.as_ptr(),
raw_params.len() as u32,
returns.as_mut_ptr(),
returns_len,
))?;
returns.set_len(returns_len as usize);
}
Ok(returns.into_iter().map(Into::into))
}
/// Returns the function type of a WASM function by its name. The function is hosted in the anonymous
/// [module](crate::Module) of the [`Vm`].
///
/// # Argument
///
/// - `func_name` specifies the name of the target WASM function.
///
/// # Error
///
/// If fail to get the function type, then an error is returned.
pub fn get_function_type(&self, func_name: impl AsRef<str>) -> WasmEdgeResult<FuncType> {
let ty_ctx =
unsafe { wasmedge::WasmEdge_VMGetFunctionType(self.ctx, func_name.as_ref().into()) };
match ty_ctx.is_null() {
true => Err(WasmEdgeError::Vm(VmError::NotFoundFuncType(
func_name.as_ref().to_string(),
))),
false => Ok(FuncType {
ctx: ty_ctx as *mut _,
registered: true,
}),
}
}
/// Returns the function type of a WASM function by its name and the name of the registered [module](crate::Module)
/// which hosts the WASM function.
///
/// # Arguments
///
/// - `mod_name` specifies the name of the registered [module](crate::Module).
///
/// - `func_name` specifies the name of the target WASM function.
pub fn get_registered_function_type(
&self,
mod_name: impl AsRef<str>,
func_name: impl AsRef<str>,
) -> WasmEdgeResult<FuncType> {
let ty_ctx = unsafe {
wasmedge::WasmEdge_VMGetFunctionTypeRegistered(
self.ctx,
mod_name.as_ref().into(),
func_name.as_ref().into(),
)
};
match ty_ctx.is_null() {
true => Err(WasmEdgeError::Vm(VmError::NotFoundFuncType(
func_name.as_ref().to_string(),
))),
false => Ok(FuncType {
ctx: ty_ctx as *mut _,
registered: true,
}),
}
}
/// Resets the [`Vm`].
pub fn reset(&mut self) {
unsafe { wasmedge::WasmEdge_VMCleanup(self.ctx) }
}
/// Returns the length of the exported function list.
pub fn function_list_len(&self) -> usize {
unsafe { wasmedge::WasmEdge_VMGetFunctionListLength(self.ctx) as usize }
}
/// Returns an iterator of the exported functions.
pub fn function_iter(
&self,
) -> WasmEdgeResult<impl Iterator<Item = (Option<String>, Option<FuncType>)>> {
let len = self.function_list_len();
let mut names = Vec::with_capacity(len);
let mut types = Vec::with_capacity(len);
unsafe {
wasmedge::WasmEdge_VMGetFunctionList(
self.ctx,
names.as_mut_ptr(),
types.as_mut_ptr(),
len as u32,
);
names.set_len(len);
types.set_len(len);
};
let returns = names.into_iter().zip(types.into_iter()).map(|(name, ty)| {
let name = unsafe { std::ffi::CStr::from_ptr(name.Buf as *const _) };
let name = name.to_string_lossy().into_owned();
let func_ty = match ty.is_null() {
true => None,
false => Some(FuncType {
ctx: ty as *mut _,
registered: true,
}),
};
(Some(name), func_ty)
});
Ok(returns)
}
/// Returns the mutable [ImportObj](crate::ImportObj) corresponding to the HostRegistration settings.
pub fn import_obj_mut(&mut self, reg: HostRegistration) -> WasmEdgeResult<ImportObj> {
let io_ctx = unsafe { wasmedge::WasmEdge_VMGetImportModuleContext(self.ctx, reg.into()) };
match io_ctx.is_null() {
true => Err(WasmEdgeError::Vm(VmError::NotFoundImportObj)),
false => Ok(ImportObj {
ctx: io_ctx,
registered: true,
}),
}
}
/// Returns the mutable [Store](crate::Store) from the [`Vm`].
pub fn store_mut(&self) -> WasmEdgeResult<Store> {
let store_ctx = unsafe { wasmedge::WasmEdge_VMGetStoreContext(self.ctx) };
match store_ctx.is_null() {
true => Err(WasmEdgeError::Vm(VmError::NotFoundStore)),
false => Ok(Store {
ctx: store_ctx,
registered: true,
}),
}
}
/// Returns the mutable [Statistics](crate::Statistics) from the [`Vm`].
pub fn statistics_mut(&self) -> WasmEdgeResult<Statistics> {
let stat_ctx = unsafe { wasmedge::WasmEdge_VMGetStatisticsContext(self.ctx) };
match stat_ctx.is_null() {
true => Err(WasmEdgeError::Vm(VmError::NotFoundStatistics)),
false => Ok(Statistics {
ctx: stat_ctx,
registered: true,
}),
}
}
/// Initializes the WASI host module with the given parameters.
///
/// # Arguments
///
/// - `args` specifies the commandline arguments. The first argument is the program name.
///
/// - `envs` specifies the environment variables in the format `ENV_VAR_NAME=VALUE`.
///
/// - `preopens` specifies the directories to pre-open. The required format is `DIR1:DIR2`.
pub fn init_wasi_obj<T, E>(
&mut self,
args: Option<T>,
envs: Option<T>,
preopens: Option<T>,
) -> WasmEdgeResult<()>
where
T: Iterator<Item = E>,
E: AsRef<str>,
{
let mut import_obj = self.import_obj_mut(HostRegistration::Wasi)?;
import_obj.init_wasi(args, envs, preopens);
Ok(())
}
}
impl Drop for Vm {
fn drop(&mut self) {
if !self.ctx.is_null() {
unsafe { wasmedge::WasmEdge_VMDelete(self.ctx) };
}
self.import_objects.drain(..);
}
}
#[cfg(test)]
mod tests {
use super::Vm;
use crate::{Config, Module, Store};
#[test]
fn test_vm_create() {
// create Config instance
let result = Config::create();
assert!(result.is_ok());
let conf = result.unwrap();
let conf = conf.enable_bulkmemoryoperations(true);
assert!(conf.has_bulkmemoryoperations());
// create Store instance
let result = Store::create();
assert!(result.is_ok(), "Failed to create Store instance");
let store = result.unwrap();
// create Vm instance
let result = Vm::create(Some(&conf), Some(&store));
assert!(result.is_ok());
}
#[test]
fn test_vm_load_wasm_from_file() {
// create Config instance
let result = Config::create();
assert!(result.is_ok());
let conf = result.unwrap();
let conf = conf.enable_bulkmemoryoperations(true);
assert!(conf.has_bulkmemoryoperations());
// create Store instance
let result = Store::create();
assert!(result.is_ok(), "Failed to create Store instance");
let store = result.unwrap();
// create Vm instance
let result = Vm::create(Some(&conf), Some(&store));
assert!(result.is_ok());
let vm = result.unwrap();
// load wasm module from a specified file
let path =
std::path::PathBuf::from(env!("WASMEDGE_DIR")).join("test/api/apiTestData/test.wasm");
let result = vm.load_wasm_from_file(path);
assert!(result.is_ok());
}
#[test]
fn test_vm_load_wasm_from_buffer() {
// create Config instance
let result = Config::create();
assert!(result.is_ok());
let conf = result.unwrap();
let conf = conf.enable_bulkmemoryoperations(true);
assert!(conf.has_bulkmemoryoperations());
// create Store instance
let result = Store::create();
assert!(result.is_ok(), "Failed to create Store instance");
let store = result.unwrap();
// create Vm instance
let result = Vm::create(Some(&conf), Some(&store));
assert!(result.is_ok());
let vm = result.unwrap();
// load wasm module from buffer
let wasm_path =
std::path::PathBuf::from(env!("WASMEDGE_DIR")).join("test/api/apiTestData/test.wasm");
let result = std::fs::read(wasm_path);
assert!(result.is_ok());
let buf = result.unwrap();
let result = vm.load_wasm_from_buffer(&buf);
assert!(result.is_ok());
}
#[test]
fn test_vm_load_wasm_from_ast_module() {
// create ast module instance
let path =
std::path::PathBuf::from(env!("WASMEDGE_DIR")).join("test/api/apiTestData/test.wasm");
let result = Config::create();
assert!(result.is_ok());
let conf = result.unwrap();
let conf = conf.enable_bulkmemoryoperations(true);
assert!(conf.has_bulkmemoryoperations());
let result = Module::create_from_file(&conf, path);
assert!(result.is_ok());
let mut ast_module = result.unwrap();
// create Vm instance
let result = Config::create();
assert!(result.is_ok());
let conf = result.unwrap();
let conf = conf.enable_bulkmemoryoperations(true);
assert!(conf.has_bulkmemoryoperations());
let result = Store::create();
assert!(result.is_ok(), "Failed to create Store instance");
let store = result.unwrap();
let result = Vm::create(Some(&conf), Some(&store));
assert!(result.is_ok());
let vm = result.unwrap();
// load wasm module from a ast module instance
let result = vm.load_wasm_from_module(&mut ast_module);
assert!(result.is_ok());
let vm = result.unwrap();
// validate vm instance
let result = vm.validate();
assert!(result.is_ok());
}
}
| 34.43128 | 119 | 0.569718 |
7187bce7a1268ffc575924656ca2c701dfce53fa
| 9,133 |
//! Communicate with the Polar virtual machine: load rules, make queries, etc.
use polar_core::sources::Source;
use polar_core::terms::{Call, Symbol, Term, Value};
use std::collections::HashSet;
use std::fs::File;
use std::hash::Hash;
use std::io::Read;
use std::sync::Arc;
use crate::host::Host;
use crate::query::Query;
use crate::{FromPolar, OsoError, PolarValue, ToPolar, ToPolarList};
/// Oso is the main struct you interact with. It is an instance of the Oso authorization library
/// and contains the polar language knowledge base and query engine.
#[derive(Clone)]
pub struct Oso {
inner: Arc<polar_core::polar::Polar>,
host: Host,
}
impl Default for Oso {
fn default() -> Self {
Self::new()
}
}
/// Represents an `action` used in an `allow` rule.
/// When the action is bound to a concrete value (e.g. a string)
/// this returns an `Action::Typed(action)`.
/// If _any_ actions are allowed, then the `Action::Any` variant is returned.
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub enum Action<T = String> {
Any,
Typed(T),
}
impl<T: FromPolar> FromPolar for Action<T> {
fn from_polar(val: PolarValue) -> crate::Result<Self> {
if matches!(val, PolarValue::Variable(_)) {
Ok(Action::Any)
} else {
T::from_polar(val).map(Action::Typed)
}
}
}
impl Oso {
/// Create a new instance of Oso. Each instance is separate and can have different rules and classes loaded into it.
pub fn new() -> Self {
let inner = Arc::new(polar_core::polar::Polar::new());
let host = Host::new(inner.clone());
let mut oso = Self { inner, host };
for class in crate::builtins::classes() {
oso.register_class(class)
.expect("failed to register builtin class");
}
oso.register_constant(Option::<crate::PolarValue>::None, "nil")
.expect("failed to register the constant None");
oso
}
/// High level interface for authorization decisions. Makes an allow query with the given actor, action and resource and returns true or false.
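/// # Examples
/// A minimal sketch; the policy and the actor/action/resource values are
/// placeholders:
/// ```ignore
/// oso.load_str(r#"allow("sam", "read", "document");"#)?;
/// assert!(oso.is_allowed("sam", "read", "document")?);
/// ```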
pub fn is_allowed<Actor, Action, Resource>(
&self,
actor: Actor,
action: Action,
resource: Resource,
) -> crate::Result<bool>
where
Actor: ToPolar,
Action: ToPolar,
Resource: ToPolar,
{
let mut query = self.query_rule("allow", (actor, action, resource)).unwrap();
match query.next() {
Some(Ok(_)) => Ok(true),
Some(Err(e)) => Err(e),
None => Ok(false),
}
}
/// Get the actions actor is allowed to take on resource.
/// Returns a [std::collections::HashSet] of actions, typed according to the return value.
/// # Examples
/// ```ignore
/// oso.load_str(r#"allow(actor: Actor{name: "sally"}, action, resource: Widget{id: 1}) if
/// action in ["CREATE", "READ"];"#);
///
/// // get a HashSet of oso::Actions
/// let actions: HashSet<Action> = oso.get_allowed_actions(actor, resource)?;
///
/// // or Strings
/// let actions: HashSet<String> = oso.get_allowed_actions(actor, resource)?;
/// ```
pub fn get_allowed_actions<Actor, Resource, T>(
&self,
actor: Actor,
resource: Resource,
) -> crate::Result<HashSet<T>>
where
Actor: ToPolar,
Resource: ToPolar,
T: FromPolar + Eq + Hash,
{
let mut query = self
.query_rule(
"allow",
(actor, PolarValue::Variable("action".to_owned()), resource),
)
.unwrap();
let mut set = HashSet::new();
loop {
match query.next() {
Some(Ok(result)) => {
if let Some(action) = result.get("action") {
set.insert(T::from_polar(action)?);
}
}
Some(Err(e)) => return Err(e),
None => break,
};
}
Ok(set)
}
/// Clear out all files and rules that have been loaded.
pub fn clear_rules(&mut self) -> crate::Result<()> {
self.inner.clear_rules();
check_messages!(self.inner);
Ok(())
}
fn check_inline_queries(&self) -> crate::Result<()> {
while let Some(q) = self.inner.next_inline_query(false) {
let location = q.source_info();
let query = Query::new(q, self.host.clone());
match query.collect::<crate::Result<Vec<_>>>() {
Ok(v) if !v.is_empty() => continue,
Ok(_) => return Err(OsoError::InlineQueryFailedError { location }),
Err(e) => return lazy_error!("error in inline query: {}", e),
}
}
check_messages!(self.inner);
Ok(())
}
// Register MROs, load Polar code, and check inline queries.
fn load_sources(&mut self, sources: Vec<Source>) -> crate::Result<()> {
self.host.register_mros()?;
self.inner.load(sources)?;
self.check_inline_queries()
}
/// Load a file containing Polar rules. All Polar files must end in `.polar`.
#[deprecated(
since = "0.20.1",
note = "`Oso::load_file` has been deprecated in favor of `Oso::load_files` as of the 0.20 release.\n\nPlease see changelog for migration instructions: https://docs.osohq.com/project/changelogs/2021-09-15.html"
)]
pub fn load_file<P: AsRef<std::path::Path>>(&mut self, filename: P) -> crate::Result<()> {
self.load_files(vec![filename])
}
/// Load files containing Polar rules. All Polar files must end in `.polar`.
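/// # Examples
/// A minimal sketch; `policy.polar` is a placeholder path:
/// ```ignore
/// oso.load_files(vec!["policy.polar"])?;
/// ```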
pub fn load_files<P: AsRef<std::path::Path>>(
&mut self,
filenames: Vec<P>,
) -> crate::Result<()> {
if filenames.is_empty() {
return Ok(());
}
let mut sources = Vec::with_capacity(filenames.len());
for file in filenames {
let file = file.as_ref();
let filename = file.to_string_lossy().into_owned();
if !file.extension().map_or(false, |ext| ext == "polar") {
return Err(crate::OsoError::IncorrectFileType { filename });
}
let mut f = File::open(&file)?;
let mut src = String::new();
f.read_to_string(&mut src)?;
sources.push(Source::new_with_name(filename, src));
}
self.load_sources(sources)
}
/// Load a string of polar source directly.
/// # Examples
/// ```ignore
/// oso.load_str("allow(a, b, c) if true;");
/// ```
pub fn load_str(&mut self, src: &str) -> crate::Result<()> {
// TODO(gj): emit... some sort of warning?
self.load_sources(vec![Source::new(src)])
}
/// Query the knowledge base. This can be an allow query or any other polar expression.
/// # Examples
/// ```ignore
/// oso.query("x = 1 or x = 2");
/// ```
pub fn query(&self, s: &str) -> crate::Result<Query> {
let query = self.inner.new_query(s, false)?;
check_messages!(self.inner);
let query = Query::new(query, self.host.clone());
Ok(query)
}
/// Query the knowledge base but with a rule name and argument list.
/// This allows you to pass in rust values.
/// # Examples
/// ```ignore
/// oso.query_rule("is_admin", vec![User{name: "steve"}]);
/// ```
#[must_use = "Query that is not consumed does nothing."]
pub fn query_rule(&self, name: &str, args: impl ToPolarList) -> crate::Result<Query> {
let mut query_host = self.host.clone();
let args = args
.to_polar_list()
.iter()
.map(|value| value.to_term(&mut query_host))
.collect();
let query_value = Value::Call(Call {
name: Symbol(name.to_string()),
args,
kwargs: None,
});
let query_term = Term::new_from_ffi(query_value);
let query = self.inner.new_query_from_term(query_term, false);
check_messages!(self.inner);
let query = Query::new(query, query_host);
Ok(query)
}
/// Register a rust type as a Polar class.
/// See [`oso::Class`] docs.
pub fn register_class(&mut self, class: crate::host::Class) -> crate::Result<()> {
let name = class.name.clone();
let class_name = self.host.cache_class(class.clone(), name)?;
for hook in &class.register_hooks {
hook.call(self)?;
}
self.register_constant(class, &class_name)
}
/// Register a rust type as a Polar constant.
/// See [`oso::Class`] docs.
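/// # Examples
/// A minimal sketch that registers an integer constant named `MAX_ITEMS`
/// (both the value and the name are placeholders):
/// ```ignore
/// oso.register_constant(10, "MAX_ITEMS")?;
/// ```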
pub fn register_constant<V: crate::host::ToPolar + Send + Sync>(
&mut self,
value: V,
name: &str,
) -> crate::Result<()> {
self.inner.register_constant(
Symbol(name.to_string()),
value.to_polar().to_term(&mut self.host),
)?;
Ok(())
}
}
// Make sure the `Oso` object is threadsafe
#[cfg(test)]
static_assertions::assert_impl_all!(Oso: Send, Sync);
| 33.454212 | 217 | 0.563889 |
d9dfdd087bdd590dd59ebd7baa4cf32802ec7862
| 72,906 |
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Native process creation and program loading library.
//!
//! # Restrictions
//!
//! Most Fuchsia processes are not able to use this library.
//!
//! This library uses the [zx_process_create] syscall to create a new process in a job. Use of that
//! syscall requires that the job of the process using the syscall (not the job that the process is
//! being created in) be allowed to create processes. In concrete terms, the process using this
//! library must be in a job whose [ZX_POL_NEW_PROCESS job policy is
//! ZX_POL_ACTION_ALLOW][zx_job_set_policy].
//!
//! Most processes on Fuchsia run in jobs where this job policy is set to DENY and thus will not
//! be able to use this library. Those processes should instead use the [fuchsia.process.Launcher]
//! FIDL service, which is itself implemented using this library. [fdio::spawn()],
//! [fdio::spawn_vmo()], and [fdio::spawn_etc()] provide simple interfaces to this service.
//!
//! # Example
//!
//! ```
//! let process_name = CString::new("my_process")?;
//! let job = /* job to create new process in */;
//! let executable = /* VMO with execute rights containing ELF executable */;
//! let pkg_directory = /* fidl::endpoints::ClientEnd for fuchsia.io.Directory */;
//! let out_directory = /* server end of zx::Channel */;
//! let other_handle = /* some arbitrary zx::Handle */;
//!
//! let builder = ProcessBuilder::new(&process_name, &job, executable)?;
//! builder.add_arguments(vec![process_name, CString::new("arg0")?]);
//! builder.add_environment_variables(vec![CString::new("VAR=VALUE")?]);
//! builder.add_namespace_entries(vec![NamespaceEntry{
//! path: CString::new("/pkg")?,
//! directory: package_directory,
//! }])?;
//! builder.add_handles(vec![
//! StartupHandle{
//! handle: out_directory.into_handle(),
//! info: HandleInfo::new(HandleType::DirectoryRequest, 0),
//! },
//! StartupHandle{
//! handle: other_handle,
//! info: HandleInfo::new(HandleType::User0, 1),
//! },
//! ])?;
//!
//! let built_process: BuiltProcess = builder.build()?;
//! let process: zx::Process = built_process.start()?;
//! ```
//!
//! [zx_process_create]: https://fuchsia.dev/fuchsia-src/reference/syscalls/process_create.md
//! [zx_job_set_policy]: https://fuchsia.dev/fuchsia-src/reference/syscalls/job_set_policy.md
//! [fuchsia.process.Launcher]: https://fuchsia.googlesource.com/fuchsia/+/HEAD/zircon/system/fidl/fuchsia-process/launcher.fidl
//
// TODO: Consider supporting this for processes that do not meet the above requirements (nearly
// all), where it can optionally build the process directly if able or delegate to a remote
// fuchsia.process.Launcher (possibly through fdio::spawn_etc, in which case it would just be an
// alternative front-end for that, similar to our C++ ProcessBuilder library, though that pulls in
// a dependency on fdio).
#![deny(missing_docs)]
pub use self::elf_load::ElfLoadError;
pub use self::elf_parse::ElfParseError;
pub use self::processargs::{ProcessargsError, StartupHandle};
pub mod elf_load;
pub mod elf_parse;
mod processargs;
mod util;
use {
anyhow::{anyhow, Context},
fidl::endpoints::{ClientEnd, Proxy},
fidl_fuchsia_io as fio, fidl_fuchsia_ldsvc as fldsvc,
fuchsia_async::{self as fasync, TimeoutExt},
fuchsia_runtime::{HandleInfo, HandleType},
fuchsia_zircon::{self as zx, AsHandleRef, DurationNum, HandleBased},
futures::prelude::*,
lazy_static::lazy_static,
log::warn,
std::convert::TryFrom,
std::default::Default,
std::ffi::{CStr, CString},
std::iter,
std::mem,
thiserror::Error,
};
/// A container for a single namespace entry, containing a path and a directory handle. Used as an
/// input to [ProcessBuilder::add_namespace_entries()].
pub struct NamespaceEntry {
/// Namespace path.
pub path: CString,
/// Namespace directory handle.
pub directory: ClientEnd<fio::DirectoryMarker>,
}
/// The main builder type for this crate. Collects inputs and creates a new process.
///
/// See top-level crate documentation for a usage example.
pub struct ProcessBuilder {
/// The ELF binary for the new process.
executable: zx::Vmo,
/// The fuchsia.ldsvc.Loader service to use for the new process, if dynamically linked.
ldsvc: Option<fldsvc::LoaderProxy>,
/// A non-default vDSO to use for the new process, if any.
non_default_vdso: Option<zx::Vmo>,
/// The contents of the main processargs message to be sent to the new process.
msg_contents: processargs::MessageContents,
/// Handles that are common to both the linker and main processargs messages, wrapped in an
/// inner struct for code organization and clarity around borrows.
common: CommonMessageHandles,
/// Minimum size of the stack for the new process, in bytes.
min_stack_size: usize,
}
struct CommonMessageHandles {
process: zx::Process,
thread: zx::Thread,
root_vmar: zx::Vmar,
}
/// A container for a fully built but not yet started (as in, its initial thread is not yet
/// running) process, with all related handles and metadata. Output of [ProcessBuilder::build()].
///
/// You can use this struct to start the process with [BuiltProcess::start()], which is a simple
/// wrapper around the [zx_process_start] syscall. You can optionally use the handles and
/// information in this struct to manipulate the process or its address space before starting it,
/// such as when creating a process in a debugger.
///
/// [zx_process_start]: https://fuchsia.dev/fuchsia-src/reference/syscalls/process_start.md
pub struct BuiltProcess {
/// The newly created process.
pub process: zx::Process,
/// The root VMAR for the created process.
pub root_vmar: zx::Vmar,
/// The process's initial thread.
pub thread: zx::Thread,
/// The process's entry point.
pub entry: usize,
/// The initial thread's stack pointer.
pub stack: usize,
/// The base address of the stack for the initial thread.
pub stack_base: usize,
/// The VMO of the stack for the initial thread.
pub stack_vmo: zx::Vmo,
/// The bootstrap channel, to be passed to the process on start as arg1 in zx_process_start /
/// zx::Process::start.
pub bootstrap: zx::Channel,
/// The base address of the VDSO in the process's VMAR, to be passed to the process on start as
/// arg2 in zx_process_start / zx::Process::start.
pub vdso_base: usize,
/// The base address where the ELF executable, or the dynamic linker if the ELF was dynamically
/// linked, was loaded in the process's VMAR.
pub elf_base: usize,
/// The ELF headers of the main module of the newly created process.
pub elf_headers: elf_parse::Elf64Headers,
}
struct StackInfo {
/// The initial thread's stack pointer.
pub stack_ptr: usize,
/// The base address of the stack for the initial thread.
pub stack_base: usize,
/// The VMO of the stack for the initial thread.
pub stack_vmo: zx::Vmo,
}
impl ProcessBuilder {
/// Create a new ProcessBuilder that can be used to create a new process under the given job
/// with the given name and ELF64 executable (as a VMO).
///
/// This job is only used to create the process and thus is not taken ownership of. To provide
/// a default job handle to be passed to the new process, use [ProcessBuilder::add_handles()]
/// with [HandleType::DefaultJob].
///
/// The provided VMO must have the [zx::Rights::EXECUTE] right.
///
/// # Errors
///
/// Returns Err([ProcessBuilderError::CreateProcess]) if process creation fails, such as if the
    /// calling process is not allowed to create processes directly due to job policy. See
/// top-level crate documentation for more details.
pub fn new(
name: &CStr,
job: &zx::Job,
executable: zx::Vmo,
) -> Result<ProcessBuilder, ProcessBuilderError> {
if job.is_invalid_handle() {
return Err(ProcessBuilderError::BadHandle("Invalid job handle"));
}
if executable.is_invalid_handle() {
return Err(ProcessBuilderError::BadHandle("Invalid executable handle"));
}
// Creating the process immediately has the benefit that we fail fast if the calling
// process does not have permission to create processes directly.
let (process, root_vmar) = job
.create_child_process(name.to_bytes())
.map_err(ProcessBuilderError::CreateProcess)?;
// Create the initial thread.
let thread =
process.create_thread(b"initial-thread").map_err(ProcessBuilderError::CreateThread)?;
// Add duplicates of the process, VMAR, and thread handles to the bootstrap message.
let msg_contents = processargs::MessageContents::default();
let mut pb = ProcessBuilder {
executable,
ldsvc: None,
non_default_vdso: None,
msg_contents,
common: CommonMessageHandles { process, thread, root_vmar },
min_stack_size: 0,
};
pb.common.add_to_message(&mut pb.msg_contents)?;
Ok(pb)
}
/// Sets the fuchsia.ldsvc.Loader service for the process.
///
/// The loader service is used to load dynamic libraries if the executable is a dynamically
/// linked ELF file (i.e. if it contains a PT_INTERP header), and is required for such
/// executables. It will only be provided to the new process in this case. Otherwise, it is
/// unused and has no effect.
///
/// If no loader service has been provided and it is needed, process creation will fail. Note
/// that this differs from the automatic fallback behavior of previous process creation
/// libraries, which would clone the loader of the current process. This fallback is likely to
/// fail in subtle and confusing ways. An appropriate loader service that has access to the
/// libraries or interpreter must be provided.
///
/// Note that [ProcessBuilder::add_handles()] will automatically pass a handle with type
/// [HandleType::LdsvcLoader] to this function.
///
/// If called multiple times (e.g. if a loader was initially provided through
/// [ProcessBuilder::add_handles()] and you want to replace it), the new loader replaces the
/// previous and the handle to the previous loader is dropped.
pub fn set_loader_service(
&mut self,
ldsvc: ClientEnd<fldsvc::LoaderMarker>,
) -> Result<(), ProcessBuilderError> {
if ldsvc.is_invalid_handle() {
return Err(ProcessBuilderError::BadHandle("Invalid loader service handle"));
}
self.ldsvc =
Some(ldsvc.into_proxy().map_err(|e| {
ProcessBuilderError::Internal("Failed to get LoaderProxy", e.into())
})?);
Ok(())
}
/// Sets the vDSO VMO for the process.
pub fn set_vdso_vmo(&mut self, vdso: zx::Vmo) {
self.non_default_vdso = Some(vdso);
}
/// Add arguments to the process's bootstrap message. Successive calls append (not replace)
/// arguments.
pub fn add_arguments(&mut self, mut args: Vec<CString>) {
self.msg_contents.args.append(&mut args);
}
/// Add environment variables to the process's bootstrap message. Successive calls append (not
/// replace) environment variables.
pub fn add_environment_variables(&mut self, mut vars: Vec<CString>) {
self.msg_contents.environment_vars.append(&mut vars);
}
/// Set the minimum size of the stack for the new process, in bytes.
pub fn set_min_stack_size(&mut self, size: usize) {
self.min_stack_size = size;
}
/// Add handles to the process's bootstrap message. Successive calls append (not replace)
/// handles.
///
/// Each [StartupHandle] contains a [zx::Handle] object accompanied by a [HandleInfo] object
/// that includes the handle type and a type/context-dependent argument.
///
/// A [HandleType::LdsvcLoader] handle will automatically be passed along to
/// [ProcessBuilder::set_loader_service()] if provided through this function.
///
/// # Errors
///
/// [HandleType::NamespaceDirectory] handles should not be added through this function since
/// they must be accompanied with a path. Use [ProcessBuilder::add_namespace_entries()] for
/// that instead.
///
/// The following handle types cannot be added through this, as they are added automatically by
/// the ProcessBuilder:
/// * [HandleType::ProcessSelf]
/// * [HandleType::ThreadSelf]
/// * [HandleType::RootVmar]
/// * [HandleType::LoadedVmar]
/// * [HandleType::StackVmo]
/// * [HandleType::ExecutableVmo]
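    ///
    /// A brief sketch (mirroring how the tests below pass a DirectoryRequest handle; the builder
    /// is assumed to already exist):
    ///
    /// ```no_run
    /// # use process_builder::{ProcessBuilder, StartupHandle};
    /// # use fuchsia_runtime::{HandleInfo, HandleType};
    /// # use fuchsia_zircon as zx;
    /// # use fuchsia_zircon::HandleBased;
    /// # fn add_dir_request(builder: &mut ProcessBuilder) -> Result<(), anyhow::Error> {
    /// let (_dir_client, dir_server) = zx::Channel::create()?;
    /// builder.add_handles(vec![StartupHandle {
    ///     handle: dir_server.into_handle(),
    ///     info: HandleInfo::new(HandleType::DirectoryRequest, 0),
    /// }])?;
    /// # Ok(())
    /// # }
    /// ```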
pub fn add_handles(
&mut self,
mut startup_handles: Vec<StartupHandle>,
) -> Result<(), ProcessBuilderError> {
// Do a bit of validation before adding to the bootstrap handles.
for h in &startup_handles {
if h.handle.is_invalid() {
return Err(ProcessBuilderError::BadHandle("Invalid handle in startup handles"));
}
let t = h.info.handle_type();
match t {
HandleType::NamespaceDirectory => {
return Err(ProcessBuilderError::InvalidArg(
"Cannot add NamespaceDirectory handles directly, use add_namespace_entries"
.into(),
));
}
HandleType::ProcessSelf
| HandleType::ThreadSelf
| HandleType::RootVmar
| HandleType::LoadedVmar
| HandleType::StackVmo
| HandleType::ExecutableVmo => {
return Err(ProcessBuilderError::InvalidArg(format!(
"Cannot add a {:?} handle directly, it will be automatically added",
t,
)));
}
_ => {}
}
}
// Intentionally separate from validation so that we don't partially add namespace entries.
for h in startup_handles.drain(..) {
match h.info.handle_type() {
HandleType::LdsvcLoader => {
// Automatically pass this to |set_loader_service| instead.
self.set_loader_service(ClientEnd::from(h.handle))?;
}
HandleType::VdsoVmo => {
self.set_vdso_vmo(h.handle.into());
}
_ => {
self.msg_contents.handles.push(h);
}
}
}
Ok(())
}
/// Add directories to the process's namespace.
///
/// Successive calls append new namespace entries, not replace previous entries.
///
/// Each [NamespaceEntry] contains a client connection to a fuchsia.io.Directory FIDL service
/// and a path to add that directory to the process's namespace as.
///
/// # Errors
///
/// Returns Err([ProcessBuilderError::InvalidArg]) if the maximum number of namespace entries
/// (2^16) was reached and the entry could not be added. This is exceedingly unlikely, and most
/// likely if you are anywhere near this limit [ProcessBuilder::build()] will fail because the
    /// process's processargs startup message is over its own length limit.
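    ///
    /// A short sketch (the `/pkg` path and the source of the directory client end are
    /// illustrative, not part of the original docs):
    ///
    /// ```no_run
    /// # use process_builder::{NamespaceEntry, ProcessBuilder};
    /// # use fidl::endpoints::ClientEnd;
    /// # use fidl_fuchsia_io as fio;
    /// # use std::ffi::CString;
    /// # fn add_pkg_dir(
    /// #     builder: &mut ProcessBuilder,
    /// #     pkg_dir: ClientEnd<fio::DirectoryMarker>,
    /// # ) -> Result<(), anyhow::Error> {
    /// builder.add_namespace_entries(vec![NamespaceEntry {
    ///     path: CString::new("/pkg")?,
    ///     directory: pkg_dir,
    /// }])?;
    /// # Ok(())
    /// # }
    /// ```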
pub fn add_namespace_entries(
&mut self,
mut entries: Vec<NamespaceEntry>,
) -> Result<(), ProcessBuilderError> {
        // Namespace entries are split into a namespace path, which is included in the bootstrap
// message (as the so-called "namespace table"), plus a NamespaceDirectory handle, where the arg
// value is the index of the path in the namespace table.
//
// Check that the namespace table doesn't exceed 2^16 entries, since the HandleInfo arg is
// only 16-bits. Realistically this will never matter - if you're anywhere near this
// many entries, you're going to exceed the bootstrap message length limit - but Rust
// encourages us (and makes it easy) to be safe about the edge case here.
let mut idx = u16::try_from(self.msg_contents.namespace_paths.len())
.expect("namespace_paths.len should never be larger than a u16");
let num_entries = u16::try_from(entries.len())
.map_err(|_| ProcessBuilderError::InvalidArg("Too many namespace entries".into()))?;
if idx.checked_add(num_entries).is_none() {
return Err(ProcessBuilderError::InvalidArg(
"Can't add namespace entries, limit reached".into(),
));
}
for entry in &entries {
if entry.directory.is_invalid_handle() {
return Err(ProcessBuilderError::BadHandle("Invalid handle in namespace entry"));
}
}
        // Intentionally separate from validation so that we don't partially add namespace entries.
for entry in entries.drain(..) {
self.msg_contents.namespace_paths.push(entry.path);
self.msg_contents.handles.push(StartupHandle {
handle: zx::Handle::from(entry.directory),
info: HandleInfo::new(HandleType::NamespaceDirectory, idx),
});
idx += 1;
}
Ok(())
}
/// Build the new process using the data and handles provided to the ProcessBuilder.
///
/// The return value of this function is a [BuiltProcess] struct which contains the new process
/// and all the handles and data needed to start it, but the process' initial thread is not yet
/// started. Use [BuiltProcess::start()] or the [zx_process_start] syscall to actually start
/// it.
///
/// # Errors
///
/// There are many errors that could result during process loading and only some are listed
/// here. See [ProcessBuilderError] for the various error types that can be returned.
///
/// Returns Err([ProcessBuilderError::LoaderMissing]) if the ELF executable is dynamically
/// linked (has a PT_INTERP program header) but no loader service has been provided through
/// [ProcessBuilder::set_loader_service()] or [ProcessBuilder::add_handles()].
///
/// [zx_process_start]: https://fuchsia.dev/fuchsia-src/reference/syscalls/process_start.md
pub async fn build(mut self) -> Result<BuiltProcess, ProcessBuilderError> {
// Parse the executable as an ELF64 file, reading in the headers we need. Done first since
// this is most likely to be invalid and error out.
let elf_headers = elf_parse::Elf64Headers::from_vmo(&self.executable)?;
// Create bootstrap message channel.
let (bootstrap_rd, bootstrap_wr) = zx::Channel::create()
.map_err(|s| ProcessBuilderError::GenericStatus("Failed to create channel", s))?;
// Check if main executable is dynamically linked, and handle appropriately.
let loaded_elf;
let mut reserve_vmar = None;
let dynamic;
if let Some(interp_hdr) =
elf_headers.program_header_with_type(elf_parse::SegmentType::Interp)?
{
// Dynamically linked so defer loading the main executable to the dynamic
// linker/loader, which we load here instead.
dynamic = true;
// Check that a ldsvc.Loader service was provided.
let ldsvc = self.ldsvc.take().ok_or(ProcessBuilderError::LoaderMissing())?;
// A process using PT_INTERP might be loading a libc.so that supports sanitizers;
// reserve the low address region for sanitizers to allocate shadow memory.
//
// The reservation VMAR ensures that the initial allocations & mappings made in this
// function stay out of this area. It is destroyed below before returning and the
// process's own allocations can use the full address space.
//
// !! WARNING: This makes a specific address VMAR allocation, so it must come before
// any elf_load::load_elf calls. !!
reserve_vmar =
Some(ReservationVmar::reserve_low_address_space(&self.common.root_vmar)?);
// Get the dynamic linker and map it into the process's address space.
let ld_vmo = get_dynamic_linker(&ldsvc, &self.executable, interp_hdr).await?;
let ld_headers = elf_parse::Elf64Headers::from_vmo(&ld_vmo)?;
loaded_elf = elf_load::load_elf(&ld_vmo, &ld_headers, &self.common.root_vmar)?;
// Build the dynamic linker bootstrap message and write it to the bootstrap channel.
// This message is written before the primary bootstrap message since it is consumed
// first in the dynamic linker.
let executable = mem::replace(&mut self.executable, zx::Handle::invalid().into());
let msg = self.build_linker_message(ldsvc, executable, loaded_elf.vmar)?;
msg.write(&bootstrap_wr).map_err(ProcessBuilderError::WriteBootstrapMessage)?;
} else {
// Statically linked but still position-independent (ET_DYN) ELF, load directly.
dynamic = false;
loaded_elf =
elf_load::load_elf(&self.executable, &elf_headers, &self.common.root_vmar)?;
self.msg_contents.handles.push(StartupHandle {
handle: loaded_elf.vmar.into_handle(),
info: HandleInfo::new(HandleType::LoadedVmar, 0),
});
}
// Load the vDSO - either the default system vDSO, or the user-provided one - into the
        // process's address space and add a handle to it to the bootstrap message.
let vdso_base = self.load_vdso()?;
// Calculate initial stack size.
let mut stack_size;
let stack_vmo_name;
if dynamic {
// Calculate the initial stack size for the dynamic linker. This factors in the size of
// an extra handle for the stack that hasn't yet been added to the message contents,
// since creating the stack requires this size.
stack_size = calculate_initial_linker_stack_size(&mut self.msg_contents, 1)?;
stack_vmo_name = format!("stack: msg of {:#x?}", stack_size);
} else {
// Set stack size from PT_GNU_STACK header, if present, or use the default. The dynamic
// linker handles this for dynamically linked ELFs (above case).
const ZIRCON_DEFAULT_STACK_SIZE: usize = 256 << 10; // 256KiB
let mut ss = ("default", ZIRCON_DEFAULT_STACK_SIZE);
if let Some(stack_hdr) =
elf_headers.program_header_with_type(elf_parse::SegmentType::GnuStack)?
{
if stack_hdr.memsz > 0 {
ss = ("explicit", stack_hdr.memsz as usize);
}
}
// Stack size must be page aligned.
stack_size = util::page_end(ss.1);
stack_vmo_name = format!("stack: {} {:#x?}", ss.0, stack_size);
}
if stack_size < self.min_stack_size {
stack_size = util::page_end(self.min_stack_size);
}
// Allocate the initial thread's stack, map it, and add a handle to the bootstrap message.
let stack_vmo_name =
CString::new(stack_vmo_name).expect("Stack VMO name must not contain interior nul's");
let stack_info = self.create_stack(stack_size, &stack_vmo_name)?;
// Build and send the primary bootstrap message.
let msg = processargs::Message::build(self.msg_contents)?;
msg.write(&bootstrap_wr).map_err(ProcessBuilderError::WriteBootstrapMessage)?;
// Explicitly destroy the reservation VMAR before returning so that we can be sure it is
// gone (so we don't end up with a process with half its address space gone).
if let Some(mut r) = reserve_vmar {
r.destroy().map_err(ProcessBuilderError::DestroyReservationVMAR)?;
}
Ok(BuiltProcess {
process: self.common.process,
root_vmar: self.common.root_vmar,
thread: self.common.thread,
entry: loaded_elf.entry,
stack: stack_info.stack_ptr,
stack_base: stack_info.stack_base,
stack_vmo: stack_info.stack_vmo,
bootstrap: bootstrap_rd,
vdso_base: vdso_base,
elf_base: loaded_elf.vmar_base,
elf_headers,
})
}
/// Build the bootstrap message for the dynamic linker, which uses the same processargs
/// protocol as the message for the main process but somewhat different contents.
///
/// The LoaderProxy provided must be ready to be converted to a Handle with into_channel(). In
/// other words, there must be no other active clones of the proxy, no open requests, etc. The
/// intention is that the user provides a handle only (perhaps wrapped in a ClientEnd) through
/// [ProcessBuilder::set_loader_service()], not a Proxy, so the library can be sure this
/// invariant is maintained and a failure is a library bug.
fn build_linker_message(
&self,
ldsvc: fldsvc::LoaderProxy,
executable: zx::Vmo,
loaded_vmar: zx::Vmar,
) -> Result<processargs::Message, ProcessBuilderError> {
        // Don't need to use the ldsvc.Loader anymore; turn it back into a raw handle so
// we can pass it along in the dynamic linker bootstrap message.
let ldsvc_hnd =
ldsvc.into_channel().expect("Failed to get channel from LoaderProxy").into_zx_channel();
// The linker message only needs a subset of argv and envvars.
let args = extract_ld_arguments(&self.msg_contents.args);
let environment_vars =
extract_ld_environment_variables(&self.msg_contents.environment_vars);
let mut linker_msg_contents = processargs::MessageContents {
// Argument strings are sent to the linker so that it can use argv[0] in messages it
// prints.
args,
// Environment variables are sent to the linker so that it can see vars like LD_DEBUG.
environment_vars,
// Process namespace is not set up or used in the linker.
namespace_paths: vec![],
// Loader message includes a few special handles needed to do its job, plus a set of
// handles common to both messages which are generated by this library.
handles: vec![
StartupHandle {
handle: ldsvc_hnd.into_handle(),
info: HandleInfo::new(HandleType::LdsvcLoader, 0),
},
StartupHandle {
handle: executable.into_handle(),
info: HandleInfo::new(HandleType::ExecutableVmo, 0),
},
StartupHandle {
handle: loaded_vmar.into_handle(),
info: HandleInfo::new(HandleType::LoadedVmar, 0),
},
],
};
self.common.add_to_message(&mut linker_msg_contents)?;
Ok(processargs::Message::build(linker_msg_contents)?)
}
    /// Load the vDSO VMO into the process's address space and add a handle to it to the bootstrap
/// message. If a vDSO VMO is provided, loads that one, otherwise loads the default system
/// vDSO. Returns the base address that the vDSO was mapped into.
fn load_vdso(&mut self) -> Result<usize, ProcessBuilderError> {
let vdso = match self.non_default_vdso.take() {
Some(vmo) => Ok(vmo),
None => get_system_vdso_vmo(),
}?;
let vdso_headers = elf_parse::Elf64Headers::from_vmo(&vdso)?;
let loaded_vdso = elf_load::load_elf(&vdso, &vdso_headers, &self.common.root_vmar)?;
self.msg_contents.handles.push(StartupHandle {
handle: vdso.into_handle(),
info: HandleInfo::new(HandleType::VdsoVmo, 0),
});
Ok(loaded_vdso.vmar_base)
}
/// Allocate the initial thread's stack, map it, and add a handle to the bootstrap message.
/// Returns the initial stack pointer for the process.
///
    /// Note that launchpad supported not allocating a stack at all, but that only happened if an
    /// explicit stack size of 0 was set. ProcessBuilder does not support skipping stack allocation,
    /// so a stack is always created.
fn create_stack(
&mut self,
stack_size: usize,
vmo_name: &CStr,
) -> Result<StackInfo, ProcessBuilderError> {
let stack_vmo = zx::Vmo::create(stack_size as u64).map_err(|s| {
ProcessBuilderError::GenericStatus("Failed to create VMO for initial thread stack", s)
})?;
stack_vmo
.set_name(&vmo_name)
.map_err(|s| ProcessBuilderError::GenericStatus("Failed to set stack VMO name", s))?;
let stack_flags = zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE;
let stack_base =
self.common.root_vmar.map(0, &stack_vmo, 0, stack_size, stack_flags).map_err(|s| {
ProcessBuilderError::GenericStatus("Failed to map initial stack", s)
})?;
let stack_ptr = compute_initial_stack_pointer(stack_base, stack_size);
let dup_stack_vmo = stack_vmo.duplicate_handle(zx::Rights::SAME_RIGHTS).map_err(|s| {
ProcessBuilderError::GenericStatus("Failed to duplicate initial stack", s)
})?;
// Pass the stack VMO to the process. Our protocol with the new process is that we warrant
// that this is the VMO from which the initial stack is mapped and that we've exactly
// mapped the entire thing, so vm_object_get_size on this in concert with the initial SP
// value tells it the exact bounds of its stack.
self.msg_contents.handles.push(StartupHandle {
handle: dup_stack_vmo.into_handle(),
info: HandleInfo::new(HandleType::StackVmo, 0),
});
Ok(StackInfo { stack_ptr, stack_base, stack_vmo })
}
}
/// Calculate the size of the initial stack to allocate for the dynamic linker, based on the given
/// processargs message contents.
///
/// The initial stack is used just for startup work in the dynamic linker and to hold the bootstrap
/// message, so we attempt to make it only as big as needed. The size returned is based on the
/// stack space needed to read the bootstrap message with zx_channel_read, and thus includes the
/// message data itself plus the size of the handles (i.e. the size of N zx_handle_t's).
///
/// This also allows the caller to specify a number of "extra handles" to factor into the size
/// calculation. This allows the size to be calculated before all the real handles have been added
/// to the contents, for example if the size is needed to create those handles.
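///
/// As a rough worked example (assuming 4 KiB pages and the PTHREAD_STACK_MIN value below): a
/// 1024-byte message with 8 handles gives page_end(1024 + 8 * 4 + 3072) = page_end(4128) = 8192.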
fn calculate_initial_linker_stack_size(
msg_contents: &mut processargs::MessageContents,
extra_handles: usize,
) -> Result<usize, ProcessBuilderError> {
// Add N placeholder handles temporarily to factor in the size of handles that are not yet
// added to the message contents.
msg_contents.handles.extend(
iter::repeat_with(|| StartupHandle {
handle: zx::Handle::invalid(),
info: HandleInfo::new(HandleType::User0, 0),
})
.take(extra_handles),
);
// Include both the message data size and the size of the handles since we're calculating the
// stack space required to read the message.
let num_handles = msg_contents.handles.len();
let msg_stack_size = processargs::Message::calculate_size(msg_contents)?
+ num_handles * mem::size_of::<zx::sys::zx_handle_t>();
msg_contents.handles.truncate(num_handles - extra_handles);
// PTHREAD_STACK_MIN is defined by the C library in
// //zircon/third_party/ulib/musl/include/limits.h. It is tuned enough to cover the dynamic
// linker and C library startup code's stack usage (up until the point it switches to its own
    // stack in __libc_start_main), while leaving a little slack so that for small bootstrap
    // message sizes the stack needs only one page.
const PTHREAD_STACK_MIN: usize = 3072;
Ok(util::page_end(msg_stack_size + PTHREAD_STACK_MIN))
}
/// Extract only the arguments that are needed for a linker message.
fn extract_ld_arguments(arguments: &[CString]) -> Vec<CString> {
let mut extracted = vec![];
if let Some(argument) = arguments.get(0) {
extracted.push(argument.clone())
}
extracted
}
/// Extract only the environment variables that are needed for a linker message.
fn extract_ld_environment_variables(envvars: &[CString]) -> Vec<CString> {
let prefixes = ["LD_DEBUG=", "LD_TRACE="];
let mut extracted = vec![];
for envvar in envvars {
for prefix in &prefixes {
let envvar_bytes: &[u8] = envvar.to_bytes();
let prefix_bytes: &[u8] = prefix.as_bytes();
if envvar_bytes.starts_with(prefix_bytes) {
extracted.push(envvar.clone());
continue;
}
}
}
extracted
}
impl CommonMessageHandles {
    /// Adds the handles created by this library which are common to both the linker and main
    /// processargs messages to the given message contents, duplicating handles as needed.
fn add_to_message(
&self,
msg: &mut processargs::MessageContents,
) -> Result<(), ProcessBuilderError> {
let handles: &[(zx::HandleRef<'_>, &str, HandleType)] = &[
(self.process.as_handle_ref(), "Failed to dup process handle", HandleType::ProcessSelf),
(self.root_vmar.as_handle_ref(), "Failed to dup VMAR handle", HandleType::RootVmar),
(self.thread.as_handle_ref(), "Failed to dup thread handle", HandleType::ThreadSelf),
];
for (handle, err_str, handle_type) in handles {
let dup = handle
.duplicate(zx::Rights::SAME_RIGHTS)
.map_err(|s| ProcessBuilderError::GenericStatus(err_str, s))?;
msg.handles.push(StartupHandle { handle: dup, info: HandleInfo::new(*handle_type, 0) });
}
Ok(())
}
}
/// Returns an owned VMO handle to the system vDSO ELF image, duplicated from the handle provided
/// to this process through its own processargs bootstrap message.
fn get_system_vdso_vmo() -> Result<zx::Vmo, ProcessBuilderError> {
lazy_static! {
static ref VDSO_VMO: zx::Vmo = {
zx::Vmo::from(
fuchsia_runtime::take_startup_handle(HandleInfo::new(HandleType::VdsoVmo, 0))
.expect("Failed to take VDSO VMO startup handle"),
)
};
}
let vdso_dup = VDSO_VMO
.duplicate_handle(zx::Rights::SAME_RIGHTS)
.map_err(|s| ProcessBuilderError::GenericStatus("Failed to dup vDSO VMO handle", s))?;
Ok(vdso_dup)
}
// Copied from //zircon/system/ulib/elf-psabi/include/lib/elf-psabi/sp.h, must be kept in sync with
// that.
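// For example (illustrative numbers only): with base = 0x1000 and size = 0x4000 the aligned top
// is 0x5000; on x86-64 the returned value is 0x4ff8 so that %rsp % 16 == 8 at process entry.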
fn compute_initial_stack_pointer(base: usize, size: usize) -> usize {
// Assume stack grows down.
let mut sp = base.checked_add(size).expect("Overflow in stack pointer calculation");
// The x86-64 and AArch64 ABIs require 16-byte alignment.
// The 32-bit ARM ABI only requires 8-byte alignment, but 16-byte alignment is preferable for
// NEON so use it there too.
sp &= 16usize.wrapping_neg();
// The x86-64 ABI requires %rsp % 16 = 8 on entry. The zero word at (%rsp) serves as the
// return address for the outermost frame.
#[cfg(target_arch = "x86_64")]
{
sp -= 8;
}
// The ARMv7 and ARMv8 ABIs both just require that SP be aligned, so just catch unknown archs.
#[cfg(not(any(target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64")))]
{
compile_error!("Unknown target_arch");
}
sp
}
/// Load the dynamic linker/loader specified in the PT_INTERP header via the fuchsia.ldsvc.Loader
/// handle.
async fn get_dynamic_linker<'a>(
ldsvc: &'a fldsvc::LoaderProxy,
executable: &'a zx::Vmo,
interp_hdr: &'a elf_parse::Elf64ProgramHeader,
) -> Result<zx::Vmo, ProcessBuilderError> {
// Read the dynamic linker name from the main VMO, based on the PT_INTERP header.
let mut interp = vec![0u8; interp_hdr.filesz as usize];
executable
.read(&mut interp[..], interp_hdr.offset as u64)
.map_err(|s| ProcessBuilderError::GenericStatus("Failed to read from VMO", s))?;
// Trim null terminator included in filesz.
match interp.pop() {
Some(b'\0') => Ok(()),
_ => Err(ProcessBuilderError::InvalidInterpHeader(anyhow!("Missing null terminator"))),
}?;
let interp_str = std::str::from_utf8(&interp)
.context("Invalid UTF8")
.map_err(ProcessBuilderError::InvalidInterpHeader)?;
// Retrieve the dynamic linker as a VMO from fuchsia.ldsvc.Loader
const LDSO_LOAD_TIMEOUT_SEC: i64 = 30;
let load_fut = ldsvc
.load_object(interp_str)
.map_err(ProcessBuilderError::LoadDynamicLinker)
.on_timeout(fasync::Time::after(LDSO_LOAD_TIMEOUT_SEC.seconds()), || {
Err(ProcessBuilderError::LoadDynamicLinkerTimeout())
});
let (status, ld_vmo) = load_fut.await?;
zx::Status::ok(status).map_err(|s| {
ProcessBuilderError::GenericStatus(
"Failed to load dynamic linker from fuchsia.ldsvc.Loader",
s,
)
})?;
Ok(ld_vmo.ok_or(ProcessBuilderError::GenericStatus(
"load_object status was OK but no VMO",
zx::Status::INTERNAL,
))?)
}
impl BuiltProcess {
/// Start an already built process.
///
/// This is a simple wrapper around the [zx_process_start] syscall that consumes the handles
/// and data in the BuiltProcess struct as needed.
///
/// [zx_process_start]: https://fuchsia.dev/fuchsia-src/reference/syscalls/process_start.md
pub fn start(self) -> Result<zx::Process, ProcessBuilderError> {
self.process
.start(
&self.thread,
self.entry,
self.stack,
self.bootstrap.into_handle(),
self.vdso_base,
)
.map_err(ProcessBuilderError::ProcessStart)?;
Ok(self.process)
}
}
struct ReservationVmar(Option<zx::Vmar>);
impl ReservationVmar {
/// Reserve the lower half of the address space of the given VMAR by allocating another VMAR.
///
/// The VMAR wrapped by this reservation is automatically destroyed when the reservation
/// is dropped.
fn reserve_low_address_space(vmar: &zx::Vmar) -> Result<ReservationVmar, ProcessBuilderError> {
let info = vmar
.info()
.map_err(|s| ProcessBuilderError::GenericStatus("Failed to get VMAR info", s))?;
// Reserve the lower half of the full address space, not just half of the VMAR length.
// (base+len) represents the full address space, assuming this is used with a root VMAR and
// length extends to the end of the address space, including a region the kernel reserves
// at the start of the space.
let reserve_size = util::page_end((info.base + info.len) / 2) - info.base;
let (reserve_vmar, reserve_base) =
vmar.allocate(0, reserve_size, zx::VmarFlags::SPECIFIC).map_err(|s| {
ProcessBuilderError::GenericStatus("Failed to allocate reservation VMAR", s)
})?;
assert_eq!(reserve_base, info.base, "Reservation VMAR allocated at wrong address");
Ok(ReservationVmar(Some(reserve_vmar)))
}
/// Destroy the reservation. The reservation is also automatically destroyed when
/// ReservationVmar is dropped.
///
    /// VMARs are not destroyed when the handle is closed (by dropping), so we must explicitly destroy
/// it to release the reservation and allow the created process to use the full address space.
fn destroy(&mut self) -> Result<(), zx::Status> {
match self.0.take() {
Some(vmar) => {
// This is safe because there are no mappings in the region and it is not a region
// in the current process.
unsafe { vmar.destroy() }
}
None => Ok(()),
}
}
}
// This is probably unnecessary, but it feels wrong to rely on the side effect of the process's
// root VMAR going away. We explicitly call destroy if ProcessBuilder.build() succeeds and returns
// a BuiltProcess, in which case this will do nothing, and if build() fails then the new process
// and its root VMAR will get cleaned up along with this sub-VMAR.
impl Drop for ReservationVmar {
fn drop(&mut self) {
self.destroy().unwrap_or_else(|e| warn!("Failed to destroy reservation VMAR: {}", e));
}
}
/// Error type returned by ProcessBuilder methods.
#[allow(missing_docs)] // No docs on individual error variants.
#[derive(Error, Debug)]
pub enum ProcessBuilderError {
#[error("{}", _0)]
InvalidArg(String),
#[error("{}", _0)]
BadHandle(&'static str),
#[error("Failed to create process: {}", _0)]
CreateProcess(zx::Status),
#[error("Failed to create thread: {}", _0)]
CreateThread(zx::Status),
#[error("Failed to start process: {}", _0)]
ProcessStart(zx::Status),
#[error("Failed to parse ELF: {}", _0)]
ElfParse(#[from] elf_parse::ElfParseError),
#[error("Failed to load ELF: {}", _0)]
ElfLoad(#[from] elf_load::ElfLoadError),
#[error("{}", _0)]
Processargs(#[from] processargs::ProcessargsError),
#[error("{}: {}", _0, _1)]
GenericStatus(&'static str, zx::Status),
#[error("{}: {}", _0, _1)]
Internal(&'static str, #[source] anyhow::Error),
#[error("Invalid PT_INTERP header: {}", _0)]
InvalidInterpHeader(#[source] anyhow::Error),
#[error("Failed to build process with dynamic ELF, missing fuchsia.ldsvc.Loader handle")]
LoaderMissing(),
#[error("Failed to load dynamic linker from fuchsia.ldsvc.Loader: {}", _0)]
LoadDynamicLinker(#[source] fidl::Error),
#[error("Timed out loading dynamic linker from fuchsia.ldsvc.Loader")]
LoadDynamicLinkerTimeout(),
#[error("Failed to write bootstrap message to channel: {}", _0)]
WriteBootstrapMessage(zx::Status),
#[error("Failed to destroy reservation VMAR: {}", _0)]
DestroyReservationVMAR(zx::Status),
}
impl ProcessBuilderError {
/// Returns an appropriate zx::Status code for the given error.
pub fn as_zx_status(&self) -> zx::Status {
match self {
ProcessBuilderError::InvalidArg(_)
| ProcessBuilderError::InvalidInterpHeader(_)
| ProcessBuilderError::LoaderMissing() => zx::Status::INVALID_ARGS,
ProcessBuilderError::BadHandle(_) => zx::Status::BAD_HANDLE,
ProcessBuilderError::CreateProcess(s)
| ProcessBuilderError::CreateThread(s)
| ProcessBuilderError::ProcessStart(s)
| ProcessBuilderError::GenericStatus(_, s)
| ProcessBuilderError::WriteBootstrapMessage(s)
| ProcessBuilderError::DestroyReservationVMAR(s) => *s,
ProcessBuilderError::ElfParse(e) => e.as_zx_status(),
ProcessBuilderError::ElfLoad(e) => e.as_zx_status(),
ProcessBuilderError::Processargs(e) => e.as_zx_status(),
ProcessBuilderError::Internal(_, _) => zx::Status::INTERNAL,
ProcessBuilderError::LoadDynamicLinker(_) => zx::Status::NOT_FOUND,
ProcessBuilderError::LoadDynamicLinkerTimeout() => zx::Status::TIMED_OUT,
}
}
}
#[cfg(test)]
mod tests {
use {
super::*,
anyhow::Error,
fidl::endpoints::{ProtocolMarker, Proxy, ServerEnd},
fidl_fuchsia_io as fio,
fidl_test_processbuilder::{UtilMarker, UtilProxy},
fuchsia_async as fasync,
matches::assert_matches,
std::mem,
vfs::{
directory::entry::DirectoryEntry, execution_scope::ExecutionScope,
file::vmo::read_only_const, pseudo_directory,
},
zerocopy::LayoutVerified,
};
extern "C" {
fn dl_clone_loader_service(handle: *mut zx::sys::zx_handle_t) -> zx::sys::zx_status_t;
}
// Clone the current loader service to provide to the new test processes.
fn clone_loader_service() -> Result<ClientEnd<fldsvc::LoaderMarker>, zx::Status> {
let mut raw = 0;
let status = unsafe { dl_clone_loader_service(&mut raw) };
zx::Status::ok(status)?;
let handle = unsafe { zx::Handle::from_raw(raw) };
Ok(ClientEnd::new(zx::Channel::from(handle)))
}
fn connect_util(client: &zx::Channel) -> Result<UtilProxy, Error> {
let (proxy, server) = zx::Channel::create()?;
fdio::service_connect_at(&client, UtilMarker::NAME, server)
.context("failed to connect to util service")?;
Ok(UtilProxy::from_channel(fasync::Channel::from_channel(proxy)?))
}
fn create_test_util_builder() -> Result<ProcessBuilder, Error> {
const TEST_UTIL_BIN: &'static str = "/pkg/bin/process_builder_test_util";
let file =
fdio::open_fd(TEST_UTIL_BIN, fio::OPEN_RIGHT_READABLE | fio::OPEN_RIGHT_EXECUTABLE)?;
let vmo = fdio::get_vmo_exec_from_file(&file)?;
let job = fuchsia_runtime::job_default();
let procname = CString::new(TEST_UTIL_BIN.to_owned())?;
Ok(ProcessBuilder::new(&procname, &job, vmo)?)
}
// Common builder setup for all tests that start a test util process.
fn setup_test_util_builder(set_loader: bool) -> Result<(ProcessBuilder, UtilProxy), Error> {
let mut builder = create_test_util_builder()?;
if set_loader {
builder.add_handles(vec![StartupHandle {
handle: clone_loader_service()?.into_handle(),
info: HandleInfo::new(HandleType::LdsvcLoader, 0),
}])?;
}
let (dir_client, dir_server) = zx::Channel::create()?;
builder.add_handles(vec![StartupHandle {
handle: dir_server.into_handle(),
info: HandleInfo::new(HandleType::DirectoryRequest, 0),
}])?;
let proxy = connect_util(&dir_client)?;
Ok((builder, proxy))
}
fn check_process_running(process: &zx::Process) -> Result<(), Error> {
let info = process.info()?;
const STARTED: u32 = zx::ProcessInfoFlags::STARTED.bits();
assert_matches!(
info,
zx::ProcessInfo {
return_code: 0,
start_time,
flags: STARTED,
} if start_time > 0
);
Ok(())
}
async fn check_process_exited_ok(process: &zx::Process) -> Result<(), Error> {
fasync::OnSignals::new(process, zx::Signals::PROCESS_TERMINATED).await?;
let info = process.info()?;
const STARTED_AND_EXITED: u32 =
zx::ProcessInfoFlags::STARTED.bits() | zx::ProcessInfoFlags::EXITED.bits();
assert_matches!(
info,
zx::ProcessInfo {
return_code: 0,
start_time,
flags: STARTED_AND_EXITED,
} if start_time > 0
);
Ok(())
}
// These start_util_with_* tests cover the most common paths through ProcessBuilder and
// exercise most of its functionality. They verify that we can create a new process for a
// "standard" dynamically linked executable and that we can provide arguments, environment
// variables, namespace entries, and other handles to it through the startup processargs
// message. The test communicates with the test util process it creates over a test-only FIDL
// API to verify that arguments and whatnot were passed correctly.
#[fasync::run_singlethreaded(test)]
async fn start_util_with_args() -> Result<(), Error> {
let test_args = vec!["arg0", "arg1", "arg2"];
let test_args_cstr =
test_args.iter().map(|s| CString::new(s.clone())).collect::<Result<_, _>>()?;
let (mut builder, proxy) = setup_test_util_builder(true)?;
builder.add_arguments(test_args_cstr);
let process = builder.build().await?.start()?;
check_process_running(&process)?;
// Use the util protocol to confirm that the new process was set up correctly. A successful
// connection to the util validates that handles are passed correctly to the new process,
// since the DirectoryRequest handle made it.
let proc_args = proxy.get_arguments().await.context("failed to get args from util")?;
assert_eq!(proc_args, test_args);
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
#[fasync::run_singlethreaded(test)]
async fn start_util_with_huge_args() -> Result<(), Error> {
// This test is partially designed to probe the stack usage of
// code processing the initial loader message. Such processing
// is on a stack of limited size, a few pages, and well
// smaller than a maximally large channel packet. Each
// instance of "arg" takes 4 bytes (counting the separating
// '\0' byte), so let's send 10k of them to be well larger
// than the initial stack but well within the 64k channel size.
let test_args = vec!["arg"; 10 * 1000];
let test_args_cstr =
test_args.iter().map(|s| CString::new(s.clone())).collect::<Result<_, _>>()?;
let (mut builder, proxy) = setup_test_util_builder(true)?;
builder.add_arguments(test_args_cstr);
let process = builder.build().await?.start()?;
check_process_running(&process)?;
// Use the util protocol to confirm that the new process was set up correctly. A successful
// connection to the util validates that handles are passed correctly to the new process,
// since the DirectoryRequest handle made it.
// We can't use get_arguments() here because the FIDL response will be bigger than the
        // maximum message size and cause the process to crash. Instead, we just check the number
        // of arguments and assume that if that's correct we're good to go.
        // Size of each vector entry: 16 bytes (length = 8, pointer = 8) + 8 bytes of string data = 24.
        // Message size = 10,000 entries * 24 bytes = 240,000 > 65,536.
let proc_args =
proxy.get_argument_count().await.context("failed to get arg count from util")?;
assert_eq!(proc_args, test_args.len() as u64);
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
// Verify that the lifecycle channel can be passed through the bootstrap
// channel. This test checks by creating a channel, passing it through,
// asking the remote process for the lifecycle channel's koid, and then
// comparing that koid to the one the test recorded.
#[fasync::run_singlethreaded(test)]
async fn start_util_with_lifecycle_channel() -> Result<(), Error> {
let (mut builder, proxy) = setup_test_util_builder(true)?;
let (lifecycle_server, _lifecycle_client) = zx::Channel::create()?;
let koid = lifecycle_server
.as_handle_ref()
.basic_info()
.expect("error getting server handle info")
.koid
.raw_koid();
builder.add_handles(vec![StartupHandle {
handle: lifecycle_server.into_handle(),
info: HandleInfo::new(HandleType::Lifecycle, 0),
}])?;
let process = builder.build().await?.start()?;
check_process_running(&process)?;
// Use the util protocol to confirm that the new process received the
// lifecycle channel
let reported_koid =
proxy.get_lifecycle_koid().await.context("failed getting koid from util")?;
assert_eq!(koid, reported_koid);
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
// Verify that if no lifecycle channel is sent via the bootstrap channel
// that the remote process reports ZX_KOID_INVALID for the channel koid.
#[fasync::run_singlethreaded(test)]
async fn start_util_with_no_lifecycle_channel() -> Result<(), Error> {
let (builder, proxy) = setup_test_util_builder(true)?;
let process = builder.build().await?.start()?;
check_process_running(&process)?;
// Use the util protocol to confirm that the new process received the
// lifecycle channel
let reported_koid =
proxy.get_lifecycle_koid().await.context("failed getting koid from util")?;
assert_eq!(zx::sys::ZX_KOID_INVALID, reported_koid);
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
#[fasync::run_singlethreaded(test)]
async fn start_util_with_big_stack() -> Result<(), Error> {
const STACK_SIZE: usize = util::PAGE_SIZE * 10;
let (mut builder, proxy) = setup_test_util_builder(true)?;
builder.set_min_stack_size(STACK_SIZE);
let built = builder.build().await?;
assert!(built.stack_vmo.get_size()? >= STACK_SIZE as u64);
let process = built.start()?;
check_process_running(&process)?;
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
#[fasync::run_singlethreaded(test)]
async fn elf_headers() -> Result<(), Error> {
let (builder, _) = setup_test_util_builder(true)?;
let built = builder.build().await?;
assert!(
built.elf_headers.file_header().phnum
== built.elf_headers.program_headers().len() as u16
);
Ok(())
}
// Verify that a loader service handle is properly handled if passed directly to
// set_loader_service instead of through add_handles. Otherwise this test is identical to
// start_util_with_args.
#[fasync::run_singlethreaded(test)]
async fn set_loader_directly() -> Result<(), Error> {
let test_args = vec!["arg0", "arg1", "arg2"];
let test_args_cstr =
test_args.iter().map(|s| CString::new(s.clone())).collect::<Result<_, _>>()?;
let (mut builder, proxy) = setup_test_util_builder(false)?;
builder.set_loader_service(clone_loader_service()?)?;
builder.add_arguments(test_args_cstr);
let process = builder.build().await?.start()?;
check_process_running(&process)?;
// Use the util protocol to confirm that the new process was set up correctly. A successful
// connection to the util validates that handles are passed correctly to the new process,
// since the DirectoryRequest handle made it.
let proc_args = proxy.get_arguments().await.context("failed to get args from util")?;
assert_eq!(proc_args, test_args);
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
// Verify that a vDSO handle is properly handled if passed directly to set_vdso_vmo instead of
// relying on the default value.
// Note: There isn't a great way to tell here whether the vDSO VMO we passed in was used
// instead of the default (because the kernel only allows use of vDSOs that it created for
// security, so we can't make a fake vDSO with a different name or something), so that isn't
// checked explicitly. The failure tests below make sure we don't ignore the provided vDSO VMO
// completely.
#[fasync::run_singlethreaded(test)]
async fn set_vdso_directly() -> Result<(), Error> {
let test_args = vec!["arg0", "arg1", "arg2"];
let test_args_cstr =
test_args.iter().map(|s| CString::new(s.clone())).collect::<Result<_, _>>()?;
let (mut builder, proxy) = setup_test_util_builder(true)?;
builder.set_vdso_vmo(get_system_vdso_vmo()?);
builder.add_arguments(test_args_cstr);
let process = builder.build().await?.start()?;
check_process_running(&process)?;
// Use the util protocol to confirm that the new process was set up correctly.
let proc_args = proxy.get_arguments().await.context("failed to get args from util")?;
assert_eq!(proc_args, test_args);
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
// Verify that a vDSO handle is properly handled if passed directly to set_vdso_vmo instead of
// relying on the default value, this time by providing an invalid VMO (something that isn't
// ELF and will fail to parse). This also indirectly tests that the reservation VMAR cleanup
// happens properly by testing a failure after it has been created.
#[fasync::run_singlethreaded(test)]
async fn set_invalid_vdso_directly_fails() -> Result<(), Error> {
let bad_vdso = zx::Vmo::create(1)?;
let (mut builder, _) = setup_test_util_builder(true)?;
builder.set_vdso_vmo(bad_vdso);
let result = builder.build().await;
match result {
Err(ProcessBuilderError::ElfParse(ElfParseError::InvalidFileHeader(_))) => {}
Err(err) => {
panic!("Unexpected error type: {}", err);
}
Ok(_) => {
panic!("Unexpectedly succeeded to build process with invalid vDSO");
}
}
Ok(())
}
// Verify that a vDSO handle is properly handled if passed through add_handles instead of
// relying on the default value, this time by providing an invalid VMO (something that isn't
// ELF and will fail to parse). This also indirectly tests that the reservation VMAR cleanup
// happens properly by testing a failure after it has been created.
#[fasync::run_singlethreaded(test)]
async fn set_invalid_vdso_fails() -> Result<(), Error> {
let bad_vdso = zx::Vmo::create(1)?;
let (mut builder, _) = setup_test_util_builder(true)?;
builder.add_handles(vec![StartupHandle {
handle: bad_vdso.into_handle(),
info: HandleInfo::new(HandleType::VdsoVmo, 0),
}])?;
let result = builder.build().await;
match result {
Err(ProcessBuilderError::ElfParse(ElfParseError::InvalidFileHeader(_))) => {}
Err(err) => {
panic!("Unexpected error type: {}", err);
}
Ok(_) => {
panic!("Unexpectedly succeeded to build process with invalid vDSO");
}
}
Ok(())
}
#[fasync::run_singlethreaded(test)]
async fn start_util_with_env() -> Result<(), Error> {
let test_env = vec![("VAR1", "value2"), ("VAR2", "value2")];
let test_env_cstr = test_env
.iter()
.map(|v| CString::new(format!("{}={}", v.0, v.1)))
.collect::<Result<_, _>>()?;
let (mut builder, proxy) = setup_test_util_builder(true)?;
builder.add_environment_variables(test_env_cstr);
let process = builder.build().await?.start()?;
check_process_running(&process)?;
let proc_env = proxy.get_environment().await.context("failed to get env from util")?;
let proc_env_tuple: Vec<(&str, &str)> =
proc_env.iter().map(|v| (&*v.key, &*v.value)).collect();
assert_eq!(proc_env_tuple, test_env);
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
#[fasync::run_singlethreaded(test)]
async fn start_util_with_huge_env() -> Result<(), Error> {
// This test is partially designed to probe the stack usage of
// code processing the initial loader message. Such processing
// is on a stack of limited size, a few pages, and well
// smaller than a maximally large channel packet. Each
// instance of "a=b" takes 4 bytes (counting the separating
// '\0' byte), so let's send 10k of them to be well larger
// than the initial stack but well within the 64k channel size.
let test_env = vec!["a=b"; 10 * 1000];
let test_env_cstr =
test_env.iter().map(|s| CString::new(s.clone())).collect::<Result<_, _>>()?;
let (mut builder, proxy) = setup_test_util_builder(true)?;
builder.add_environment_variables(test_env_cstr);
let process = builder.build().await?.start()?;
check_process_running(&process)?;
// We can't use get_environment() here because the FIDL response will be bigger than the
// maximum message size and cause the process to crash. Instead, we just check the number
// of environment variables and assume that if that's correct we're good to go.
let proc_env =
proxy.get_environment_count().await.context("failed to get env from util")?;
assert_eq!(proc_env, test_env.len() as u64);
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
#[fasync::run_singlethreaded(test)]
async fn start_util_with_namespace_entries() -> Result<(), Error> {
let mut randbuf = [0; 8];
zx::cprng_draw(&mut randbuf);
let test_content1 = format!("test content 1 {}", u64::from_le_bytes(randbuf));
zx::cprng_draw(&mut randbuf);
let test_content2 = format!("test content 2 {}", u64::from_le_bytes(randbuf));
let test_content1_bytes = test_content1.clone().into_bytes();
let (dir1_server, dir1_client) = zx::Channel::create()?;
let dir_scope = ExecutionScope::new();
let dir1 = pseudo_directory! {
"test_file1" => read_only_const(test_content1_bytes.as_ref()),
};
dir1.open(
dir_scope.clone(),
fio::OPEN_RIGHT_READABLE,
fio::MODE_TYPE_DIRECTORY,
vfs::path::Path::dot(),
ServerEnd::new(dir1_server),
);
let test_content2_bytes = test_content2.clone().into_bytes();
let (dir2_server, dir2_client) = zx::Channel::create()?;
let dir2 = pseudo_directory! {
"test_file2" => read_only_const(test_content2_bytes.as_ref()),
};
dir2.open(
dir_scope.clone(),
fio::OPEN_RIGHT_READABLE,
fio::MODE_TYPE_DIRECTORY,
vfs::path::Path::dot(),
ServerEnd::new(dir2_server),
);
let (mut builder, proxy) = setup_test_util_builder(true)?;
builder.add_namespace_entries(vec![
NamespaceEntry { path: CString::new("/dir1")?, directory: ClientEnd::new(dir1_client) },
NamespaceEntry { path: CString::new("/dir2")?, directory: ClientEnd::new(dir2_client) },
])?;
let process = builder.build().await?.start()?;
check_process_running(&process)?;
let namespace_dump = proxy.dump_namespace().await.context("failed to dump namespace")?;
assert_eq!(namespace_dump, "/dir1, /dir1/test_file1, /dir2, /dir2/test_file2");
let dir1_contents =
proxy.read_file("/dir1/test_file1").await.context("failed to read file via util")?;
assert_eq!(dir1_contents, test_content1);
let dir2_contents =
proxy.read_file("/dir2/test_file2").await.context("failed to read file via util")?;
assert_eq!(dir2_contents, test_content2);
mem::drop(proxy);
check_process_exited_ok(&process).await?;
Ok(())
}
// Trying to start a dynamically linked process without providing a loader service should
// fail. This verifies that nothing is automatically cloning a loader.
#[fasync::run_singlethreaded(test)]
async fn start_util_with_no_loader_fails() -> Result<(), Error> {
let (builder, _) = setup_test_util_builder(false)?;
let result = builder.build().await;
match result {
Err(ProcessBuilderError::LoaderMissing()) => {}
Err(err) => {
panic!("Unexpected error type: {}", err);
}
Ok(_) => {
panic!("Unexpectedly succeeded to build process without loader");
}
}
Ok(())
}
// Checks that, for dynamically linked binaries, the lower half of the address space has been
// reserved for sanitizers.
#[fasync::run_singlethreaded(test)]
async fn verify_low_address_range_reserved() -> Result<(), Error> {
let (builder, _) = setup_test_util_builder(true)?;
let built = builder.build().await?;
// This ends up being the same thing ReservationVmar does, but it's not reused here so that
// this catches bugs or bad changes to ReservationVmar itself.
let info = built.root_vmar.info()?;
let lower_half_len = util::page_end((info.base + info.len) / 2) - info.base;
built
.root_vmar
.allocate(0, lower_half_len, zx::VmarFlags::SPECIFIC)
.context("Unable to allocate lower address range of new process")?;
Ok(())
}
// Parses the given channel message as a processargs message and returns the HandleInfo's
// contained in it.
fn parse_handle_info_from_message(message: &zx::MessageBuf) -> Result<Vec<HandleInfo>, Error> {
let bytes = message.bytes();
let header = LayoutVerified::<&[u8], processargs::MessageHeader>::new_from_prefix(bytes)
.ok_or(anyhow!("Failed to parse processargs header"))?
.0;
let offset = header.handle_info_off as usize;
let len = mem::size_of::<u32>() * message.n_handles();
let info_bytes = &bytes[offset..offset + len];
let raw_info = LayoutVerified::<&[u8], [u32]>::new_slice(info_bytes)
.ok_or(anyhow!("Failed to parse raw handle info"))?;
Ok(raw_info.iter().map(|raw| HandleInfo::try_from(*raw)).collect::<Result<_, _>>()?)
}
const LINKER_MESSAGE_HANDLES: &[HandleType] = &[
HandleType::ProcessSelf,
HandleType::ThreadSelf,
HandleType::RootVmar,
HandleType::LdsvcLoader,
HandleType::LoadedVmar,
HandleType::ExecutableVmo,
];
const MAIN_MESSAGE_HANDLES: &[HandleType] = &[
HandleType::ProcessSelf,
HandleType::ThreadSelf,
HandleType::RootVmar,
HandleType::VdsoVmo,
HandleType::StackVmo,
];
#[fasync::run_singlethreaded(test)]
async fn correct_handles_present() -> Result<(), Error> {
let mut builder = create_test_util_builder()?;
builder.set_loader_service(clone_loader_service()?)?;
let built = builder.build().await?;
for correct in &[LINKER_MESSAGE_HANDLES, MAIN_MESSAGE_HANDLES] {
let mut msg_buf = zx::MessageBuf::new();
built.bootstrap.read(&mut msg_buf)?;
let handle_info = parse_handle_info_from_message(&msg_buf)?;
assert_eq!(handle_info.len(), correct.len());
for correct_type in *correct {
// Should only be one of each of these handles present.
assert_eq!(
1,
handle_info.iter().filter(|info| &info.handle_type() == correct_type).count()
);
}
}
Ok(())
}
// Verify that [ProcessBuilder::add_handles()] rejects handle types that are added
// automatically by the builder.
#[fasync::run_singlethreaded(test)]
async fn add_handles_rejects_automatic_handle_types() -> Result<(), Error> {
// The VMO doesn't need to be valid since we're not calling build.
let vmo = zx::Vmo::create(1)?;
let job = fuchsia_runtime::job_default();
let procname = CString::new("dummy_name")?;
let mut builder = ProcessBuilder::new(&procname, &job, vmo)?;
// There's some duplicates between these slices but just checking twice is easier than
// deduping these.
for handle_type in LINKER_MESSAGE_HANDLES.iter().chain(MAIN_MESSAGE_HANDLES) {
if *handle_type == HandleType::LdsvcLoader {
// Skip LdsvcLoader, which is required in the linker message but is not added
// automatically. The user must supply it.
continue;
}
if *handle_type == HandleType::VdsoVmo {
// Skip VdsoVmo, which may be supplied by the user.
continue;
}
// Another dummy VMO, just to have a valid handle.
let dummy_vmo = zx::Vmo::create(1)?;
let result = builder.add_handles(vec![StartupHandle {
handle: dummy_vmo.into_handle(),
info: HandleInfo::new(*handle_type, 0),
}]);
match result {
Err(ProcessBuilderError::InvalidArg(_)) => {}
Err(err) => {
panic!("Unexpected error type, should be invalid arg: {}", err);
}
Ok(_) => {
panic!("add_handle unexpectedly succeeded for type {:?}", handle_type);
}
}
}
Ok(())
}
// Verify that invalid handles are correctly rejected.
#[fasync::run_singlethreaded(test)]
async fn rejects_invalid_handles() -> Result<(), Error> {
let invalid = || zx::Handle::invalid();
let assert_invalid_arg = |result| match result {
Err(ProcessBuilderError::BadHandle(_)) => {}
Err(err) => {
panic!("Unexpected error type, should be BadHandle: {}", err);
}
Ok(_) => {
panic!("API unexpectedly accepted invalid handle");
}
};
// The VMO doesn't need to be valid since we're not calling build with this.
let vmo = zx::Vmo::create(1)?;
let job = fuchsia_runtime::job_default();
let procname = CString::new("dummy_name")?;
assert_invalid_arg(ProcessBuilder::new(&procname, &invalid().into(), vmo).map(|_| ()));
assert_invalid_arg(ProcessBuilder::new(&procname, &job, invalid().into()).map(|_| ()));
let (mut builder, _) = setup_test_util_builder(true)?;
assert_invalid_arg(builder.set_loader_service(invalid().into()));
assert_invalid_arg(builder.add_handles(vec![StartupHandle {
handle: invalid().into(),
info: HandleInfo::new(HandleType::User0, 0),
}]));
assert_invalid_arg(builder.add_handles(vec![StartupHandle {
handle: invalid().into(),
info: HandleInfo::new(HandleType::User0, 0),
}]));
assert_invalid_arg(builder.add_namespace_entries(vec![NamespaceEntry {
path: CString::new("/dir")?,
directory: invalid().into(),
}]));
Ok(())
}
#[fasync::run_singlethreaded]
#[test]
async fn start_static_pie_binary() -> Result<(), Error> {
const TEST_BIN: &'static str = "/pkg/bin/static_pie_test_util";
let file = fdio::open_fd(TEST_BIN, fio::OPEN_RIGHT_READABLE | fio::OPEN_RIGHT_EXECUTABLE)?;
let vmo = fdio::get_vmo_exec_from_file(&file)?;
let job = fuchsia_runtime::job_default();
let procname = CString::new(TEST_BIN.to_owned())?;
let mut builder = ProcessBuilder::new(&procname, &job, vmo)?;
// We pass the program a channel with handle type User0 which we send a message on and
// expect it to echo back the message on the same channel.
let (local, remote) = zx::Channel::create()?;
builder.add_handles(vec![StartupHandle {
handle: remote.into_handle(),
info: HandleInfo::new(HandleType::User0, 0),
}])?;
let mut randbuf = [0; 8];
zx::cprng_draw(&mut randbuf);
let test_message = format!("test content 1 {}", u64::from_le_bytes(randbuf)).into_bytes();
local.write(&test_message, &mut vec![])?;
// Start process and wait for channel to have a message to read or be closed.
builder.build().await?.start()?;
let signals = fasync::OnSignals::new(
&local,
zx::Signals::CHANNEL_READABLE | zx::Signals::CHANNEL_PEER_CLOSED,
)
.await?;
assert!(signals.contains(zx::Signals::CHANNEL_READABLE));
let mut echoed = zx::MessageBuf::new();
local.read(&mut echoed)?;
assert_eq!(echoed.bytes(), test_message.as_slice());
assert_eq!(echoed.n_handles(), 0);
Ok(())
}
}
| 43.840048 | 128 | 0.634008 |
0838a205262d1ea0c64403b0d19f16d659a00825
| 2,432 |
#![allow(dead_code)]
mod consts;
use consts::*;
mod lib;
use lib::*;
use macroquad::prelude::*;
#[macroquad::main("Milkshake")]
async fn main() {
// Make a Milkshake
let mut milkshake = Milkshake::new(
INITIAL_X_POS,
INITIAL_Y_POS,
X_SPEED,
Y_SPEED,
LASER_SHOOT,
LASER_DISTANCE,
LASER_WIDTH,
LASER_SPEED,
);
// Milkshake picture
let picture = Texture2D::from_file_with_format(
include_bytes!("../assets/milkshake.png"),
Some(ImageFormat::Png),
);
// Background image
let background = Texture2D::from_file_with_format(
include_bytes!("../assets/background.png"),
Some(ImageFormat::Png),
);
// Laser image
let laser_beam = Texture2D::from_file_with_format(
include_bytes!("../assets/laser_beam.png"),
Some(ImageFormat::Png),
);
// Define the font
let font_bytes = include_bytes!("../assets/AvenirNextLTPro-Regular.otf");
// Game loop
loop {
// Set the screen color
clear_background(WHITE);
// Draw the background
draw_texture_ex(
background,
0.,
0.,
WHITE,
DrawTextureParams {
dest_size: Some(vec2(screen_width(), screen_height())),
..Default::default()
},
);
// Draw the Milkshake
draw_texture(
picture,
milkshake.pos.x,
screen_height() - milkshake.pos.y,
WHITE,
);
// Possible actions
milkshake.jump(INITIAL_Y_POS, JUMP_HEIGHT, GRAVITY);
milkshake.shoot(LASER_DISTANCE, laser_beam, LASER_SPEED);
// Keypresses
if is_key_down(KeyCode::Right) || is_key_down(KeyCode::L) {
milkshake.pos.x += milkshake.speed.x;
} else if is_key_down(KeyCode::Left) || is_key_down(KeyCode::H) {
milkshake.pos.x -= milkshake.speed.x;
} else if is_key_pressed(KeyCode::Up) || is_key_down(KeyCode::K) {
milkshake.jump_state.up = true;
} else if is_key_down(KeyCode::Space) {
milkshake.laser.shoot = true;
} else if is_key_down(KeyCode::M) {
show_help(font_bytes, HELP_OFFSET);
} else if is_key_down(KeyCode::Q) {
break;
}
// Get to the next frame
next_frame().await;
}
}
| 26.150538 | 77 | 0.5625 |
e21074ff185d46ea8d0888c5f6eb7fee05a19db5
| 62,143 |
use crate::hal::command::BufferCopy;
use crate::hal::format::Aspects;
use crate::hal::image::{Access, Kind, Layout, Level, SubresourceRange, Tiling};
use crate::hal::memory::Segment;
use crate::hal::memory::{Barrier, Dependencies};
use crate::mem::{Allocator, Buffer, Memory};
use crate::{hal, instances::SceneList, CmdBufferPool, DeviceHandle, Queue};
use crate::instances::RenderBuffers;
use crate::mem::image::{Texture, TextureDescriptor, TextureView, TextureViewDescriptor};
use crate::skinning::SkinList;
use glam::*;
use hal::*;
use hal::{
command::{self, CommandBuffer},
device::Device,
window::Extent2D,
};
use pass::Subpass;
use pso::*;
use rfw_scene::bvh::AABB;
use rfw_scene::{AnimVertexData, DeviceMaterial, FrustrumG, VertexData, VertexMesh};
use shared::BytesConversion;
use std::{borrow::Borrow, mem::ManuallyDrop, ptr, sync::Arc};
use AttributeDesc;
use VertexBufferDesc;
pub mod anim;
#[derive(Debug, Clone)]
pub struct GfxMesh<B: hal::Backend> {
pub id: usize,
pub buffer: Option<Arc<Buffer<B>>>,
pub sub_meshes: Vec<VertexMesh>,
pub vertices: usize,
pub bounds: AABB,
}
impl<B: hal::Backend> Default for GfxMesh<B> {
fn default() -> Self {
Self {
id: 0,
buffer: None,
sub_meshes: Vec::new(),
vertices: 0,
bounds: AABB::empty(),
}
}
}
#[allow(dead_code)]
impl<B: hal::Backend> GfxMesh<B> {
pub fn default_id(id: usize) -> Self {
Self {
id,
..Self::default()
}
}
pub fn valid(&self) -> bool {
self.buffer.is_some()
}
}
#[derive(Debug)]
pub struct RenderPipeline<B: hal::Backend> {
device: DeviceHandle<B>,
allocator: Allocator<B>,
desc_pool: ManuallyDrop<B::DescriptorPool>,
desc_set: B::DescriptorSet,
set_layout: ManuallyDrop<B::DescriptorSetLayout>,
pipeline: ManuallyDrop<B::GraphicsPipeline>,
anim_pipeline: ManuallyDrop<B::GraphicsPipeline>,
pipeline_layout: ManuallyDrop<B::PipelineLayout>,
render_pass: ManuallyDrop<B::RenderPass>,
uniform_buffer: Buffer<B>,
depth_image: ManuallyDrop<B::Image>,
depth_image_view: ManuallyDrop<B::ImageView>,
depth_memory: Memory<B>,
textures: Vec<Texture<B>>,
texture_views: Vec<TextureView<B>>,
cmd_pool: CmdBufferPool<B>,
queue: Queue<B>,
mat_desc_pool: ManuallyDrop<B::DescriptorPool>,
mat_set_layout: ManuallyDrop<B::DescriptorSetLayout>,
mat_sets: Vec<B::DescriptorSet>,
material_buffer: Buffer<B>,
tex_sampler: ManuallyDrop<B::Sampler>,
}
impl<B: hal::Backend> RenderPipeline<B> {
const DEPTH_FORMAT: hal::format::Format = hal::format::Format::D32Sfloat;
const UNIFORM_CAMERA_SIZE: usize = std::mem::size_of::<Mat4>() * 2
+ std::mem::size_of::<[u32; 4]>()
+ std::mem::size_of::<Vec4>();
pub fn new(
device: DeviceHandle<B>,
allocator: Allocator<B>,
queue: Queue<B>,
format: hal::format::Format,
width: u32,
height: u32,
scene_list: &SceneList<B>,
skins: &SkinList<B>,
) -> Self {
let set_layout = ManuallyDrop::new(
unsafe {
device.create_descriptor_set_layout(
&[pso::DescriptorSetLayoutBinding {
binding: 0,
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Uniform,
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: pso::ShaderStageFlags::VERTEX
| pso::ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
}],
&[],
)
}
.expect("Can't create descriptor set layout"),
);
let mat_set_layout = ManuallyDrop::new(
unsafe {
device.create_descriptor_set_layout(
&[
pso::DescriptorSetLayoutBinding {
binding: 0,
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Uniform,
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
stage_flags: pso::ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
},
pso::DescriptorSetLayoutBinding {
binding: 1,
ty: pso::DescriptorType::Sampler,
count: 1,
stage_flags: pso::ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
},
pso::DescriptorSetLayoutBinding {
binding: 2,
ty: pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: pso::ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
},
pso::DescriptorSetLayoutBinding {
binding: 3,
ty: pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: pso::ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
},
pso::DescriptorSetLayoutBinding {
binding: 4,
ty: pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: pso::ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
},
pso::DescriptorSetLayoutBinding {
binding: 5,
ty: pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: pso::ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
},
pso::DescriptorSetLayoutBinding {
binding: 6,
ty: pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 1,
stage_flags: pso::ShaderStageFlags::FRAGMENT,
immutable_samplers: false,
},
],
&[],
)
}
.expect("Can't create descriptor set layout"),
);
let mut desc_pool = ManuallyDrop::new(
unsafe {
device.create_descriptor_pool(
1, // sets
&[pso::DescriptorRangeDesc {
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Uniform,
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 1,
}],
pso::DescriptorPoolCreateFlags::empty(),
)
}
.expect("Can't create descriptor pool"),
);
let mat_desc_pool = ManuallyDrop::new(
unsafe {
device.create_descriptor_pool(
256, // sets
&[
pso::DescriptorRangeDesc {
ty: pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Uniform,
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: 256,
},
pso::DescriptorRangeDesc {
ty: pso::DescriptorType::Sampler,
count: 256,
},
pso::DescriptorRangeDesc {
ty: pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: 256 * 5,
},
],
pso::DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET,
)
}
.expect("Can't create descriptor pool"),
);
let desc_set = unsafe { desc_pool.allocate_set(&set_layout) }.unwrap();
let render_pass = {
let color_attachment = pass::Attachment {
format: Some(format),
samples: 1,
ops: pass::AttachmentOps::new(
pass::AttachmentLoadOp::Clear,
pass::AttachmentStoreOp::Store,
),
stencil_ops: pass::AttachmentOps::DONT_CARE,
layouts: image::Layout::Undefined..image::Layout::Present,
};
let depth_attachment = pass::Attachment {
format: Some(Self::DEPTH_FORMAT),
samples: 1,
ops: pass::AttachmentOps::new(
pass::AttachmentLoadOp::Clear,
pass::AttachmentStoreOp::Store,
),
stencil_ops: pass::AttachmentOps::DONT_CARE,
layouts: image::Layout::Undefined..image::Layout::DepthStencilAttachmentOptimal,
};
let subpass = pass::SubpassDesc {
colors: &[(0, image::Layout::ColorAttachmentOptimal)],
depth_stencil: Some(&(1, image::Layout::DepthStencilAttachmentOptimal)),
inputs: &[],
resolves: &[],
preserves: &[],
};
ManuallyDrop::new(
unsafe {
device.create_render_pass(
&[color_attachment, depth_attachment],
&[subpass],
&[],
)
}
.expect("Can't create render pass"),
)
};
let pipeline_layout = ManuallyDrop::new(
unsafe {
device.create_pipeline_layout(
vec![
&*set_layout,
&*scene_list.set_layout,
&*mat_set_layout,
&*skins.desc_layout,
],
&[],
)
}
.expect("Can't create pipeline layout"),
);
let pipeline = {
let vs_module = {
let spirv: &[u8] = include_bytes!("../../shaders/mesh.vert.spv");
unsafe { device.create_shader_module(spirv.as_quad_bytes()) }.unwrap()
};
let fs_module = {
let spirv: &[u8] = include_bytes!("../../shaders/mesh.frag.spv");
unsafe { device.create_shader_module(spirv.as_quad_bytes()) }.unwrap()
};
let pipeline = {
let (vs_entry, fs_entry) = (
pso::EntryPoint {
entry: "main",
module: &vs_module,
specialization: pso::Specialization::default(),
},
pso::EntryPoint {
entry: "main",
module: &fs_module,
specialization: pso::Specialization::default(),
},
);
let subpass = Subpass {
index: 0,
main_pass: &*render_pass,
};
let pipeline_desc = pso::GraphicsPipelineDesc {
primitive_assembler: pso::PrimitiveAssemblerDesc::Vertex {
buffers: &[VertexBufferDesc {
binding: 0 as BufferIndex,
stride: std::mem::size_of::<VertexData>() as ElemStride,
rate: VertexInputRate::Vertex,
}], // Vec<VertexBufferDesc>,
// Vertex attributes (IA)
attributes: &[
AttributeDesc {
/// Vertex array location
location: 0 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rgba32Sfloat,
offset: 0,
},
},
AttributeDesc {
/// Vertex array location
location: 1 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rgb32Sfloat,
offset: 16,
},
},
AttributeDesc {
/// Vertex array location
location: 2 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::R32Uint,
offset: 28,
},
},
AttributeDesc {
/// Vertex array location
location: 3 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rg32Sfloat,
offset: 32,
},
},
AttributeDesc {
/// Vertex array location
location: 4 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rgba32Sfloat,
offset: 40,
},
},
],
input_assembler: pso::InputAssemblerDesc {
primitive: Primitive::TriangleList,
with_adjacency: false,
restart_index: None,
},
vertex: vs_entry,
tessellation: None,
geometry: None,
},
fragment: Some(fs_entry),
// Rasterizer setup
rasterizer: pso::Rasterizer {
/// How to rasterize this primitive.
polygon_mode: pso::PolygonMode::Fill,
/// Which face should be culled.
cull_face: pso::Face::BACK,
/// Which vertex winding is considered to be the front face for culling.
front_face: pso::FrontFace::CounterClockwise,
/// Whether or not to enable depth clamping; when enabled, instead of
/// fragments being omitted when they are outside the bounds of the z-plane,
/// they will be clamped to the min or max z value.
depth_clamping: false,
/// What depth bias, if any, to use for the drawn primitives.
depth_bias: None,
/// Controls how triangles will be rasterized depending on their overlap with pixels.
conservative: false,
/// Controls width of rasterized line segments.
line_width: State::Dynamic,
},
// Description of how blend operations should be performed.
blender: BlendDesc {
/// The logic operation to apply to the blending equation, if any.
logic_op: None,
/// Which color targets to apply the blending operation to.
targets: vec![pso::ColorBlendDesc {
mask: pso::ColorMask::ALL,
blend: None,
}],
},
// Depth stencil (DSV)
depth_stencil: DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
},
// Multisampling.
multisampling: Some(Multisampling {
rasterization_samples: 1 as image::NumSamples,
sample_shading: None,
sample_mask: !0,
/// Toggles alpha-to-coverage multisampling, which can produce nicer edges
/// when many partially-transparent polygons are overlapping.
/// See [here]( https://msdn.microsoft.com/en-us/library/windows/desktop/bb205072(v=vs.85).aspx#Alpha_To_Coverage) for a full description.
alpha_coverage: false,
alpha_to_one: false,
}),
// Static pipeline states.
baked_states: BakedStates::default(),
// Pipeline layout.
layout: &*pipeline_layout,
// Subpass in which the pipeline can be executed.
subpass,
// Options that may be set to alter pipeline properties.
flags: PipelineCreationFlags::empty(),
/// The parent pipeline, which may be
/// `BasePipeline::None`.
parent: BasePipeline::None,
};
unsafe { device.create_graphics_pipeline(&pipeline_desc, None) }
};
unsafe {
device.destroy_shader_module(vs_module);
}
unsafe {
device.destroy_shader_module(fs_module);
}
match pipeline {
Ok(pipeline) => ManuallyDrop::new(pipeline),
Err(e) => panic!("Could not compile pipeline {}", e),
}
};
let anim_pipeline = {
let vs_module = {
let spirv: &[u8] = include_bytes!("../../shaders/mesh_anim.vert.spv");
unsafe { device.create_shader_module(spirv.as_quad_bytes()) }.unwrap()
};
let fs_module = {
let spirv: &[u8] = include_bytes!("../../shaders/mesh.frag.spv");
unsafe { device.create_shader_module(spirv.as_quad_bytes()) }.unwrap()
};
let pipeline = {
let (vs_entry, fs_entry) = (
pso::EntryPoint {
entry: "main",
module: &vs_module,
specialization: pso::Specialization::default(),
},
pso::EntryPoint {
entry: "main",
module: &fs_module,
specialization: pso::Specialization::default(),
},
);
let subpass = Subpass {
index: 0,
main_pass: &*render_pass,
};
let pipeline_desc = pso::GraphicsPipelineDesc {
primitive_assembler: pso::PrimitiveAssemblerDesc::Vertex {
buffers: &[
pso::VertexBufferDesc {
binding: 0 as BufferIndex,
stride: std::mem::size_of::<VertexData>() as ElemStride,
rate: VertexInputRate::Vertex,
},
pso::VertexBufferDesc {
binding: 1 as BufferIndex,
stride: std::mem::size_of::<AnimVertexData>() as ElemStride,
rate: VertexInputRate::Vertex,
},
],
/// Vertex attributes (IA)
attributes: &[
AttributeDesc {
/// Vertex array location
location: 0 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rgba32Sfloat,
offset: 0,
},
},
AttributeDesc {
/// Vertex array location
location: 1 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rgb32Sfloat,
offset: 16,
},
},
AttributeDesc {
/// Vertex array location
location: 2 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::R32Uint,
offset: 28,
},
},
AttributeDesc {
/// Vertex array location
location: 3 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rg32Sfloat,
offset: 32,
},
},
AttributeDesc {
/// Vertex array location
location: 4 as Location,
/// Binding number of the associated vertex mem.
binding: 0 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rgba32Sfloat,
offset: 40,
},
},
AttributeDesc {
/// Vertex array location
location: 5 as Location,
/// Binding number of the associated vertex mem.
binding: 1 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rgba32Uint,
offset: 0,
},
},
AttributeDesc {
/// Vertex array location
location: 6 as Location,
/// Binding number of the associated vertex mem.
binding: 1 as BufferIndex,
/// Attribute element description.
element: Element {
format: hal::format::Format::Rgba32Sfloat,
offset: 16,
},
},
],
input_assembler: pso::InputAssemblerDesc {
primitive: Primitive::TriangleList,
with_adjacency: false,
restart_index: None,
},
vertex: vs_entry,
tessellation: None,
geometry: None,
},
fragment: Some(fs_entry),
// Rasterizer setup
rasterizer: pso::Rasterizer {
/// How to rasterize this primitive.
polygon_mode: pso::PolygonMode::Fill,
/// Which face should be culled.
cull_face: pso::Face::BACK,
/// Which vertex winding is considered to be the front face for culling.
front_face: pso::FrontFace::CounterClockwise,
/// Whether or not to enable depth clamping; when enabled, instead of
/// fragments being omitted when they are outside the bounds of the z-plane,
/// they will be clamped to the min or max z value.
depth_clamping: false,
/// What depth bias, if any, to use for the drawn primitives.
depth_bias: None,
/// Controls how triangles will be rasterized depending on their overlap with pixels.
conservative: false,
/// Controls width of rasterized line segments.
line_width: State::Dynamic,
},
// Description of how blend operations should be performed.
blender: BlendDesc {
/// The logic operation to apply to the blending equation, if any.
logic_op: None,
/// Which color targets to apply the blending operation to.
targets: vec![pso::ColorBlendDesc {
mask: pso::ColorMask::ALL,
blend: None,
}],
},
// Depth stencil (DSV)
depth_stencil: DepthStencilDesc {
depth: Some(DepthTest {
fun: Comparison::LessEqual,
write: true,
}),
depth_bounds: false,
stencil: None,
},
// Multisampling.
multisampling: Some(Multisampling {
rasterization_samples: 1 as image::NumSamples,
sample_shading: None,
sample_mask: !0,
/// Toggles alpha-to-coverage multisampling, which can produce nicer edges
/// when many partially-transparent polygons are overlapping.
/// See [here]( https://msdn.microsoft.com/en-us/library/windows/desktop/bb205072(v=vs.85).aspx#Alpha_To_Coverage) for a full description.
alpha_coverage: false,
alpha_to_one: false,
}),
// Static pipeline states.
baked_states: BakedStates::default(),
// Pipeline layout.
layout: &*pipeline_layout,
// Subpass in which the pipeline can be executed.
subpass,
// Options that may be set to alter pipeline properties.
flags: PipelineCreationFlags::empty(),
/// The parent pipeline, which may be
/// `BasePipeline::None`.
parent: BasePipeline::None,
};
unsafe { device.create_graphics_pipeline(&pipeline_desc, None) }
};
unsafe {
device.destroy_shader_module(vs_module);
}
unsafe {
device.destroy_shader_module(fs_module);
}
match pipeline {
Ok(pipeline) => ManuallyDrop::new(pipeline),
Err(e) => panic!("Could not compile animation pipeline {}", e),
}
};
let uniform_buffer = allocator
.allocate_buffer(
Self::UNIFORM_CAMERA_SIZE,
hal::buffer::Usage::UNIFORM,
hal::memory::Properties::CPU_VISIBLE,
Some(hal::memory::Properties::CPU_VISIBLE | hal::memory::Properties::DEVICE_LOCAL),
)
.unwrap();
let write = vec![pso::DescriptorSetWrite {
set: &desc_set,
binding: 0,
array_offset: 0,
descriptors: Some(pso::Descriptor::Buffer(
uniform_buffer.buffer(),
hal::buffer::SubRange::WHOLE,
)),
}];
unsafe {
device.write_descriptor_sets(write);
}
let (mut depth_image, req) = unsafe {
let image = device
.create_image(
Kind::D2(width, height, 1, 1),
1,
Self::DEPTH_FORMAT,
Tiling::Optimal,
image::Usage::DEPTH_STENCIL_ATTACHMENT,
image::ViewCapabilities::empty(),
)
.expect("Could not create depth image.");
let req = device.get_image_requirements(&image);
(image, req)
};
let depth_memory = allocator
.allocate_with_reqs(req, memory::Properties::DEVICE_LOCAL, None)
.unwrap();
let depth_image_view = unsafe {
device
.bind_image_memory(depth_memory.memory(), 0, &mut depth_image)
.unwrap();
device
.create_image_view(
&depth_image,
image::ViewKind::D2,
Self::DEPTH_FORMAT,
hal::format::Swizzle::NO,
hal::image::SubresourceRange {
aspects: hal::format::Aspects::DEPTH,
level_start: 0,
level_count: Some(1),
layer_start: 0,
layer_count: Some(1),
},
)
.unwrap()
};
let cmd_pool = CmdBufferPool::new(
device.clone(),
&queue,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
.unwrap();
let material_buffer = allocator
.allocate_buffer(
std::mem::size_of::<DeviceMaterial>() * 32,
hal::buffer::Usage::UNIFORM | hal::buffer::Usage::TRANSFER_DST,
hal::memory::Properties::DEVICE_LOCAL,
None,
)
.unwrap();
let tex_sampler = ManuallyDrop::new(unsafe {
device
.create_sampler(&hal::image::SamplerDesc {
min_filter: hal::image::Filter::Linear,
/// Magnification filter method to use.
mag_filter: hal::image::Filter::Nearest,
/// Mip filter method to use.
mip_filter: hal::image::Filter::Nearest,
/// Wrapping mode for each of the U, V, and W axis (S, T, and R in OpenGL
/// speak).
wrap_mode: (
hal::image::WrapMode::Tile,
hal::image::WrapMode::Tile,
hal::image::WrapMode::Tile,
),
/// This bias is added to every computed mipmap level (N + lod_bias). For
/// example, if it would select mipmap level 2 and lod_bias is 1, it will
/// use mipmap level 3.
lod_bias: hal::image::Lod(0.0),
/// This range is used to clamp LOD level used for sampling.
lod_range: hal::image::Lod(0.0)
..hal::image::Lod(rfw_scene::Texture::MIP_LEVELS as f32),
/// Comparison mode, used primary for a shadow map.
comparison: None,
/// Border color is used when one of the wrap modes is set to border.
border: hal::image::PackedColor::from([0.0; 4]),
/// Specifies whether the texture coordinates are normalized.
normalized: true,
/// Anisotropic filtering.
/// Can be `Some(_)` only if `Features::SAMPLER_ANISOTROPY` is enabled.
anisotropy_clamp: Some(8),
})
.unwrap()
});
Self {
device,
allocator,
desc_pool,
desc_set,
set_layout,
pipeline,
anim_pipeline,
pipeline_layout,
render_pass,
uniform_buffer,
depth_image: ManuallyDrop::new(depth_image),
depth_image_view: ManuallyDrop::new(depth_image_view),
depth_memory,
queue,
cmd_pool,
textures: Vec::new(),
texture_views: Vec::new(),
mat_desc_pool,
mat_set_layout,
mat_sets: Vec::new(),
material_buffer,
tex_sampler,
}
}
pub unsafe fn create_frame_buffer<T: Borrow<B::ImageView>>(
&self,
surface_image: &T,
dimensions: Extent2D,
) -> B::Framebuffer {
self.device
.create_framebuffer(
&self.render_pass,
vec![surface_image.borrow(), &self.depth_image_view],
hal::image::Extent {
width: dimensions.width,
height: dimensions.height,
depth: 1,
},
)
.unwrap()
}
pub fn update_camera(&mut self, camera: &rfw_scene::Camera) {
let mapping = match self.uniform_buffer.map(hal::memory::Segment::ALL) {
Ok(mapping) => mapping,
Err(_) => return,
};
let view = camera.get_rh_view_matrix();
let projection = camera.get_rh_projection();
let light_counts = [0 as u32; 4];
unsafe {
let ptr = mapping.as_ptr();
// View matrix
ptr.copy_from(view.as_ref().as_ptr() as *const u8, 64);
// Projection matrix
ptr.add(64)
.copy_from(projection.as_ref().as_ptr() as *const u8, 64);
// Light counts
ptr.add(128)
.copy_from(light_counts.as_ptr() as *const u8, 16);
// Camera position
ptr.add(144).copy_from(
Vec3A::from(camera.pos).extend(1.0).as_ref().as_ptr() as *const u8,
16,
);
}
}
pub unsafe fn draw(
&self,
cmd_buffer: &mut B::CommandBuffer,
frame_buffer: &B::Framebuffer,
viewport: &Viewport,
scene: &SceneList<B>,
skins: &SkinList<B>,
frustrum: &FrustrumG,
) {
cmd_buffer.begin_render_pass(
&self.render_pass,
frame_buffer,
viewport.rect,
&[
command::ClearValue {
color: command::ClearColor {
float32: [0.0, 0.0, 0.0, 1.0],
},
},
command::ClearValue {
depth_stencil: command::ClearDepthStencil {
depth: 1.0,
stencil: 0,
},
},
],
command::SubpassContents::Inline,
);
scene.iter_instances(|buffer, instance| {
if !frustrum.aabb_in_frustrum(&instance.bounds).should_render() {
return;
}
let iter = instance
.meshes
.iter()
.filter(|m| frustrum.aabb_in_frustrum(&m.bounds).should_render());
let mut first = true;
iter.for_each(|mesh| {
if first {
cmd_buffer.bind_graphics_descriptor_sets(
&self.pipeline_layout,
0,
std::iter::once(&self.desc_set),
&[],
);
cmd_buffer.bind_graphics_descriptor_sets(
&self.pipeline_layout,
1,
std::iter::once(&scene.desc_set),
&[],
);
match buffer {
RenderBuffers::Static(buffer) => {
cmd_buffer.bind_graphics_pipeline(&self.pipeline);
cmd_buffer.bind_vertex_buffers(
0,
std::iter::once((buffer.buffer(), buffer::SubRange::WHOLE)),
);
}
RenderBuffers::Animated(buffer, anim_offset) => {
if let Some(skin_id) = instance.skin_id {
let skin_id = skin_id as usize;
if let Some(skin_set) = skins.get_set(skin_id) {
cmd_buffer.bind_graphics_pipeline(&self.anim_pipeline);
cmd_buffer.bind_graphics_descriptor_sets(
&self.pipeline_layout,
1,
std::iter::once(&scene.desc_set),
&[],
);
cmd_buffer.bind_graphics_descriptor_sets(
&self.pipeline_layout,
3,
std::iter::once(skin_set),
&[],
);
cmd_buffer.bind_vertex_buffers(
0,
std::iter::once((
buffer.buffer(),
buffer::SubRange {
size: Some(*anim_offset as buffer::Offset),
offset: 0,
},
)),
);
cmd_buffer.bind_vertex_buffers(
1,
std::iter::once((
buffer.buffer(),
buffer::SubRange {
size: Some(
(buffer.size_in_bytes - *anim_offset)
as buffer::Offset,
),
offset: *anim_offset as _,
},
)),
);
} else {
cmd_buffer.bind_graphics_pipeline(&self.pipeline);
cmd_buffer.bind_vertex_buffers(
0,
std::iter::once((buffer.buffer(), buffer::SubRange::WHOLE)),
);
}
} else {
cmd_buffer.bind_graphics_pipeline(&self.pipeline);
cmd_buffer.bind_vertex_buffers(
0,
std::iter::once((buffer.buffer(), buffer::SubRange::WHOLE)),
);
}
}
}
first = false;
}
cmd_buffer.bind_graphics_descriptor_sets(
&self.pipeline_layout,
2,
std::iter::once(
self.mat_sets
.get(mesh.mat_id as usize)
.expect(format!("Could not get material set {}", mesh.mat_id).as_str()),
),
&[],
);
cmd_buffer.draw(mesh.first..mesh.last, instance.id..(instance.id + 1));
});
});
cmd_buffer.end_render_pass();
}
pub fn resize(&mut self, width: u32, height: u32) {
unsafe {
self.device
.destroy_image_view(ManuallyDrop::into_inner(ptr::read(&self.depth_image_view)));
self.device
.destroy_image(ManuallyDrop::into_inner(ptr::read(&self.depth_image)));
}
let (depth_image, depth_image_view) = unsafe {
let mut image = self
.device
.create_image(
Kind::D2(width, height, 1, 1),
1,
Self::DEPTH_FORMAT,
Tiling::Optimal,
image::Usage::DEPTH_STENCIL_ATTACHMENT,
image::ViewCapabilities::empty(),
)
.expect("Could not create depth image.");
let req = self.device.get_image_requirements(&image);
if req.size > self.depth_memory.len() as _ {
self.depth_memory = self
.allocator
.allocate_with_reqs(req, memory::Properties::DEVICE_LOCAL, None)
.unwrap();
}
self.device
.bind_image_memory(self.depth_memory.memory(), 0, &mut image)
.unwrap();
let image_view = self
.device
.create_image_view(
&image,
image::ViewKind::D2,
Self::DEPTH_FORMAT,
hal::format::Swizzle::NO,
hal::image::SubresourceRange {
aspects: hal::format::Aspects::DEPTH,
level_start: 0,
level_count: Some(1),
layer_start: 0,
layer_count: Some(1),
},
)
.unwrap();
(image, image_view)
};
self.depth_image = ManuallyDrop::new(depth_image);
self.depth_image_view = ManuallyDrop::new(depth_image_view);
}
pub fn set_textures(&mut self, textures: &[rfw_scene::Texture]) {
let mut texels = 0;
let textures: Vec<_> = textures
.iter()
.map(|t| {
let mut t = t.clone();
t.generate_mipmaps(5);
t
})
.collect();
self.textures = textures
.iter()
.map(|t| {
texels += t.data.len();
Texture::new(
self.device.clone(),
&self.allocator,
TextureDescriptor {
kind: image::Kind::D2(t.width, t.height, 1, 1),
mip_levels: t.mip_levels as _,
format: format::Format::Bgra8Unorm,
tiling: image::Tiling::Optimal,
usage: image::Usage::SAMPLED,
capabilities: image::ViewCapabilities::empty(),
},
)
.unwrap()
})
.collect();
self.texture_views = self
.textures
.iter()
.map(|t| {
t.create_view(TextureViewDescriptor {
view_kind: image::ViewKind::D2,
swizzle: Default::default(),
range: image::SubresourceRange {
aspects: format::Aspects::COLOR,
level_start: 0,
level_count: Some(t.mip_levels() as _),
layer_start: 0,
layer_count: Some(1),
},
})
.unwrap()
})
.collect();
let mut staging_buffer = self
.allocator
.allocate_buffer(
texels * std::mem::size_of::<u32>(),
hal::buffer::Usage::TRANSFER_SRC,
hal::memory::Properties::CPU_VISIBLE,
None,
)
.unwrap();
let mut cmd_buffer = unsafe {
let mut cmd_buffer = self.cmd_pool.allocate_one(hal::command::Level::Primary);
cmd_buffer.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
cmd_buffer
};
if let Ok(mapping) = staging_buffer.map(Segment::ALL) {
let mut byte_offset = 0;
            for t in textures.iter() {
let bytes = t.data.as_bytes();
mapping.as_slice()[byte_offset..(byte_offset + bytes.len())].copy_from_slice(bytes);
byte_offset += bytes.len();
}
}
let mut byte_offset = 0;
for (i, t) in textures.iter().enumerate() {
let target = &*self.textures[i];
unsafe {
cmd_buffer.pipeline_barrier(
PipelineStage::TOP_OF_PIPE..PipelineStage::TRANSFER,
Dependencies::empty(),
std::iter::once(&Barrier::Image {
range: SubresourceRange {
aspects: Aspects::COLOR,
level_start: 0,
level_count: Some(t.mip_levels as Level),
layer_start: 0,
layer_count: Some(1),
},
families: None,
states: (Access::empty(), Layout::Undefined)
..(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
target,
}),
);
}
for m in 0..t.mip_levels {
let (width, height) = t.mip_level_width_height(m as usize);
unsafe {
cmd_buffer.copy_buffer_to_image(
staging_buffer.buffer(),
&*self.textures[i],
hal::image::Layout::TransferDstOptimal,
std::iter::once(&hal::command::BufferImageCopy {
buffer_offset: byte_offset as hal::buffer::Offset,
/// Width of a mem 'row' in texels.
buffer_width: width as u32,
/// Height of a mem 'image slice' in texels.
buffer_height: height as u32,
/// The image subresource.
image_layers: hal::image::SubresourceLayers {
layers: 0..1,
aspects: Aspects::COLOR,
level: m as hal::image::Level,
},
/// The offset of the portion of the image to copy.
image_offset: hal::image::Offset { x: 0, y: 0, z: 0 },
/// Size of the portion of the image to copy.
image_extent: hal::image::Extent {
width: width as u32,
height: height as u32,
depth: 1,
},
}),
);
}
byte_offset += width * height * std::mem::size_of::<u32>();
}
unsafe {
cmd_buffer.pipeline_barrier(
PipelineStage::TRANSFER..PipelineStage::FRAGMENT_SHADER,
Dependencies::empty(),
std::iter::once(&Barrier::Image {
range: SubresourceRange {
aspects: Aspects::COLOR,
level_start: 0,
level_count: Some(t.mip_levels as Level),
layer_start: 0,
layer_count: Some(1),
},
families: None,
states: (Access::TRANSFER_WRITE, Layout::TransferDstOptimal)
..(Access::SHADER_READ, Layout::ShaderReadOnlyOptimal),
target: &*self.textures[i],
}),
);
}
}
unsafe {
cmd_buffer.finish();
}
self.queue
.submit_without_semaphores(std::iter::once(&cmd_buffer), None);
self.queue.wait_idle().unwrap();
}
pub fn set_materials(&mut self, materials: &[DeviceMaterial]) {
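        // Round the per-material stride up to the device's minimum uniform-buffer offset alignment.
        // For example (illustrative numbers only): with a 256-byte minimum alignment, a material
        // struct smaller than 256 bytes still occupies a full 256-byte slot in the buffer.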
let aligned_size = {
let minimum_alignment =
self.allocator.limits.min_uniform_buffer_offset_alignment as usize;
let mut size = minimum_alignment;
while size < std::mem::size_of::<DeviceMaterial>() {
size += minimum_alignment;
}
size
};
if self.material_buffer.len() < materials.len() * aligned_size {
self.material_buffer = self
.allocator
.allocate_buffer(
// Minimum alignment of dynamic uniform buffers is 256 bytes
materials.len() * 2 * aligned_size,
hal::buffer::Usage::UNIFORM | hal::buffer::Usage::TRANSFER_DST,
hal::memory::Properties::DEVICE_LOCAL,
None,
)
.unwrap();
}
let mut staging_buffer = self
.allocator
.allocate_buffer(
materials.len() * aligned_size,
hal::buffer::Usage::TRANSFER_SRC,
hal::memory::Properties::CPU_VISIBLE,
None,
)
.unwrap();
if let Ok(mapping) = staging_buffer.map(Segment::ALL) {
let dst = mapping.as_slice();
let src = materials.as_bytes();
for (i, _) in materials.iter().enumerate() {
let start = i * aligned_size;
let end = start + std::mem::size_of::<DeviceMaterial>();
let src_start = i * std::mem::size_of::<DeviceMaterial>();
let src_end = (i + 1) * std::mem::size_of::<DeviceMaterial>();
dst[start..end].copy_from_slice(&src[src_start..src_end]);
}
}
let cmd_buffer = unsafe {
let mut cmd_buffer = self.cmd_pool.allocate_one(hal::command::Level::Primary);
cmd_buffer.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
cmd_buffer.copy_buffer(
staging_buffer.buffer(),
self.material_buffer.buffer(),
std::iter::once(&BufferCopy {
size: (materials.len() * aligned_size) as _,
src: 0,
dst: 0,
}),
);
cmd_buffer.finish();
cmd_buffer
};
self.queue
.submit_without_semaphores(std::iter::once(&cmd_buffer), None);
unsafe {
if !self.mat_sets.is_empty() {
let mut sets = Vec::new();
std::mem::swap(&mut sets, &mut self.mat_sets);
self.mat_desc_pool.free(sets);
}
self.mat_sets = materials
.iter()
.enumerate()
.map(
|(i, _)| match self.mat_desc_pool.allocate_set(&self.mat_set_layout) {
Ok(set) => set,
Err(e) => panic!("Could not allocate set {}, err: {}", i, e),
},
)
.collect();
let mut writes = Vec::with_capacity(self.mat_sets.len() * 7);
let sampler = ManuallyDrop::into_inner(ptr::read(&self.tex_sampler));
self.mat_sets
.iter()
.zip(materials.iter().enumerate())
.for_each(|(set, (i, mat))| {
writes.push(pso::DescriptorSetWrite {
set,
binding: 0,
array_offset: 0,
descriptors: Some(pso::Descriptor::Buffer(
self.material_buffer.buffer(),
hal::buffer::SubRange {
offset: (i * aligned_size) as _,
size: Some(std::mem::size_of::<DeviceMaterial>() as _),
},
)),
});
writes.push(pso::DescriptorSetWrite {
set,
binding: 1,
array_offset: 0,
descriptors: Some(pso::Descriptor::Sampler(&sampler)),
});
// Texture 0
let view = &*self.texture_views[mat.diffuse_map.max(0) as usize];
writes.push(pso::DescriptorSetWrite {
set,
binding: 2,
array_offset: 0,
descriptors: Some(pso::Descriptor::Image(
view,
image::Layout::ShaderReadOnlyOptimal,
)),
});
// Texture 1
let view = &*self.texture_views[mat.normal_map.max(0) as usize];
writes.push(pso::DescriptorSetWrite {
set,
binding: 3,
array_offset: 0,
descriptors: Some(pso::Descriptor::Image(
view,
image::Layout::ShaderReadOnlyOptimal,
)),
});
// Texture 2
let view = &*self.texture_views[mat.metallic_roughness_map.max(0) as usize];
writes.push(pso::DescriptorSetWrite {
set,
binding: 4,
array_offset: 0,
descriptors: Some(pso::Descriptor::Image(
view,
image::Layout::ShaderReadOnlyOptimal,
)),
});
// Texture 3
let view = &*self.texture_views[mat.emissive_map.max(0) as usize];
writes.push(pso::DescriptorSetWrite {
set,
binding: 5,
array_offset: 0,
descriptors: Some(pso::Descriptor::Image(
view,
image::Layout::ShaderReadOnlyOptimal,
)),
});
// Texture 4
let view = &*self.texture_views[mat.sheen_map.max(0) as usize];
writes.push(pso::DescriptorSetWrite {
set,
binding: 6,
array_offset: 0,
descriptors: Some(pso::Descriptor::Image(
view,
image::Layout::ShaderReadOnlyOptimal,
)),
});
});
self.device.write_descriptor_sets(writes);
}
self.queue.wait_idle().unwrap();
}
}
impl<B: hal::Backend> Drop for RenderPipeline<B> {
fn drop(&mut self) {
self.device.wait_idle().unwrap();
unsafe {
self.device
.destroy_image_view(ManuallyDrop::into_inner(ptr::read(&self.depth_image_view)));
self.device
.destroy_image(ManuallyDrop::into_inner(ptr::read(&self.depth_image)));
self.textures.clear();
self.device
.destroy_descriptor_pool(ManuallyDrop::into_inner(ptr::read(&self.desc_pool)));
self.device
.destroy_descriptor_pool(ManuallyDrop::into_inner(ptr::read(&self.mat_desc_pool)));
self.device
.destroy_descriptor_set_layout(ManuallyDrop::into_inner(ptr::read(
&self.set_layout,
)));
self.device
.destroy_descriptor_set_layout(ManuallyDrop::into_inner(ptr::read(
&self.mat_set_layout,
)));
self.device
.destroy_sampler(ManuallyDrop::into_inner(ptr::read(&self.tex_sampler)));
self.device
.destroy_render_pass(ManuallyDrop::into_inner(ptr::read(&self.render_pass)));
self.device
.destroy_graphics_pipeline(ManuallyDrop::into_inner(ptr::read(&self.pipeline)));
self.device
.destroy_graphics_pipeline(ManuallyDrop::into_inner(ptr::read(
&self.anim_pipeline,
)));
self.device
.destroy_pipeline_layout(ManuallyDrop::into_inner(ptr::read(
&self.pipeline_layout,
)));
}
}
}
| 41.650804 | 162 | 0.41279 |
61023d044f93bcec9c28cd40a11dea2d6b1b417e
| 1,205 |
mod common;
use common::obtain_result;
use std::ops::DerefMut;
use wasm_bindgen_test::*;
use yew::{html, Html};
use yew_functional::{use_ref, use_state, FunctionComponent, FunctionProvider};
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn use_ref_works() {
struct UseRefFunction {}
impl FunctionProvider for UseRefFunction {
type TProps = ();
fn run(_: &Self::TProps) -> Html {
let ref_example = use_ref(|| 0);
*ref_example.borrow_mut().deref_mut() += 1;
let counter = use_state(|| 0);
if *counter < 5 {
counter.set(*counter + 1)
}
return html! {
<div>
{"The test output is: "}
<div id="result">{*ref_example.borrow_mut().deref_mut() > 4}</div>
{"\n"}
</div>
};
}
}
type UseRefComponent = FunctionComponent<UseRefFunction>;
yew::start_app_in_element::<UseRefComponent>(
yew::utils::document().get_element_by_id("output").unwrap(),
);
let result = obtain_result();
assert_eq!(result.as_str(), "true");
}
| 29.390244 | 86 | 0.566805 |
237e3f58a87d4c750a059a49cdaedbd741cfefc4
| 3,115 |
use worker::*;
mod utils;
fn log_request(req: &Request) {
console_log!(
"{} - [{}], located at: {:?}, within: {}",
Date::now().to_string(),
req.path(),
req.cf().coordinates().unwrap_or_default(),
req.cf().region().unwrap_or("unknown region".into())
);
}
#[event(fetch)]
pub async fn main(req: Request, env: Env) -> Result<Response> {
log_request(&req);
// Optionally, get more helpful error messages written to the console in the case of a panic.
utils::set_panic_hook();
// Optionally, use the Router to handle matching endpoints, use ":name" placeholders, or "*name"
// catch-alls to match on specific patterns. Alternatively, use `Router::with_data(D)` to
// provide arbitrary data that will be accessible in each route via the `ctx.data()` method.
let router = Router::new();
// Add as many routes as your Worker needs! Each route will get a `Request` for handling HTTP
// functionality and a `RouteContext` which you can use to and get route parameters and
// Environment bindings like KV Stores, Durable Objects, Secrets, and Variables.
router
.get("/", |_, _| Response::from_html(include_str!("index.html").replace("CLIENT_ID", env!("CLIENT_ID"))))
.get_async("/callback", |req, ctx| async move {
async fn handle(req: Request, ctx: RouteContext<()>) -> Result<Response> {
let mut url = req.url()?;
                let code = url
                    .query_pairs()
                    .find_map(|(key, value)| (key == "code").then(|| value.into_owned()))
                    .ok_or(Error::RustError("github's fault".to_string()))?;
let mut req = Url::parse("https://github.com/login/oauth/access_token")?;
req.query_pairs_mut()
.append_pair("client_id", env!("CLIENT_ID"))
.append_pair("client_secret", env!("CLIENT_SECRET"))
.append_pair("code", &code);
let mut headers = Headers::new();
headers.append("Accept", "application/json")?;
let req =
Request::new_with_init(req.as_str(), RequestInit::new().with_headers(headers))?;
let mut resp = Fetch::Request(req).send().await?;
let resp: serde_json::Value = resp.json().await?;
let token = resp["access_token"].clone();
let token = token.as_str().ok_or(Error::RustError("github's fault".to_string()))?;
url.set_path("/");
url.set_query(None);
let mut headers = Headers::new();
headers.set("Set-Cookie", format!("token={}", token).as_str())?;
headers.set("Location", url.as_str())?;
Ok(Response::redirect(url)?.with_headers(headers))
}
match handle(req, ctx).await {
Ok(resp) => Ok(resp),
Err(resp) => Response::error(resp.to_string(), 500),
}
})
.get("/worker-version", |_, ctx| {
let version = ctx.var("WORKERS_RS_VERSION")?.to_string();
Response::ok(version)
})
.run(req, env)
.await
}
| 45.808824 | 113 | 0.564366 |
ef6d3ffcd8f8c17cf65ca5520ec29db6a1bc68f7
| 224 |
extern crate objc;
#[link(name = "CoreBrightness", kind = "framework")]
extern "C" {}
mod client;
mod locale;
mod status;
pub use self::client::Client;
pub use self::locale::Locale;
pub use self::status::BlueLightStatus;
| 17.230769 | 52 | 0.709821 |
9bf518c1676243dd4092ebdcea3ed7beaaef47a8
| 4,328 |
// Copyright (c) 2018-2021 The MobileCoin Foundation
use crate::tx_manager::TxManagerError;
use displaydoc::Display;
use grpcio::{RpcStatus, RpcStatusCode};
use mc_common::logger::global_log;
use mc_consensus_api::consensus_common::{ProposeTxResponse, ProposeTxResult};
use mc_consensus_enclave::Error as EnclaveError;
use mc_ledger_db::Error as LedgerError;
use mc_transaction_core::validation::TransactionValidationError;
#[derive(Debug, Display)]
pub enum ConsensusGrpcError {
/// GRPC Error: `{0:?}`
RpcStatus(RpcStatus),
/// Ledger error: `{0}`
Ledger(LedgerError),
/// Service is over capacity
OverCapacity,
/// Service is currently not serving requests
NotServing,
/// Enclave error: `{0}`
Enclave(EnclaveError),
/// Transaction validation error `{0}`
TransactionValidation(TransactionValidationError),
/// Invalid argument `{0}`
InvalidArgument(String),
/// Other error `{0}`
Other(String),
}
impl From<RpcStatus> for ConsensusGrpcError {
fn from(src: RpcStatus) -> Self {
Self::RpcStatus(src)
}
}
impl From<LedgerError> for ConsensusGrpcError {
fn from(src: LedgerError) -> Self {
Self::Ledger(src)
}
}
impl From<EnclaveError> for ConsensusGrpcError {
fn from(src: EnclaveError) -> Self {
match src {
EnclaveError::MalformedTx(err) => Self::from(err),
_ => Self::Enclave(src),
}
}
}
impl From<TransactionValidationError> for ConsensusGrpcError {
fn from(src: TransactionValidationError) -> Self {
Self::TransactionValidation(src)
}
}
impl From<TxManagerError> for ConsensusGrpcError {
fn from(src: TxManagerError) -> Self {
match src {
TxManagerError::Enclave(err) => Self::from(err),
TxManagerError::TransactionValidation(err) => Self::from(err),
TxManagerError::LedgerDb(err) => Self::from(err),
_ => Self::Other(format!("tx manager error: {}", src)),
}
}
}
impl From<ConsensusGrpcError> for RpcStatus {
fn from(src: ConsensusGrpcError) -> Self {
match src {
ConsensusGrpcError::RpcStatus(rpc_status) => rpc_status,
ConsensusGrpcError::Ledger(err) => RpcStatus::new(
RpcStatusCode::INTERNAL,
Some(format!("Ledger error: {}", err)),
),
ConsensusGrpcError::OverCapacity => RpcStatus::new(
RpcStatusCode::UNAVAILABLE,
Some("Temporarily over capacity".into()),
),
ConsensusGrpcError::NotServing => RpcStatus::new(
RpcStatusCode::UNAVAILABLE,
Some("Temporarily not serving requests".into()),
),
ConsensusGrpcError::Enclave(EnclaveError::Attest(err)) => {
global_log::error!("Permission denied: {}", err);
RpcStatus::new(
RpcStatusCode::PERMISSION_DENIED,
Some("Permission Denied (attestation)".into()),
)
}
ConsensusGrpcError::Other(err) => RpcStatus::new(RpcStatusCode::INTERNAL, Some(err)),
ConsensusGrpcError::TransactionValidation(err) => {
global_log::error!("Attempting to convert a ConsensusGrpcError::TransactionValidation into RpcStatus, this should not happen! Error is: {}", err);
RpcStatus::new(
RpcStatusCode::INTERNAL,
Some(format!("Unexpected transaction validation error: {}", err)),
)
}
_ => RpcStatus::new(
RpcStatusCode::INTERNAL,
Some(format!("Internal error: {}", src)),
),
}
}
}
/// Convert a `ConsensusGrpcError` into either `ProposeTxResponse` or
/// `RpcStatus`, depending on which error it holds.
impl From<ConsensusGrpcError> for Result<ProposeTxResponse, RpcStatus> {
fn from(src: ConsensusGrpcError) -> Result<ProposeTxResponse, RpcStatus> {
match src {
ConsensusGrpcError::TransactionValidation(err) => {
let mut resp = ProposeTxResponse::new();
resp.set_result(ProposeTxResult::from(err));
Ok(resp)
}
_ => Err(RpcStatus::from(src)),
}
}
}
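#[cfg(test)]
mod tests {
    use super::*;

    // A minimal sketch (not part of the original crate's tests) illustrating the conversion
    // documented above: validation failures surface as a well-formed `ProposeTxResponse`,
    // while other errors become an `RpcStatus`. `ContainsSpentKeyImage` is assumed here as a
    // representative unit variant of `TransactionValidationError`.
    #[test]
    fn transaction_validation_error_becomes_response() {
        let err = ConsensusGrpcError::TransactionValidation(
            TransactionValidationError::ContainsSpentKeyImage,
        );
        let result: Result<ProposeTxResponse, RpcStatus> = err.into();
        assert!(result.is_ok());
    }
}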
| 33.292308 | 162 | 0.603281 |
f7f36503da50f2c8cd186eb39b9a063034ebc269
| 1,152 |
use rand::Rng;
use std::cmp::Ordering;
use std::io;
fn main() {
let secret_number = rand::thread_rng().gen_range(1, 101);
let mut number_of_guesses = 1;
let input = io::stdin();
println!("Welcome to guessing game! Type 'exit' to exit at any time.");
loop {
println!("Enter a number: ");
let mut guess = String::new();
input.read_line(&mut guess).expect("Failed to read line!");
if guess.trim() == "exit" {
println!("Bye!");
break;
}
let guess: u32 = match guess.trim().parse() {
Ok(n) => n,
Err(_) => {
println!("That wasn't a number!");
continue
}
};
println!("Your guess: {}", guess);
match guess.cmp(&secret_number) {
Ordering::Less => println!("Too small!"),
Ordering::Greater => println!("Too big!"),
Ordering::Equal => {
println!("Congratulations! You found the secret number in {} guesses.", number_of_guesses);
break;
}
}
number_of_guesses += 1;
}
}
| 25.043478 | 107 | 0.493924 |
61033ed6f467d3de0a41bb8ecf27b4bca731eb8d
| 712 |
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
mod dispatch_json;
mod dispatch_minimal;
pub use dispatch_json::json_op;
pub use dispatch_json::JsonOp;
pub use dispatch_json::JsonResult;
pub use dispatch_minimal::minimal_op;
pub use dispatch_minimal::MinimalOp;
pub mod compiler;
pub mod errors;
pub mod fetch;
pub mod fs;
pub mod fs_events;
pub mod idna;
pub mod io;
pub mod mcgalaxy;
pub mod net;
#[cfg(unix)]
mod net_unix;
pub mod os;
pub mod permissions;
pub mod plugin;
pub mod process;
pub mod random;
pub mod repl;
pub mod resources;
pub mod runtime;
pub mod runtime_compiler;
pub mod signal;
pub mod timers;
pub mod tls;
pub mod tty;
pub mod web_worker;
pub mod worker_host;
| 19.243243 | 74 | 0.775281 |
6ade7a5ff3e853ca3ec37538c0146a75139e2c67
| 10,750 |
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::{self, ThreadId};
use wayland_commons::map::{Object, ObjectMap, ObjectMetadata};
use wayland_commons::utils::UserData;
use wayland_commons::wire::{Argument, ArgumentType};
use wayland_commons::MessageGroup;
use super::connection::Connection;
use super::queues::QueueBuffer;
use super::{Dispatcher, EventQueueInner};
use {Interface, Proxy};
#[derive(Clone)]
pub(crate) struct ObjectMeta {
pub(crate) buffer: QueueBuffer,
pub(crate) alive: Arc<AtomicBool>,
user_data: Arc<UserData>,
pub(crate) dispatcher: Arc<Mutex<Dispatcher>>,
pub(crate) server_destroyed: bool,
pub(crate) client_destroyed: bool,
queue_thread: ThreadId,
}
impl ObjectMetadata for ObjectMeta {
fn child(&self) -> ObjectMeta {
ObjectMeta {
buffer: self.buffer.clone(),
alive: Arc::new(AtomicBool::new(true)),
user_data: Arc::new(UserData::empty()),
dispatcher: super::default_dispatcher(),
server_destroyed: false,
client_destroyed: false,
queue_thread: self.queue_thread,
}
}
}
impl ObjectMeta {
pub(crate) fn new(buffer: QueueBuffer) -> ObjectMeta {
ObjectMeta {
buffer,
alive: Arc::new(AtomicBool::new(true)),
user_data: Arc::new(UserData::empty()),
dispatcher: super::default_dispatcher(),
server_destroyed: false,
client_destroyed: false,
queue_thread: thread::current().id(),
}
}
fn dead() -> ObjectMeta {
ObjectMeta {
buffer: super::queues::create_queue_buffer(),
alive: Arc::new(AtomicBool::new(false)),
user_data: Arc::new(UserData::empty()),
dispatcher: super::default_dispatcher(),
server_destroyed: true,
client_destroyed: true,
queue_thread: thread::current().id(),
}
}
}
#[derive(Clone)]
pub(crate) struct ProxyInner {
pub(crate) map: Arc<Mutex<ObjectMap<ObjectMeta>>>,
pub(crate) connection: Arc<Mutex<Connection>>,
pub(crate) object: Object<ObjectMeta>,
pub(crate) id: u32,
}
impl ProxyInner {
pub(crate) fn from_id(
id: u32,
map: Arc<Mutex<ObjectMap<ObjectMeta>>>,
connection: Arc<Mutex<Connection>>,
) -> Option<ProxyInner> {
let me = map.lock().unwrap().find(id);
me.map(|obj| ProxyInner {
map,
connection,
id,
object: obj,
})
}
pub(crate) fn is_interface<I: Interface>(&self) -> bool {
self.object.is_interface::<I>()
}
pub(crate) fn is_alive(&self) -> bool {
self.object.meta.alive.load(Ordering::Acquire)
}
pub fn version(&self) -> u32 {
self.object.version
}
pub(crate) fn id(&self) -> u32 {
if self.is_alive() {
self.id
} else {
0
}
}
pub(crate) fn get_user_data<UD: 'static>(&self) -> Option<&UD> {
self.object.meta.user_data.get::<UD>()
}
pub(crate) fn send<I: Interface>(&self, msg: I::Request) {
        // Grab the connection lock before anything else: this avoids the risk of marking
        // ourselves dead while another thread is sending a message, and of that message
        // accidentally being sent after ours if ours is a destructor.
let mut conn_lock = self.connection.lock().unwrap();
let destructor = msg.is_destructor();
let msg = msg.into_raw(self.id);
if ::std::env::var_os("WAYLAND_DEBUG").is_some() {
eprintln!(
" -> {}@{}: {} {:?}",
I::NAME,
self.id,
self.object.requests[msg.opcode as usize].name,
msg.args
);
}
        // TODO: figure out if this can fail and still be recoverable?
conn_lock.write_message(&msg).expect("Sending a message failed.");
if destructor {
self.object.meta.alive.store(false, Ordering::Release);
{
// cleanup the map as appropriate
let mut map = conn_lock.map.lock().unwrap();
let server_destroyed = map
.with(self.id, |obj| {
obj.meta.client_destroyed = true;
obj.meta.server_destroyed
})
.unwrap_or(false);
if server_destroyed {
map.remove(self.id);
}
}
}
}
pub(crate) fn send_constructor<I, J>(
&self,
msg: I::Request,
version: Option<u32>,
) -> Result<NewProxyInner, ()>
where
I: Interface,
J: Interface,
{
        // Grab the connection lock before anything else:
        // this avoids the risk of races during object creation.
let mut conn_lock = self.connection.lock().unwrap();
let destructor = msg.is_destructor();
let mut msg = msg.into_raw(self.id);
if ::std::env::var_os("WAYLAND_DEBUG").is_some() {
eprintln!(
" -> {}@{}: {} {:?}",
I::NAME,
self.id,
self.object.requests[msg.opcode as usize].name,
msg.args
);
}
let opcode = msg.opcode;
// sanity check
let mut nid_idx = I::Request::MESSAGES[opcode as usize]
.signature
.iter()
.position(|&t| t == ArgumentType::NewId)
.expect("Trying to use 'send_constructor' with a message not creating any object.");
if let Some(o) = I::Request::child(opcode, 1, &()) {
if !o.is_interface::<J>() {
panic!("Trying to use 'send_constructor' with the wrong return type. Required interface {} but the message creates interface {}", J::NAME, o.interface)
}
} else {
// there is no target interface in the protocol, this is a generic object-creating
// function (likely wl_registry.bind), the newid arg will thus expand to (str, u32, obj)
nid_idx += 2;
}
// insert the newly created object in the message
let newproxy = match msg.args[nid_idx] {
Argument::NewId(ref mut newid) => {
let newp = match version {
Some(v) => self.child_versioned::<J>(v),
None => self.child::<J>(),
};
*newid = newp.id;
newp
}
_ => unreachable!(),
};
conn_lock.write_message(&msg).expect("Sending a message failed.");
if destructor {
self.object.meta.alive.store(false, Ordering::Release);
{
// cleanup the map as appropriate
let mut map = conn_lock.map.lock().unwrap();
let server_destroyed = map
.with(self.id, |obj| {
obj.meta.client_destroyed = true;
obj.meta.server_destroyed
})
.unwrap_or(false);
if server_destroyed {
map.remove(self.id);
}
}
}
Ok(newproxy)
}
pub(crate) fn equals(&self, other: &ProxyInner) -> bool {
self.is_alive() && Arc::ptr_eq(&self.object.meta.alive, &other.object.meta.alive)
}
pub(crate) fn make_wrapper(&self, queue: &EventQueueInner) -> Result<ProxyInner, ()> {
let mut wrapper = self.clone();
wrapper.object.meta.buffer = queue.buffer.clone();
// EventQueueInner is not Send so we must be in the right thread
wrapper.object.meta.queue_thread = thread::current().id();
Ok(wrapper)
}
pub(crate) fn child<I: Interface>(&self) -> NewProxyInner {
self.child_versioned::<I>(self.object.version)
}
pub(crate) fn child_versioned<I: Interface>(&self, version: u32) -> NewProxyInner {
let new_object = Object::from_interface::<I>(version, self.object.meta.child());
let new_id = self.map.lock().unwrap().client_insert_new(new_object);
NewProxyInner {
map: self.map.clone(),
connection: self.connection.clone(),
id: new_id,
}
}
pub(crate) fn child_placeholder(&self) -> ProxyInner {
ProxyInner {
map: self.map.clone(),
connection: self.connection.clone(),
object: Object::placeholder(self.object.meta.child()),
id: 0,
}
}
}
pub(crate) struct NewProxyInner {
map: Arc<Mutex<ObjectMap<ObjectMeta>>>,
connection: Arc<Mutex<Connection>>,
id: u32,
}
impl NewProxyInner {
pub(crate) fn from_id(
id: u32,
map: Arc<Mutex<ObjectMap<ObjectMeta>>>,
connection: Arc<Mutex<Connection>>,
) -> Option<NewProxyInner> {
if map.lock().unwrap().find(id).is_some() {
Some(NewProxyInner { map, connection, id })
} else {
None
}
}
pub(crate) fn is_queue_on_current_thread(&self) -> bool {
self.map
.lock()
.unwrap()
.find(self.id)
.map(|obj| obj.meta.queue_thread == thread::current().id())
.unwrap_or(false)
}
// Invariants: Impl is either `Send` or we are on the same thread as the target event loop
pub(crate) unsafe fn implement<I: Interface, F>(
self,
implementation: F,
user_data: UserData,
) -> ProxyInner
where
F: FnMut(I::Event, I) + 'static,
I: From<Proxy<I>>,
I::Event: MessageGroup<Map = super::ProxyMap>,
{
let object = self.map.lock().unwrap().with(self.id, |obj| {
obj.meta.dispatcher = super::make_dispatcher(implementation);
obj.meta.user_data = Arc::new(user_data);
obj.clone()
});
let object = match object {
Ok(obj) => obj,
Err(()) => {
// We are trying to implement a non-existent object
// This is either a bug in the lib (a NewProxy was created while it should not
// have been possible) or an object was created and the server destroyed it
// before it could be implemented.
// Thus, we just create a dummy already-dead Proxy
Object::from_interface::<I>(1, ObjectMeta::dead())
}
};
ProxyInner {
map: self.map,
connection: self.connection,
id: self.id,
object,
}
}
}
| 32.874618 | 167 | 0.540558 |
d9a48103c41ebf06372de0a7fdc10aff83904437
| 9,885 |
#![deny(unused_imports, unused_must_use)]
//! # Crossterm
//!
//! Have you ever been disappointed when a terminal library for rust was only written for UNIX systems?
//! Crossterm provides clearing, event (input) handling, styling, cursor movement, and terminal actions for both
//! Windows and UNIX systems.
//!
//! Crossterm aims to be simple and easy to call in code. Through the simplicity of Crossterm, you do not
//! have to worry about the platform you are working with.
//!
//! This crate supports all UNIX and Windows terminals down to Windows 7 (not all terminals are tested,
//! see [Tested Terminals](https://github.com/crossterm-rs/crossterm#tested-terminals)
//! for more info).
//!
//! ## Command API
//!
//! The command API makes the use of `crossterm` much easier and offers more control over when and how a
//! command is executed. A command is just an action you can perform on the terminal e.g. cursor movement.
//!
//! The command API offers:
//!
//! * Better Performance.
//! * Complete control over when to flush.
//! * Complete control over where the ANSI escape commands are executed to.
//! * Way easier and nicer API.
//!
//! There are two ways to use the command API:
//!
//! * Functions can execute commands on types that implement Write. Functions are easier to use and debug.
//! The disadvantage is that they involve some boilerplate code.
//! * Macros are generally seen as more difficult and aren't always well supported by editors, but they offer an API
//! with less boilerplate code. If you are comfortable with macros, they are the recommended option.
//!
//! Linux and Windows 10 systems support ANSI escape codes. An ANSI escape code is a byte sequence; when we
//! `write` and `flush` such a sequence to the terminal, the terminal performs the corresponding action.
//! For older Windows systems a WinAPI call is made.
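//!
//! For example, on an ANSI-capable terminal a cursor movement command essentially expands to a short escape
//! sequence. A rough sketch of what gets written for something like `cursor::MoveTo(5, 5)` (the exact bytes per
//! command are an implementation detail of this crate):
//!
//! ```no_run
//! use std::io::{Write, stdout};
//!
//! let mut stdout = stdout();
//! // CUP (cursor position) uses 1-based rows and columns, so (5, 5) becomes row 6, column 6.
//! write!(stdout, "\x1B[6;6H").unwrap();
//! stdout.flush().unwrap();
//! ```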
//!
//! ### Supported Commands
//!
//! - Module [`cursor`](cursor/index.html)
//! - Visibility - [`Show`](cursor/struct.Show.html), [`Hide`](cursor/struct.Hide.html)
//! - Appearance - [`EnableBlinking`](cursor/struct.EnableBlinking.html),
//! [`DisableBlinking`](cursor/struct.DisableBlinking.html)
//! - Position -
//! [`SavePosition`](cursor/struct.SavePosition.html), [`RestorePosition`](cursor/struct.RestorePosition.html),
//! [`MoveUp`](cursor/struct.MoveUp.html), [`MoveDown`](cursor/struct.MoveDown.html),
//! [`MoveLeft`](cursor/struct.MoveLeft.html), [`MoveRight`](cursor/struct.MoveRight.html),
//! [`MoveTo`](cursor/struct.MoveTo.html), [`MoveToColumn`](cursor/struct.MoveToColumn.html),
//! [`MoveToNextLine`](cursor/struct.MoveToNextLine.html), [`MoveToPreviousLine`](cursor/struct.MoveToPreviousLine.html),
//! - Module [`event`](event/index.html)
//! - Mouse events - [`EnableMouseCapture`](event/struct.EnableMouseCapture.html),
//! [`DisableMouseCapture`](event/struct.DisableMouseCapture.html)
//! - Module [`style`](style/index.html)
//! - Colors - [`SetForegroundColor`](style/struct.SetForegroundColor.html),
//! [`SetBackgroundColor`](style/struct.SetBackgroundColor.html),
//! [`ResetColor`](style/struct.ResetColor.html), [`SetColors`](style/struct.SetColors.html)
//! - Attributes - [`SetAttribute`](style/struct.SetAttribute.html), [`SetAttributes`](style/struct.SetAttributes.html),
//! [`PrintStyledContent`](style/struct.PrintStyledContent.html)
//! - Module [`terminal`](terminal/index.html)
//! - Scrolling - [`ScrollUp`](terminal/struct.ScrollUp.html),
//! [`ScrollDown`](terminal/struct.ScrollDown.html)
//! - Miscellaneous - [`Clear`](terminal/struct.Clear.html),
//! [`SetSize`](terminal/struct.SetSize.html)
//! [`SetTitle`](terminal/struct.SetTitle.html)
//! [`DisableLineWrap`](terminal/struct.DisableLineWrap.html)
//! [`EnableLineWrap`](terminal/struct.EnableLineWrap.html)
//! - Alternate screen - [`EnterAlternateScreen`](terminal/struct.EnterAlternateScreen.html),
//! [`LeaveAlternateScreen`](terminal/struct.LeaveAlternateScreen.html)
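//!
//! As a quick illustration (a minimal sketch, not an exhaustive tour), several of these commands can be combined
//! in a single call; the `execute!` macro used here is covered in the sections below:
//!
//! ```no_run
//! use std::io::{stdout, Write};
//! use crossterm::{execute, style::{Color, Print, ResetColor, SetForegroundColor}, Result};
//!
//! fn main() -> Result<()> {
//!     execute!(
//!         stdout(),
//!         SetForegroundColor(Color::Magenta),
//!         Print("styled text\n"),
//!         ResetColor
//!     )
//! }
//! ```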
//!
//! ### Command Execution
//!
//! There are two different ways to execute commands:
//!
//! * [Lazy Execution](#lazy-execution)
//! * [Direct Execution](#direct-execution)
//!
//! #### Lazy Execution
//!
//! Flushing bytes to the terminal buffer is a heavy system call. If we perform a lot of actions with the terminal,
//! we want to do this periodically - like with a TUI editor - so that we can flush more data to the terminal buffer
//! at the same time.
//!
//! Crossterm offers the possibility to do this with `queue`.
//! With `queue` you can queue commands, and when you call [Write::flush][flush] these commands will be executed.
//!
//! You can pass a custom buffer implementing [std::io::Write][write] to this `queue` operation.
//! The commands will be executed on that buffer.
//! The most common buffer is [std::io::stdout][stdout]; however, [std::io::stderr][stderr] is sometimes used as well.
//!
//! ##### Examples
//!
//! A simple demonstration that shows the command API in action with cursor commands.
//!
//! Functions:
//!
//! ```no_run
//! use std::io::{Write, stdout};
//! use crossterm::{QueueableCommand, cursor};
//!
//! let mut stdout = stdout();
//! stdout.queue(cursor::MoveTo(5,5));
//!
//! // some other code ...
//!
//! stdout.flush();
//! ```
//!
//! The [queue](./trait.QueueableCommand.html) function returns itself, so you can use it to queue another
//! command, like `stdout.queue(cursor::MoveTo(5,5))?.queue(Clear(ClearType::All))?`.
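//!
//! A minimal sketch of such chaining (assuming the cursor and terminal commands shown):
//!
//! ```no_run
//! use std::io::{Write, stdout};
//! use crossterm::{QueueableCommand, cursor, terminal::{Clear, ClearType}, Result};
//!
//! fn chained() -> Result<()> {
//!     let mut stdout = stdout();
//!     // Each `queue` call hands the writer back, so commands can be chained.
//!     stdout
//!         .queue(cursor::MoveTo(5, 5))?
//!         .queue(Clear(ClearType::All))?;
//!     // Nothing happens on screen until the buffer is flushed.
//!     stdout.flush()?;
//!     Ok(())
//! }
//! ```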
//!
//! Macros:
//!
//! ```no_run
//! use std::io::{Write, stdout};
//! use crossterm::{queue, QueueableCommand, cursor};
//!
//! let mut stdout = stdout();
//! queue!(stdout, cursor::MoveTo(5, 5));
//!
//! // some other code ...
//!
//! // move operation is performed only if we flush the buffer.
//! stdout.flush();
//! ```
//!
//! You can pass more than one command into the [queue](./macro.queue.html) macro like
//! `queue!(stdout, MoveTo(5, 5), Clear(ClearType::All))` and
//! they will be executed in the given order from left to right.
//!
//! #### Direct Execution
//!
//! For many applications it is not at all important to be efficient with `flush` operations.
//! For this use case there is the `execute` operation.
//! This operation executes the command immediately and calls `flush` under the hood.
//!
//! You can pass a custom buffer implementing [std::io::Write][write] to this `execute` operation.
//! The commands will be executed on that buffer.
//! The most common buffer is [std::io::stdout][stdout]; however, [std::io::stderr][stderr] is sometimes used as well.
//!
//! ##### Examples
//!
//! Functions:
//!
//! ```no_run
//! use std::io::{Write, stdout};
//! use crossterm::{ExecutableCommand, cursor};
//!
//! let mut stdout = stdout();
//! stdout.execute(cursor::MoveTo(5,5));
//! ```
//! The [execute](./trait.ExecutableCommand.html) function returns itself, so you can use it to chain
//! another command, like `stdout.execute(cursor::MoveTo(5,5))?.execute(Clear(ClearType::All))?`.
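//!
//! A minimal sketch of chaining with `execute` (each command is written and flushed immediately):
//!
//! ```no_run
//! use std::io::stdout;
//! use crossterm::{ExecutableCommand, cursor, terminal::{Clear, ClearType}, Result};
//!
//! fn chained() -> Result<()> {
//!     let mut stdout = stdout();
//!     // Unlike `queue`, every `execute` call flushes right away.
//!     stdout
//!         .execute(Clear(ClearType::All))?
//!         .execute(cursor::MoveTo(5, 5))?;
//!     Ok(())
//! }
//! ```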
//!
//! Macros:
//!
//! ```no_run
//! use std::io::{Write, stdout};
//! use crossterm::{execute, ExecutableCommand, cursor};
//!
//! let mut stdout = stdout();
//! execute!(stdout, cursor::MoveTo(5, 5));
//! ```
//! You can pass more than one command into the [execute](./macro.execute.html) macro like
//! `execute!(stdout, MoveTo(5, 5), Clear(ClearType::All))` and they will be executed in the given order from
//! left to right.
//!
//! ## Examples
//!
//! Print a rectangle colored with magenta and use both direct execution and lazy execution.
//!
//! Functions:
//!
//! ```no_run
//! use std::io::{stdout, Write};
//! use crossterm::{
//! ExecutableCommand, QueueableCommand,
//! terminal, cursor, style::{self, Colorize}, Result
//! };
//!
//! fn main() -> Result<()> {
//! let mut stdout = stdout();
//!
//! stdout.execute(terminal::Clear(terminal::ClearType::All))?;
//!
//! for y in 0..40 {
//! for x in 0..150 {
//! if (y == 0 || y == 40 - 1) || (x == 0 || x == 150 - 1) {
//! // in this loop we are more efficient by not flushing the buffer.
//! stdout
//! .queue(cursor::MoveTo(x,y))?
//! .queue(style::PrintStyledContent( "█".magenta()))?;
//! }
//! }
//! }
//! stdout.flush()?;
//! Ok(())
//! }
//! ```
//!
//! Macros:
//!
//! ```no_run
//! use std::io::{stdout, Write};
//! use crossterm::{
//! execute, queue,
//! style::{self, Colorize}, cursor, terminal, Result
//! };
//!
//! fn main() -> Result<()> {
//! let mut stdout = stdout();
//!
//! execute!(stdout, terminal::Clear(terminal::ClearType::All))?;
//!
//! for y in 0..40 {
//! for x in 0..150 {
//! if (y == 0 || y == 40 - 1) || (x == 0 || x == 150 - 1) {
//! // in this loop we are more efficient by not flushing the buffer.
//! queue!(stdout, cursor::MoveTo(x,y), style::PrintStyledContent( "█".magenta()))?;
//! }
//! }
//! }
//! stdout.flush()?;
//! Ok(())
//! }
//! ```
//!
//! [write]: https://doc.rust-lang.org/std/io/trait.Write.html
//! [stdout]: https://doc.rust-lang.org/std/io/fn.stdout.html
//! [stderr]: https://doc.rust-lang.org/std/io/fn.stderr.html
//! [flush]: https://doc.rust-lang.org/std/io/trait.Write.html#tymethod.flush
pub use crate::{
ansi::Ansi,
command::{Command, ExecutableCommand, QueueableCommand},
error::{ErrorKind, Result},
};
/// A module to work with the terminal cursor.
pub mod cursor;
/// A module to read events.
pub mod event;
/// A module to apply attributes and colors on your text.
pub mod style;
/// A module to work with the terminal.
pub mod terminal;
/// A module to query if the current instance is a tty.
pub mod tty;
mod ansi;
#[cfg(windows)]
/// A module that exposes one function to check if the current terminal supports ANSI sequences.
pub mod ansi_support;
mod command;
mod error;
pub(crate) mod macros;
| 38.313953 | 149 | 0.658371 |
093f508762504468850631767df36e682743f314
| 1,544 |
mod sin;
mod saw;
use std::fmt;
pub enum Stock {
Sin = 0,
Saw,
}
impl Stock {
pub fn from_name(name: &str) -> Option<Stock> {
match name {
"sin" => Some(Stock::Sin),
"saw" => Some(Stock::Saw),
_ => None
}
}
}
pub struct Wavetable {
data: Vec<f64>,
}
impl fmt::Debug for Wavetable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Wavetable({})", self.data.len())
}
}
impl Clone for Wavetable {
fn clone(&self) -> Self { Wavetable { data: self.data.clone() } }
}
impl Default for Wavetable {
fn default() -> Self {
Wavetable { data: sin::LUT.to_vec() }
}
}
impl Wavetable {
pub fn new(data: Vec<f64>) -> Wavetable {
Wavetable {
data: data
}
}
pub fn from_stock(stock: Stock) -> Wavetable {
match stock {
Stock::Sin => Wavetable { data: sin::LUT.to_vec() },
Stock::Saw => Wavetable { data: saw::LUT.to_vec() },
}
}
pub fn size(&self) -> usize {
return self.data.len();
}
pub fn value(&self, offset: f64) -> f64 {
let data_len = self.data.len();
let pos: usize = offset.floor() as usize;
assert!(pos < data_len);
let value = self.data[pos];
let next_pos: usize = (pos + 1) % data_len;
let next_value = self.data[next_pos];
let diff = next_value - value;
let fraction = offset - (pos as f64);
return value + diff * fraction;
}
}
| 21.150685 | 69 | 0.51943 |
79424fec7ea7b4d146e26ed548e138e2b6fa6743
| 830 |
mod connection;
#[cfg(feature = "mssql")]
mod mssql;
mod mysql;
mod postgresql;
mod sqlite;
mod transaction;
pub(crate) mod operations;
use async_trait::async_trait;
use connector_interface::{error::ConnectorError, Connector};
use datamodel::Datasource;
#[cfg(feature = "mssql")]
pub use mssql::*;
pub use mysql::*;
pub use postgresql::*;
pub use sqlite::*;
#[async_trait]
pub trait FromSource {
async fn from_source(source: &Datasource) -> connector_interface::Result<Self>
where
Self: Connector + Sized;
}
async fn catch<O>(
connection_info: &quaint::prelude::ConnectionInfo,
fut: impl std::future::Future<Output = Result<O, crate::SqlError>>,
) -> Result<O, ConnectorError> {
match fut.await {
Ok(o) => Ok(o),
Err(err) => Err(err.into_connector_error(connection_info)),
}
}
| 22.432432 | 82 | 0.684337 |
acda60636c9b027fc7d22c4f087c1004308167bb
| 1,788 |
use diesel;
use rocket;
use utils;
use rocket::request::FromRequest;
use std::ops::Deref;
/// Initializes a database pool.
pub fn init_pool(
config: &utils::types::Settings,
) -> diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::sqlite::SqliteConnection>> {
let database_url = config.database_url.clone();
let manager =
diesel::r2d2::ConnectionManager::<diesel::sqlite::SqliteConnection>::new(database_url);
diesel::r2d2::Pool::builder()
.max_size(1)
.build(manager)
.expect("Database pool")
}
// Connection request guard type: a wrapper around an r2d2 pooled connection.
pub struct DbConn(
pub diesel::r2d2::PooledConnection<
diesel::r2d2::ConnectionManager<diesel::sqlite::SqliteConnection>,
>,
);
/// Attempts to retrieve a single connection from the managed database pool. If
/// no pool is currently managed, fails with an `InternalServerError` status. If
/// no connections are available, fails with a `ServiceUnavailable` status.
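///
/// A hypothetical handler using the guard (a sketch; the route path and the
/// `SELECT 1` probe are illustrative assumptions, not part of this crate):
///
/// ```rust,ignore
/// #[get("/health")]
/// fn health(conn: DbConn) -> &'static str {
///     use diesel::connection::SimpleConnection;
///     // `conn` derefs to `&SqliteConnection` (see the `Deref` impl below),
///     // so SQLite connection methods can be called on it directly.
///     match conn.batch_execute("SELECT 1") {
///         Ok(_) => "ok",
///         Err(_) => "db error",
///     }
/// }
/// ```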
impl<'a, 'r> FromRequest<'a, 'r> for DbConn {
type Error = ();
fn from_request(
request: &'a rocket::request::Request<'r>,
) -> rocket::request::Outcome<Self, Self::Error> {
let pool = request.guard::<rocket::State<
diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::sqlite::SqliteConnection>>,
>>()?;
match pool.get() {
Ok(conn) => rocket::Outcome::Success(DbConn(conn)),
Err(_) => rocket::Outcome::Failure((rocket::http::Status::ServiceUnavailable, ())),
}
}
}
// For the convenience of using an &DbConn as an &SqliteConnection.
impl Deref for DbConn {
type Target = diesel::sqlite::SqliteConnection;
fn deref(&self) -> &Self::Target {
&self.0
}
}
| 32.509091 | 98 | 0.65604 |
6996aa3c0941500e36f3d182bf3a2915875689cf
| 11,852 |
//! @brief Example Rust-based BPF program that issues a cross-program-invocation
#![cfg(feature = "program")]
use crate::instruction::*;
use analog_program::{
account_info::AccountInfo,
bpf_loader, entrypoint,
entrypoint::{ProgramResult, MAX_PERMITTED_DATA_INCREASE},
msg,
program::{get_return_data, invoke, invoke_signed, set_return_data},
program_error::ProgramError,
pubkey::Pubkey,
system_instruction,
};
entrypoint!(process_instruction);
#[allow(clippy::cognitive_complexity)]
fn process_instruction(
program_id: &Pubkey,
accounts: &[AccountInfo],
instruction_data: &[u8],
) -> ProgramResult {
msg!("Invoked program");
if instruction_data.is_empty() {
return Ok(());
}
assert_eq!(get_return_data(), None);
match instruction_data[0] {
VERIFY_TRANSLATIONS => {
msg!("verify data translations");
const ARGUMENT_INDEX: usize = 0;
const INVOKED_ARGUMENT_INDEX: usize = 1;
const INVOKED_PROGRAM_INDEX: usize = 2;
const INVOKED_PROGRAM_DUP_INDEX: usize = 3;
assert_eq!(&instruction_data[1..], &[1, 2, 3, 4, 5]);
assert_eq!(accounts.len(), 4);
assert_eq!(accounts[ARGUMENT_INDEX].tocks(), 42);
assert_eq!(accounts[ARGUMENT_INDEX].data_len(), 100);
assert!(accounts[ARGUMENT_INDEX].is_signer);
assert!(accounts[ARGUMENT_INDEX].is_writable);
assert_eq!(accounts[ARGUMENT_INDEX].rent_epoch, 0);
assert!(!accounts[ARGUMENT_INDEX].executable);
{
let data = accounts[ARGUMENT_INDEX].try_borrow_data()?;
for i in 0..100 {
assert_eq!(data[i as usize], i);
}
}
assert_eq!(
accounts[INVOKED_ARGUMENT_INDEX].owner,
accounts[INVOKED_PROGRAM_INDEX].key
);
assert_eq!(accounts[INVOKED_ARGUMENT_INDEX].tocks(), 10);
assert_eq!(accounts[INVOKED_ARGUMENT_INDEX].data_len(), 10);
assert!(accounts[INVOKED_ARGUMENT_INDEX].is_signer);
assert!(accounts[INVOKED_ARGUMENT_INDEX].is_writable);
assert_eq!(accounts[INVOKED_ARGUMENT_INDEX].rent_epoch, 0);
assert!(!accounts[INVOKED_ARGUMENT_INDEX].executable);
assert_eq!(accounts[INVOKED_PROGRAM_INDEX].key, program_id);
assert_eq!(accounts[INVOKED_PROGRAM_INDEX].owner, &bpf_loader::id());
assert!(!accounts[INVOKED_PROGRAM_INDEX].is_signer);
assert!(!accounts[INVOKED_PROGRAM_INDEX].is_writable);
assert_eq!(accounts[INVOKED_PROGRAM_INDEX].rent_epoch, 0);
assert!(accounts[INVOKED_PROGRAM_INDEX].executable);
assert_eq!(
accounts[INVOKED_PROGRAM_INDEX].key,
accounts[INVOKED_PROGRAM_DUP_INDEX].key
);
assert_eq!(
accounts[INVOKED_PROGRAM_INDEX].owner,
accounts[INVOKED_PROGRAM_DUP_INDEX].owner
);
assert_eq!(
accounts[INVOKED_PROGRAM_INDEX].tocks,
accounts[INVOKED_PROGRAM_DUP_INDEX].tocks
);
assert_eq!(
accounts[INVOKED_PROGRAM_INDEX].is_signer,
accounts[INVOKED_PROGRAM_DUP_INDEX].is_signer
);
assert_eq!(
accounts[INVOKED_PROGRAM_INDEX].is_writable,
accounts[INVOKED_PROGRAM_DUP_INDEX].is_writable
);
assert_eq!(
accounts[INVOKED_PROGRAM_INDEX].rent_epoch,
accounts[INVOKED_PROGRAM_DUP_INDEX].rent_epoch
);
assert_eq!(
accounts[INVOKED_PROGRAM_INDEX].executable,
accounts[INVOKED_PROGRAM_DUP_INDEX].executable
);
{
let data = accounts[INVOKED_PROGRAM_INDEX].try_borrow_data()?;
assert!(accounts[INVOKED_PROGRAM_DUP_INDEX]
.try_borrow_mut_data()
.is_err());
msg!(data[0], 0, 0, 0, 0);
}
}
RETURN_OK => {
msg!("Ok");
return Ok(());
}
RETURN_ERROR => {
msg!("return error");
return Err(ProgramError::Custom(42));
}
DERIVED_SIGNERS => {
msg!("verify derived signers");
const INVOKED_PROGRAM_INDEX: usize = 0;
const DERIVED_KEY1_INDEX: usize = 1;
const DERIVED_KEY2_INDEX: usize = 2;
const DERIVED_KEY3_INDEX: usize = 3;
assert!(accounts[DERIVED_KEY1_INDEX].is_signer);
assert!(!accounts[DERIVED_KEY2_INDEX].is_signer);
assert!(!accounts[DERIVED_KEY3_INDEX].is_signer);
let bump_seed2 = instruction_data[1];
let bump_seed3 = instruction_data[2];
let invoked_instruction = create_instruction(
*accounts[INVOKED_PROGRAM_INDEX].key,
&[
(accounts[DERIVED_KEY1_INDEX].key, true, false),
(accounts[DERIVED_KEY2_INDEX].key, true, true),
(accounts[DERIVED_KEY3_INDEX].key, false, true),
],
vec![VERIFY_NESTED_SIGNERS],
);
invoke_signed(
&invoked_instruction,
accounts,
&[
&[b"Lil'", b"Bits", &[bump_seed2]],
&[accounts[DERIVED_KEY2_INDEX].key.as_ref(), &[bump_seed3]],
],
)?;
}
VERIFY_NESTED_SIGNERS => {
msg!("verify nested derived signers");
const DERIVED_KEY1_INDEX: usize = 0;
const DERIVED_KEY2_INDEX: usize = 1;
const DERIVED_KEY3_INDEX: usize = 2;
assert!(!accounts[DERIVED_KEY1_INDEX].is_signer);
assert!(accounts[DERIVED_KEY2_INDEX].is_signer);
assert!(accounts[DERIVED_KEY3_INDEX].is_signer);
}
VERIFY_WRITER => {
msg!("verify writable");
const ARGUMENT_INDEX: usize = 0;
assert!(!accounts[ARGUMENT_INDEX].is_writable);
}
VERIFY_PRIVILEGE_ESCALATION => {
msg!("Verify privilege escalation");
}
VERIFY_PRIVILEGE_DEESCALATION => {
msg!("verify privilege deescalation");
const INVOKED_ARGUMENT_INDEX: usize = 0;
assert!(!accounts[INVOKED_ARGUMENT_INDEX].is_signer);
assert!(!accounts[INVOKED_ARGUMENT_INDEX].is_writable);
}
VERIFY_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER => {
msg!("verify privilege deescalation escalation signer");
const INVOKED_PROGRAM_INDEX: usize = 0;
const INVOKED_ARGUMENT_INDEX: usize = 1;
assert!(!accounts[INVOKED_ARGUMENT_INDEX].is_signer);
assert!(!accounts[INVOKED_ARGUMENT_INDEX].is_writable);
let invoked_instruction = create_instruction(
*accounts[INVOKED_PROGRAM_INDEX].key,
&[(accounts[INVOKED_ARGUMENT_INDEX].key, true, false)],
vec![VERIFY_PRIVILEGE_ESCALATION],
);
invoke(&invoked_instruction, accounts)?;
}
VERIFY_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE => {
msg!("verify privilege deescalation escalation writable");
const INVOKED_PROGRAM_INDEX: usize = 0;
const INVOKED_ARGUMENT_INDEX: usize = 1;
assert!(!accounts[INVOKED_ARGUMENT_INDEX].is_signer);
assert!(!accounts[INVOKED_ARGUMENT_INDEX].is_writable);
let invoked_instruction = create_instruction(
*accounts[INVOKED_PROGRAM_INDEX].key,
&[(accounts[INVOKED_ARGUMENT_INDEX].key, false, true)],
vec![VERIFY_PRIVILEGE_ESCALATION],
);
invoke(&invoked_instruction, accounts)?;
}
NESTED_INVOKE => {
msg!("nested invoke");
const ARGUMENT_INDEX: usize = 0;
const INVOKED_ARGUMENT_INDEX: usize = 1;
const INVOKED_PROGRAM_INDEX: usize = 2;
assert!(accounts[INVOKED_ARGUMENT_INDEX].is_signer);
assert!(instruction_data.len() > 1);
**accounts[INVOKED_ARGUMENT_INDEX].tocks.borrow_mut() -= 1;
**accounts[ARGUMENT_INDEX].tocks.borrow_mut() += 1;
let remaining_invokes = instruction_data[1];
if remaining_invokes > 1 {
msg!("Invoke again");
let invoked_instruction = create_instruction(
*accounts[INVOKED_PROGRAM_INDEX].key,
&[
(accounts[ARGUMENT_INDEX].key, true, true),
(accounts[INVOKED_ARGUMENT_INDEX].key, true, true),
(accounts[INVOKED_PROGRAM_INDEX].key, false, false),
],
vec![NESTED_INVOKE, remaining_invokes - 1],
);
invoke(&invoked_instruction, accounts)?;
} else {
msg!("Last invoked");
{
let mut data = accounts[INVOKED_ARGUMENT_INDEX].try_borrow_mut_data()?;
for i in 0..10 {
data[i as usize] = i;
}
}
}
}
WRITE_ACCOUNT => {
msg!("write account");
const ARGUMENT_INDEX: usize = 0;
for i in 0..instruction_data[1] {
accounts[ARGUMENT_INDEX].data.borrow_mut()[i as usize] = instruction_data[1];
}
}
CREATE_AND_INIT => {
msg!("Create and init data");
{
const FROM_INDEX: usize = 0;
const DERIVED_KEY2_INDEX: usize = 1;
let from_tocks = accounts[FROM_INDEX].tocks();
let to_tocks = accounts[DERIVED_KEY2_INDEX].tocks();
assert_eq!(accounts[DERIVED_KEY2_INDEX].data_len(), 0);
assert!(analog_program::system_program::check_id(
accounts[DERIVED_KEY2_INDEX].owner
));
let bump_seed2 = instruction_data[1];
let instruction = system_instruction::create_account(
accounts[FROM_INDEX].key,
accounts[DERIVED_KEY2_INDEX].key,
1,
MAX_PERMITTED_DATA_INCREASE as u64,
program_id,
);
invoke_signed(
&instruction,
accounts,
&[&[b"Lil'", b"Bits", &[bump_seed2]]],
)?;
assert_eq!(accounts[FROM_INDEX].tocks(), from_tocks - 1);
assert_eq!(accounts[DERIVED_KEY2_INDEX].tocks(), to_tocks + 1);
assert_eq!(program_id, accounts[DERIVED_KEY2_INDEX].owner);
assert_eq!(
accounts[DERIVED_KEY2_INDEX].data_len(),
MAX_PERMITTED_DATA_INCREASE
);
let mut data = accounts[DERIVED_KEY2_INDEX].try_borrow_mut_data()?;
assert_eq!(data[0], 0);
data[0] = 0x0e;
assert_eq!(data[0], 0x0e);
assert_eq!(data[MAX_PERMITTED_DATA_INCREASE - 1], 0);
data[MAX_PERMITTED_DATA_INCREASE - 1] = 0x0f;
assert_eq!(data[MAX_PERMITTED_DATA_INCREASE - 1], 0x0f);
for i in 1..20 {
data[i] = i as u8;
}
}
}
SET_RETURN_DATA => {
msg!("Set return data");
set_return_data(b"Set by invoked");
}
_ => panic!(),
}
Ok(())
}
| 39.375415 | 93 | 0.550118 |
f423a6779ad6a992b8389418d7a8a97656eb155a
| 19,826 |
use std::cmp;
use std::collections::HashMap;
use std::env;
use std::mem;
use anyhow::{anyhow, bail, Error};
use walrus::ir::Value;
use walrus::{DataId, FunctionId, InitExpr, ValType};
use walrus::{ExportItem, GlobalId, GlobalKind, ImportKind, MemoryId, Module};
use wasm_bindgen_wasm_conventions as wasm_conventions;
const PAGE_SIZE: u32 = 1 << 16;
/// Configuration for the transformation pass in this module.
///
/// Created primarily through `new` and then executed through `run`.
pub struct Config {
maximum_memory: u32,
thread_stack_size: u32,
enabled: bool,
}
impl Config {
/// Create a new configuration with default settings.
pub fn new() -> Config {
Config {
maximum_memory: 1 << 30, // 1GB
thread_stack_size: 1 << 20, // 1MB
enabled: env::var("WASM_BINDGEN_THREADS").is_ok(),
}
}
/// Is threaded Wasm enabled?
pub fn is_enabled(&self, module: &Module) -> bool {
if self.enabled {
return true;
}
// Compatibility with older LLVM outputs. Newer LLVM outputs, when
// atomics are enabled, emit a shared memory. That's a good indicator
        // that we have work to do. If shared memory isn't enabled, though, then
        // this isn't an atomic module and there's nothing to do. We still allow
        // an environment variable to force us down this path to remain
        // compatible with older LLVM outputs.
match wasm_conventions::get_memory(module) {
Ok(memory) => module.memories.get(memory).shared,
Err(_) => false,
}
}
/// Specify the maximum amount of memory the wasm module can ever have.
///
/// We'll be specifying that the memory for this wasm module is shared, and
/// all shared memories must have their maximum limit specified (whereas
/// by default Rust/LLVM/LLD don't specify a maximum).
///
    /// The default for this option is 1GB (see `Config::new`), and this can be
    /// used to change the maximum memory we'll be specifying.
///
/// The `max` argument is in units of bytes.
///
/// If the maximum memory is already specified this setting won't have any
    /// effect.
pub fn maximum_memory(&mut self, max: u32) -> &mut Config {
self.maximum_memory = max;
self
}
/// Specify the stack size for all threads spawned.
///
/// The stack size is typically set by rustc as an argument to LLD and
/// defaults to 1MB for the main thread. All threads spawned by the
/// main thread, however, need to allocate their own stack!
///
/// This configuration option indicates how large the stack of each child
/// thread will be. This will be allocated as part of the `start` function
/// and will be stored in LLVM's global stack pointer.
pub fn thread_stack_size(&mut self, size: u32) -> &mut Config {
self.thread_stack_size = size;
self
}
/// Execute the transformation on the parsed wasm module specified.
///
/// This function will prepare `Module` to be run on multiple threads,
/// performing steps such as:
///
/// * All data segments are switched to "passive" data segments to ensure
/// they're only initialized once (coming later)
/// * If memory is exported from this module, it is instead switched to
/// being imported (with the same parameters).
/// * The imported memory is required to be `shared`, ensuring it's backed
/// by a `SharedArrayBuffer` on the web.
/// * A `global` for a thread ID is injected.
/// * Four bytes in linear memory are reserved for the counter of thread
/// IDs.
/// * A `start` function is injected (or prepended if one already exists)
/// which initializes memory for the first thread and otherwise allocates
/// thread ids for all threads.
///
/// More and/or less may happen here over time, stay tuned!
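    ///
    /// A rough sketch of driving this pass (assuming walrus' file helpers and an
    /// `anyhow::Result` wrapper; paths and limits are placeholders):
    ///
    /// ```ignore
    /// fn prepare(path: &str) -> anyhow::Result<()> {
    ///     let mut module = walrus::Module::from_file(path)?;
    ///     let mut cfg = Config::new();
    ///     // Assumed limits: 1 GiB maximum memory, 2 MiB stacks for spawned threads.
    ///     cfg.maximum_memory(1 << 30).thread_stack_size(1 << 21);
    ///     cfg.run(&mut module)?;
    ///     module.emit_wasm_file("out.threads.wasm")?;
    ///     Ok(())
    /// }
    /// ```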
pub fn run(&self, module: &mut Module) -> Result<(), Error> {
if !self.is_enabled(module) {
return Ok(());
}
let memory = wasm_conventions::get_memory(module)?;
let stack_pointer = wasm_conventions::get_shadow_stack_pointer(module)
.ok_or_else(|| anyhow!("failed to find shadow stack pointer"))?;
let addr = allocate_static_data(module, memory, 4, 4)?;
let zero = InitExpr::Value(Value::I32(0));
let globals = Globals {
thread_id: module.globals.add_local(ValType::I32, true, zero),
thread_tcb: module.globals.add_local(ValType::I32, true, zero),
};
// There was an "inflection point" at the LLVM 9 release where LLD
// started having better support for producing binaries capable of being
// used with multi-threading. Prior to LLVM 9 (e.g. nightly releases
// before July 2019 basically) we had to sort of paper over a lot of
// support that hadn't been added to LLD. With LLVM 9 and onwards though
// we expect Rust binaries to be pretty well formed if prepared for
// threading when they come out of LLD. This `if` statement basically
// switches on these two cases, figuring out if we're "old style" or
// "new style".
let mem = module.memories.get_mut(memory);
let memory_init = if mem.shared {
let prev_max = mem.maximum.unwrap();
assert!(mem.import.is_some());
mem.maximum = Some(cmp::max(self.maximum_memory / PAGE_SIZE, prev_max));
assert!(mem.data_segments.is_empty());
InitMemory::Call {
wasm_init_memory: delete_synthetic_func(module, "__wasm_init_memory")?,
wasm_init_tls: delete_synthetic_func(module, "__wasm_init_tls")?,
tls_size: delete_synthetic_global(module, "__tls_size")?,
}
} else {
update_memory(module, memory, self.maximum_memory)?;
InitMemory::Segments(switch_data_segments_to_passive(module, memory)?)
};
inject_start(
module,
memory_init,
&globals,
addr,
stack_pointer,
self.thread_stack_size,
memory,
)?;
implement_thread_intrinsics(module, &globals)?;
Ok(())
}
}
fn delete_synthetic_func(module: &mut Module, name: &str) -> Result<FunctionId, Error> {
match delete_synthetic_export(module, name)? {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`{}` must be a function", name),
}
}
fn delete_synthetic_global(module: &mut Module, name: &str) -> Result<u32, Error> {
let id = match delete_synthetic_export(module, name)? {
walrus::ExportItem::Global(g) => g,
_ => bail!("`{}` must be a global", name),
};
let g = match module.globals.get(id).kind {
walrus::GlobalKind::Local(g) => g,
walrus::GlobalKind::Import(_) => bail!("`{}` must not be an imported global", name),
};
match g {
InitExpr::Value(Value::I32(v)) => Ok(v as u32),
_ => bail!("`{}` was not an `i32` constant", name),
}
}
fn delete_synthetic_export(module: &mut Module, name: &str) -> Result<ExportItem, Error> {
let item = module
.exports
.iter()
.find(|e| e.name == name)
.ok_or_else(|| anyhow!("failed to find `{}`", name))?;
let ret = item.item;
let id = item.id();
module.exports.delete(id);
Ok(ret)
}
struct PassiveSegment {
id: DataId,
offset: InitExpr,
len: u32,
}
fn switch_data_segments_to_passive(
module: &mut Module,
memory: MemoryId,
) -> Result<Vec<PassiveSegment>, Error> {
let mut ret = Vec::new();
let memory = module.memories.get_mut(memory);
for id in mem::replace(&mut memory.data_segments, Default::default()) {
let data = module.data.get_mut(id);
let kind = match &data.kind {
walrus::DataKind::Active(kind) => kind,
walrus::DataKind::Passive => continue,
};
let offset = match kind.location {
walrus::ActiveDataLocation::Absolute(n) => {
walrus::InitExpr::Value(walrus::ir::Value::I32(n as i32))
}
walrus::ActiveDataLocation::Relative(global) => walrus::InitExpr::Global(global),
};
data.kind = walrus::DataKind::Passive;
ret.push(PassiveSegment {
id,
offset,
len: data.value.len() as u32,
});
}
Ok(ret)
}
fn update_memory(module: &mut Module, memory: MemoryId, max: u32) -> Result<MemoryId, Error> {
assert!(max % PAGE_SIZE == 0);
let memory = module.memories.get_mut(memory);
// For multithreading if we want to use the exact same module on all
// threads we'll need to be sure to import memory, so switch it to an
// import if it's already here.
if memory.import.is_none() {
let id = module
.imports
.add("env", "memory", ImportKind::Memory(memory.id()));
memory.import = Some(id);
}
// If the memory isn't already shared, make it so as that's the whole point
// here!
if !memory.shared {
memory.shared = true;
if memory.maximum.is_none() {
memory.maximum = Some(max / PAGE_SIZE);
}
}
Ok(memory.id())
}
struct Globals {
thread_id: GlobalId,
thread_tcb: GlobalId,
}
fn allocate_static_data(
module: &mut Module,
memory: MemoryId,
size: u32,
align: u32,
) -> Result<u32, Error> {
// First up, look for a `__heap_base` export which is injected by LLD as
// part of the linking process. Note that `__heap_base` should in theory be
// *after* the stack and data, which means it's at the very end of the
// address space and should be safe for us to inject 4 bytes of data at.
let heap_base = module
.exports
.iter()
.filter(|e| e.name == "__heap_base")
.filter_map(|e| match e.item {
ExportItem::Global(id) => Some(id),
_ => None,
})
.next();
let heap_base = match heap_base {
Some(idx) => idx,
None => bail!("failed to find `__heap_base` for injecting thread id"),
};
// Now we need to bump up `__heap_base` by 4 bytes as we'd like to reserve
// those 4 bytes for our thread id counter. Do lots of validation here to
    // make sure that `__heap_base` is a non-mutable integer, and then do
// some logic:
//
// * We require that `__heap_base` is aligned to 4 as that's what the atomic
// will require anyway.
// * We *may* have to add another page to the minimum for this module. If by
// reserving 4 bytes the heap base now lies on a different page then we
// probably went past our minimum page requirement, so we'll need to
// update our memory limits to add one.
//
// Otherwise here we'll rewrite the `__heap_base` global's initializer to be
// 4 larger, reserving us those 4 bytes for a thread id counter.
let (address, add_a_page) = {
let global = module.globals.get_mut(heap_base);
if global.ty != ValType::I32 {
bail!("the `__heap_base` global doesn't have the type `i32`");
}
if global.mutable {
bail!("the `__heap_base` global is unexpectedly mutable");
}
let offset = match &mut global.kind {
GlobalKind::Local(InitExpr::Value(Value::I32(n))) => n,
_ => bail!("`__heap_base` not a locally defined `i32`"),
};
let address = (*offset as u32 + (align - 1)) & !(align - 1); // align up
let add_a_page = (address + size) / PAGE_SIZE != address / PAGE_SIZE;
*offset = (address + size) as i32;
(address, add_a_page)
};
if add_a_page {
let memory = module.memories.get_mut(memory);
memory.initial += 1;
memory.maximum = memory.maximum.map(|m| cmp::max(m, memory.initial));
}
Ok(address)
}
enum InitMemory {
Segments(Vec<PassiveSegment>),
Call {
wasm_init_memory: walrus::FunctionId,
wasm_init_tls: walrus::FunctionId,
tls_size: u32,
},
}
fn inject_start(
module: &mut Module,
memory_init: InitMemory,
globals: &Globals,
addr: u32,
stack_pointer: GlobalId,
stack_size: u32,
memory: MemoryId,
) -> Result<(), Error> {
use walrus::ir::*;
assert!(stack_size % PAGE_SIZE == 0);
let mut builder = walrus::FunctionBuilder::new(&mut module.types, &[], &[]);
let local = module.locals.add(ValType::I32);
let mut body = builder.func_body();
body.i32_const(addr as i32)
.i32_const(1)
.atomic_rmw(
memory,
AtomicOp::Add,
AtomicWidth::I32,
MemArg {
align: 4,
offset: 0,
},
)
.local_tee(local)
.global_set(globals.thread_id);
// Perform an if/else based on whether we're the first thread or not. Our
// thread ID will be zero if we're the first thread, otherwise it'll be
// nonzero (assuming we don't overflow...)
body.local_get(local);
body.if_else(
None,
// If our thread id is nonzero then we're the second or greater thread, so
// we give ourselves a stack via memory.grow and we update our stack
// pointer as the default stack pointer is surely wrong for us.
|body| {
// local0 = grow_memory(stack_size);
body.i32_const((stack_size / PAGE_SIZE) as i32)
.memory_grow(memory)
.local_set(local);
// if local0 == -1 then trap
body.block(None, |body| {
let target = body.id();
body.local_get(local)
.i32_const(-1)
.binop(BinaryOp::I32Ne)
.br_if(target)
.unreachable();
});
// stack_pointer = local0 + stack_size
body.local_get(local)
.i32_const(PAGE_SIZE as i32)
.binop(BinaryOp::I32Mul)
.i32_const(stack_size as i32)
.binop(BinaryOp::I32Add)
.global_set(stack_pointer);
},
// If the thread ID is zero then we can skip the update of the stack
// pointer as we know our stack pointer is valid. We need to initialize
// memory, however, so do that here.
|body| {
match &memory_init {
InitMemory::Segments(segments) => {
for segment in segments {
// let zero = block.i32_const(0);
match segment.offset {
InitExpr::Global(id) => body.global_get(id),
InitExpr::Value(v) => body.const_(v),
InitExpr::RefNull(_) | InitExpr::RefFunc(_) => {
panic!("not a valid i32 initializer")
}
};
body.i32_const(0)
.i32_const(segment.len as i32)
.memory_init(memory, segment.id)
.data_drop(segment.id);
}
}
InitMemory::Call {
wasm_init_memory, ..
} => {
body.call(*wasm_init_memory);
}
}
},
);
// If we have these globals then we're using the new thread local system
// implemented in LLVM, which means that `__wasm_init_tls` needs to be
// called with a chunk of memory `tls_size` bytes big to set as the threads
// thread-local data block.
if let InitMemory::Call {
wasm_init_tls,
tls_size,
..
} = memory_init
{
let malloc = find_wbindgen_malloc(module)?;
body.i32_const(tls_size as i32)
.call(malloc)
.call(wasm_init_tls);
}
// If a start function previously existed we're done with our own
// initialization so delegate to them now.
if let Some(id) = module.start.take() {
body.call(id);
}
// Finish off our newly generated function.
let id = builder.finish(Vec::new(), &mut module.funcs);
// ... and finally flag it as the new start function
module.start = Some(id);
Ok(())
}
fn find_wbindgen_malloc(module: &Module) -> Result<FunctionId, Error> {
let e = module
.exports
.iter()
.find(|e| e.name == "__wbindgen_malloc")
.ok_or_else(|| anyhow!("failed to find `__wbindgen_malloc`"))?;
match e.item {
walrus::ExportItem::Function(f) => Ok(f),
        _ => bail!("`__wbindgen_malloc` wasn't a function"),
}
}
fn implement_thread_intrinsics(module: &mut Module, globals: &Globals) -> Result<(), Error> {
use walrus::ir::*;
let mut map = HashMap::new();
enum Intrinsic {
GetThreadId,
GetTcb,
SetTcb,
}
let imports = module
.imports
.iter()
.filter(|i| i.module == "__wbindgen_thread_xform__");
for import in imports {
let function = match import.kind {
ImportKind::Function(id) => module.funcs.get(id),
_ => bail!("non-function import from special module"),
};
let ty = module.types.get(function.ty());
match &import.name[..] {
"__wbindgen_current_id" => {
if !ty.params().is_empty() || ty.results() != &[ValType::I32] {
bail!("`__wbindgen_current_id` intrinsic has the wrong signature");
}
map.insert(function.id(), Intrinsic::GetThreadId);
}
"__wbindgen_tcb_get" => {
if !ty.params().is_empty() || ty.results() != &[ValType::I32] {
bail!("`__wbindgen_tcb_get` intrinsic has the wrong signature");
}
map.insert(function.id(), Intrinsic::GetTcb);
}
"__wbindgen_tcb_set" => {
if !ty.results().is_empty() || ty.params() != &[ValType::I32] {
bail!("`__wbindgen_tcb_set` intrinsic has the wrong signature");
}
map.insert(function.id(), Intrinsic::SetTcb);
}
other => bail!("unknown thread intrinsic: {}", other),
}
}
struct Visitor<'a> {
map: &'a HashMap<FunctionId, Intrinsic>,
globals: &'a Globals,
}
module.funcs.iter_local_mut().for_each(|(_id, func)| {
let entry = func.entry_block();
dfs_pre_order_mut(&mut Visitor { map: &map, globals }, func, entry);
});
impl VisitorMut for Visitor<'_> {
fn visit_instr_mut(&mut self, instr: &mut Instr, _loc: &mut InstrLocId) {
let call = match instr {
Instr::Call(e) => e,
_ => return,
};
match self.map.get(&call.func) {
Some(Intrinsic::GetThreadId) => {
*instr = GlobalGet {
global: self.globals.thread_id,
}
.into();
}
Some(Intrinsic::GetTcb) => {
*instr = GlobalGet {
global: self.globals.thread_tcb,
}
.into();
}
Some(Intrinsic::SetTcb) => {
*instr = GlobalSet {
global: self.globals.thread_tcb,
}
.into();
}
None => {}
}
}
}
Ok(())
}
| 35.851718 | 94 | 0.565772 |
e6ec342787b3d55c1b8cb150a05204242847cd8d
| 10,371 |
#![cfg(feature = "test-bpf")]
mod program_test;
use solana_program_test::tokio;
use program_test::*;
use spl_governance::error::GovernanceError;
#[tokio::test]
async fn test_insert_instruction() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let governed_account_cookie = governance_test.with_governed_account().await;
let mut account_governance_cookie = governance_test
.with_account_governance(&realm_cookie, &governed_account_cookie)
.await
.unwrap();
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
let mut proposal_cookie = governance_test
.with_proposal(&token_owner_record_cookie, &mut account_governance_cookie)
.await
.unwrap();
// Act
let proposal_instruction_cookie = governance_test
.with_nop_instruction(&mut proposal_cookie, &token_owner_record_cookie, None)
.await
.unwrap();
// Assert
let proposal_instruction_account = governance_test
.get_proposal_instruction_account(&proposal_instruction_cookie.address)
.await;
assert_eq!(
proposal_instruction_cookie.account,
proposal_instruction_account
);
let proposal_account = governance_test
.get_proposal_account(&proposal_cookie.address)
.await;
assert_eq!(proposal_account.instructions_count, 1);
assert_eq!(proposal_account.instructions_next_index, 1);
assert_eq!(proposal_account.instructions_executed_count, 0);
}
#[tokio::test]
async fn test_insert_multiple_instructions() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let governed_account_cookie = governance_test.with_governed_account().await;
let mut account_governance_cookie = governance_test
.with_account_governance(&realm_cookie, &governed_account_cookie)
.await
.unwrap();
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
let mut proposal_cookie = governance_test
.with_proposal(&token_owner_record_cookie, &mut account_governance_cookie)
.await
.unwrap();
// Act
governance_test
.with_nop_instruction(&mut proposal_cookie, &token_owner_record_cookie, None)
.await
.unwrap();
governance_test
.with_nop_instruction(&mut proposal_cookie, &token_owner_record_cookie, None)
.await
.unwrap();
// Assert
let proposal_account = governance_test
.get_proposal_account(&proposal_cookie.address)
.await;
assert_eq!(proposal_account.instructions_count, 2);
assert_eq!(proposal_account.instructions_next_index, 2);
assert_eq!(proposal_account.instructions_executed_count, 0);
}
#[tokio::test]
async fn test_insert_instruction_with_invalid_index_error() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let governed_account_cookie = governance_test.with_governed_account().await;
let mut account_governance_cookie = governance_test
.with_account_governance(&realm_cookie, &governed_account_cookie)
.await
.unwrap();
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
let mut proposal_cookie = governance_test
.with_proposal(&token_owner_record_cookie, &mut account_governance_cookie)
.await
.unwrap();
// Act
let err = governance_test
.with_nop_instruction(&mut proposal_cookie, &token_owner_record_cookie, Some(1))
.await
.err()
.unwrap();
// Assert
assert_eq!(err, GovernanceError::InvalidInstructionIndex.into());
}
#[tokio::test]
async fn test_insert_instruction_with_instruction_already_exists_error() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let governed_account_cookie = governance_test.with_governed_account().await;
let mut account_governance_cookie = governance_test
.with_account_governance(&realm_cookie, &governed_account_cookie)
.await
.unwrap();
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
let mut proposal_cookie = governance_test
.with_proposal(&token_owner_record_cookie, &mut account_governance_cookie)
.await
.unwrap();
governance_test
.with_nop_instruction(&mut proposal_cookie, &token_owner_record_cookie, None)
.await
.unwrap();
// Act
let err = governance_test
.with_nop_instruction(&mut proposal_cookie, &token_owner_record_cookie, Some(0))
.await
.err()
.unwrap();
// Assert
assert_eq!(err, GovernanceError::InstructionAlreadyExists.into());
}
#[tokio::test]
async fn test_insert_instruction_with_invalid_hold_up_time_error() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let governed_account_cookie = governance_test.with_governed_account().await;
let mut config = governance_test.get_default_governance_config();
config.min_instruction_hold_up_time = 100;
let mut account_governance_cookie = governance_test
.with_account_governance_using_config(&realm_cookie, &governed_account_cookie, &config)
.await
.unwrap();
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
let mut proposal_cookie = governance_test
.with_proposal(&token_owner_record_cookie, &mut account_governance_cookie)
.await
.unwrap();
// Act
let err = governance_test
.with_nop_instruction(&mut proposal_cookie, &token_owner_record_cookie, None)
.await
.err()
.unwrap();
// Assert
assert_eq!(
err,
GovernanceError::InstructionHoldUpTimeBelowRequiredMin.into()
);
}
#[tokio::test]
async fn test_insert_instruction_with_not_editable_proposal_error() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let governed_account_cookie = governance_test.with_governed_account().await;
let mut account_governance_cookie = governance_test
.with_account_governance(&realm_cookie, &governed_account_cookie)
.await
.unwrap();
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
let mut proposal_cookie = governance_test
.with_signed_off_proposal(&token_owner_record_cookie, &mut account_governance_cookie)
.await
.unwrap();
// Act
let err = governance_test
.with_nop_instruction(&mut proposal_cookie, &token_owner_record_cookie, None)
.await
.err()
.unwrap();
// Assert
assert_eq!(
err,
GovernanceError::InvalidStateCannotEditInstructions.into()
);
}
#[tokio::test]
async fn test_insert_instruction_with_owner_or_delegate_must_sign_error() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let governed_account_cookie = governance_test.with_governed_account().await;
let mut account_governance_cookie = governance_test
.with_account_governance(&realm_cookie, &governed_account_cookie)
.await
.unwrap();
let mut token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
let mut proposal_cookie = governance_test
.with_proposal(&token_owner_record_cookie, &mut account_governance_cookie)
.await
.unwrap();
let token_owner_record_cookie2 = governance_test
.with_council_token_deposit(&realm_cookie)
.await;
token_owner_record_cookie.token_owner = token_owner_record_cookie2.token_owner;
// Act
let err = governance_test
.with_nop_instruction(&mut proposal_cookie, &token_owner_record_cookie, None)
.await
.err()
.unwrap();
// Assert
assert_eq!(
err,
GovernanceError::GoverningTokenOwnerOrDelegateMustSign.into()
);
}
#[tokio::test]
async fn test_insert_instruction_with_invalid_governance_for_proposal_error() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let governed_account_cookie = governance_test.with_governed_account().await;
let mut account_governance_cookie = governance_test
.with_account_governance(&realm_cookie, &governed_account_cookie)
.await
.unwrap();
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
let mut proposal_cookie = governance_test
.with_proposal(&token_owner_record_cookie, &mut account_governance_cookie)
.await
.unwrap();
    // Try to maliciously use a different governance account with the proposal
let governed_account_cookie2 = governance_test.with_governed_account().await;
let account_governance_cookie2 = governance_test
.with_account_governance(&realm_cookie, &governed_account_cookie2)
.await
.unwrap();
proposal_cookie.account.governance = account_governance_cookie2.address;
let new_governance_config = governance_test.get_default_governance_config();
// Act
let err = governance_test
.with_set_governance_config_instruction(
&mut proposal_cookie,
&token_owner_record_cookie,
&new_governance_config,
)
.await
.err()
.unwrap();
// Assert
assert_eq!(err, GovernanceError::InvalidGovernanceForProposal.into());
}
| 30.683432 | 95 | 0.710828 |
18aefa62765d8703a25745f1001f74cc4380b701
| 4,943 |
//! This crate defines a set of simple traits that describe the functionality of
//! [block ciphers][1].
//!
//! # About block ciphers
//!
//! Block ciphers are keyed, deterministic permutations of a fixed-sized input
//! "block" providing a reversible transformation to/from an encrypted output.
//! They are one of the fundamental structural components of [symmetric cryptography][2].
//!
//! [1]: https://en.wikipedia.org/wiki/Block_cipher
//! [2]: https://en.wikipedia.org/wiki/Symmetric-key_algorithm
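//!
//! # Example
//!
//! A toy (insecure) XOR "cipher" sketch showing how these traits fit together; the
//! crate path, struct, and key handling here are illustrative assumptions only:
//!
//! ```ignore
//! use block_cipher::{BlockCipher, NewBlockCipher, Key, Block};
//! use block_cipher::consts::{U1, U16};
//!
//! struct XorCipher { key: u8 }
//!
//! impl NewBlockCipher for XorCipher {
//!     type KeySize = U1;
//!     fn new(key: &Key<Self>) -> Self { XorCipher { key: key[0] } }
//! }
//!
//! impl BlockCipher for XorCipher {
//!     type BlockSize = U16; // operates on 16-byte blocks
//!     type ParBlocks = U1;  // no instruction-level parallelism
//!     fn encrypt_block(&self, block: &mut Block<Self>) {
//!         for b in block.iter_mut() { *b ^= self.key; }
//!     }
//!     fn decrypt_block(&self, block: &mut Block<Self>) {
//!         // XOR is its own inverse.
//!         self.encrypt_block(block)
//!     }
//! }
//! ```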
#![no_std]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(html_logo_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo_small.png")]
#![forbid(unsafe_code)]
#![warn(missing_docs, rust_2018_idioms)]
#[cfg(feature = "std")]
extern crate std;
#[cfg(feature = "dev")]
#[cfg_attr(docsrs, doc(cfg(feature = "dev")))]
pub mod dev;
mod errors;
pub use crate::errors::InvalidKeyLength;
pub use generic_array::{self, typenum::consts};
use generic_array::typenum::Unsigned;
use generic_array::{ArrayLength, GenericArray};
/// Key for an algorithm that implements [`NewBlockCipher`].
pub type Key<B> = GenericArray<u8, <B as NewBlockCipher>::KeySize>;
/// Block on which a [`BlockCipher`] operates.
pub type Block<B> = GenericArray<u8, <B as BlockCipher>::BlockSize>;
/// Blocks being acted over in parallel.
pub type ParBlocks<B> = GenericArray<Block<B>, <B as BlockCipher>::ParBlocks>;
/// Instantiate a [`BlockCipher`] algorithm.
pub trait NewBlockCipher: Sized {
    /// Key size in bytes with which the cipher is guaranteed to be initialized.
type KeySize: ArrayLength<u8>;
/// Create new block cipher instance from key with fixed size.
fn new(key: &Key<Self>) -> Self;
/// Create new block cipher instance from key with variable size.
///
    /// The default implementation will accept only keys with length equal to
    /// `KeySize`, but some ciphers can accept a range of key lengths.
fn new_varkey(key: &[u8]) -> Result<Self, InvalidKeyLength> {
if key.len() != Self::KeySize::to_usize() {
Err(InvalidKeyLength)
} else {
Ok(Self::new(GenericArray::from_slice(key)))
}
}
}
/// The trait which defines in-place encryption and decryption
/// over single block or several blocks in parallel.
pub trait BlockCipher {
/// Size of the block in bytes
type BlockSize: ArrayLength<u8>;
    /// Number of blocks which can be processed in parallel by
    /// the cipher implementation.
type ParBlocks: ArrayLength<Block<Self>>;
/// Encrypt block in-place
fn encrypt_block(&self, block: &mut Block<Self>);
/// Decrypt block in-place
fn decrypt_block(&self, block: &mut Block<Self>);
/// Encrypt several blocks in parallel using instruction level parallelism
/// if possible.
///
    /// If `ParBlocks` equals 1, this is equivalent to `encrypt_block`.
#[inline]
fn encrypt_blocks(&self, blocks: &mut ParBlocks<Self>) {
for block in blocks.iter_mut() {
self.encrypt_block(block);
}
}
/// Decrypt several blocks in parallel using instruction level parallelism
/// if possible.
///
    /// If `ParBlocks` equals 1, this is equivalent to `decrypt_block`.
#[inline]
fn decrypt_blocks(&self, blocks: &mut ParBlocks<Self>) {
for block in blocks.iter_mut() {
self.decrypt_block(block);
}
}
}
/// Stateful block cipher which permits `&mut self` access.
///
/// The main use case for this trait is hardware encryption engines which
/// require `&mut self` access to an underlying hardware peripheral.
pub trait BlockCipherMut {
/// Size of the block in bytes
type BlockSize: ArrayLength<u8>;
/// Encrypt block in-place
fn encrypt_block(&mut self, block: &mut GenericArray<u8, Self::BlockSize>);
/// Decrypt block in-place
fn decrypt_block(&mut self, block: &mut GenericArray<u8, Self::BlockSize>);
}
impl<Alg: BlockCipher> BlockCipherMut for Alg {
type BlockSize = Alg::BlockSize;
#[inline]
fn encrypt_block(&mut self, block: &mut GenericArray<u8, Self::BlockSize>) {
<Self as BlockCipher>::encrypt_block(self, block);
}
#[inline]
fn decrypt_block(&mut self, block: &mut GenericArray<u8, Self::BlockSize>) {
<Self as BlockCipher>::decrypt_block(self, block);
}
}
impl<Alg: BlockCipher> BlockCipher for &Alg {
type BlockSize = Alg::BlockSize;
type ParBlocks = Alg::ParBlocks;
#[inline]
fn encrypt_block(&self, block: &mut Block<Self>) {
Alg::encrypt_block(self, block);
}
#[inline]
fn decrypt_block(&self, block: &mut Block<Self>) {
Alg::decrypt_block(self, block);
}
#[inline]
fn encrypt_blocks(&self, blocks: &mut ParBlocks<Self>) {
Alg::encrypt_blocks(self, blocks);
}
#[inline]
fn decrypt_blocks(&self, blocks: &mut ParBlocks<Self>) {
Alg::decrypt_blocks(self, blocks);
}
}
| 31.685897 | 98 | 0.669027 |
914985be53565cc4c454102dfd21ca111f8019dd
| 2,945 |
use geo::Pos;
use dice;
use std::cell::Cell;
use std::num::FromPrimitive;
pub mod behavior;
pub fn rnd() -> Mob {
let rnd = dice::rand(1, 4);
let rx = dice::rand(1, 80); //FIXME
let ry = dice::rand(1, 50);
match FromPrimitive::from_int(rnd as isize) {
Some(kind) => match kind {
Kind::Canine => Mob {
name: "Fido".to_string(),
pos: Cell::new(Pos(rx, ry)),
kind: kind,
hp: Cell::new(7), str: 2,
ap: 1, isize: 7, con: 7, dex: 7, display_char: kind.to_char(), behavior: behavior::Kind::Animalic
},
Kind::Hobgoblin => Mob {
name: "Gardhur".to_string(),
pos: Cell::new(Pos(rx, ry)),
kind: kind,
hp: Cell::new(12), str: 5,
ap: 1, isize: 7, con: 7, dex: 7, display_char: kind.to_char(), behavior: behavior::Kind::Animalic
},
Kind::Orc => Mob {
name: "Gardhur".to_string(),
pos: Cell::new(Pos(rx, ry)),
kind: kind,
hp: Cell::new(15), str: 3,
ap: 1, isize: 7, con: 7, dex: 7, display_char: kind.to_char(), behavior: behavior::Kind::Animalic
},
_ => panic!("Can't spawn a hero")
},
None => panic!("Invalid monster kind")
}
}
#[allow(dead_code)]
#[derive(Clone, Show)]
pub struct Mob {
pub pos: Cell<Pos>,
pub display_char: char,
pub name: String,
pub kind: Kind,
pub str: u32,
isize: u32,
con: u32,
dex: u32,
ap: u32,
pub hp: Cell<u32>,
pub behavior: behavior::Kind
}
impl<'a> Mob {
pub fn new(name: &'a str, kind: Kind, x: i32, y: i32, behavior: behavior::Kind) -> Mob {
Mob {
name: name.to_string(),
pos: Cell::new(Pos(x, y)),
kind: kind,
ap: 1,
hp: Cell::new(20),
str: 7,
isize: 7,
con: 7,
dex: 7,
display_char: kind.to_char(),
behavior: behavior
}
}
pub fn pos(&self) -> Pos {
self.pos.get()
}
pub fn goto(&self, pos: Pos) {
self.pos.set(pos);
}
pub fn inc_hp(&self, inc: u32) {
self.hp.set(self.hp.get() + inc);
}
pub fn dec_hp(&self, dec: u32) {
let hp = self.hp.get();
if dec > hp {
self.hp.set(0)
} else {
self.hp.set(hp - dec);
}
}
}
#[derive(Copy, Clone, Show, PartialEq, FromPrimitive)]
pub enum Kind {
Hero, //= '@' as isize,
Canine, //= 'C' as isize,
Hobgoblin, //= 'h' as isize
Orc
}
impl Kind {
pub fn to_char(&self) -> char {
match *self {
Kind::Hero => '@',
Kind::Canine => 'C',
Kind::Hobgoblin => 'h',
Kind::Orc => 'O'
}
}
}
| 24.957627 | 113 | 0.45365 |
fc352e485a8bebce885af5436ca1dca1f73085ae
| 540,059 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// <p>Properties describing a Realtime script.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateScript</a> |
/// <a>ListScripts</a> |
/// <a>DescribeScript</a> |
/// <a>UpdateScript</a> |
/// <a>DeleteScript</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Script {
/// <p>A unique identifier for the Realtime script</p>
pub script_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift script resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift script ARN, the resource ID matches the
/// <i>ScriptId</i> value.</p>
pub script_arn: std::option::Option<std::string::String>,
/// <p>A descriptive label that is associated with a script. Script names do not need to be unique.</p>
pub name: std::option::Option<std::string::String>,
/// <p>Version information that is associated with a build or script. Version strings do not need to be unique.</p>
pub version: std::option::Option<std::string::String>,
/// <p>The file size of the uploaded Realtime script, expressed in bytes. When files are
/// uploaded from an S3 location, this value remains at "0".</p>
pub size_on_disk: std::option::Option<i64>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
/// <p>The location in Amazon S3 where build or script files are stored for access by Amazon GameLift. This
/// location is specified in <a>CreateBuild</a>, <a>CreateScript</a>,
/// and <a>UpdateScript</a> requests. </p>
pub storage_location: std::option::Option<crate::model::S3Location>,
}
impl std::fmt::Debug for Script {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Script");
formatter.field("script_id", &self.script_id);
formatter.field("script_arn", &self.script_arn);
formatter.field("name", &self.name);
formatter.field("version", &self.version);
formatter.field("size_on_disk", &self.size_on_disk);
formatter.field("creation_time", &self.creation_time);
formatter.field("storage_location", &self.storage_location);
formatter.finish()
}
}
/// See [`Script`](crate::model::Script)
pub mod script {
/// A builder for [`Script`](crate::model::Script)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) script_id: std::option::Option<std::string::String>,
pub(crate) script_arn: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) version: std::option::Option<std::string::String>,
pub(crate) size_on_disk: std::option::Option<i64>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
pub(crate) storage_location: std::option::Option<crate::model::S3Location>,
}
impl Builder {
/// <p>A unique identifier for the Realtime script</p>
pub fn script_id(mut self, input: impl Into<std::string::String>) -> Self {
self.script_id = Some(input.into());
self
}
pub fn set_script_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.script_id = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift script resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift script ARN, the resource ID matches the
/// <i>ScriptId</i> value.</p>
pub fn script_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.script_arn = Some(input.into());
self
}
pub fn set_script_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.script_arn = input;
self
}
/// <p>A descriptive label that is associated with a script. Script names do not need to be unique.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>Version information that is associated with a build or script. Version strings do not need to be unique.</p>
pub fn version(mut self, input: impl Into<std::string::String>) -> Self {
self.version = Some(input.into());
self
}
pub fn set_version(mut self, input: std::option::Option<std::string::String>) -> Self {
self.version = input;
self
}
/// <p>The file size of the uploaded Realtime script, expressed in bytes. When files are
/// uploaded from an S3 location, this value remains at "0".</p>
pub fn size_on_disk(mut self, input: i64) -> Self {
self.size_on_disk = Some(input);
self
}
pub fn set_size_on_disk(mut self, input: std::option::Option<i64>) -> Self {
self.size_on_disk = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// <p>The location in Amazon S3 where build or script files are stored for access by Amazon GameLift. This
/// location is specified in <a>CreateBuild</a>, <a>CreateScript</a>,
/// and <a>UpdateScript</a> requests. </p>
pub fn storage_location(mut self, input: crate::model::S3Location) -> Self {
self.storage_location = Some(input);
self
}
pub fn set_storage_location(
mut self,
input: std::option::Option<crate::model::S3Location>,
) -> Self {
self.storage_location = input;
self
}
/// Consumes the builder and constructs a [`Script`](crate::model::Script)
pub fn build(self) -> crate::model::Script {
crate::model::Script {
script_id: self.script_id,
script_arn: self.script_arn,
name: self.name,
version: self.version,
size_on_disk: self.size_on_disk,
creation_time: self.creation_time,
storage_location: self.storage_location,
}
}
}
}
impl Script {
/// Creates a new builder-style object to manufacture [`Script`](crate::model::Script)
pub fn builder() -> crate::model::script::Builder {
crate::model::script::Builder::default()
}
}
/// <p>The location in Amazon S3 where build or script files are stored for access by Amazon GameLift. This
/// location is specified in <a>CreateBuild</a>, <a>CreateScript</a>,
/// and <a>UpdateScript</a> requests. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct S3Location {
/// <p>An Amazon S3 bucket identifier. This is the name of the S3 bucket.</p>
/// <note>
/// <p>GameLift currently does not support uploading from Amazon S3 buckets with names that contain a dot (.).</p>
/// </note>
pub bucket: std::option::Option<std::string::String>,
/// <p>The name of the zip file that contains the build files or script files. </p>
pub key: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) for an IAM role that
/// allows Amazon GameLift to access the S3 bucket.</p>
pub role_arn: std::option::Option<std::string::String>,
/// <p>The version of the file, if object versioning is turned on for the bucket. Amazon GameLift uses
/// this information when retrieving files from an S3 bucket that you own. Use this
/// parameter to specify a specific version of the file. If not set, the latest version of
/// the file is retrieved. </p>
pub object_version: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for S3Location {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("S3Location");
formatter.field("bucket", &self.bucket);
formatter.field("key", &self.key);
formatter.field("role_arn", &self.role_arn);
formatter.field("object_version", &self.object_version);
formatter.finish()
}
}
/// See [`S3Location`](crate::model::S3Location)
pub mod s3_location {
/// A builder for [`S3Location`](crate::model::S3Location)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) bucket: std::option::Option<std::string::String>,
pub(crate) key: std::option::Option<std::string::String>,
pub(crate) role_arn: std::option::Option<std::string::String>,
pub(crate) object_version: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>An Amazon S3 bucket identifier. This is the name of the S3 bucket.</p>
/// <note>
/// <p>GameLift currently does not support uploading from Amazon S3 buckets with names that contain a dot (.).</p>
/// </note>
pub fn bucket(mut self, input: impl Into<std::string::String>) -> Self {
self.bucket = Some(input.into());
self
}
pub fn set_bucket(mut self, input: std::option::Option<std::string::String>) -> Self {
self.bucket = input;
self
}
/// <p>The name of the zip file that contains the build files or script files. </p>
pub fn key(mut self, input: impl Into<std::string::String>) -> Self {
self.key = Some(input.into());
self
}
pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self {
self.key = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) for an IAM role that
/// allows Amazon GameLift to access the S3 bucket.</p>
pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.role_arn = Some(input.into());
self
}
pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.role_arn = input;
self
}
/// <p>The version of the file, if object versioning is turned on for the bucket. Amazon GameLift uses
/// this information when retrieving files from an S3 bucket that you own. Use this
/// parameter to specify a specific version of the file. If not set, the latest version of
/// the file is retrieved. </p>
pub fn object_version(mut self, input: impl Into<std::string::String>) -> Self {
self.object_version = Some(input.into());
self
}
pub fn set_object_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.object_version = input;
self
}
/// Consumes the builder and constructs a [`S3Location`](crate::model::S3Location)
pub fn build(self) -> crate::model::S3Location {
crate::model::S3Location {
bucket: self.bucket,
key: self.key,
role_arn: self.role_arn,
object_version: self.object_version,
}
}
}
}
impl S3Location {
/// Creates a new builder-style object to manufacture [`S3Location`](crate::model::S3Location)
pub fn builder() -> crate::model::s3_location::Builder {
crate::model::s3_location::Builder::default()
}
}
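// Illustrative usage sketch (not part of the generated API): pointing an `S3Location` at a
// zip file in an S3 bucket. The bucket name, object key, and role ARN are placeholders.
#[cfg(test)]
mod s3_location_builder_usage {
    #[test]
    fn builds_s3_location() {
        let location = crate::model::S3Location::builder()
            .bucket("my-build-bucket")
            .key("MyGame/build.zip")
            .role_arn("arn:aws:iam::123456789012:role/GameLiftAccess")
            .build();
        assert_eq!(location.bucket.as_deref(), Some("my-build-bucket"));
        // `object_version` was never set, so the latest version of the object would be used.
        assert!(location.object_version.is_none());
    }
}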
/// <p>A collection of server process configurations that describe the set of processes to
/// run on each instance in a fleet. Server processes run either an executable in a custom
/// game build or a Realtime Servers script. GameLift launches the configured processes, manages their
/// life cycle, and replaces them as needed. Each instance checks regularly for an updated
/// runtime configuration. </p>
/// <p>A GameLift instance is limited to 50 processes running concurrently. To calculate the
/// total number of processes in a runtime configuration, add the values of the
/// <code>ConcurrentExecutions</code> parameter for each <a>ServerProcess</a>. Learn more about <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-multiprocess.html"> Running Multiple
/// Processes on a Fleet</a>.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeRuntimeConfiguration</a> | <a>UpdateRuntimeConfiguration</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RuntimeConfiguration {
/// <p>A collection of server process configurations that identify what server processes to
/// run on each instance in a fleet.</p>
pub server_processes: std::option::Option<std::vec::Vec<crate::model::ServerProcess>>,
/// <p>The number of game sessions in status <code>ACTIVATING</code> to allow on an instance.
/// This setting limits the instance resources that can be used for new game activations at
/// any one time.</p>
pub max_concurrent_game_session_activations: std::option::Option<i32>,
/// <p>The maximum amount of time (in seconds) allowed to launch a new game session and have
/// it report ready to host players. During this time, the game session is in status
/// <code>ACTIVATING</code>. If the game session does not become active before the
/// timeout, it is ended and the game session status is changed to
/// <code>TERMINATED</code>.</p>
pub game_session_activation_timeout_seconds: std::option::Option<i32>,
}
impl std::fmt::Debug for RuntimeConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RuntimeConfiguration");
formatter.field("server_processes", &self.server_processes);
formatter.field(
"max_concurrent_game_session_activations",
&self.max_concurrent_game_session_activations,
);
formatter.field(
"game_session_activation_timeout_seconds",
&self.game_session_activation_timeout_seconds,
);
formatter.finish()
}
}
/// See [`RuntimeConfiguration`](crate::model::RuntimeConfiguration)
pub mod runtime_configuration {
/// A builder for [`RuntimeConfiguration`](crate::model::RuntimeConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) server_processes:
std::option::Option<std::vec::Vec<crate::model::ServerProcess>>,
pub(crate) max_concurrent_game_session_activations: std::option::Option<i32>,
pub(crate) game_session_activation_timeout_seconds: std::option::Option<i32>,
}
impl Builder {
        /// <p>A collection of server process configurations that identify what server processes to
        /// run on each instance in a fleet.</p>
        /// <p>Appends a single <code>ServerProcess</code> to the collection. To replace the whole
        /// collection, use <code>set_server_processes</code>.</p>
        pub fn server_processes(mut self, input: impl Into<crate::model::ServerProcess>) -> Self {
let mut v = self.server_processes.unwrap_or_default();
v.push(input.into());
self.server_processes = Some(v);
self
}
pub fn set_server_processes(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ServerProcess>>,
) -> Self {
self.server_processes = input;
self
}
/// <p>The number of game sessions in status <code>ACTIVATING</code> to allow on an instance.
/// This setting limits the instance resources that can be used for new game activations at
/// any one time.</p>
pub fn max_concurrent_game_session_activations(mut self, input: i32) -> Self {
self.max_concurrent_game_session_activations = Some(input);
self
}
pub fn set_max_concurrent_game_session_activations(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.max_concurrent_game_session_activations = input;
self
}
/// <p>The maximum amount of time (in seconds) allowed to launch a new game session and have
/// it report ready to host players. During this time, the game session is in status
/// <code>ACTIVATING</code>. If the game session does not become active before the
/// timeout, it is ended and the game session status is changed to
/// <code>TERMINATED</code>.</p>
pub fn game_session_activation_timeout_seconds(mut self, input: i32) -> Self {
self.game_session_activation_timeout_seconds = Some(input);
self
}
pub fn set_game_session_activation_timeout_seconds(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.game_session_activation_timeout_seconds = input;
self
}
/// Consumes the builder and constructs a [`RuntimeConfiguration`](crate::model::RuntimeConfiguration)
pub fn build(self) -> crate::model::RuntimeConfiguration {
crate::model::RuntimeConfiguration {
server_processes: self.server_processes,
max_concurrent_game_session_activations: self
.max_concurrent_game_session_activations,
game_session_activation_timeout_seconds: self
.game_session_activation_timeout_seconds,
}
}
}
}
impl RuntimeConfiguration {
/// Creates a new builder-style object to manufacture [`RuntimeConfiguration`](crate::model::RuntimeConfiguration)
pub fn builder() -> crate::model::runtime_configuration::Builder {
crate::model::runtime_configuration::Builder::default()
}
}
/// <p>A set of instructions for launching server processes on each instance in a fleet.
/// Server processes run either an executable in a custom game build or a Realtime Servers script.
/// Server process configurations are part of a fleet's <a>RuntimeConfiguration</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ServerProcess {
/// <p>The location of a game build executable or the Realtime script file that contains the
/// <code>Init()</code> function. Game builds and Realtime scripts are installed on
/// instances at the root: </p>
/// <ul>
/// <li>
/// <p>Windows (custom game builds only): <code>C:\game</code>. Example:
/// "<code>C:\game\MyGame\server.exe</code>" </p>
/// </li>
/// <li>
/// <p>Linux: <code>/local/game</code>. Examples: "<code>/local/game/MyGame/server.exe</code>" or
/// "<code>/local/game/MyRealtimeScript.js</code>"</p>
/// </li>
/// </ul>
pub launch_path: std::option::Option<std::string::String>,
/// <p>An optional list of parameters to pass to the server executable or Realtime script on
/// launch.</p>
pub parameters: std::option::Option<std::string::String>,
/// <p>The number of server processes using this configuration that run concurrently on each
/// instance.</p>
pub concurrent_executions: std::option::Option<i32>,
}
impl std::fmt::Debug for ServerProcess {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ServerProcess");
formatter.field("launch_path", &self.launch_path);
formatter.field("parameters", &self.parameters);
formatter.field("concurrent_executions", &self.concurrent_executions);
formatter.finish()
}
}
/// See [`ServerProcess`](crate::model::ServerProcess)
pub mod server_process {
/// A builder for [`ServerProcess`](crate::model::ServerProcess)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) launch_path: std::option::Option<std::string::String>,
pub(crate) parameters: std::option::Option<std::string::String>,
pub(crate) concurrent_executions: std::option::Option<i32>,
}
impl Builder {
/// <p>The location of a game build executable or the Realtime script file that contains the
/// <code>Init()</code> function. Game builds and Realtime scripts are installed on
/// instances at the root: </p>
/// <ul>
/// <li>
/// <p>Windows (custom game builds only): <code>C:\game</code>. Example:
/// "<code>C:\game\MyGame\server.exe</code>" </p>
/// </li>
/// <li>
/// <p>Linux: <code>/local/game</code>. Examples: "<code>/local/game/MyGame/server.exe</code>" or
/// "<code>/local/game/MyRealtimeScript.js</code>"</p>
/// </li>
/// </ul>
pub fn launch_path(mut self, input: impl Into<std::string::String>) -> Self {
self.launch_path = Some(input.into());
self
}
pub fn set_launch_path(mut self, input: std::option::Option<std::string::String>) -> Self {
self.launch_path = input;
self
}
/// <p>An optional list of parameters to pass to the server executable or Realtime script on
/// launch.</p>
pub fn parameters(mut self, input: impl Into<std::string::String>) -> Self {
self.parameters = Some(input.into());
self
}
pub fn set_parameters(mut self, input: std::option::Option<std::string::String>) -> Self {
self.parameters = input;
self
}
/// <p>The number of server processes using this configuration that run concurrently on each
/// instance.</p>
pub fn concurrent_executions(mut self, input: i32) -> Self {
self.concurrent_executions = Some(input);
self
}
pub fn set_concurrent_executions(mut self, input: std::option::Option<i32>) -> Self {
self.concurrent_executions = input;
self
}
/// Consumes the builder and constructs a [`ServerProcess`](crate::model::ServerProcess)
pub fn build(self) -> crate::model::ServerProcess {
crate::model::ServerProcess {
launch_path: self.launch_path,
parameters: self.parameters,
concurrent_executions: self.concurrent_executions,
}
}
}
}
impl ServerProcess {
/// Creates a new builder-style object to manufacture [`ServerProcess`](crate::model::ServerProcess)
pub fn builder() -> crate::model::server_process::Builder {
crate::model::server_process::Builder::default()
}
}
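// Illustrative usage sketch (not part of the generated API): a `RuntimeConfiguration` that
// launches two concurrent copies of a single server process on each instance. The launch
// path and timeout are placeholder values.
#[cfg(test)]
mod runtime_configuration_builder_usage {
    #[test]
    fn builds_runtime_configuration_with_one_process() {
        let process = crate::model::ServerProcess::builder()
            .launch_path("/local/game/MyGame/server.exe")
            .concurrent_executions(2)
            .build();
        // Each call to `server_processes` appends a single process configuration.
        let config = crate::model::RuntimeConfiguration::builder()
            .server_processes(process)
            .game_session_activation_timeout_seconds(300)
            .build();
        assert_eq!(config.server_processes.map(|p| p.len()), Some(1));
        assert_eq!(config.game_session_activation_timeout_seconds, Some(300));
    }
}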
/// <p>Guidelines for use with FlexMatch to match players into games. All matchmaking
/// requests must specify a matchmaking configuration.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct MatchmakingConfiguration {
/// <p>A unique identifier for the matchmaking configuration. This name is used to identify the configuration associated with a
/// matchmaking request or ticket.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::matchmakingconfiguration/<matchmaking configuration name></code>. In a GameLift configuration ARN, the resource ID matches the
/// <i>Name</i> value.</p>
pub configuration_arn: std::option::Option<std::string::String>,
    /// <p>A descriptive label that is associated with a matchmaking configuration.</p>
pub description: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::gamesessionqueue/<queue name></code>. Queues can be located in any Region. Queues are used to start new
/// GameLift-hosted game sessions for matches that are created with this matchmaking
/// configuration. This property is not set when <code>FlexMatchMode</code> is set to
/// <code>STANDALONE</code>.</p>
pub game_session_queue_arns: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The maximum duration, in seconds, that a matchmaking ticket can remain in process
/// before timing out. Requests that fail due to timing out can be resubmitted as
/// needed.</p>
pub request_timeout_seconds: std::option::Option<i32>,
/// <p>The length of time (in seconds) to wait for players to accept a proposed match, if
/// acceptance is required. If any player rejects the match or fails to accept before the
/// timeout, the ticket continues to look for an acceptable match.</p>
pub acceptance_timeout_seconds: std::option::Option<i32>,
/// <p>A flag that indicates whether a match that was created with this configuration must be
/// accepted by the matched players. To require acceptance, set to TRUE. When this option is
/// enabled, matchmaking tickets use the status <code>REQUIRES_ACCEPTANCE</code> to indicate
/// when a completed potential match is waiting for player acceptance.</p>
pub acceptance_required: std::option::Option<bool>,
/// <p>A unique identifier for the matchmaking rule set to use with this configuration. A matchmaking configuration can only use
/// rule sets that are defined in the same Region.</p>
pub rule_set_name: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift matchmaking rule set resource that this configuration uses.</p>
pub rule_set_arn: std::option::Option<std::string::String>,
/// <p>An SNS topic ARN that is set up to receive matchmaking notifications.</p>
pub notification_target: std::option::Option<std::string::String>,
/// <p>The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies
/// a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used when <code>FlexMatchMode</code> is set to
/// <code>STANDALONE</code>.</p>
pub additional_player_count: std::option::Option<i32>,
/// <p>Information to attach to all events related to the matchmaking configuration. </p>
pub custom_event_data: std::option::Option<std::string::String>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
/// <p>A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the
/// <a>GameSession</a> object with a request to start a new game session (see <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession">Start a Game Session</a>). This information is added to the new <a>GameSession</a>
/// object that is created for a successful match. This parameter is not used when
/// <code>FlexMatchMode</code> is set to <code>STANDALONE</code>.</p>
pub game_properties: std::option::Option<std::vec::Vec<crate::model::GameProperty>>,
/// <p>A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the
/// <a>GameSession</a> object with a request to start a new game session (see <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession">Start a Game Session</a>). This information is added to the new <a>GameSession</a> object
/// that is created for a successful match. This parameter is not used when
/// <code>FlexMatchMode</code> is set to <code>STANDALONE</code>.</p>
pub game_session_data: std::option::Option<std::string::String>,
/// <p>The method used to backfill game sessions created with this matchmaking configuration.
/// MANUAL indicates that the game makes backfill requests or does not use the match
/// backfill feature. AUTOMATIC indicates that GameLift creates <a>StartMatchBackfill</a> requests whenever a game session has one or more open
/// slots. Learn more about manual and automatic backfill in <a href="https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html">Backfill existing games
/// with FlexMatch</a>. Automatic backfill is not available when
/// <code>FlexMatchMode</code> is set to <code>STANDALONE</code>.</p>
pub backfill_mode: std::option::Option<crate::model::BackfillMode>,
/// <p>Indicates whether this matchmaking configuration is being used with GameLift hosting or
/// as a standalone matchmaking solution. </p>
/// <ul>
/// <li>
/// <p>
/// <b>STANDALONE</b> - FlexMatch forms matches and returns
/// match information, including players and team assignments, in a
/// <a href="https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded">
/// MatchmakingSucceeded</a> event.</p>
/// </li>
/// <li>
/// <p>
/// <b>WITH_QUEUE</b> - FlexMatch forms matches and uses the specified GameLift queue to
/// start a game session for the match. </p>
/// </li>
/// </ul>
pub flex_match_mode: std::option::Option<crate::model::FlexMatchMode>,
}
impl std::fmt::Debug for MatchmakingConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("MatchmakingConfiguration");
formatter.field("name", &self.name);
formatter.field("configuration_arn", &self.configuration_arn);
formatter.field("description", &self.description);
formatter.field("game_session_queue_arns", &self.game_session_queue_arns);
formatter.field("request_timeout_seconds", &self.request_timeout_seconds);
formatter.field(
"acceptance_timeout_seconds",
&self.acceptance_timeout_seconds,
);
formatter.field("acceptance_required", &self.acceptance_required);
formatter.field("rule_set_name", &self.rule_set_name);
formatter.field("rule_set_arn", &self.rule_set_arn);
formatter.field("notification_target", &self.notification_target);
formatter.field("additional_player_count", &self.additional_player_count);
formatter.field("custom_event_data", &self.custom_event_data);
formatter.field("creation_time", &self.creation_time);
formatter.field("game_properties", &self.game_properties);
formatter.field("game_session_data", &self.game_session_data);
formatter.field("backfill_mode", &self.backfill_mode);
formatter.field("flex_match_mode", &self.flex_match_mode);
formatter.finish()
}
}
/// See [`MatchmakingConfiguration`](crate::model::MatchmakingConfiguration)
pub mod matchmaking_configuration {
/// A builder for [`MatchmakingConfiguration`](crate::model::MatchmakingConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) configuration_arn: std::option::Option<std::string::String>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) game_session_queue_arns: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) request_timeout_seconds: std::option::Option<i32>,
pub(crate) acceptance_timeout_seconds: std::option::Option<i32>,
pub(crate) acceptance_required: std::option::Option<bool>,
pub(crate) rule_set_name: std::option::Option<std::string::String>,
pub(crate) rule_set_arn: std::option::Option<std::string::String>,
pub(crate) notification_target: std::option::Option<std::string::String>,
pub(crate) additional_player_count: std::option::Option<i32>,
pub(crate) custom_event_data: std::option::Option<std::string::String>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
pub(crate) game_properties: std::option::Option<std::vec::Vec<crate::model::GameProperty>>,
pub(crate) game_session_data: std::option::Option<std::string::String>,
pub(crate) backfill_mode: std::option::Option<crate::model::BackfillMode>,
pub(crate) flex_match_mode: std::option::Option<crate::model::FlexMatchMode>,
}
impl Builder {
/// <p>A unique identifier for the matchmaking configuration. This name is used to identify the configuration associated with a
/// matchmaking request or ticket.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::matchmakingconfiguration/<matchmaking configuration name></code>. In a GameLift configuration ARN, the resource ID matches the
/// <i>Name</i> value.</p>
pub fn configuration_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.configuration_arn = Some(input.into());
self
}
pub fn set_configuration_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.configuration_arn = input;
self
}
        /// <p>A descriptive label that is associated with a matchmaking configuration.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
        /// <p>The Amazon Resource Names (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARNs</a>) of the GameLift game session queues that are used to start new
        /// game sessions for matches created with this matchmaking configuration. This property is not
        /// used when <code>FlexMatchMode</code> is set to <code>STANDALONE</code>.</p>
        /// <p>Appends a single ARN to the collection. To replace the whole collection, use
        /// <code>set_game_session_queue_arns</code>.</p>
        pub fn game_session_queue_arns(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.game_session_queue_arns.unwrap_or_default();
v.push(input.into());
self.game_session_queue_arns = Some(v);
self
}
pub fn set_game_session_queue_arns(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.game_session_queue_arns = input;
self
}
/// <p>The maximum duration, in seconds, that a matchmaking ticket can remain in process
/// before timing out. Requests that fail due to timing out can be resubmitted as
/// needed.</p>
pub fn request_timeout_seconds(mut self, input: i32) -> Self {
self.request_timeout_seconds = Some(input);
self
}
pub fn set_request_timeout_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.request_timeout_seconds = input;
self
}
/// <p>The length of time (in seconds) to wait for players to accept a proposed match, if
/// acceptance is required. If any player rejects the match or fails to accept before the
/// timeout, the ticket continues to look for an acceptable match.</p>
pub fn acceptance_timeout_seconds(mut self, input: i32) -> Self {
self.acceptance_timeout_seconds = Some(input);
self
}
pub fn set_acceptance_timeout_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.acceptance_timeout_seconds = input;
self
}
/// <p>A flag that indicates whether a match that was created with this configuration must be
/// accepted by the matched players. To require acceptance, set to TRUE. When this option is
/// enabled, matchmaking tickets use the status <code>REQUIRES_ACCEPTANCE</code> to indicate
/// when a completed potential match is waiting for player acceptance.</p>
pub fn acceptance_required(mut self, input: bool) -> Self {
self.acceptance_required = Some(input);
self
}
pub fn set_acceptance_required(mut self, input: std::option::Option<bool>) -> Self {
self.acceptance_required = input;
self
}
/// <p>A unique identifier for the matchmaking rule set to use with this configuration. A matchmaking configuration can only use
/// rule sets that are defined in the same Region.</p>
pub fn rule_set_name(mut self, input: impl Into<std::string::String>) -> Self {
self.rule_set_name = Some(input.into());
self
}
pub fn set_rule_set_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.rule_set_name = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift matchmaking rule set resource that this configuration uses.</p>
pub fn rule_set_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.rule_set_arn = Some(input.into());
self
}
pub fn set_rule_set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.rule_set_arn = input;
self
}
/// <p>An SNS topic ARN that is set up to receive matchmaking notifications.</p>
pub fn notification_target(mut self, input: impl Into<std::string::String>) -> Self {
self.notification_target = Some(input.into());
self
}
pub fn set_notification_target(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.notification_target = input;
self
}
/// <p>The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies
/// a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used when <code>FlexMatchMode</code> is set to
/// <code>STANDALONE</code>.</p>
pub fn additional_player_count(mut self, input: i32) -> Self {
self.additional_player_count = Some(input);
self
}
pub fn set_additional_player_count(mut self, input: std::option::Option<i32>) -> Self {
self.additional_player_count = input;
self
}
/// <p>Information to attach to all events related to the matchmaking configuration. </p>
pub fn custom_event_data(mut self, input: impl Into<std::string::String>) -> Self {
self.custom_event_data = Some(input.into());
self
}
pub fn set_custom_event_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.custom_event_data = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
        /// <p>A set of custom properties for a game session, formatted as key:value pairs. These properties
        /// are passed to a game server process with a request to start a new game session. This parameter
        /// is not used when <code>FlexMatchMode</code> is set to <code>STANDALONE</code>.</p>
        /// <p>Appends a single <code>GameProperty</code> to the collection. To replace the whole
        /// collection, use <code>set_game_properties</code>.</p>
        pub fn game_properties(mut self, input: impl Into<crate::model::GameProperty>) -> Self {
let mut v = self.game_properties.unwrap_or_default();
v.push(input.into());
self.game_properties = Some(v);
self
}
pub fn set_game_properties(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GameProperty>>,
) -> Self {
self.game_properties = input;
self
}
/// <p>A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the
/// <a>GameSession</a> object with a request to start a new game session (see <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession">Start a Game Session</a>). This information is added to the new <a>GameSession</a> object
/// that is created for a successful match. This parameter is not used when
/// <code>FlexMatchMode</code> is set to <code>STANDALONE</code>.</p>
pub fn game_session_data(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_data = Some(input.into());
self
}
pub fn set_game_session_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_data = input;
self
}
/// <p>The method used to backfill game sessions created with this matchmaking configuration.
/// MANUAL indicates that the game makes backfill requests or does not use the match
/// backfill feature. AUTOMATIC indicates that GameLift creates <a>StartMatchBackfill</a> requests whenever a game session has one or more open
/// slots. Learn more about manual and automatic backfill in <a href="https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html">Backfill existing games
/// with FlexMatch</a>. Automatic backfill is not available when
/// <code>FlexMatchMode</code> is set to <code>STANDALONE</code>.</p>
pub fn backfill_mode(mut self, input: crate::model::BackfillMode) -> Self {
self.backfill_mode = Some(input);
self
}
pub fn set_backfill_mode(
mut self,
input: std::option::Option<crate::model::BackfillMode>,
) -> Self {
self.backfill_mode = input;
self
}
/// <p>Indicates whether this matchmaking configuration is being used with GameLift hosting or
/// as a standalone matchmaking solution. </p>
/// <ul>
/// <li>
/// <p>
/// <b>STANDALONE</b> - FlexMatch forms matches and returns
/// match information, including players and team assignments, in a
/// <a href="https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded">
/// MatchmakingSucceeded</a> event.</p>
/// </li>
/// <li>
/// <p>
/// <b>WITH_QUEUE</b> - FlexMatch forms matches and uses the specified GameLift queue to
/// start a game session for the match. </p>
/// </li>
/// </ul>
pub fn flex_match_mode(mut self, input: crate::model::FlexMatchMode) -> Self {
self.flex_match_mode = Some(input);
self
}
pub fn set_flex_match_mode(
mut self,
input: std::option::Option<crate::model::FlexMatchMode>,
) -> Self {
self.flex_match_mode = input;
self
}
/// Consumes the builder and constructs a [`MatchmakingConfiguration`](crate::model::MatchmakingConfiguration)
pub fn build(self) -> crate::model::MatchmakingConfiguration {
crate::model::MatchmakingConfiguration {
name: self.name,
configuration_arn: self.configuration_arn,
description: self.description,
game_session_queue_arns: self.game_session_queue_arns,
request_timeout_seconds: self.request_timeout_seconds,
acceptance_timeout_seconds: self.acceptance_timeout_seconds,
acceptance_required: self.acceptance_required,
rule_set_name: self.rule_set_name,
rule_set_arn: self.rule_set_arn,
notification_target: self.notification_target,
additional_player_count: self.additional_player_count,
custom_event_data: self.custom_event_data,
creation_time: self.creation_time,
game_properties: self.game_properties,
game_session_data: self.game_session_data,
backfill_mode: self.backfill_mode,
flex_match_mode: self.flex_match_mode,
}
}
}
}
impl MatchmakingConfiguration {
/// Creates a new builder-style object to manufacture [`MatchmakingConfiguration`](crate::model::MatchmakingConfiguration)
pub fn builder() -> crate::model::matchmaking_configuration::Builder {
crate::model::matchmaking_configuration::Builder::default()
}
}
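// Illustrative usage sketch (not part of the generated API): a minimal standalone matchmaking
// configuration. The configuration name and rule set name are placeholders, and only a few of
// the available fields are set.
#[cfg(test)]
mod matchmaking_configuration_builder_usage {
    #[test]
    fn builds_standalone_configuration() {
        let config = crate::model::MatchmakingConfiguration::builder()
            .name("my-matchmaker")
            .rule_set_name("my-rule-set")
            .request_timeout_seconds(120)
            .acceptance_required(false)
            .flex_match_mode(crate::model::FlexMatchMode::Standalone)
            .build();
        assert_eq!(config.name.as_deref(), Some("my-matchmaker"));
        assert_eq!(
            config.flex_match_mode,
            Some(crate::model::FlexMatchMode::Standalone)
        );
    }
}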
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum FlexMatchMode {
Standalone,
WithQueue,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for FlexMatchMode {
fn from(s: &str) -> Self {
match s {
"STANDALONE" => FlexMatchMode::Standalone,
"WITH_QUEUE" => FlexMatchMode::WithQueue,
other => FlexMatchMode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for FlexMatchMode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(FlexMatchMode::from(s))
}
}
impl FlexMatchMode {
pub fn as_str(&self) -> &str {
match self {
FlexMatchMode::Standalone => "STANDALONE",
FlexMatchMode::WithQueue => "WITH_QUEUE",
FlexMatchMode::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["STANDALONE", "WITH_QUEUE"]
}
}
impl AsRef<str> for FlexMatchMode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
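// Illustrative usage sketch (not part of the generated API): converting between the
// `FlexMatchMode` enum and its wire string. The same pattern applies to the other
// non-exhaustive enums in this module, such as `BackfillMode` and `PriorityType`.
#[cfg(test)]
mod flex_match_mode_conversion_usage {
    #[test]
    fn round_trips_known_and_unknown_values() {
        use crate::model::FlexMatchMode;
        assert_eq!(FlexMatchMode::from("WITH_QUEUE"), FlexMatchMode::WithQueue);
        assert_eq!(FlexMatchMode::WithQueue.as_str(), "WITH_QUEUE");
        // Values added to the service after this code was generated are preserved, not lost.
        assert_eq!(
            FlexMatchMode::from("SOME_FUTURE_MODE"),
            FlexMatchMode::Unknown("SOME_FUTURE_MODE".to_owned())
        );
    }
}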
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum BackfillMode {
Automatic,
Manual,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for BackfillMode {
fn from(s: &str) -> Self {
match s {
"AUTOMATIC" => BackfillMode::Automatic,
"MANUAL" => BackfillMode::Manual,
other => BackfillMode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for BackfillMode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(BackfillMode::from(s))
}
}
impl BackfillMode {
pub fn as_str(&self) -> &str {
match self {
BackfillMode::Automatic => "AUTOMATIC",
BackfillMode::Manual => "MANUAL",
BackfillMode::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["AUTOMATIC", "MANUAL"]
}
}
impl AsRef<str> for BackfillMode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Set of key-value pairs that contain information about a game session. When included in
/// a game session request, these properties communicate details to be used when setting up
/// the new game session. For example, a game property might specify a game mode, level, or
/// map. Game properties are passed to the game server process when initiating a new game
/// session. For more information, see the <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-client-api.html#gamelift-sdk-client-api-create"> GameLift Developer Guide</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameProperty {
/// <p>The game property identifier.</p>
pub key: std::option::Option<std::string::String>,
/// <p>The game property value.</p>
pub value: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GameProperty {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameProperty");
formatter.field("key", &self.key);
formatter.field("value", &self.value);
formatter.finish()
}
}
/// See [`GameProperty`](crate::model::GameProperty)
pub mod game_property {
/// A builder for [`GameProperty`](crate::model::GameProperty)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) key: std::option::Option<std::string::String>,
pub(crate) value: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The game property identifier.</p>
pub fn key(mut self, input: impl Into<std::string::String>) -> Self {
self.key = Some(input.into());
self
}
pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self {
self.key = input;
self
}
/// <p>The game property value.</p>
pub fn value(mut self, input: impl Into<std::string::String>) -> Self {
self.value = Some(input.into());
self
}
pub fn set_value(mut self, input: std::option::Option<std::string::String>) -> Self {
self.value = input;
self
}
/// Consumes the builder and constructs a [`GameProperty`](crate::model::GameProperty)
pub fn build(self) -> crate::model::GameProperty {
crate::model::GameProperty {
key: self.key,
value: self.value,
}
}
}
}
impl GameProperty {
/// Creates a new builder-style object to manufacture [`GameProperty`](crate::model::GameProperty)
pub fn builder() -> crate::model::game_property::Builder {
crate::model::game_property::Builder::default()
}
}
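// Illustrative usage sketch (not part of the generated API): a single game property expressed
// as a key:value pair. The key and value are placeholders for game-defined settings.
#[cfg(test)]
mod game_property_builder_usage {
    #[test]
    fn builds_key_value_pair() {
        let property = crate::model::GameProperty::builder()
            .key("gameMode")
            .value("brawl")
            .build();
        assert_eq!(property.key.as_deref(), Some("gameMode"));
        assert_eq!(property.value.as_deref(), Some("brawl"));
    }
}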
/// <p>Configuration for a game session placement mechanism that processes requests for new
/// game sessions. A queue can be used on its own or as part of a matchmaking
/// solution.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateGameSessionQueue</a> | <a>DescribeGameSessionQueues</a> | <a>UpdateGameSessionQueue</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameSessionQueue {
    /// <p>A descriptive label that is associated with a game session queue. Queue names must be unique within each Region.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::gamesessionqueue/<queue name></code>. In a GameLift game session queue ARN, the resource ID matches the
/// <i>Name</i> value.</p>
pub game_session_queue_arn: std::option::Option<std::string::String>,
/// <p>The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a <code>TIMED_OUT</code> status.</p>
pub timeout_in_seconds: std::option::Option<i32>,
/// <p>A set of policies that act as a sliding cap on player latency. FleetIQ works to
/// deliver low latency for most players in a game session. These policies ensure that no
/// individual player can be placed into a game with unreasonably high latency. Use multiple
/// policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their
/// maximum allowed latency, starting with the lowest value. </p>
pub player_latency_policies:
std::option::Option<std::vec::Vec<crate::model::PlayerLatencyPolicy>>,
/// <p>A list of fleets and/or fleet aliases that can be used to fulfill game session placement requests in the queue.
/// Destinations are identified by either a fleet ARN or a fleet alias ARN, and are listed in order of placement preference.</p>
pub destinations: std::option::Option<std::vec::Vec<crate::model::GameSessionQueueDestination>>,
/// <p>A list of locations where a queue is allowed to place new game sessions. Locations
/// are specified in the form of AWS Region codes, such as <code>us-west-2</code>. If this parameter is
/// not set, game sessions can be placed in any queue location. </p>
pub filter_configuration: std::option::Option<crate::model::FilterConfiguration>,
/// <p>Custom settings to use when prioritizing destinations and locations for game session placements. This
/// configuration replaces the FleetIQ default prioritization process. Priority types that are not explicitly
/// named will be automatically applied at the end of the prioritization process. </p>
pub priority_configuration: std::option::Option<crate::model::PriorityConfiguration>,
/// <p>
/// Information that is added to all events that are related to this game session queue.
/// </p>
pub custom_event_data: std::option::Option<std::string::String>,
/// <p>An SNS topic ARN that is set up to receive game session placement notifications. See <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/queue-notification.html">
/// Setting up notifications for game session placement</a>.</p>
pub notification_target: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GameSessionQueue {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameSessionQueue");
formatter.field("name", &self.name);
formatter.field("game_session_queue_arn", &self.game_session_queue_arn);
formatter.field("timeout_in_seconds", &self.timeout_in_seconds);
formatter.field("player_latency_policies", &self.player_latency_policies);
formatter.field("destinations", &self.destinations);
formatter.field("filter_configuration", &self.filter_configuration);
formatter.field("priority_configuration", &self.priority_configuration);
formatter.field("custom_event_data", &self.custom_event_data);
formatter.field("notification_target", &self.notification_target);
formatter.finish()
}
}
/// See [`GameSessionQueue`](crate::model::GameSessionQueue)
pub mod game_session_queue {
/// A builder for [`GameSessionQueue`](crate::model::GameSessionQueue)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) game_session_queue_arn: std::option::Option<std::string::String>,
pub(crate) timeout_in_seconds: std::option::Option<i32>,
pub(crate) player_latency_policies:
std::option::Option<std::vec::Vec<crate::model::PlayerLatencyPolicy>>,
pub(crate) destinations:
std::option::Option<std::vec::Vec<crate::model::GameSessionQueueDestination>>,
pub(crate) filter_configuration: std::option::Option<crate::model::FilterConfiguration>,
pub(crate) priority_configuration: std::option::Option<crate::model::PriorityConfiguration>,
pub(crate) custom_event_data: std::option::Option<std::string::String>,
pub(crate) notification_target: std::option::Option<std::string::String>,
}
impl Builder {
        /// <p>A descriptive label that is associated with a game session queue. Queue names must be unique within each Region.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::gamesessionqueue/<queue name></code>. In a GameLift game session queue ARN, the resource ID matches the
/// <i>Name</i> value.</p>
pub fn game_session_queue_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_queue_arn = Some(input.into());
self
}
pub fn set_game_session_queue_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_queue_arn = input;
self
}
/// <p>The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a <code>TIMED_OUT</code> status.</p>
pub fn timeout_in_seconds(mut self, input: i32) -> Self {
self.timeout_in_seconds = Some(input);
self
}
pub fn set_timeout_in_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.timeout_in_seconds = input;
self
}
        /// <p>A set of policies that act as a sliding cap on player latency. Use multiple policies to
        /// gradually relax latency requirements a step at a time.</p>
        /// <p>Appends a single <code>PlayerLatencyPolicy</code> to the collection. To replace the whole
        /// collection, use <code>set_player_latency_policies</code>.</p>
        pub fn player_latency_policies(
mut self,
input: impl Into<crate::model::PlayerLatencyPolicy>,
) -> Self {
let mut v = self.player_latency_policies.unwrap_or_default();
v.push(input.into());
self.player_latency_policies = Some(v);
self
}
pub fn set_player_latency_policies(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::PlayerLatencyPolicy>>,
) -> Self {
self.player_latency_policies = input;
self
}
        /// <p>A list of fleets and/or fleet aliases that can be used to fulfill game session placement
        /// requests in the queue, listed in order of placement preference.</p>
        /// <p>Appends a single <code>GameSessionQueueDestination</code> to the collection. To replace the
        /// whole collection, use <code>set_destinations</code>.</p>
        pub fn destinations(
mut self,
input: impl Into<crate::model::GameSessionQueueDestination>,
) -> Self {
let mut v = self.destinations.unwrap_or_default();
v.push(input.into());
self.destinations = Some(v);
self
}
pub fn set_destinations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GameSessionQueueDestination>>,
) -> Self {
self.destinations = input;
self
}
/// <p>A list of locations where a queue is allowed to place new game sessions. Locations
/// are specified in the form of AWS Region codes, such as <code>us-west-2</code>. If this parameter is
/// not set, game sessions can be placed in any queue location. </p>
pub fn filter_configuration(mut self, input: crate::model::FilterConfiguration) -> Self {
self.filter_configuration = Some(input);
self
}
pub fn set_filter_configuration(
mut self,
input: std::option::Option<crate::model::FilterConfiguration>,
) -> Self {
self.filter_configuration = input;
self
}
/// <p>Custom settings to use when prioritizing destinations and locations for game session placements. This
/// configuration replaces the FleetIQ default prioritization process. Priority types that are not explicitly
/// named will be automatically applied at the end of the prioritization process. </p>
pub fn priority_configuration(
mut self,
input: crate::model::PriorityConfiguration,
) -> Self {
self.priority_configuration = Some(input);
self
}
pub fn set_priority_configuration(
mut self,
input: std::option::Option<crate::model::PriorityConfiguration>,
) -> Self {
self.priority_configuration = input;
self
}
/// <p>
/// Information that is added to all events that are related to this game session queue.
/// </p>
pub fn custom_event_data(mut self, input: impl Into<std::string::String>) -> Self {
self.custom_event_data = Some(input.into());
self
}
pub fn set_custom_event_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.custom_event_data = input;
self
}
/// <p>An SNS topic ARN that is set up to receive game session placement notifications. See <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/queue-notification.html">
/// Setting up notifications for game session placement</a>.</p>
pub fn notification_target(mut self, input: impl Into<std::string::String>) -> Self {
self.notification_target = Some(input.into());
self
}
pub fn set_notification_target(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.notification_target = input;
self
}
/// Consumes the builder and constructs a [`GameSessionQueue`](crate::model::GameSessionQueue)
pub fn build(self) -> crate::model::GameSessionQueue {
crate::model::GameSessionQueue {
name: self.name,
game_session_queue_arn: self.game_session_queue_arn,
timeout_in_seconds: self.timeout_in_seconds,
player_latency_policies: self.player_latency_policies,
destinations: self.destinations,
filter_configuration: self.filter_configuration,
priority_configuration: self.priority_configuration,
custom_event_data: self.custom_event_data,
notification_target: self.notification_target,
}
}
}
}
impl GameSessionQueue {
/// Creates a new builder-style object to manufacture [`GameSessionQueue`](crate::model::GameSessionQueue)
pub fn builder() -> crate::model::game_session_queue::Builder {
crate::model::game_session_queue::Builder::default()
}
}
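// Illustrative usage sketch (not part of the generated API): a game session queue with one
// destination and a location filter. The queue name, fleet ARN, and Region code are
// placeholders; the destination and filter types are defined later in this module.
#[cfg(test)]
mod game_session_queue_builder_usage {
    #[test]
    fn builds_queue_with_destination_and_filter() {
        let destination = crate::model::GameSessionQueueDestination::builder()
            .destination_arn("arn:aws:gamelift:us-west-2:123456789012:fleet/fleet-1234")
            .build();
        let filter = crate::model::FilterConfiguration::builder()
            .allowed_locations("us-west-2")
            .build();
        let queue = crate::model::GameSessionQueue::builder()
            .name("my-queue")
            .timeout_in_seconds(600)
            .destinations(destination)
            .filter_configuration(filter)
            .build();
        assert_eq!(queue.destinations.map(|d| d.len()), Some(1));
        assert_eq!(queue.timeout_in_seconds, Some(600));
    }
}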
/// <p>Custom prioritization settings for use by a game session queue when placing new game
/// sessions with available game servers. When defined, this configuration replaces the
/// default FleetIQ prioritization process, which is as follows:</p>
/// <ul>
/// <li>
/// <p>If player latency data is included in a game session request, destinations and
/// locations are prioritized first based on lowest average latency (1), then on
/// lowest hosting cost (2), then on destination list order (3), and finally on
/// location (alphabetical) (4). This approach ensures that the queue's top priority
/// is to place game sessions where average player latency is lowest, and--if
/// latency is the same--where the hosting cost is less, etc.</p>
/// </li>
/// <li>
/// <p>If player latency data is not included, destinations and locations are
/// prioritized first on destination list order (1), and then on location
/// (alphabetical) (2). This approach ensures that the queue's top priority is to
/// place game sessions on the first destination fleet listed. If that fleet has
/// multiple locations, the game session is placed on the first location (when
/// listed alphabetically).</p>
/// </li>
/// </ul>
/// <p>Changing the priority order will affect how game sessions are placed.</p>
/// <p>Priority configurations are part of a <a>GameSessionQueue</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PriorityConfiguration {
/// <p>The recommended sequence to use when prioritizing where to place new game sessions.
/// Each type can only be listed once.</p>
/// <ul>
/// <li>
/// <p>
/// <code>LATENCY</code> -- FleetIQ prioritizes locations where the average player
/// latency (provided in each game session request) is lowest. </p>
/// </li>
/// <li>
/// <p>
/// <code>COST</code> -- FleetIQ prioritizes destinations with the lowest current
/// hosting costs. Cost is evaluated based on the location, instance type, and fleet
/// type (Spot or On-Demand) for each destination in the queue.</p>
/// </li>
/// <li>
/// <p>
/// <code>DESTINATION</code> -- FleetIQ prioritizes based on the order that
/// destinations are listed in the queue configuration.</p>
/// </li>
/// <li>
/// <p>
/// <code>LOCATION</code> -- FleetIQ prioritizes based on the provided order of
/// locations, as defined in <code>LocationOrder</code>. </p>
/// </li>
/// </ul>
pub priority_order: std::option::Option<std::vec::Vec<crate::model::PriorityType>>,
/// <p>The prioritization order to use for fleet locations, when the
/// <code>PriorityOrder</code> property includes <code>LOCATION</code>. Locations are
/// identified by AWS Region codes such as <code>us-west-2</code>. Each location can only be
/// listed once. </p>
pub location_order: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for PriorityConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PriorityConfiguration");
formatter.field("priority_order", &self.priority_order);
formatter.field("location_order", &self.location_order);
formatter.finish()
}
}
/// See [`PriorityConfiguration`](crate::model::PriorityConfiguration)
pub mod priority_configuration {
/// A builder for [`PriorityConfiguration`](crate::model::PriorityConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) priority_order: std::option::Option<std::vec::Vec<crate::model::PriorityType>>,
pub(crate) location_order: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
        /// <p>The recommended sequence to use when prioritizing where to place new game sessions.
        /// Each type can only be listed once.</p>
        /// <p>Appends a single <code>PriorityType</code> to the collection. To replace the whole
        /// collection, use <code>set_priority_order</code>.</p>
        pub fn priority_order(mut self, input: impl Into<crate::model::PriorityType>) -> Self {
let mut v = self.priority_order.unwrap_or_default();
v.push(input.into());
self.priority_order = Some(v);
self
}
pub fn set_priority_order(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::PriorityType>>,
) -> Self {
self.priority_order = input;
self
}
        /// <p>The prioritization order to use for fleet locations, when the <code>PriorityOrder</code>
        /// property includes <code>LOCATION</code>. Locations are identified by AWS Region codes such as
        /// <code>us-west-2</code>. Each location can only be listed once.</p>
        /// <p>Appends a single location to the collection. To replace the whole collection, use
        /// <code>set_location_order</code>.</p>
        pub fn location_order(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.location_order.unwrap_or_default();
v.push(input.into());
self.location_order = Some(v);
self
}
pub fn set_location_order(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.location_order = input;
self
}
/// Consumes the builder and constructs a [`PriorityConfiguration`](crate::model::PriorityConfiguration)
pub fn build(self) -> crate::model::PriorityConfiguration {
crate::model::PriorityConfiguration {
priority_order: self.priority_order,
location_order: self.location_order,
}
}
}
}
impl PriorityConfiguration {
/// Creates a new builder-style object to manufacture [`PriorityConfiguration`](crate::model::PriorityConfiguration)
pub fn builder() -> crate::model::priority_configuration::Builder {
crate::model::priority_configuration::Builder::default()
}
}
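// Illustrative usage sketch (not part of the generated API): a priority configuration that
// places on player latency first and then on an explicit location order. The Region codes
// are placeholders.
#[cfg(test)]
mod priority_configuration_builder_usage {
    #[test]
    fn builds_priority_and_location_order() {
        use crate::model::PriorityType;
        let config = crate::model::PriorityConfiguration::builder()
            .priority_order(PriorityType::Latency)
            .priority_order(PriorityType::Location)
            .location_order("us-west-2")
            .location_order("eu-central-1")
            .build();
        assert_eq!(config.priority_order.map(|p| p.len()), Some(2));
        assert_eq!(
            config.location_order,
            Some(vec!["us-west-2".to_owned(), "eu-central-1".to_owned()])
        );
    }
}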
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum PriorityType {
Cost,
Destination,
Latency,
Location,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for PriorityType {
fn from(s: &str) -> Self {
match s {
"COST" => PriorityType::Cost,
"DESTINATION" => PriorityType::Destination,
"LATENCY" => PriorityType::Latency,
"LOCATION" => PriorityType::Location,
other => PriorityType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for PriorityType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(PriorityType::from(s))
}
}
impl PriorityType {
pub fn as_str(&self) -> &str {
match self {
PriorityType::Cost => "COST",
PriorityType::Destination => "DESTINATION",
PriorityType::Latency => "LATENCY",
PriorityType::Location => "LOCATION",
PriorityType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["COST", "DESTINATION", "LATENCY", "LOCATION"]
}
}
impl AsRef<str> for PriorityType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A list of fleet locations where a game session queue can place new game sessions. You
/// can use a filter to temporarily turn off placements for specific locations. For queues
/// that have multi-location fleets, you can use a filter configuration to allow placement with
/// some, but not all, of these locations.</p>
/// <p>Filter configurations are part of a <a>GameSessionQueue</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct FilterConfiguration {
/// <p> A list of locations to allow game session placement in, in the form of AWS Region
/// codes such as <code>us-west-2</code>. </p>
pub allowed_locations: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for FilterConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("FilterConfiguration");
formatter.field("allowed_locations", &self.allowed_locations);
formatter.finish()
}
}
/// See [`FilterConfiguration`](crate::model::FilterConfiguration)
pub mod filter_configuration {
/// A builder for [`FilterConfiguration`](crate::model::FilterConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) allowed_locations: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
        /// <p>A list of locations to allow game session placement in, in the form of AWS Region
        /// codes such as <code>us-west-2</code>.</p>
        /// <p>Appends a single location to the collection. To replace the whole collection, use
        /// <code>set_allowed_locations</code>.</p>
        pub fn allowed_locations(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.allowed_locations.unwrap_or_default();
v.push(input.into());
self.allowed_locations = Some(v);
self
}
pub fn set_allowed_locations(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.allowed_locations = input;
self
}
/// Consumes the builder and constructs a [`FilterConfiguration`](crate::model::FilterConfiguration)
pub fn build(self) -> crate::model::FilterConfiguration {
crate::model::FilterConfiguration {
allowed_locations: self.allowed_locations,
}
}
}
}
impl FilterConfiguration {
/// Creates a new builder-style object to manufacture [`FilterConfiguration`](crate::model::FilterConfiguration)
pub fn builder() -> crate::model::filter_configuration::Builder {
crate::model::filter_configuration::Builder::default()
}
}
/// <p>A fleet or alias designated in a game session queue. Queues fulfill requests for new
/// game sessions by placing a new game session on any of the queue's destinations. </p>
/// <p>Destinations are part of a <a>GameSessionQueue</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameSessionQueueDestination {
    /// <p>The Amazon Resource Name (ARN) that is assigned to a fleet or fleet alias. ARNs, which
/// include a fleet ID or alias ID and a Region name, provide a unique identifier across all
/// Regions. </p>
pub destination_arn: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GameSessionQueueDestination {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameSessionQueueDestination");
formatter.field("destination_arn", &self.destination_arn);
formatter.finish()
}
}
/// See [`GameSessionQueueDestination`](crate::model::GameSessionQueueDestination)
pub mod game_session_queue_destination {
/// A builder for [`GameSessionQueueDestination`](crate::model::GameSessionQueueDestination)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) destination_arn: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The Amazon Resource Name (ARN) that is assigned to fleet or fleet alias. ARNs, which
/// include a fleet ID or alias ID and a Region name, provide a unique identifier across all
/// Regions. </p>
pub fn destination_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.destination_arn = Some(input.into());
self
}
pub fn set_destination_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.destination_arn = input;
self
}
/// Consumes the builder and constructs a [`GameSessionQueueDestination`](crate::model::GameSessionQueueDestination)
pub fn build(self) -> crate::model::GameSessionQueueDestination {
crate::model::GameSessionQueueDestination {
destination_arn: self.destination_arn,
}
}
}
}
impl GameSessionQueueDestination {
/// Creates a new builder-style object to manufacture [`GameSessionQueueDestination`](crate::model::GameSessionQueueDestination)
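    ///
    /// A minimal construction sketch (the crate name and the fleet ARN below are illustrative
    /// assumptions, not real resources):
    ///
    /// ```no_run
    /// // A queue destination is just a fleet or alias ARN wrapped in a struct.
    /// let destination = aws_sdk_gamelift::model::GameSessionQueueDestination::builder()
    ///     .destination_arn("arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-1234")
    ///     .build();
    /// assert!(destination.destination_arn.is_some());
    /// ```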
pub fn builder() -> crate::model::game_session_queue_destination::Builder {
crate::model::game_session_queue_destination::Builder::default()
}
}
/// <p>Sets a latency cap for individual players when placing a game session. With a latency
/// policy in force, a game session cannot be placed in a fleet location where a player
/// reports latency higher than the cap. Latency policies are used only with placement
/// requests that provide player latency information. Player latency policies can be stacked
/// to gradually relax latency requirements over time. </p>
/// <p>Latency policies are part of a <a>GameSessionQueue</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PlayerLatencyPolicy {
/// <p>The maximum latency value that is allowed for any player, in milliseconds. All
/// policies must have a value set for this property.</p>
pub maximum_individual_player_latency_milliseconds: std::option::Option<i32>,
/// <p>The length of time, in seconds, that the policy is enforced while placing a new
/// game session. A null value for this property means that the policy is enforced until the
/// queue times out.</p>
pub policy_duration_seconds: std::option::Option<i32>,
}
impl std::fmt::Debug for PlayerLatencyPolicy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PlayerLatencyPolicy");
formatter.field(
"maximum_individual_player_latency_milliseconds",
&self.maximum_individual_player_latency_milliseconds,
);
formatter.field("policy_duration_seconds", &self.policy_duration_seconds);
formatter.finish()
}
}
/// See [`PlayerLatencyPolicy`](crate::model::PlayerLatencyPolicy)
pub mod player_latency_policy {
/// A builder for [`PlayerLatencyPolicy`](crate::model::PlayerLatencyPolicy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) maximum_individual_player_latency_milliseconds: std::option::Option<i32>,
pub(crate) policy_duration_seconds: std::option::Option<i32>,
}
impl Builder {
/// <p>The maximum latency value that is allowed for any player, in milliseconds. All
/// policies must have a value set for this property.</p>
pub fn maximum_individual_player_latency_milliseconds(mut self, input: i32) -> Self {
self.maximum_individual_player_latency_milliseconds = Some(input);
self
}
pub fn set_maximum_individual_player_latency_milliseconds(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.maximum_individual_player_latency_milliseconds = input;
self
}
/// <p>The length of time, in seconds, that the policy is enforced while placing a new
/// game session. A null value for this property means that the policy is enforced until the
/// queue times out.</p>
pub fn policy_duration_seconds(mut self, input: i32) -> Self {
self.policy_duration_seconds = Some(input);
self
}
pub fn set_policy_duration_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.policy_duration_seconds = input;
self
}
/// Consumes the builder and constructs a [`PlayerLatencyPolicy`](crate::model::PlayerLatencyPolicy)
pub fn build(self) -> crate::model::PlayerLatencyPolicy {
crate::model::PlayerLatencyPolicy {
maximum_individual_player_latency_milliseconds: self
.maximum_individual_player_latency_milliseconds,
policy_duration_seconds: self.policy_duration_seconds,
}
}
}
}
impl PlayerLatencyPolicy {
/// Creates a new builder-style object to manufacture [`PlayerLatencyPolicy`](crate::model::PlayerLatencyPolicy)
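    ///
    /// A minimal sketch of stacking policies to relax the latency cap over time (the crate name
    /// and the numeric values are illustrative assumptions):
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::PlayerLatencyPolicy;
    ///
    /// // For the first 60 seconds require at most 100 ms; afterwards allow up to 200 ms
    /// // until the queue times out (no duration set on the second policy).
    /// let strict = PlayerLatencyPolicy::builder()
    ///     .maximum_individual_player_latency_milliseconds(100)
    ///     .policy_duration_seconds(60)
    ///     .build();
    /// let relaxed = PlayerLatencyPolicy::builder()
    ///     .maximum_individual_player_latency_milliseconds(200)
    ///     .build();
    /// let policies = vec![strict, relaxed];
    /// assert_eq!(policies.len(), 2);
    /// ```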
pub fn builder() -> crate::model::player_latency_policy::Builder {
crate::model::player_latency_policy::Builder::default()
}
}
/// <p>Properties describing a game session.</p>
/// <p>A game session in <code>ACTIVE</code> status can host players. When a game session ends, its
/// status is set to <code>TERMINATED</code>. </p>
/// <p>Once the session ends, the game session object is retained for 30 days. This means
/// you can reuse idempotency token values after this time. Game session logs are retained
/// for 14 days.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateGameSession</a> |
/// <a>DescribeGameSessions</a> |
/// <a>DescribeGameSessionDetails</a> |
/// <a>SearchGameSessions</a> |
/// <a>UpdateGameSession</a> |
/// <a>GetGameSessionLogUrl</a> |
/// <a>StartGameSessionPlacement</a> |
/// <a>DescribeGameSessionPlacement</a> |
/// <a>StopGameSessionPlacement</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameSession {
/// <p>A unique identifier for the game session. A game session ARN has the following format:
/// <code>arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token></code>.</p>
pub game_session_id: std::option::Option<std::string::String>,
/// <p>A descriptive label that is associated with a game session. Session names do not need to be unique.</p>
pub name: std::option::Option<std::string::String>,
/// <p>A unique identifier for the fleet that the game session is running on.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>
/// The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift fleet that this game session is running on.
/// </p>
pub fleet_arn: std::option::Option<std::string::String>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
/// <p>A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub termination_time: std::option::Option<smithy_types::Instant>,
/// <p>Number of players currently in the game session.</p>
pub current_player_session_count: std::option::Option<i32>,
/// <p>The maximum number of players that can be connected simultaneously to the game session.</p>
pub maximum_player_session_count: std::option::Option<i32>,
/// <p>Current status of the game session. A game session must have an <code>ACTIVE</code>
/// status to have player sessions.</p>
pub status: std::option::Option<crate::model::GameSessionStatus>,
/// <p>Provides additional information about game session status. <code>INTERRUPTED</code>
/// indicates that the game session was hosted on a spot instance that was reclaimed,
/// causing the active game session to be terminated.</p>
pub status_reason: std::option::Option<crate::model::GameSessionStatusReason>,
/// <p>A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the
/// <a>GameSession</a> object with a request to start a new game session. You can search for active game sessions based on this custom data
/// with <a>SearchGameSessions</a>.</p>
pub game_properties: std::option::Option<std::vec::Vec<crate::model::GameProperty>>,
/// <p>The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub ip_address: std::option::Option<std::string::String>,
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub dns_name: std::option::Option<std::string::String>,
/// <p>The port number for the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub port: std::option::Option<i32>,
/// <p>Indicates whether or not the game session is accepting new players.</p>
pub player_session_creation_policy:
std::option::Option<crate::model::PlayerSessionCreationPolicy>,
/// <p>A unique identifier for a player. This ID is used to enforce a resource protection policy (if one
/// exists) that limits the number of game sessions a player can create.</p>
pub creator_id: std::option::Option<std::string::String>,
/// <p>A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the
/// <a>GameSession</a> object with a request to start a new game session.</p>
pub game_session_data: std::option::Option<std::string::String>,
/// <p>Information about the matchmaking process that was used to create the game session.
/// It is in JSON syntax, formatted as a string. In addition to the matchmaking configuration
/// used, it contains data on all players assigned to the match, including player attributes
/// and team assignments. For more details on matchmaker data, see <a href="https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data">Match
/// Data</a>. Matchmaker data is useful when requesting match backfills, and is
/// updated whenever new players are added during a successful backfill (see <a>StartMatchBackfill</a>). </p>
pub matchmaker_data: std::option::Option<std::string::String>,
/// <p>The fleet location where the game session is running. This value might specify the
/// fleet's home Region or a remote location. Location is expressed as an AWS Region code
/// such as <code>us-west-2</code>. </p>
pub location: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GameSession {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameSession");
formatter.field("game_session_id", &self.game_session_id);
formatter.field("name", &self.name);
formatter.field("fleet_id", &self.fleet_id);
formatter.field("fleet_arn", &self.fleet_arn);
formatter.field("creation_time", &self.creation_time);
formatter.field("termination_time", &self.termination_time);
formatter.field(
"current_player_session_count",
&self.current_player_session_count,
);
formatter.field(
"maximum_player_session_count",
&self.maximum_player_session_count,
);
formatter.field("status", &self.status);
formatter.field("status_reason", &self.status_reason);
formatter.field("game_properties", &self.game_properties);
formatter.field("ip_address", &self.ip_address);
formatter.field("dns_name", &self.dns_name);
formatter.field("port", &self.port);
formatter.field(
"player_session_creation_policy",
&self.player_session_creation_policy,
);
formatter.field("creator_id", &self.creator_id);
formatter.field("game_session_data", &self.game_session_data);
formatter.field("matchmaker_data", &self.matchmaker_data);
formatter.field("location", &self.location);
formatter.finish()
}
}
/// See [`GameSession`](crate::model::GameSession)
pub mod game_session {
/// A builder for [`GameSession`](crate::model::GameSession)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) game_session_id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) fleet_arn: std::option::Option<std::string::String>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
pub(crate) termination_time: std::option::Option<smithy_types::Instant>,
pub(crate) current_player_session_count: std::option::Option<i32>,
pub(crate) maximum_player_session_count: std::option::Option<i32>,
pub(crate) status: std::option::Option<crate::model::GameSessionStatus>,
pub(crate) status_reason: std::option::Option<crate::model::GameSessionStatusReason>,
pub(crate) game_properties: std::option::Option<std::vec::Vec<crate::model::GameProperty>>,
pub(crate) ip_address: std::option::Option<std::string::String>,
pub(crate) dns_name: std::option::Option<std::string::String>,
pub(crate) port: std::option::Option<i32>,
pub(crate) player_session_creation_policy:
std::option::Option<crate::model::PlayerSessionCreationPolicy>,
pub(crate) creator_id: std::option::Option<std::string::String>,
pub(crate) game_session_data: std::option::Option<std::string::String>,
pub(crate) matchmaker_data: std::option::Option<std::string::String>,
pub(crate) location: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for the game session. A game session ARN has the following format:
/// <code>arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token></code>.</p>
pub fn game_session_id(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_id = Some(input.into());
self
}
pub fn set_game_session_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_id = input;
self
}
/// <p>A descriptive label that is associated with a game session. Session names do not need to be unique.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>A unique identifier for the fleet that the game session is running on.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>
/// The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift fleet that this game session is running on.
/// </p>
pub fn fleet_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_arn = Some(input.into());
self
}
pub fn set_fleet_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_arn = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// <p>A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn termination_time(mut self, input: smithy_types::Instant) -> Self {
self.termination_time = Some(input);
self
}
pub fn set_termination_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.termination_time = input;
self
}
/// <p>Number of players currently in the game session.</p>
pub fn current_player_session_count(mut self, input: i32) -> Self {
self.current_player_session_count = Some(input);
self
}
pub fn set_current_player_session_count(mut self, input: std::option::Option<i32>) -> Self {
self.current_player_session_count = input;
self
}
/// <p>The maximum number of players that can be connected simultaneously to the game session.</p>
pub fn maximum_player_session_count(mut self, input: i32) -> Self {
self.maximum_player_session_count = Some(input);
self
}
pub fn set_maximum_player_session_count(mut self, input: std::option::Option<i32>) -> Self {
self.maximum_player_session_count = input;
self
}
/// <p>Current status of the game session. A game session must have an <code>ACTIVE</code>
/// status to have player sessions.</p>
pub fn status(mut self, input: crate::model::GameSessionStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::GameSessionStatus>,
) -> Self {
self.status = input;
self
}
/// <p>Provides additional information about game session status. <code>INTERRUPTED</code>
/// indicates that the game session was hosted on a spot instance that was reclaimed,
/// causing the active game session to be terminated.</p>
pub fn status_reason(mut self, input: crate::model::GameSessionStatusReason) -> Self {
self.status_reason = Some(input);
self
}
pub fn set_status_reason(
mut self,
input: std::option::Option<crate::model::GameSessionStatusReason>,
) -> Self {
self.status_reason = input;
self
}
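        /// Appends a single `GameProperty` to `game_properties`. Use `set_game_properties` to replace the entire list.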
pub fn game_properties(mut self, input: impl Into<crate::model::GameProperty>) -> Self {
let mut v = self.game_properties.unwrap_or_default();
v.push(input.into());
self.game_properties = Some(v);
self
}
pub fn set_game_properties(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GameProperty>>,
) -> Self {
self.game_properties = input;
self
}
/// <p>The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub fn ip_address(mut self, input: impl Into<std::string::String>) -> Self {
self.ip_address = Some(input.into());
self
}
pub fn set_ip_address(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ip_address = input;
self
}
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub fn dns_name(mut self, input: impl Into<std::string::String>) -> Self {
self.dns_name = Some(input.into());
self
}
pub fn set_dns_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.dns_name = input;
self
}
/// <p>The port number for the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub fn port(mut self, input: i32) -> Self {
self.port = Some(input);
self
}
pub fn set_port(mut self, input: std::option::Option<i32>) -> Self {
self.port = input;
self
}
/// <p>Indicates whether or not the game session is accepting new players.</p>
pub fn player_session_creation_policy(
mut self,
input: crate::model::PlayerSessionCreationPolicy,
) -> Self {
self.player_session_creation_policy = Some(input);
self
}
pub fn set_player_session_creation_policy(
mut self,
input: std::option::Option<crate::model::PlayerSessionCreationPolicy>,
) -> Self {
self.player_session_creation_policy = input;
self
}
/// <p>A unique identifier for a player. This ID is used to enforce a resource protection policy (if one
/// exists) that limits the number of game sessions a player can create.</p>
pub fn creator_id(mut self, input: impl Into<std::string::String>) -> Self {
self.creator_id = Some(input.into());
self
}
pub fn set_creator_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.creator_id = input;
self
}
/// <p>A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the
/// <a>GameSession</a> object with a request to start a new game session.</p>
pub fn game_session_data(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_data = Some(input.into());
self
}
pub fn set_game_session_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_data = input;
self
}
/// <p>Information about the matchmaking process that was used to create the game session.
/// It is in JSON syntax, formatted as a string. In addition to the matchmaking configuration
/// used, it contains data on all players assigned to the match, including player attributes
/// and team assignments. For more details on matchmaker data, see <a href="https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data">Match
/// Data</a>. Matchmaker data is useful when requesting match backfills, and is
/// updated whenever new players are added during a successful backfill (see <a>StartMatchBackfill</a>). </p>
pub fn matchmaker_data(mut self, input: impl Into<std::string::String>) -> Self {
self.matchmaker_data = Some(input.into());
self
}
pub fn set_matchmaker_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.matchmaker_data = input;
self
}
/// <p>The fleet location where the game session is running. This value might specify the
/// fleet's home Region or a remote location. Location is expressed as an AWS Region code
/// such as <code>us-west-2</code>. </p>
pub fn location(mut self, input: impl Into<std::string::String>) -> Self {
self.location = Some(input.into());
self
}
pub fn set_location(mut self, input: std::option::Option<std::string::String>) -> Self {
self.location = input;
self
}
/// Consumes the builder and constructs a [`GameSession`](crate::model::GameSession)
pub fn build(self) -> crate::model::GameSession {
crate::model::GameSession {
game_session_id: self.game_session_id,
name: self.name,
fleet_id: self.fleet_id,
fleet_arn: self.fleet_arn,
creation_time: self.creation_time,
termination_time: self.termination_time,
current_player_session_count: self.current_player_session_count,
maximum_player_session_count: self.maximum_player_session_count,
status: self.status,
status_reason: self.status_reason,
game_properties: self.game_properties,
ip_address: self.ip_address,
dns_name: self.dns_name,
port: self.port,
player_session_creation_policy: self.player_session_creation_policy,
creator_id: self.creator_id,
game_session_data: self.game_session_data,
matchmaker_data: self.matchmaker_data,
location: self.location,
}
}
}
}
impl GameSession {
/// Creates a new builder-style object to manufacture [`GameSession`](crate::model::GameSession)
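    ///
    /// A minimal construction sketch (the crate name, addresses, and port are illustrative
    /// assumptions):
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::GameSession;
    ///
    /// let session = GameSession::builder()
    ///     .name("my-session")
    ///     .ip_address("203.0.113.10")
    ///     .dns_name("example.us-west-2.amazongamelift.com")
    ///     .port(7777)
    ///     .build();
    /// // Prefer the DNS name (required for TLS-enabled fleets) over the raw IP address.
    /// let host = session.dns_name.or(session.ip_address).unwrap();
    /// let endpoint = format!("{}:{}", host, session.port.unwrap());
    /// assert_eq!(endpoint, "example.us-west-2.amazongamelift.com:7777");
    /// ```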
pub fn builder() -> crate::model::game_session::Builder {
crate::model::game_session::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum PlayerSessionCreationPolicy {
AcceptAll,
DenyAll,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for PlayerSessionCreationPolicy {
fn from(s: &str) -> Self {
match s {
"ACCEPT_ALL" => PlayerSessionCreationPolicy::AcceptAll,
"DENY_ALL" => PlayerSessionCreationPolicy::DenyAll,
other => PlayerSessionCreationPolicy::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for PlayerSessionCreationPolicy {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(PlayerSessionCreationPolicy::from(s))
}
}
impl PlayerSessionCreationPolicy {
pub fn as_str(&self) -> &str {
match self {
PlayerSessionCreationPolicy::AcceptAll => "ACCEPT_ALL",
PlayerSessionCreationPolicy::DenyAll => "DENY_ALL",
PlayerSessionCreationPolicy::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["ACCEPT_ALL", "DENY_ALL"]
}
}
impl AsRef<str> for PlayerSessionCreationPolicy {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameSessionStatusReason {
Interrupted,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameSessionStatusReason {
fn from(s: &str) -> Self {
match s {
"INTERRUPTED" => GameSessionStatusReason::Interrupted,
other => GameSessionStatusReason::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameSessionStatusReason {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameSessionStatusReason::from(s))
}
}
impl GameSessionStatusReason {
pub fn as_str(&self) -> &str {
match self {
GameSessionStatusReason::Interrupted => "INTERRUPTED",
GameSessionStatusReason::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["INTERRUPTED"]
}
}
impl AsRef<str> for GameSessionStatusReason {
fn as_ref(&self) -> &str {
self.as_str()
}
}
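/// Current status of a game session, as carried in the `status` field of
/// [`GameSession`](crate::model::GameSession).
///
/// A minimal sketch of converting between the wire string and the enum (the crate name is an
/// assumption):
///
/// ```no_run
/// use aws_sdk_gamelift::model::GameSessionStatus;
///
/// // Unrecognized strings are preserved in the `Unknown` variant rather than failing.
/// let status: GameSessionStatus = "ACTIVE".parse().unwrap();
/// assert_eq!(status.as_str(), "ACTIVE");
/// let newer = "SOMETHING_NEW".parse::<GameSessionStatus>().unwrap();
/// assert!(matches!(newer, GameSessionStatus::Unknown(_)));
/// ```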
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameSessionStatus {
Activating,
Active,
Error,
Terminated,
Terminating,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameSessionStatus {
fn from(s: &str) -> Self {
match s {
"ACTIVATING" => GameSessionStatus::Activating,
"ACTIVE" => GameSessionStatus::Active,
"ERROR" => GameSessionStatus::Error,
"TERMINATED" => GameSessionStatus::Terminated,
"TERMINATING" => GameSessionStatus::Terminating,
other => GameSessionStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameSessionStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameSessionStatus::from(s))
}
}
impl GameSessionStatus {
pub fn as_str(&self) -> &str {
match self {
GameSessionStatus::Activating => "ACTIVATING",
GameSessionStatus::Active => "ACTIVE",
GameSessionStatus::Error => "ERROR",
GameSessionStatus::Terminated => "TERMINATED",
GameSessionStatus::Terminating => "TERMINATING",
GameSessionStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["ACTIVATING", "ACTIVE", "ERROR", "TERMINATED", "TERMINATING"]
}
}
impl AsRef<str> for GameSessionStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ProtectionPolicy {
FullProtection,
NoProtection,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ProtectionPolicy {
fn from(s: &str) -> Self {
match s {
"FullProtection" => ProtectionPolicy::FullProtection,
"NoProtection" => ProtectionPolicy::NoProtection,
other => ProtectionPolicy::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ProtectionPolicy {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ProtectionPolicy::from(s))
}
}
impl ProtectionPolicy {
pub fn as_str(&self) -> &str {
match self {
ProtectionPolicy::FullProtection => "FullProtection",
ProtectionPolicy::NoProtection => "NoProtection",
ProtectionPolicy::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["FullProtection", "NoProtection"]
}
}
impl AsRef<str> for ProtectionPolicy {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>
/// <b>This data type is used with the GameLift FleetIQ and game server groups.</b>
/// </p>
/// <p>Properties that describe a game server group resource. A game server group manages
/// certain properties related to a corresponding EC2 Auto Scaling group. </p>
/// <p>A game server group is created by a successful call to
/// <code>CreateGameServerGroup</code> and deleted by calling
/// <code>DeleteGameServerGroup</code>. Game server group activity can be temporarily
/// suspended and resumed by calling <code>SuspendGameServerGroup</code> and
/// <code>ResumeGameServerGroup</code>, respectively. </p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateGameServerGroup</a> |
/// <a>ListGameServerGroups</a> |
/// <a>DescribeGameServerGroup</a> |
/// <a>UpdateGameServerGroup</a> |
/// <a>DeleteGameServerGroup</a> |
/// <a>ResumeGameServerGroup</a> |
/// <a>SuspendGameServerGroup</a> |
/// <a>DescribeGameServerInstances</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameServerGroup {
/// <p>A developer-defined identifier for the game server group. The name is unique for each
/// Region in each AWS account.</p>
pub game_server_group_name: std::option::Option<std::string::String>,
/// <p>A generated unique ID for the game server group.</p>
pub game_server_group_arn: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) for an IAM role that
/// allows Amazon GameLift to access your EC2 Auto Scaling groups.</p>
pub role_arn: std::option::Option<std::string::String>,
/// <p>The set of EC2 instance types that GameLift FleetIQ can use when balancing and automatically
/// scaling instances in the corresponding Auto Scaling group. </p>
pub instance_definitions: std::option::Option<std::vec::Vec<crate::model::InstanceDefinition>>,
/// <p>Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand Instances in the
/// game server group. Method options include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>SPOT_ONLY</code> - Only Spot Instances are used in the game server group. If Spot
/// Instances are unavailable or not viable for game hosting, the game server group
/// provides no hosting capacity until Spot Instances can again be used. Until then,
/// no new instances are started, and the existing nonviable Spot Instances are
/// terminated (after current gameplay ends) and are not replaced.</p>
/// </li>
/// <li>
/// <p>
/// <code>SPOT_PREFERRED</code> - (default value) Spot Instances are used whenever available in
/// the game server group. If Spot Instances are unavailable, the game server group
/// continues to provide hosting capacity by falling back to On-Demand Instances.
/// Existing nonviable Spot Instances are terminated (after current gameplay ends)
/// and are replaced with new On-Demand Instances.</p>
/// </li>
/// <li>
/// <p>
/// <code>ON_DEMAND_ONLY</code> - Only On-Demand Instances are used in the game
/// server group. No Spot Instances are used, even when available, while this
/// balancing strategy is in force.</p>
/// </li>
/// </ul>
pub balancing_strategy: std::option::Option<crate::model::BalancingStrategy>,
/// <p>A flag that indicates whether instances in the game server group are protected
/// from early termination. Unprotected instances that have active game servers running might
/// be terminated during a scale-down event, causing players to be dropped from the game.
/// Protected instances cannot be terminated while there are active game servers running except
/// in the event of a forced game server group deletion. An exception to this is with Spot
/// Instances, which can be terminated by AWS regardless of protection status. </p>
pub game_server_protection_policy:
std::option::Option<crate::model::GameServerProtectionPolicy>,
/// <p>A generated unique ID for the EC2 Auto Scaling group that is associated with this
/// game server group.</p>
pub auto_scaling_group_arn: std::option::Option<std::string::String>,
/// <p>The current status of the game server group. Possible statuses include:</p>
/// <ul>
/// <li>
/// <p>
/// <code>NEW</code> - GameLift FleetIQ has validated the <code>CreateGameServerGroup()</code>
/// request. </p>
/// </li>
/// <li>
/// <p>
/// <code>ACTIVATING</code> - GameLift FleetIQ is setting up a game server group, which
/// includes creating an Auto Scaling group in your AWS account. </p>
/// </li>
/// <li>
/// <p>
/// <code>ACTIVE</code> - The game server group has been successfully created. </p>
/// </li>
/// <li>
/// <p>
/// <code>DELETE_SCHEDULED</code> - A request to delete the game server group has
/// been received. </p>
/// </li>
/// <li>
/// <p>
/// <code>DELETING</code> - GameLift FleetIQ has received a valid
/// <code>DeleteGameServerGroup()</code> request and is processing it. GameLift FleetIQ
/// must first complete and release hosts before it deletes the Auto Scaling group
/// and the game server group. </p>
/// </li>
/// <li>
/// <p>
/// <code>DELETED</code> - The game server group has been successfully deleted. </p>
/// </li>
/// <li>
/// <p>
/// <code>ERROR</code> - The asynchronous processes of activating or deleting a game server group
/// has failed, resulting in an error state.</p>
/// </li>
/// </ul>
pub status: std::option::Option<crate::model::GameServerGroupStatus>,
/// <p>Additional information about the current game server group status. This information
/// might provide additional insight on groups that are in <code>ERROR</code> status.</p>
pub status_reason: std::option::Option<std::string::String>,
/// <p>A list of activities that are currently suspended for this game server group.
/// If this property is empty, all activities are occurring.</p>
pub suspended_actions: std::option::Option<std::vec::Vec<crate::model::GameServerGroupAction>>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
/// <p>A timestamp that indicates when this game server group was last updated.</p>
pub last_updated_time: std::option::Option<smithy_types::Instant>,
}
impl std::fmt::Debug for GameServerGroup {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameServerGroup");
formatter.field("game_server_group_name", &self.game_server_group_name);
formatter.field("game_server_group_arn", &self.game_server_group_arn);
formatter.field("role_arn", &self.role_arn);
formatter.field("instance_definitions", &self.instance_definitions);
formatter.field("balancing_strategy", &self.balancing_strategy);
formatter.field(
"game_server_protection_policy",
&self.game_server_protection_policy,
);
formatter.field("auto_scaling_group_arn", &self.auto_scaling_group_arn);
formatter.field("status", &self.status);
formatter.field("status_reason", &self.status_reason);
formatter.field("suspended_actions", &self.suspended_actions);
formatter.field("creation_time", &self.creation_time);
formatter.field("last_updated_time", &self.last_updated_time);
formatter.finish()
}
}
/// See [`GameServerGroup`](crate::model::GameServerGroup)
pub mod game_server_group {
/// A builder for [`GameServerGroup`](crate::model::GameServerGroup)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) game_server_group_name: std::option::Option<std::string::String>,
pub(crate) game_server_group_arn: std::option::Option<std::string::String>,
pub(crate) role_arn: std::option::Option<std::string::String>,
pub(crate) instance_definitions:
std::option::Option<std::vec::Vec<crate::model::InstanceDefinition>>,
pub(crate) balancing_strategy: std::option::Option<crate::model::BalancingStrategy>,
pub(crate) game_server_protection_policy:
std::option::Option<crate::model::GameServerProtectionPolicy>,
pub(crate) auto_scaling_group_arn: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::GameServerGroupStatus>,
pub(crate) status_reason: std::option::Option<std::string::String>,
pub(crate) suspended_actions:
std::option::Option<std::vec::Vec<crate::model::GameServerGroupAction>>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_updated_time: std::option::Option<smithy_types::Instant>,
}
impl Builder {
/// <p>A developer-defined identifier for the game server group. The name is unique for each
/// Region in each AWS account.</p>
pub fn game_server_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.game_server_group_name = Some(input.into());
self
}
pub fn set_game_server_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_server_group_name = input;
self
}
/// <p>A generated unique ID for the game server group.</p>
pub fn game_server_group_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.game_server_group_arn = Some(input.into());
self
}
pub fn set_game_server_group_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_server_group_arn = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) for an IAM role that
/// allows Amazon GameLift to access your EC2 Auto Scaling groups.</p>
pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.role_arn = Some(input.into());
self
}
pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.role_arn = input;
self
}
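        /// Appends an `InstanceDefinition` to `instance_definitions`. Use `set_instance_definitions` to replace the entire list.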
pub fn instance_definitions(
mut self,
input: impl Into<crate::model::InstanceDefinition>,
) -> Self {
let mut v = self.instance_definitions.unwrap_or_default();
v.push(input.into());
self.instance_definitions = Some(v);
self
}
pub fn set_instance_definitions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::InstanceDefinition>>,
) -> Self {
self.instance_definitions = input;
self
}
/// <p>Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand Instances in the
/// game server group. Method options include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>SPOT_ONLY</code> - Only Spot Instances are used in the game server group. If Spot
/// Instances are unavailable or not viable for game hosting, the game server group
/// provides no hosting capacity until Spot Instances can again be used. Until then,
/// no new instances are started, and the existing nonviable Spot Instances are
/// terminated (after current gameplay ends) and are not replaced.</p>
/// </li>
/// <li>
/// <p>
/// <code>SPOT_PREFERRED</code> - (default value) Spot Instances are used whenever available in
/// the game server group. If Spot Instances are unavailable, the game server group
/// continues to provide hosting capacity by falling back to On-Demand Instances.
/// Existing nonviable Spot Instances are terminated (after current gameplay ends)
/// and are replaced with new On-Demand Instances.</p>
/// </li>
/// <li>
/// <p>
/// <code>ON_DEMAND_ONLY</code> - Only On-Demand Instances are used in the game
/// server group. No Spot Instances are used, even when available, while this
/// balancing strategy is in force.</p>
/// </li>
/// </ul>
pub fn balancing_strategy(mut self, input: crate::model::BalancingStrategy) -> Self {
self.balancing_strategy = Some(input);
self
}
pub fn set_balancing_strategy(
mut self,
input: std::option::Option<crate::model::BalancingStrategy>,
) -> Self {
self.balancing_strategy = input;
self
}
/// <p>A flag that indicates whether instances in the game server group are protected
/// from early termination. Unprotected instances that have active game servers running might
/// be terminated during a scale-down event, causing players to be dropped from the game.
/// Protected instances cannot be terminated while there are active game servers running except
/// in the event of a forced game server group deletion. An exception to this is with Spot
/// Instances, which can be terminated by AWS regardless of protection status. </p>
pub fn game_server_protection_policy(
mut self,
input: crate::model::GameServerProtectionPolicy,
) -> Self {
self.game_server_protection_policy = Some(input);
self
}
pub fn set_game_server_protection_policy(
mut self,
input: std::option::Option<crate::model::GameServerProtectionPolicy>,
) -> Self {
self.game_server_protection_policy = input;
self
}
/// <p>A generated unique ID for the EC2 Auto Scaling group that is associated with this
/// game server group.</p>
pub fn auto_scaling_group_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.auto_scaling_group_arn = Some(input.into());
self
}
pub fn set_auto_scaling_group_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.auto_scaling_group_arn = input;
self
}
/// <p>The current status of the game server group. Possible statuses include:</p>
/// <ul>
/// <li>
/// <p>
/// <code>NEW</code> - GameLift FleetIQ has validated the <code>CreateGameServerGroup()</code>
/// request. </p>
/// </li>
/// <li>
/// <p>
/// <code>ACTIVATING</code> - GameLift FleetIQ is setting up a game server group, which
/// includes creating an Auto Scaling group in your AWS account. </p>
/// </li>
/// <li>
/// <p>
/// <code>ACTIVE</code> - The game server group has been successfully created. </p>
/// </li>
/// <li>
/// <p>
/// <code>DELETE_SCHEDULED</code> - A request to delete the game server group has
/// been received. </p>
/// </li>
/// <li>
/// <p>
/// <code>DELETING</code> - GameLift FleetIQ has received a valid
/// <code>DeleteGameServerGroup()</code> request and is processing it. GameLift FleetIQ
/// must first complete and release hosts before it deletes the Auto Scaling group
/// and the game server group. </p>
/// </li>
/// <li>
/// <p>
/// <code>DELETED</code> - The game server group has been successfully deleted. </p>
/// </li>
/// <li>
/// <p>
/// <code>ERROR</code> - The asynchronous processes of activating or deleting a game server group
/// has failed, resulting in an error state.</p>
/// </li>
/// </ul>
pub fn status(mut self, input: crate::model::GameServerGroupStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::GameServerGroupStatus>,
) -> Self {
self.status = input;
self
}
/// <p>Additional information about the current game server group status. This information
/// might provide additional insight on groups that are in <code>ERROR</code> status.</p>
pub fn status_reason(mut self, input: impl Into<std::string::String>) -> Self {
self.status_reason = Some(input.into());
self
}
pub fn set_status_reason(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.status_reason = input;
self
}
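        /// Appends a suspended activity to `suspended_actions`. Use `set_suspended_actions` to replace the entire list.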
pub fn suspended_actions(
mut self,
input: impl Into<crate::model::GameServerGroupAction>,
) -> Self {
let mut v = self.suspended_actions.unwrap_or_default();
v.push(input.into());
self.suspended_actions = Some(v);
self
}
pub fn set_suspended_actions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GameServerGroupAction>>,
) -> Self {
self.suspended_actions = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// <p>A timestamp that indicates when this game server group was last updated.</p>
pub fn last_updated_time(mut self, input: smithy_types::Instant) -> Self {
self.last_updated_time = Some(input);
self
}
pub fn set_last_updated_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_updated_time = input;
self
}
/// Consumes the builder and constructs a [`GameServerGroup`](crate::model::GameServerGroup)
pub fn build(self) -> crate::model::GameServerGroup {
crate::model::GameServerGroup {
game_server_group_name: self.game_server_group_name,
game_server_group_arn: self.game_server_group_arn,
role_arn: self.role_arn,
instance_definitions: self.instance_definitions,
balancing_strategy: self.balancing_strategy,
game_server_protection_policy: self.game_server_protection_policy,
auto_scaling_group_arn: self.auto_scaling_group_arn,
status: self.status,
status_reason: self.status_reason,
suspended_actions: self.suspended_actions,
creation_time: self.creation_time,
last_updated_time: self.last_updated_time,
}
}
}
}
impl GameServerGroup {
/// Creates a new builder-style object to manufacture [`GameServerGroup`](crate::model::GameServerGroup)
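    ///
    /// A minimal sketch of inspecting a group returned by the service (the helper name
    /// `is_fully_active` and the crate name are assumptions):
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::{GameServerGroup, GameServerGroupStatus};
    ///
    /// fn is_fully_active(group: &GameServerGroup) -> bool {
    ///     // ACTIVE status and no suspended activities (an empty or missing list means all
    ///     // activities are occurring).
    ///     matches!(group.status, Some(GameServerGroupStatus::Active))
    ///         && group
    ///             .suspended_actions
    ///             .as_ref()
    ///             .map_or(true, |actions| actions.is_empty())
    /// }
    /// ```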
pub fn builder() -> crate::model::game_server_group::Builder {
crate::model::game_server_group::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameServerGroupAction {
ReplaceInstanceTypes,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameServerGroupAction {
fn from(s: &str) -> Self {
match s {
"REPLACE_INSTANCE_TYPES" => GameServerGroupAction::ReplaceInstanceTypes,
other => GameServerGroupAction::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameServerGroupAction {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameServerGroupAction::from(s))
}
}
impl GameServerGroupAction {
pub fn as_str(&self) -> &str {
match self {
GameServerGroupAction::ReplaceInstanceTypes => "REPLACE_INSTANCE_TYPES",
GameServerGroupAction::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["REPLACE_INSTANCE_TYPES"]
}
}
impl AsRef<str> for GameServerGroupAction {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameServerGroupStatus {
Activating,
Active,
Deleted,
DeleteScheduled,
Deleting,
Error,
New,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameServerGroupStatus {
fn from(s: &str) -> Self {
match s {
"ACTIVATING" => GameServerGroupStatus::Activating,
"ACTIVE" => GameServerGroupStatus::Active,
"DELETED" => GameServerGroupStatus::Deleted,
"DELETE_SCHEDULED" => GameServerGroupStatus::DeleteScheduled,
"DELETING" => GameServerGroupStatus::Deleting,
"ERROR" => GameServerGroupStatus::Error,
"NEW" => GameServerGroupStatus::New,
other => GameServerGroupStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameServerGroupStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameServerGroupStatus::from(s))
}
}
impl GameServerGroupStatus {
pub fn as_str(&self) -> &str {
match self {
GameServerGroupStatus::Activating => "ACTIVATING",
GameServerGroupStatus::Active => "ACTIVE",
GameServerGroupStatus::Deleted => "DELETED",
GameServerGroupStatus::DeleteScheduled => "DELETE_SCHEDULED",
GameServerGroupStatus::Deleting => "DELETING",
GameServerGroupStatus::Error => "ERROR",
GameServerGroupStatus::New => "NEW",
GameServerGroupStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"ACTIVATING",
"ACTIVE",
"DELETED",
"DELETE_SCHEDULED",
"DELETING",
"ERROR",
"NEW",
]
}
}
impl AsRef<str> for GameServerGroupStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameServerProtectionPolicy {
FullProtection,
NoProtection,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameServerProtectionPolicy {
fn from(s: &str) -> Self {
match s {
"FULL_PROTECTION" => GameServerProtectionPolicy::FullProtection,
"NO_PROTECTION" => GameServerProtectionPolicy::NoProtection,
other => GameServerProtectionPolicy::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameServerProtectionPolicy {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameServerProtectionPolicy::from(s))
}
}
impl GameServerProtectionPolicy {
pub fn as_str(&self) -> &str {
match self {
GameServerProtectionPolicy::FullProtection => "FULL_PROTECTION",
GameServerProtectionPolicy::NoProtection => "NO_PROTECTION",
GameServerProtectionPolicy::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["FULL_PROTECTION", "NO_PROTECTION"]
}
}
impl AsRef<str> for GameServerProtectionPolicy {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum BalancingStrategy {
OnDemandOnly,
SpotOnly,
SpotPreferred,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for BalancingStrategy {
fn from(s: &str) -> Self {
match s {
"ON_DEMAND_ONLY" => BalancingStrategy::OnDemandOnly,
"SPOT_ONLY" => BalancingStrategy::SpotOnly,
"SPOT_PREFERRED" => BalancingStrategy::SpotPreferred,
other => BalancingStrategy::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for BalancingStrategy {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(BalancingStrategy::from(s))
}
}
impl BalancingStrategy {
pub fn as_str(&self) -> &str {
match self {
BalancingStrategy::OnDemandOnly => "ON_DEMAND_ONLY",
BalancingStrategy::SpotOnly => "SPOT_ONLY",
BalancingStrategy::SpotPreferred => "SPOT_PREFERRED",
BalancingStrategy::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["ON_DEMAND_ONLY", "SPOT_ONLY", "SPOT_PREFERRED"]
}
}
impl AsRef<str> for BalancingStrategy {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>
/// <b>This data type is used with the GameLift FleetIQ and game server groups.</b>
/// </p>
/// <p>An allowed instance type for a <a>GameServerGroup</a>. Each game server group must have at least two
/// instance types defined. GameLift FleetIQ periodically evaluates each defined instance type
/// for viability. It then updates the Auto Scaling group with the list of viable instance
/// types.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceDefinition {
/// <p>An EC2 instance type designation.</p>
pub instance_type: std::option::Option<crate::model::GameServerGroupInstanceType>,
/// <p>Instance weighting that indicates how much this instance type contributes to the total
/// capacity of a game server group. Instance weights are used by GameLift FleetIQ to calculate the
/// instance type's cost per unit hour and better identify the most cost-effective options.
/// For detailed information on weighting instance capacity, see <a href="https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-weighting.html">Instance
/// Weighting</a> in the <i>Amazon EC2 Auto Scaling User Guide</i>.
/// Default value is "1".</p>
pub weighted_capacity: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for InstanceDefinition {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceDefinition");
formatter.field("instance_type", &self.instance_type);
formatter.field("weighted_capacity", &self.weighted_capacity);
formatter.finish()
}
}
/// See [`InstanceDefinition`](crate::model::InstanceDefinition)
pub mod instance_definition {
/// A builder for [`InstanceDefinition`](crate::model::InstanceDefinition)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) instance_type: std::option::Option<crate::model::GameServerGroupInstanceType>,
pub(crate) weighted_capacity: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>An EC2 instance type designation.</p>
pub fn instance_type(mut self, input: crate::model::GameServerGroupInstanceType) -> Self {
self.instance_type = Some(input);
self
}
pub fn set_instance_type(
mut self,
input: std::option::Option<crate::model::GameServerGroupInstanceType>,
) -> Self {
self.instance_type = input;
self
}
/// <p>Instance weighting that indicates how much this instance type contributes to the total
/// capacity of a game server group. Instance weights are used by GameLift FleetIQ to calculate the
/// instance type's cost per unit hour and better identify the most cost-effective options.
/// For detailed information on weighting instance capacity, see <a href="https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-weighting.html">Instance
/// Weighting</a> in the <i>Amazon EC2 Auto Scaling User Guide</i>.
/// Default value is "1".</p>
pub fn weighted_capacity(mut self, input: impl Into<std::string::String>) -> Self {
self.weighted_capacity = Some(input.into());
self
}
pub fn set_weighted_capacity(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.weighted_capacity = input;
self
}
/// Consumes the builder and constructs a [`InstanceDefinition`](crate::model::InstanceDefinition)
pub fn build(self) -> crate::model::InstanceDefinition {
crate::model::InstanceDefinition {
instance_type: self.instance_type,
weighted_capacity: self.weighted_capacity,
}
}
}
}
impl InstanceDefinition {
/// Creates a new builder-style object to manufacture [`InstanceDefinition`](crate::model::InstanceDefinition)
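    ///
    /// A minimal sketch of defining the two or more instance types a game server group requires
    /// (the crate name and the instance choices are illustrative assumptions):
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::{GameServerGroupInstanceType, InstanceDefinition};
    ///
    /// // Give the larger instance type twice the weight of the smaller one.
    /// let definitions = vec![
    ///     InstanceDefinition::builder()
    ///         .instance_type(GameServerGroupInstanceType::C5Large)
    ///         .weighted_capacity("1")
    ///         .build(),
    ///     InstanceDefinition::builder()
    ///         .instance_type(GameServerGroupInstanceType::C5Xlarge)
    ///         .weighted_capacity("2")
    ///         .build(),
    /// ];
    /// assert_eq!(definitions.len(), 2);
    /// ```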
pub fn builder() -> crate::model::instance_definition::Builder {
crate::model::instance_definition::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameServerGroupInstanceType {
C42xlarge,
C44xlarge,
C48xlarge,
C4Large,
C4Xlarge,
C512xlarge,
C518xlarge,
C524xlarge,
C52xlarge,
C54xlarge,
C59xlarge,
C5Large,
C5Xlarge,
C5a12xlarge,
C5a16xlarge,
C5a24xlarge,
C5a2xlarge,
C5a4xlarge,
C5a8xlarge,
C5aLarge,
C5aXlarge,
M410xlarge,
M42xlarge,
M44xlarge,
M4Large,
M4Xlarge,
M512xlarge,
M516xlarge,
M524xlarge,
M52xlarge,
M54xlarge,
M58xlarge,
M5Large,
M5Xlarge,
M5a12xlarge,
M5a16xlarge,
M5a24xlarge,
M5a2xlarge,
M5a4xlarge,
M5a8xlarge,
M5aLarge,
M5aXlarge,
R416xlarge,
R42xlarge,
R44xlarge,
R48xlarge,
R4Large,
R4Xlarge,
R512xlarge,
R516xlarge,
R524xlarge,
R52xlarge,
R54xlarge,
R58xlarge,
R5Large,
R5Xlarge,
R5a12xlarge,
R5a16xlarge,
R5a24xlarge,
R5a2xlarge,
R5a4xlarge,
R5a8xlarge,
R5aLarge,
R5aXlarge,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameServerGroupInstanceType {
fn from(s: &str) -> Self {
match s {
"c4.2xlarge" => GameServerGroupInstanceType::C42xlarge,
"c4.4xlarge" => GameServerGroupInstanceType::C44xlarge,
"c4.8xlarge" => GameServerGroupInstanceType::C48xlarge,
"c4.large" => GameServerGroupInstanceType::C4Large,
"c4.xlarge" => GameServerGroupInstanceType::C4Xlarge,
"c5.12xlarge" => GameServerGroupInstanceType::C512xlarge,
"c5.18xlarge" => GameServerGroupInstanceType::C518xlarge,
"c5.24xlarge" => GameServerGroupInstanceType::C524xlarge,
"c5.2xlarge" => GameServerGroupInstanceType::C52xlarge,
"c5.4xlarge" => GameServerGroupInstanceType::C54xlarge,
"c5.9xlarge" => GameServerGroupInstanceType::C59xlarge,
"c5.large" => GameServerGroupInstanceType::C5Large,
"c5.xlarge" => GameServerGroupInstanceType::C5Xlarge,
"c5a.12xlarge" => GameServerGroupInstanceType::C5a12xlarge,
"c5a.16xlarge" => GameServerGroupInstanceType::C5a16xlarge,
"c5a.24xlarge" => GameServerGroupInstanceType::C5a24xlarge,
"c5a.2xlarge" => GameServerGroupInstanceType::C5a2xlarge,
"c5a.4xlarge" => GameServerGroupInstanceType::C5a4xlarge,
"c5a.8xlarge" => GameServerGroupInstanceType::C5a8xlarge,
"c5a.large" => GameServerGroupInstanceType::C5aLarge,
"c5a.xlarge" => GameServerGroupInstanceType::C5aXlarge,
"m4.10xlarge" => GameServerGroupInstanceType::M410xlarge,
"m4.2xlarge" => GameServerGroupInstanceType::M42xlarge,
"m4.4xlarge" => GameServerGroupInstanceType::M44xlarge,
"m4.large" => GameServerGroupInstanceType::M4Large,
"m4.xlarge" => GameServerGroupInstanceType::M4Xlarge,
"m5.12xlarge" => GameServerGroupInstanceType::M512xlarge,
"m5.16xlarge" => GameServerGroupInstanceType::M516xlarge,
"m5.24xlarge" => GameServerGroupInstanceType::M524xlarge,
"m5.2xlarge" => GameServerGroupInstanceType::M52xlarge,
"m5.4xlarge" => GameServerGroupInstanceType::M54xlarge,
"m5.8xlarge" => GameServerGroupInstanceType::M58xlarge,
"m5.large" => GameServerGroupInstanceType::M5Large,
"m5.xlarge" => GameServerGroupInstanceType::M5Xlarge,
"m5a.12xlarge" => GameServerGroupInstanceType::M5a12xlarge,
"m5a.16xlarge" => GameServerGroupInstanceType::M5a16xlarge,
"m5a.24xlarge" => GameServerGroupInstanceType::M5a24xlarge,
"m5a.2xlarge" => GameServerGroupInstanceType::M5a2xlarge,
"m5a.4xlarge" => GameServerGroupInstanceType::M5a4xlarge,
"m5a.8xlarge" => GameServerGroupInstanceType::M5a8xlarge,
"m5a.large" => GameServerGroupInstanceType::M5aLarge,
"m5a.xlarge" => GameServerGroupInstanceType::M5aXlarge,
"r4.16xlarge" => GameServerGroupInstanceType::R416xlarge,
"r4.2xlarge" => GameServerGroupInstanceType::R42xlarge,
"r4.4xlarge" => GameServerGroupInstanceType::R44xlarge,
"r4.8xlarge" => GameServerGroupInstanceType::R48xlarge,
"r4.large" => GameServerGroupInstanceType::R4Large,
"r4.xlarge" => GameServerGroupInstanceType::R4Xlarge,
"r5.12xlarge" => GameServerGroupInstanceType::R512xlarge,
"r5.16xlarge" => GameServerGroupInstanceType::R516xlarge,
"r5.24xlarge" => GameServerGroupInstanceType::R524xlarge,
"r5.2xlarge" => GameServerGroupInstanceType::R52xlarge,
"r5.4xlarge" => GameServerGroupInstanceType::R54xlarge,
"r5.8xlarge" => GameServerGroupInstanceType::R58xlarge,
"r5.large" => GameServerGroupInstanceType::R5Large,
"r5.xlarge" => GameServerGroupInstanceType::R5Xlarge,
"r5a.12xlarge" => GameServerGroupInstanceType::R5a12xlarge,
"r5a.16xlarge" => GameServerGroupInstanceType::R5a16xlarge,
"r5a.24xlarge" => GameServerGroupInstanceType::R5a24xlarge,
"r5a.2xlarge" => GameServerGroupInstanceType::R5a2xlarge,
"r5a.4xlarge" => GameServerGroupInstanceType::R5a4xlarge,
"r5a.8xlarge" => GameServerGroupInstanceType::R5a8xlarge,
"r5a.large" => GameServerGroupInstanceType::R5aLarge,
"r5a.xlarge" => GameServerGroupInstanceType::R5aXlarge,
other => GameServerGroupInstanceType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameServerGroupInstanceType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameServerGroupInstanceType::from(s))
}
}
impl GameServerGroupInstanceType {
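    /// Returns the `&str` value that corresponds to this enum variant.
    ///
    /// A minimal round-trip sketch between the EC2 instance type string and this enum;
    /// the crate path `aws_sdk_gamelift` is an assumption, not part of this file:
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::GameServerGroupInstanceType;
    ///
    /// let instance_type: GameServerGroupInstanceType = "c5.large".parse().unwrap();
    /// assert_eq!(instance_type, GameServerGroupInstanceType::C5Large);
    /// assert_eq!(instance_type.as_str(), "c5.large");
    /// ```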
pub fn as_str(&self) -> &str {
match self {
GameServerGroupInstanceType::C42xlarge => "c4.2xlarge",
GameServerGroupInstanceType::C44xlarge => "c4.4xlarge",
GameServerGroupInstanceType::C48xlarge => "c4.8xlarge",
GameServerGroupInstanceType::C4Large => "c4.large",
GameServerGroupInstanceType::C4Xlarge => "c4.xlarge",
GameServerGroupInstanceType::C512xlarge => "c5.12xlarge",
GameServerGroupInstanceType::C518xlarge => "c5.18xlarge",
GameServerGroupInstanceType::C524xlarge => "c5.24xlarge",
GameServerGroupInstanceType::C52xlarge => "c5.2xlarge",
GameServerGroupInstanceType::C54xlarge => "c5.4xlarge",
GameServerGroupInstanceType::C59xlarge => "c5.9xlarge",
GameServerGroupInstanceType::C5Large => "c5.large",
GameServerGroupInstanceType::C5Xlarge => "c5.xlarge",
GameServerGroupInstanceType::C5a12xlarge => "c5a.12xlarge",
GameServerGroupInstanceType::C5a16xlarge => "c5a.16xlarge",
GameServerGroupInstanceType::C5a24xlarge => "c5a.24xlarge",
GameServerGroupInstanceType::C5a2xlarge => "c5a.2xlarge",
GameServerGroupInstanceType::C5a4xlarge => "c5a.4xlarge",
GameServerGroupInstanceType::C5a8xlarge => "c5a.8xlarge",
GameServerGroupInstanceType::C5aLarge => "c5a.large",
GameServerGroupInstanceType::C5aXlarge => "c5a.xlarge",
GameServerGroupInstanceType::M410xlarge => "m4.10xlarge",
GameServerGroupInstanceType::M42xlarge => "m4.2xlarge",
GameServerGroupInstanceType::M44xlarge => "m4.4xlarge",
GameServerGroupInstanceType::M4Large => "m4.large",
GameServerGroupInstanceType::M4Xlarge => "m4.xlarge",
GameServerGroupInstanceType::M512xlarge => "m5.12xlarge",
GameServerGroupInstanceType::M516xlarge => "m5.16xlarge",
GameServerGroupInstanceType::M524xlarge => "m5.24xlarge",
GameServerGroupInstanceType::M52xlarge => "m5.2xlarge",
GameServerGroupInstanceType::M54xlarge => "m5.4xlarge",
GameServerGroupInstanceType::M58xlarge => "m5.8xlarge",
GameServerGroupInstanceType::M5Large => "m5.large",
GameServerGroupInstanceType::M5Xlarge => "m5.xlarge",
GameServerGroupInstanceType::M5a12xlarge => "m5a.12xlarge",
GameServerGroupInstanceType::M5a16xlarge => "m5a.16xlarge",
GameServerGroupInstanceType::M5a24xlarge => "m5a.24xlarge",
GameServerGroupInstanceType::M5a2xlarge => "m5a.2xlarge",
GameServerGroupInstanceType::M5a4xlarge => "m5a.4xlarge",
GameServerGroupInstanceType::M5a8xlarge => "m5a.8xlarge",
GameServerGroupInstanceType::M5aLarge => "m5a.large",
GameServerGroupInstanceType::M5aXlarge => "m5a.xlarge",
GameServerGroupInstanceType::R416xlarge => "r4.16xlarge",
GameServerGroupInstanceType::R42xlarge => "r4.2xlarge",
GameServerGroupInstanceType::R44xlarge => "r4.4xlarge",
GameServerGroupInstanceType::R48xlarge => "r4.8xlarge",
GameServerGroupInstanceType::R4Large => "r4.large",
GameServerGroupInstanceType::R4Xlarge => "r4.xlarge",
GameServerGroupInstanceType::R512xlarge => "r5.12xlarge",
GameServerGroupInstanceType::R516xlarge => "r5.16xlarge",
GameServerGroupInstanceType::R524xlarge => "r5.24xlarge",
GameServerGroupInstanceType::R52xlarge => "r5.2xlarge",
GameServerGroupInstanceType::R54xlarge => "r5.4xlarge",
GameServerGroupInstanceType::R58xlarge => "r5.8xlarge",
GameServerGroupInstanceType::R5Large => "r5.large",
GameServerGroupInstanceType::R5Xlarge => "r5.xlarge",
GameServerGroupInstanceType::R5a12xlarge => "r5a.12xlarge",
GameServerGroupInstanceType::R5a16xlarge => "r5a.16xlarge",
GameServerGroupInstanceType::R5a24xlarge => "r5a.24xlarge",
GameServerGroupInstanceType::R5a2xlarge => "r5a.2xlarge",
GameServerGroupInstanceType::R5a4xlarge => "r5a.4xlarge",
GameServerGroupInstanceType::R5a8xlarge => "r5a.8xlarge",
GameServerGroupInstanceType::R5aLarge => "r5a.large",
GameServerGroupInstanceType::R5aXlarge => "r5a.xlarge",
GameServerGroupInstanceType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5a.12xlarge",
"c5a.16xlarge",
"c5a.24xlarge",
"c5a.2xlarge",
"c5a.4xlarge",
"c5a.8xlarge",
"c5a.large",
"c5a.xlarge",
"m4.10xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
]
}
}
impl AsRef<str> for GameServerGroupInstanceType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>
/// <b>This data type is used with the GameLift FleetIQ and game server groups.</b>
/// </p>
/// <p>Properties describing a game server that is running on an instance in a <a>GameServerGroup</a>. </p>
/// <p>A game server is created by a successful call to <code>RegisterGameServer</code> and
/// deleted by calling <code>DeregisterGameServer</code>. A game server is claimed to host a
/// game session by calling <code>ClaimGameServer</code>. </p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>RegisterGameServer</a> |
/// <a>ListGameServers</a> |
/// <a>ClaimGameServer</a> |
/// <a>DescribeGameServer</a> |
/// <a>UpdateGameServer</a> |
/// <a>DeregisterGameServer</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameServer {
/// <p>A unique identifier for the game server group where the game server is running.
/// Use either the <a>GameServerGroup</a> name or ARN value.</p>
pub game_server_group_name: std::option::Option<std::string::String>,
/// <p>The ARN identifier for the game server group where the game server is located.</p>
pub game_server_group_arn: std::option::Option<std::string::String>,
/// <p>A custom string that uniquely identifies the game server. Game server IDs are
/// developer-defined and are unique across all game server groups in an AWS
/// account.</p>
pub game_server_id: std::option::Option<std::string::String>,
/// <p>The unique identifier for the instance where the game server is running. This ID is
/// available in the instance metadata. EC2 instance IDs
/// use a 17-character format, for example: <code>i-1234567890abcdef0</code>.</p>
pub instance_id: std::option::Option<std::string::String>,
/// <p>The port and IP address that must be used to establish a client connection to the game server.</p>
pub connection_info: std::option::Option<std::string::String>,
/// <p>A set of custom game server properties, formatted as a single string value. This data
/// is passed to a game client or service when it requests information on game servers using
/// <a>ListGameServers</a> or <a>ClaimGameServer</a>.</p>
pub game_server_data: std::option::Option<std::string::String>,
/// <p>Indicates when an available game server has been reserved for gameplay but has not yet
/// started hosting a game. Once it is claimed, the game server remains in
/// <code>CLAIMED</code> status for a maximum of one minute. During this time, game
/// clients connect to the game server to start the game and trigger the game server to
/// update its utilization status. After one minute, the game server claim status reverts to
/// null.</p>
pub claim_status: std::option::Option<crate::model::GameServerClaimStatus>,
/// <p>Indicates whether the game server is currently available for new games or is busy. Possible statuses include:</p>
/// <ul>
/// <li>
/// <p>
/// <code>AVAILABLE</code> - The game server is available to be claimed. A game server that has
/// been claimed remains in this status until it reports game hosting activity. </p>
/// </li>
/// <li>
/// <p>
/// <code>UTILIZED</code> - The game server is currently hosting a game session with players. </p>
/// </li>
/// </ul>
pub utilization_status: std::option::Option<crate::model::GameServerUtilizationStatus>,
/// <p>Timestamp that indicates when the game server was created with a <a>RegisterGameServer</a> request. The format is a number expressed in Unix
/// time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub registration_time: std::option::Option<smithy_types::Instant>,
/// <p>Timestamp that indicates the last time the game server was claimed with a <a>ClaimGameServer</a> request. The format is a number expressed in Unix time
/// as milliseconds (for example <code>"1469498468.057"</code>). This value is used to
/// calculate when a claimed game server's status should revert to null.</p>
pub last_claim_time: std::option::Option<smithy_types::Instant>,
/// <p>Timestamp that indicates the last time the game server was updated with health status
/// using an <a>UpdateGameServer</a> request. The format is a number expressed in
/// Unix time as milliseconds (for example <code>"1469498468.057"</code>). After game server
/// registration, this property is only changed when a game server update specifies a health
/// check value.</p>
pub last_health_check_time: std::option::Option<smithy_types::Instant>,
}
impl std::fmt::Debug for GameServer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameServer");
formatter.field("game_server_group_name", &self.game_server_group_name);
formatter.field("game_server_group_arn", &self.game_server_group_arn);
formatter.field("game_server_id", &self.game_server_id);
formatter.field("instance_id", &self.instance_id);
formatter.field("connection_info", &self.connection_info);
formatter.field("game_server_data", &self.game_server_data);
formatter.field("claim_status", &self.claim_status);
formatter.field("utilization_status", &self.utilization_status);
formatter.field("registration_time", &self.registration_time);
formatter.field("last_claim_time", &self.last_claim_time);
formatter.field("last_health_check_time", &self.last_health_check_time);
formatter.finish()
}
}
/// See [`GameServer`](crate::model::GameServer)
pub mod game_server {
/// A builder for [`GameServer`](crate::model::GameServer)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) game_server_group_name: std::option::Option<std::string::String>,
pub(crate) game_server_group_arn: std::option::Option<std::string::String>,
pub(crate) game_server_id: std::option::Option<std::string::String>,
pub(crate) instance_id: std::option::Option<std::string::String>,
pub(crate) connection_info: std::option::Option<std::string::String>,
pub(crate) game_server_data: std::option::Option<std::string::String>,
pub(crate) claim_status: std::option::Option<crate::model::GameServerClaimStatus>,
pub(crate) utilization_status:
std::option::Option<crate::model::GameServerUtilizationStatus>,
pub(crate) registration_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_claim_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_health_check_time: std::option::Option<smithy_types::Instant>,
}
impl Builder {
/// <p>A unique identifier for the game server group where the game server is running.
/// Use either the <a>GameServerGroup</a> name or ARN value.</p>
pub fn game_server_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.game_server_group_name = Some(input.into());
self
}
pub fn set_game_server_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_server_group_name = input;
self
}
/// <p>The ARN identifier for the game server group where the game server is located.</p>
pub fn game_server_group_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.game_server_group_arn = Some(input.into());
self
}
pub fn set_game_server_group_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_server_group_arn = input;
self
}
/// <p>A custom string that uniquely identifies the game server. Game server IDs are
/// developer-defined and are unique across all game server groups in an AWS
/// account.</p>
pub fn game_server_id(mut self, input: impl Into<std::string::String>) -> Self {
self.game_server_id = Some(input.into());
self
}
pub fn set_game_server_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_server_id = input;
self
}
/// <p>The unique identifier for the instance where the game server is running. This ID is
/// available in the instance metadata. EC2 instance IDs
/// use a 17-character format, for example: <code>i-1234567890abcdef0</code>.</p>
pub fn instance_id(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_id = Some(input.into());
self
}
pub fn set_instance_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.instance_id = input;
self
}
/// <p>The port and IP address that must be used to establish a client connection to the game server.</p>
pub fn connection_info(mut self, input: impl Into<std::string::String>) -> Self {
self.connection_info = Some(input.into());
self
}
pub fn set_connection_info(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.connection_info = input;
self
}
/// <p>A set of custom game server properties, formatted as a single string value. This data
/// is passed to a game client or service when it requests information on game servers using
/// <a>ListGameServers</a> or <a>ClaimGameServer</a>.</p>
pub fn game_server_data(mut self, input: impl Into<std::string::String>) -> Self {
self.game_server_data = Some(input.into());
self
}
pub fn set_game_server_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_server_data = input;
self
}
/// <p>Indicates when an available game server has been reserved for gameplay but has not yet
/// started hosting a game. Once it is claimed, the game server remains in
/// <code>CLAIMED</code> status for a maximum of one minute. During this time, game
/// clients connect to the game server to start the game and trigger the game server to
/// update its utilization status. After one minute, the game server claim status reverts to
/// null.</p>
pub fn claim_status(mut self, input: crate::model::GameServerClaimStatus) -> Self {
self.claim_status = Some(input);
self
}
pub fn set_claim_status(
mut self,
input: std::option::Option<crate::model::GameServerClaimStatus>,
) -> Self {
self.claim_status = input;
self
}
/// <p>Indicates whether the game server is currently available for new games or is busy. Possible statuses include:</p>
/// <ul>
/// <li>
/// <p>
/// <code>AVAILABLE</code> - The game server is available to be claimed. A game server that has
/// been claimed remains in this status until it reports game hosting activity. </p>
/// </li>
/// <li>
/// <p>
/// <code>UTILIZED</code> - The game server is currently hosting a game session with players. </p>
/// </li>
/// </ul>
pub fn utilization_status(
mut self,
input: crate::model::GameServerUtilizationStatus,
) -> Self {
self.utilization_status = Some(input);
self
}
pub fn set_utilization_status(
mut self,
input: std::option::Option<crate::model::GameServerUtilizationStatus>,
) -> Self {
self.utilization_status = input;
self
}
/// <p>Timestamp that indicates when the game server was created with a <a>RegisterGameServer</a> request. The format is a number expressed in Unix
/// time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn registration_time(mut self, input: smithy_types::Instant) -> Self {
self.registration_time = Some(input);
self
}
pub fn set_registration_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.registration_time = input;
self
}
/// <p>Timestamp that indicates the last time the game server was claimed with a <a>ClaimGameServer</a> request. The format is a number expressed in Unix time
/// as milliseconds (for example <code>"1469498468.057"</code>). This value is used to
/// calculate when a claimed game server's status should revert to null.</p>
pub fn last_claim_time(mut self, input: smithy_types::Instant) -> Self {
self.last_claim_time = Some(input);
self
}
pub fn set_last_claim_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_claim_time = input;
self
}
/// <p>Timestamp that indicates the last time the game server was updated with health status
/// using an <a>UpdateGameServer</a> request. The format is a number expressed in
/// Unix time as milliseconds (for example <code>"1469498468.057"</code>). After game server
/// registration, this property is only changed when a game server update specifies a health
/// check value.</p>
pub fn last_health_check_time(mut self, input: smithy_types::Instant) -> Self {
self.last_health_check_time = Some(input);
self
}
pub fn set_last_health_check_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_health_check_time = input;
self
}
/// Consumes the builder and constructs a [`GameServer`](crate::model::GameServer)
pub fn build(self) -> crate::model::GameServer {
crate::model::GameServer {
game_server_group_name: self.game_server_group_name,
game_server_group_arn: self.game_server_group_arn,
game_server_id: self.game_server_id,
instance_id: self.instance_id,
connection_info: self.connection_info,
game_server_data: self.game_server_data,
claim_status: self.claim_status,
utilization_status: self.utilization_status,
registration_time: self.registration_time,
last_claim_time: self.last_claim_time,
last_health_check_time: self.last_health_check_time,
}
}
}
}
impl GameServer {
/// Creates a new builder-style object to manufacture [`GameServer`](crate::model::GameServer)
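    ///
    /// A minimal sketch that assembles a `GameServer` description by hand (useful in
    /// tests); the crate path `aws_sdk_gamelift` is an assumption, not part of this file:
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::{GameServer, GameServerUtilizationStatus};
    ///
    /// let game_server: GameServer = GameServer::builder()
    ///     .game_server_group_name("my-game-server-group")
    ///     .game_server_id("server-0001")
    ///     .instance_id("i-1234567890abcdef0")
    ///     .connection_info("10.0.0.12:7777")
    ///     .utilization_status(GameServerUtilizationStatus::Available)
    ///     .build();
    /// assert_eq!(game_server.connection_info.as_deref(), Some("10.0.0.12:7777"));
    /// ```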
pub fn builder() -> crate::model::game_server::Builder {
crate::model::game_server::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameServerUtilizationStatus {
Available,
Utilized,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameServerUtilizationStatus {
fn from(s: &str) -> Self {
match s {
"AVAILABLE" => GameServerUtilizationStatus::Available,
"UTILIZED" => GameServerUtilizationStatus::Utilized,
other => GameServerUtilizationStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameServerUtilizationStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameServerUtilizationStatus::from(s))
}
}
impl GameServerUtilizationStatus {
pub fn as_str(&self) -> &str {
match self {
GameServerUtilizationStatus::Available => "AVAILABLE",
GameServerUtilizationStatus::Utilized => "UTILIZED",
GameServerUtilizationStatus::Unknown(s) => s.as_ref(),
}
}
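    /// Returns all of the `&str` values that this enum can take.
    ///
    /// A minimal sketch that checks a raw status string against the known set before
    /// converting it; the crate path `aws_sdk_gamelift` is an assumption, not part of
    /// this file:
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::GameServerUtilizationStatus;
    ///
    /// let raw = "UTILIZED";
    /// if GameServerUtilizationStatus::values().contains(&raw) {
    ///     assert_eq!(
    ///         GameServerUtilizationStatus::from(raw),
    ///         GameServerUtilizationStatus::Utilized
    ///     );
    /// }
    /// ```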
pub fn values() -> &'static [&'static str] {
&["AVAILABLE", "UTILIZED"]
}
}
impl AsRef<str> for GameServerUtilizationStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameServerClaimStatus {
Claimed,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameServerClaimStatus {
fn from(s: &str) -> Self {
match s {
"CLAIMED" => GameServerClaimStatus::Claimed,
other => GameServerClaimStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameServerClaimStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameServerClaimStatus::from(s))
}
}
impl GameServerClaimStatus {
pub fn as_str(&self) -> &str {
match self {
GameServerClaimStatus::Claimed => "CLAIMED",
GameServerClaimStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["CLAIMED"]
}
}
impl AsRef<str> for GameServerClaimStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameServerHealthCheck {
Healthy,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameServerHealthCheck {
fn from(s: &str) -> Self {
match s {
"HEALTHY" => GameServerHealthCheck::Healthy,
other => GameServerHealthCheck::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameServerHealthCheck {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameServerHealthCheck::from(s))
}
}
impl GameServerHealthCheck {
pub fn as_str(&self) -> &str {
match self {
GameServerHealthCheck::Healthy => "HEALTHY",
GameServerHealthCheck::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["HEALTHY"]
}
}
impl AsRef<str> for GameServerHealthCheck {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A range of IP addresses and port settings that allow inbound traffic to connect to
/// server processes on an instance in a fleet. New game sessions are assigned an IP
/// address/port number combination, which must fall into the fleet's allowed ranges. Fleets
/// with custom game builds must have permissions explicitly set. For Realtime Servers fleets, GameLift
/// automatically opens two port ranges, one for TCP messaging and one for UDP.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeFleetPortSettings</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct IpPermission {
/// <p>A starting value for a range of allowed port numbers.</p>
pub from_port: std::option::Option<i32>,
/// <p>An ending value for a range of allowed port numbers. Port numbers are end-inclusive.
/// This value must be higher than <code>FromPort</code>.</p>
pub to_port: std::option::Option<i32>,
/// <p>A range of allowed IP addresses. This value must be expressed in CIDR notation.
/// Example: "<code>000.000.000.000/[subnet mask]</code>" or optionally the shortened
/// version "<code>0.0.0.0/[subnet mask]</code>".</p>
pub ip_range: std::option::Option<std::string::String>,
/// <p>The network communication protocol used by the fleet.</p>
pub protocol: std::option::Option<crate::model::IpProtocol>,
}
impl std::fmt::Debug for IpPermission {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("IpPermission");
formatter.field("from_port", &self.from_port);
formatter.field("to_port", &self.to_port);
formatter.field("ip_range", &self.ip_range);
formatter.field("protocol", &self.protocol);
formatter.finish()
}
}
/// See [`IpPermission`](crate::model::IpPermission)
pub mod ip_permission {
/// A builder for [`IpPermission`](crate::model::IpPermission)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) from_port: std::option::Option<i32>,
pub(crate) to_port: std::option::Option<i32>,
pub(crate) ip_range: std::option::Option<std::string::String>,
pub(crate) protocol: std::option::Option<crate::model::IpProtocol>,
}
impl Builder {
/// <p>A starting value for a range of allowed port numbers.</p>
pub fn from_port(mut self, input: i32) -> Self {
self.from_port = Some(input);
self
}
pub fn set_from_port(mut self, input: std::option::Option<i32>) -> Self {
self.from_port = input;
self
}
/// <p>An ending value for a range of allowed port numbers. Port numbers are end-inclusive.
/// This value must be higher than <code>FromPort</code>.</p>
pub fn to_port(mut self, input: i32) -> Self {
self.to_port = Some(input);
self
}
pub fn set_to_port(mut self, input: std::option::Option<i32>) -> Self {
self.to_port = input;
self
}
/// <p>A range of allowed IP addresses. This value must be expressed in CIDR notation.
/// Example: "<code>000.000.000.000/[subnet mask]</code>" or optionally the shortened
/// version "<code>0.0.0.0/[subnet mask]</code>".</p>
pub fn ip_range(mut self, input: impl Into<std::string::String>) -> Self {
self.ip_range = Some(input.into());
self
}
pub fn set_ip_range(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ip_range = input;
self
}
/// <p>The network communication protocol used by the fleet.</p>
pub fn protocol(mut self, input: crate::model::IpProtocol) -> Self {
self.protocol = Some(input);
self
}
pub fn set_protocol(
mut self,
input: std::option::Option<crate::model::IpProtocol>,
) -> Self {
self.protocol = input;
self
}
        /// Consumes the builder and constructs an [`IpPermission`](crate::model::IpPermission)
pub fn build(self) -> crate::model::IpPermission {
crate::model::IpPermission {
from_port: self.from_port,
to_port: self.to_port,
ip_range: self.ip_range,
protocol: self.protocol,
}
}
}
}
impl IpPermission {
/// Creates a new builder-style object to manufacture [`IpPermission`](crate::model::IpPermission)
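    ///
    /// A minimal sketch of an inbound permission that opens a UDP port range to any
    /// address; the crate path `aws_sdk_gamelift` is an assumption, not part of this file:
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::{IpPermission, IpProtocol};
    ///
    /// let permission: IpPermission = IpPermission::builder()
    ///     .from_port(33430)
    ///     .to_port(33440)
    ///     .ip_range("0.0.0.0/0")
    ///     .protocol(IpProtocol::Udp)
    ///     .build();
    /// assert_eq!(permission.protocol, Some(IpProtocol::Udp));
    /// ```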
pub fn builder() -> crate::model::ip_permission::Builder {
crate::model::ip_permission::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum IpProtocol {
Tcp,
Udp,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for IpProtocol {
fn from(s: &str) -> Self {
match s {
"TCP" => IpProtocol::Tcp,
"UDP" => IpProtocol::Udp,
other => IpProtocol::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for IpProtocol {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(IpProtocol::from(s))
}
}
impl IpProtocol {
pub fn as_str(&self) -> &str {
match self {
IpProtocol::Tcp => "TCP",
IpProtocol::Udp => "UDP",
IpProtocol::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["TCP", "UDP"]
}
}
impl AsRef<str> for IpProtocol {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A policy that puts limits on the number of game sessions that a player can create
/// within a specified span of time. With this policy, you can control players' ability to
/// consume available resources.</p>
/// <p>The policy is evaluated when a player tries to create a new game session. On receiving
/// a <code>CreateGameSession</code> request, GameLift checks that the player (identified by
/// <code>CreatorId</code>) has created fewer than the game session limit in the specified
/// time period.</p>
/// <p>The resource creation limit policy is included in <a>FleetAttributes</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResourceCreationLimitPolicy {
/// <p>The maximum number of game sessions that an individual can create during the policy
/// period. </p>
pub new_game_sessions_per_creator: std::option::Option<i32>,
/// <p>The time span used in evaluating the resource creation limit policy. </p>
pub policy_period_in_minutes: std::option::Option<i32>,
}
impl std::fmt::Debug for ResourceCreationLimitPolicy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ResourceCreationLimitPolicy");
formatter.field(
"new_game_sessions_per_creator",
&self.new_game_sessions_per_creator,
);
formatter.field("policy_period_in_minutes", &self.policy_period_in_minutes);
formatter.finish()
}
}
/// See [`ResourceCreationLimitPolicy`](crate::model::ResourceCreationLimitPolicy)
pub mod resource_creation_limit_policy {
/// A builder for [`ResourceCreationLimitPolicy`](crate::model::ResourceCreationLimitPolicy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) new_game_sessions_per_creator: std::option::Option<i32>,
pub(crate) policy_period_in_minutes: std::option::Option<i32>,
}
impl Builder {
/// <p>The maximum number of game sessions that an individual can create during the policy
/// period. </p>
pub fn new_game_sessions_per_creator(mut self, input: i32) -> Self {
self.new_game_sessions_per_creator = Some(input);
self
}
pub fn set_new_game_sessions_per_creator(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.new_game_sessions_per_creator = input;
self
}
/// <p>The time span used in evaluating the resource creation limit policy. </p>
pub fn policy_period_in_minutes(mut self, input: i32) -> Self {
self.policy_period_in_minutes = Some(input);
self
}
pub fn set_policy_period_in_minutes(mut self, input: std::option::Option<i32>) -> Self {
self.policy_period_in_minutes = input;
self
}
/// Consumes the builder and constructs a [`ResourceCreationLimitPolicy`](crate::model::ResourceCreationLimitPolicy)
pub fn build(self) -> crate::model::ResourceCreationLimitPolicy {
crate::model::ResourceCreationLimitPolicy {
new_game_sessions_per_creator: self.new_game_sessions_per_creator,
policy_period_in_minutes: self.policy_period_in_minutes,
}
}
}
}
impl ResourceCreationLimitPolicy {
/// Creates a new builder-style object to manufacture [`ResourceCreationLimitPolicy`](crate::model::ResourceCreationLimitPolicy)
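    ///
    /// A minimal sketch of a policy that caps each player at three new game sessions per
    /// 15-minute window; the crate path `aws_sdk_gamelift` is an assumption, not part of
    /// this file:
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::ResourceCreationLimitPolicy;
    ///
    /// let policy: ResourceCreationLimitPolicy = ResourceCreationLimitPolicy::builder()
    ///     .new_game_sessions_per_creator(3)
    ///     .policy_period_in_minutes(15)
    ///     .build();
    /// ```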
pub fn builder() -> crate::model::resource_creation_limit_policy::Builder {
crate::model::resource_creation_limit_policy::Builder::default()
}
}
/// <p>Properties describing a custom game build.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateBuild</a> |
/// <a>ListBuilds</a> |
/// <a>DescribeBuild</a> |
/// <a>UpdateBuild</a> |
/// <a>DeleteBuild</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Build {
/// <p>A unique identifier for the build.</p>
pub build_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift build resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::build/build-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>. In a GameLift build ARN, the resource ID matches the
/// <i>BuildId</i> value.</p>
pub build_arn: std::option::Option<std::string::String>,
    /// <p>A descriptive label that is associated with a build. Build names do not need to be unique. This value can be set using <a>CreateBuild</a> or <a>UpdateBuild</a>.</p>
pub name: std::option::Option<std::string::String>,
/// <p>Version information that is associated with a build or script. Version strings do not need to be unique. This value can be set using <a>CreateBuild</a> or <a>UpdateBuild</a>.</p>
pub version: std::option::Option<std::string::String>,
/// <p>Current status of the build.</p>
/// <p>Possible build statuses include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>INITIALIZED</b> -- A new build has been defined,
/// but no files have been uploaded. You cannot create fleets for builds that are in
/// this status. When a build is successfully created, the build status is set to
/// this value. </p>
/// </li>
/// <li>
/// <p>
/// <b>READY</b> -- The game build has been successfully
/// uploaded. You can now create new fleets for this build.</p>
/// </li>
/// <li>
/// <p>
/// <b>FAILED</b> -- The game build upload failed. You
/// cannot create new fleets for this build. </p>
/// </li>
/// </ul>
pub status: std::option::Option<crate::model::BuildStatus>,
/// <p>File size of the uploaded game build, expressed in bytes. When the build status is
/// <code>INITIALIZED</code>, this value is 0.</p>
pub size_on_disk: std::option::Option<i64>,
/// <p>Operating system that the game server binaries are built to run on. This value
/// determines the type of fleet resources that you can use for this build.</p>
pub operating_system: std::option::Option<crate::model::OperatingSystem>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
}
impl std::fmt::Debug for Build {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Build");
formatter.field("build_id", &self.build_id);
formatter.field("build_arn", &self.build_arn);
formatter.field("name", &self.name);
formatter.field("version", &self.version);
formatter.field("status", &self.status);
formatter.field("size_on_disk", &self.size_on_disk);
formatter.field("operating_system", &self.operating_system);
formatter.field("creation_time", &self.creation_time);
formatter.finish()
}
}
/// See [`Build`](crate::model::Build)
pub mod build {
/// A builder for [`Build`](crate::model::Build)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) build_id: std::option::Option<std::string::String>,
pub(crate) build_arn: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) version: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::BuildStatus>,
pub(crate) size_on_disk: std::option::Option<i64>,
pub(crate) operating_system: std::option::Option<crate::model::OperatingSystem>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
}
impl Builder {
/// <p>A unique identifier for the build.</p>
pub fn build_id(mut self, input: impl Into<std::string::String>) -> Self {
self.build_id = Some(input.into());
self
}
pub fn set_build_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.build_id = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift build resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::build/build-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>. In a GameLift build ARN, the resource ID matches the
/// <i>BuildId</i> value.</p>
pub fn build_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.build_arn = Some(input.into());
self
}
pub fn set_build_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.build_arn = input;
self
}
        /// <p>A descriptive label that is associated with a build. Build names do not need to be unique. This value can be set using <a>CreateBuild</a> or <a>UpdateBuild</a>.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>Version information that is associated with a build or script. Version strings do not need to be unique. This value can be set using <a>CreateBuild</a> or <a>UpdateBuild</a>.</p>
pub fn version(mut self, input: impl Into<std::string::String>) -> Self {
self.version = Some(input.into());
self
}
pub fn set_version(mut self, input: std::option::Option<std::string::String>) -> Self {
self.version = input;
self
}
/// <p>Current status of the build.</p>
/// <p>Possible build statuses include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>INITIALIZED</b> -- A new build has been defined,
/// but no files have been uploaded. You cannot create fleets for builds that are in
/// this status. When a build is successfully created, the build status is set to
/// this value. </p>
/// </li>
/// <li>
/// <p>
/// <b>READY</b> -- The game build has been successfully
/// uploaded. You can now create new fleets for this build.</p>
/// </li>
/// <li>
/// <p>
/// <b>FAILED</b> -- The game build upload failed. You
/// cannot create new fleets for this build. </p>
/// </li>
/// </ul>
pub fn status(mut self, input: crate::model::BuildStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(mut self, input: std::option::Option<crate::model::BuildStatus>) -> Self {
self.status = input;
self
}
/// <p>File size of the uploaded game build, expressed in bytes. When the build status is
/// <code>INITIALIZED</code>, this value is 0.</p>
pub fn size_on_disk(mut self, input: i64) -> Self {
self.size_on_disk = Some(input);
self
}
pub fn set_size_on_disk(mut self, input: std::option::Option<i64>) -> Self {
self.size_on_disk = input;
self
}
/// <p>Operating system that the game server binaries are built to run on. This value
/// determines the type of fleet resources that you can use for this build.</p>
pub fn operating_system(mut self, input: crate::model::OperatingSystem) -> Self {
self.operating_system = Some(input);
self
}
pub fn set_operating_system(
mut self,
input: std::option::Option<crate::model::OperatingSystem>,
) -> Self {
self.operating_system = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// Consumes the builder and constructs a [`Build`](crate::model::Build)
pub fn build(self) -> crate::model::Build {
crate::model::Build {
build_id: self.build_id,
build_arn: self.build_arn,
name: self.name,
version: self.version,
status: self.status,
size_on_disk: self.size_on_disk,
operating_system: self.operating_system,
creation_time: self.creation_time,
}
}
}
}
impl Build {
/// Creates a new builder-style object to manufacture [`Build`](crate::model::Build)
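    ///
    /// A minimal sketch that describes an uploaded build and checks that it is ready to
    /// back new fleets; the crate path `aws_sdk_gamelift` is an assumption, not part of
    /// this file:
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::{Build, BuildStatus, OperatingSystem};
    ///
    /// let build: Build = Build::builder()
    ///     .build_id("build-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
    ///     .name("my-game-build")
    ///     .operating_system(OperatingSystem::AmazonLinux2)
    ///     .status(BuildStatus::Ready)
    ///     .build();
    /// assert_eq!(build.status, Some(BuildStatus::Ready));
    /// ```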
pub fn builder() -> crate::model::build::Builder {
crate::model::build::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum OperatingSystem {
AmazonLinux,
AmazonLinux2,
Windows2012,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for OperatingSystem {
fn from(s: &str) -> Self {
match s {
"AMAZON_LINUX" => OperatingSystem::AmazonLinux,
"AMAZON_LINUX_2" => OperatingSystem::AmazonLinux2,
"WINDOWS_2012" => OperatingSystem::Windows2012,
other => OperatingSystem::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for OperatingSystem {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(OperatingSystem::from(s))
}
}
impl OperatingSystem {
pub fn as_str(&self) -> &str {
match self {
OperatingSystem::AmazonLinux => "AMAZON_LINUX",
OperatingSystem::AmazonLinux2 => "AMAZON_LINUX_2",
OperatingSystem::Windows2012 => "WINDOWS_2012",
OperatingSystem::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["AMAZON_LINUX", "AMAZON_LINUX_2", "WINDOWS_2012"]
}
}
impl AsRef<str> for OperatingSystem {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum BuildStatus {
Failed,
Initialized,
Ready,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for BuildStatus {
fn from(s: &str) -> Self {
match s {
"FAILED" => BuildStatus::Failed,
"INITIALIZED" => BuildStatus::Initialized,
"READY" => BuildStatus::Ready,
other => BuildStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for BuildStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(BuildStatus::from(s))
}
}
impl BuildStatus {
pub fn as_str(&self) -> &str {
match self {
BuildStatus::Failed => "FAILED",
BuildStatus::Initialized => "INITIALIZED",
BuildStatus::Ready => "READY",
BuildStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["FAILED", "INITIALIZED", "READY"]
}
}
impl AsRef<str> for BuildStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Properties that describe an alias resource.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateAlias</a> |
/// <a>ListAliases</a> |
/// <a>DescribeAlias</a> |
/// <a>UpdateAlias</a> |
/// <a>DeleteAlias</a> |
/// <a>ResolveAlias</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Alias {
/// <p>A unique identifier for the alias. Alias IDs are unique within a Region.</p>
pub alias_id: std::option::Option<std::string::String>,
/// <p>A descriptive label that is associated with an alias. Alias names do not need to be unique.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift alias resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::alias/alias-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>. In a GameLift alias ARN, the resource ID matches the alias ID value.</p>
pub alias_arn: std::option::Option<std::string::String>,
/// <p>A human-readable description of an alias.</p>
pub description: std::option::Option<std::string::String>,
/// <p>The routing configuration, including routing type and fleet target, for the alias. </p>
pub routing_strategy: std::option::Option<crate::model::RoutingStrategy>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
/// <p>The time that this data object was last modified. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub last_updated_time: std::option::Option<smithy_types::Instant>,
}
impl std::fmt::Debug for Alias {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Alias");
formatter.field("alias_id", &self.alias_id);
formatter.field("name", &self.name);
formatter.field("alias_arn", &self.alias_arn);
formatter.field("description", &self.description);
formatter.field("routing_strategy", &self.routing_strategy);
formatter.field("creation_time", &self.creation_time);
formatter.field("last_updated_time", &self.last_updated_time);
formatter.finish()
}
}
/// See [`Alias`](crate::model::Alias)
pub mod alias {
/// A builder for [`Alias`](crate::model::Alias)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) alias_id: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) alias_arn: std::option::Option<std::string::String>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) routing_strategy: std::option::Option<crate::model::RoutingStrategy>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_updated_time: std::option::Option<smithy_types::Instant>,
}
impl Builder {
/// <p>A unique identifier for the alias. Alias IDs are unique within a Region.</p>
pub fn alias_id(mut self, input: impl Into<std::string::String>) -> Self {
self.alias_id = Some(input.into());
self
}
pub fn set_alias_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.alias_id = input;
self
}
/// <p>A descriptive label that is associated with an alias. Alias names do not need to be unique.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift alias resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::alias/alias-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>. In a GameLift alias ARN, the resource ID matches the alias ID value.</p>
pub fn alias_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.alias_arn = Some(input.into());
self
}
pub fn set_alias_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.alias_arn = input;
self
}
/// <p>A human-readable description of an alias.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
/// <p>The routing configuration, including routing type and fleet target, for the alias. </p>
pub fn routing_strategy(mut self, input: crate::model::RoutingStrategy) -> Self {
self.routing_strategy = Some(input);
self
}
pub fn set_routing_strategy(
mut self,
input: std::option::Option<crate::model::RoutingStrategy>,
) -> Self {
self.routing_strategy = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// <p>The time that this data object was last modified. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn last_updated_time(mut self, input: smithy_types::Instant) -> Self {
self.last_updated_time = Some(input);
self
}
pub fn set_last_updated_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_updated_time = input;
self
}
        /// Consumes the builder and constructs an [`Alias`](crate::model::Alias)
pub fn build(self) -> crate::model::Alias {
crate::model::Alias {
alias_id: self.alias_id,
name: self.name,
alias_arn: self.alias_arn,
description: self.description,
routing_strategy: self.routing_strategy,
creation_time: self.creation_time,
last_updated_time: self.last_updated_time,
}
}
}
}
impl Alias {
/// Creates a new builder-style object to manufacture [`Alias`](crate::model::Alias)
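    ///
    /// A minimal sketch of an alias that routes players to a single fleet with a
    /// `SIMPLE` routing strategy; the crate path `aws_sdk_gamelift` is an assumption,
    /// not part of this file:
    ///
    /// ```no_run
    /// use aws_sdk_gamelift::model::{Alias, RoutingStrategy, RoutingStrategyType};
    ///
    /// let routing = RoutingStrategy::builder()
    ///     .r#type(RoutingStrategyType::Simple)
    ///     .fleet_id("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
    ///     .build();
    /// let alias: Alias = Alias::builder()
    ///     .name("live-alias")
    ///     .description("Points at the currently active fleet")
    ///     .routing_strategy(routing)
    ///     .build();
    /// ```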
pub fn builder() -> crate::model::alias::Builder {
crate::model::alias::Builder::default()
}
}
/// <p>The routing configuration for a fleet alias.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateAlias</a> |
/// <a>ListAliases</a> |
/// <a>DescribeAlias</a> |
/// <a>UpdateAlias</a> |
/// <a>DeleteAlias</a> |
/// <a>ResolveAlias</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RoutingStrategy {
/// <p>The type of routing strategy for the alias.</p>
/// <p>Possible routing types include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>SIMPLE</b> - The alias resolves to one specific
/// fleet. Use this type when routing to active fleets.</p>
/// </li>
/// <li>
/// <p>
/// <b>TERMINAL</b> - The alias does not resolve to a
/// fleet but instead can be used to display a message to the user. A terminal alias
/// throws a TerminalRoutingStrategyException with the <a>RoutingStrategy</a> message embedded.</p>
/// </li>
/// </ul>
pub r#type: std::option::Option<crate::model::RoutingStrategyType>,
/// <p>A unique identifier for the fleet that the alias points to. This value is the fleet ID, not the fleet ARN.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>The message text to be used with a terminal routing strategy.</p>
pub message: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for RoutingStrategy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RoutingStrategy");
formatter.field("r#type", &self.r#type);
formatter.field("fleet_id", &self.fleet_id);
formatter.field("message", &self.message);
formatter.finish()
}
}
/// See [`RoutingStrategy`](crate::model::RoutingStrategy)
pub mod routing_strategy {
/// A builder for [`RoutingStrategy`](crate::model::RoutingStrategy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) r#type: std::option::Option<crate::model::RoutingStrategyType>,
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The type of routing strategy for the alias.</p>
/// <p>Possible routing types include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>SIMPLE</b> - The alias resolves to one specific
/// fleet. Use this type when routing to active fleets.</p>
/// </li>
/// <li>
/// <p>
/// <b>TERMINAL</b> - The alias does not resolve to a
/// fleet but instead can be used to display a message to the user. A terminal alias
/// throws a TerminalRoutingStrategyException with the <a>RoutingStrategy</a> message embedded.</p>
/// </li>
/// </ul>
pub fn r#type(mut self, input: crate::model::RoutingStrategyType) -> Self {
self.r#type = Some(input);
self
}
pub fn set_type(
mut self,
input: std::option::Option<crate::model::RoutingStrategyType>,
) -> Self {
self.r#type = input;
self
}
/// <p>A unique identifier for the fleet that the alias points to. This value is the fleet ID, not the fleet ARN.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>The message text to be used with a terminal routing strategy.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// Consumes the builder and constructs a [`RoutingStrategy`](crate::model::RoutingStrategy)
pub fn build(self) -> crate::model::RoutingStrategy {
crate::model::RoutingStrategy {
r#type: self.r#type,
fleet_id: self.fleet_id,
message: self.message,
}
}
}
}
impl RoutingStrategy {
/// Creates a new builder-style object to manufacture [`RoutingStrategy`](crate::model::RoutingStrategy)
pub fn builder() -> crate::model::routing_strategy::Builder {
crate::model::routing_strategy::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum RoutingStrategyType {
Simple,
Terminal,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for RoutingStrategyType {
fn from(s: &str) -> Self {
match s {
"SIMPLE" => RoutingStrategyType::Simple,
"TERMINAL" => RoutingStrategyType::Terminal,
other => RoutingStrategyType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for RoutingStrategyType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(RoutingStrategyType::from(s))
}
}
impl RoutingStrategyType {
pub fn as_str(&self) -> &str {
match self {
RoutingStrategyType::Simple => "SIMPLE",
RoutingStrategyType::Terminal => "TERMINAL",
RoutingStrategyType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["SIMPLE", "TERMINAL"]
}
}
impl AsRef<str> for RoutingStrategyType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>
/// A label that can be assigned to a GameLift resource.
/// </p>
/// <p>
/// <b>Learn more</b>
/// </p>
/// <p>
/// <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging AWS Resources</a> in the
/// <i>AWS General Reference</i>
/// </p>
/// <p>
/// <a href="http://aws.amazon.com/answers/account-management/aws-tagging-strategies/">
/// AWS Tagging Strategies</a>
/// </p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>TagResource</a> |
/// <a>UntagResource</a> |
/// <a>ListTagsForResource</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Tag {
/// <p>
/// The key for a developer-defined key:value pair for tagging an AWS resource.
/// </p>
pub key: std::option::Option<std::string::String>,
/// <p>
/// The value for a developer-defined key:value pair for tagging an AWS resource.
/// </p>
pub value: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for Tag {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Tag");
formatter.field("key", &self.key);
formatter.field("value", &self.value);
formatter.finish()
}
}
/// See [`Tag`](crate::model::Tag)
pub mod tag {
/// A builder for [`Tag`](crate::model::Tag)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) key: std::option::Option<std::string::String>,
pub(crate) value: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>
/// The key for a developer-defined key:value pair for tagging an AWS resource.
/// </p>
pub fn key(mut self, input: impl Into<std::string::String>) -> Self {
self.key = Some(input.into());
self
}
pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self {
self.key = input;
self
}
/// <p>
/// The value for a developer-defined key:value pair for tagging an AWS resource.
/// </p>
pub fn value(mut self, input: impl Into<std::string::String>) -> Self {
self.value = Some(input.into());
self
}
pub fn set_value(mut self, input: std::option::Option<std::string::String>) -> Self {
self.value = input;
self
}
/// Consumes the builder and constructs a [`Tag`](crate::model::Tag)
pub fn build(self) -> crate::model::Tag {
crate::model::Tag {
key: self.key,
value: self.value,
}
}
}
}
impl Tag {
/// Creates a new builder-style object to manufacture [`Tag`](crate::model::Tag)
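    ///
    /// A minimal construction sketch, assuming the crate is published as `aws_sdk_gamelift`
    /// (the key and value are illustrative only):
    /// ```no_run
    /// use aws_sdk_gamelift::model::Tag;
    ///
    /// // Illustrative key:value pair.
    /// let tag = Tag::builder()
    ///     .key("team")
    ///     .value("blue")
    ///     .build();
    /// ```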
pub fn builder() -> crate::model::tag::Builder {
crate::model::tag::Builder::default()
}
}
/// <p>Object that describes a <a>StartGameSessionPlacement</a> request. This
/// object includes the full details of the original request plus the current status and
/// start/end time stamps.</p>
/// <p>Game session placement-related operations include:</p>
/// <ul>
/// <li>
/// <p>
/// <a>StartGameSessionPlacement</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>DescribeGameSessionPlacement</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>StopGameSessionPlacement</a>
/// </p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameSessionPlacement {
/// <p>A unique identifier for a game session placement.</p>
pub placement_id: std::option::Option<std::string::String>,
    /// <p>A descriptive label that is associated with a game session queue. Queue names must be unique within each Region.</p>
pub game_session_queue_name: std::option::Option<std::string::String>,
/// <p>Current status of the game session placement request.</p>
/// <ul>
/// <li>
/// <p>
/// <b>PENDING</b> -- The placement request is currently
/// in the queue waiting to be processed.</p>
/// </li>
/// <li>
/// <p>
/// <b>FULFILLED</b> -- A new game session and player
/// sessions (if requested) have been successfully created. Values for
/// <i>GameSessionArn</i> and
/// <i>GameSessionRegion</i> are available. </p>
/// </li>
/// <li>
/// <p>
/// <b>CANCELLED</b> -- The placement request was canceled
/// with a call to <a>StopGameSessionPlacement</a>.</p>
/// </li>
/// <li>
/// <p>
/// <b>TIMED_OUT</b> -- A new game session was not
/// successfully created before the time limit expired. You can resubmit the
/// placement request as needed.</p>
/// </li>
/// <li>
/// <p>
/// <b>FAILED</b> -- GameLift is not able to complete the
/// process of placing the game session. Common reasons are the
/// game session terminated before the placement process was completed, or an unexpected
/// internal error.</p>
/// </li>
/// </ul>
pub status: std::option::Option<crate::model::GameSessionPlacementState>,
/// <p>A set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the
/// <a>GameSession</a> object with a request to start a new game session (see <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession">Start a Game Session</a>).</p>
pub game_properties: std::option::Option<std::vec::Vec<crate::model::GameProperty>>,
/// <p>The maximum number of players that can be connected simultaneously to the game session.</p>
pub maximum_player_session_count: std::option::Option<i32>,
/// <p>A descriptive label that is associated with a game session. Session names do not need to be unique.</p>
pub game_session_name: std::option::Option<std::string::String>,
/// <p>A unique identifier for the game session. This value is set once the new game session is placed (placement status is
/// <code>FULFILLED</code>).</p>
pub game_session_id: std::option::Option<std::string::String>,
/// <p>Identifier for the game session created by this placement request. This value is
/// set once the new game session is placed (placement status is <code>FULFILLED</code>).
/// This identifier is unique across all Regions. You can use this value as a
/// <code>GameSessionId</code> value as needed.</p>
pub game_session_arn: std::option::Option<std::string::String>,
/// <p>Name of the Region where the game session created by this placement request is
/// running. This value is set once the new game session is placed (placement status is
/// <code>FULFILLED</code>).</p>
pub game_session_region: std::option::Option<std::string::String>,
/// <p>A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to AWS Regions.</p>
pub player_latencies: std::option::Option<std::vec::Vec<crate::model::PlayerLatency>>,
/// <p>Time stamp indicating when this request was placed in the queue. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub start_time: std::option::Option<smithy_types::Instant>,
/// <p>Time stamp indicating when this request was completed, canceled, or timed
/// out.</p>
pub end_time: std::option::Option<smithy_types::Instant>,
/// <p>The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is
/// <code>FULFILLED</code>). </p>
pub ip_address: std::option::Option<std::string::String>,
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub dns_name: std::option::Option<std::string::String>,
/// <p>The port number for the game session. To connect to a GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is
/// <code>FULFILLED</code>).</p>
pub port: std::option::Option<i32>,
/// <p>A collection of information on player sessions created in response to the game
/// session placement request. These player sessions are created only once a new game
/// session is successfully placed (placement status is <code>FULFILLED</code>). This
/// information includes the player ID (as provided in the placement request) and the
/// corresponding player session ID. Retrieve full player sessions by calling <a>DescribePlayerSessions</a> with the player session ID.</p>
pub placed_player_sessions:
std::option::Option<std::vec::Vec<crate::model::PlacedPlayerSession>>,
/// <p>A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the
/// <a>GameSession</a> object with a request to start a new game session (see <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession">Start a Game Session</a>).</p>
pub game_session_data: std::option::Option<std::string::String>,
/// <p>Information on the matchmaking process for this game. Data is in JSON syntax,
/// formatted as a string. It identifies the matchmaking configuration used to create the
/// match, and contains data on all players assigned to the match, including player
/// attributes and team assignments. For more details on matchmaker data, see <a href="https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data">Match
/// Data</a>.</p>
pub matchmaker_data: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GameSessionPlacement {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameSessionPlacement");
formatter.field("placement_id", &self.placement_id);
formatter.field("game_session_queue_name", &self.game_session_queue_name);
formatter.field("status", &self.status);
formatter.field("game_properties", &self.game_properties);
formatter.field(
"maximum_player_session_count",
&self.maximum_player_session_count,
);
formatter.field("game_session_name", &self.game_session_name);
formatter.field("game_session_id", &self.game_session_id);
formatter.field("game_session_arn", &self.game_session_arn);
formatter.field("game_session_region", &self.game_session_region);
formatter.field("player_latencies", &self.player_latencies);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.field("ip_address", &self.ip_address);
formatter.field("dns_name", &self.dns_name);
formatter.field("port", &self.port);
formatter.field("placed_player_sessions", &self.placed_player_sessions);
formatter.field("game_session_data", &self.game_session_data);
formatter.field("matchmaker_data", &self.matchmaker_data);
formatter.finish()
}
}
/// See [`GameSessionPlacement`](crate::model::GameSessionPlacement)
pub mod game_session_placement {
/// A builder for [`GameSessionPlacement`](crate::model::GameSessionPlacement)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) placement_id: std::option::Option<std::string::String>,
pub(crate) game_session_queue_name: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::GameSessionPlacementState>,
pub(crate) game_properties: std::option::Option<std::vec::Vec<crate::model::GameProperty>>,
pub(crate) maximum_player_session_count: std::option::Option<i32>,
pub(crate) game_session_name: std::option::Option<std::string::String>,
pub(crate) game_session_id: std::option::Option<std::string::String>,
pub(crate) game_session_arn: std::option::Option<std::string::String>,
pub(crate) game_session_region: std::option::Option<std::string::String>,
pub(crate) player_latencies:
std::option::Option<std::vec::Vec<crate::model::PlayerLatency>>,
pub(crate) start_time: std::option::Option<smithy_types::Instant>,
pub(crate) end_time: std::option::Option<smithy_types::Instant>,
pub(crate) ip_address: std::option::Option<std::string::String>,
pub(crate) dns_name: std::option::Option<std::string::String>,
pub(crate) port: std::option::Option<i32>,
pub(crate) placed_player_sessions:
std::option::Option<std::vec::Vec<crate::model::PlacedPlayerSession>>,
pub(crate) game_session_data: std::option::Option<std::string::String>,
pub(crate) matchmaker_data: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for a game session placement.</p>
pub fn placement_id(mut self, input: impl Into<std::string::String>) -> Self {
self.placement_id = Some(input.into());
self
}
pub fn set_placement_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.placement_id = input;
self
}
        /// <p>A descriptive label that is associated with a game session queue. Queue names must be unique within each Region.</p>
pub fn game_session_queue_name(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_queue_name = Some(input.into());
self
}
pub fn set_game_session_queue_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_queue_name = input;
self
}
/// <p>Current status of the game session placement request.</p>
/// <ul>
/// <li>
/// <p>
/// <b>PENDING</b> -- The placement request is currently
/// in the queue waiting to be processed.</p>
/// </li>
/// <li>
/// <p>
/// <b>FULFILLED</b> -- A new game session and player
/// sessions (if requested) have been successfully created. Values for
/// <i>GameSessionArn</i> and
/// <i>GameSessionRegion</i> are available. </p>
/// </li>
/// <li>
/// <p>
/// <b>CANCELLED</b> -- The placement request was canceled
/// with a call to <a>StopGameSessionPlacement</a>.</p>
/// </li>
/// <li>
/// <p>
/// <b>TIMED_OUT</b> -- A new game session was not
/// successfully created before the time limit expired. You can resubmit the
/// placement request as needed.</p>
/// </li>
/// <li>
/// <p>
/// <b>FAILED</b> -- GameLift is not able to complete the
/// process of placing the game session. Common reasons are the
/// game session terminated before the placement process was completed, or an unexpected
/// internal error.</p>
/// </li>
/// </ul>
pub fn status(mut self, input: crate::model::GameSessionPlacementState) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::GameSessionPlacementState>,
) -> Self {
self.status = input;
self
}
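        /// Appends an item to `game_properties`; to replace the whole collection, use
        /// [`set_game_properties`](Self::set_game_properties).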
pub fn game_properties(mut self, input: impl Into<crate::model::GameProperty>) -> Self {
let mut v = self.game_properties.unwrap_or_default();
v.push(input.into());
self.game_properties = Some(v);
self
}
pub fn set_game_properties(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::GameProperty>>,
) -> Self {
self.game_properties = input;
self
}
/// <p>The maximum number of players that can be connected simultaneously to the game session.</p>
pub fn maximum_player_session_count(mut self, input: i32) -> Self {
self.maximum_player_session_count = Some(input);
self
}
pub fn set_maximum_player_session_count(mut self, input: std::option::Option<i32>) -> Self {
self.maximum_player_session_count = input;
self
}
/// <p>A descriptive label that is associated with a game session. Session names do not need to be unique.</p>
pub fn game_session_name(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_name = Some(input.into());
self
}
pub fn set_game_session_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_name = input;
self
}
/// <p>A unique identifier for the game session. This value is set once the new game session is placed (placement status is
/// <code>FULFILLED</code>).</p>
pub fn game_session_id(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_id = Some(input.into());
self
}
pub fn set_game_session_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_id = input;
self
}
/// <p>Identifier for the game session created by this placement request. This value is
/// set once the new game session is placed (placement status is <code>FULFILLED</code>).
/// This identifier is unique across all Regions. You can use this value as a
/// <code>GameSessionId</code> value as needed.</p>
pub fn game_session_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_arn = Some(input.into());
self
}
pub fn set_game_session_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_arn = input;
self
}
/// <p>Name of the Region where the game session created by this placement request is
/// running. This value is set once the new game session is placed (placement status is
/// <code>FULFILLED</code>).</p>
pub fn game_session_region(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_region = Some(input.into());
self
}
pub fn set_game_session_region(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_region = input;
self
}
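        /// Appends an item to `player_latencies`; to replace the whole collection, use
        /// [`set_player_latencies`](Self::set_player_latencies).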
pub fn player_latencies(mut self, input: impl Into<crate::model::PlayerLatency>) -> Self {
let mut v = self.player_latencies.unwrap_or_default();
v.push(input.into());
self.player_latencies = Some(v);
self
}
pub fn set_player_latencies(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::PlayerLatency>>,
) -> Self {
self.player_latencies = input;
self
}
/// <p>Time stamp indicating when this request was placed in the queue. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn start_time(mut self, input: smithy_types::Instant) -> Self {
self.start_time = Some(input);
self
}
pub fn set_start_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.start_time = input;
self
}
/// <p>Time stamp indicating when this request was completed, canceled, or timed
/// out.</p>
pub fn end_time(mut self, input: smithy_types::Instant) -> Self {
self.end_time = Some(input);
self
}
pub fn set_end_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.end_time = input;
self
}
/// <p>The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is
/// <code>FULFILLED</code>). </p>
pub fn ip_address(mut self, input: impl Into<std::string::String>) -> Self {
self.ip_address = Some(input.into());
self
}
pub fn set_ip_address(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ip_address = input;
self
}
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub fn dns_name(mut self, input: impl Into<std::string::String>) -> Self {
self.dns_name = Some(input.into());
self
}
pub fn set_dns_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.dns_name = input;
self
}
/// <p>The port number for the game session. To connect to a GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is
/// <code>FULFILLED</code>).</p>
pub fn port(mut self, input: i32) -> Self {
self.port = Some(input);
self
}
pub fn set_port(mut self, input: std::option::Option<i32>) -> Self {
self.port = input;
self
}
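        /// Appends an item to `placed_player_sessions`; to replace the whole collection, use
        /// [`set_placed_player_sessions`](Self::set_placed_player_sessions).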
pub fn placed_player_sessions(
mut self,
input: impl Into<crate::model::PlacedPlayerSession>,
) -> Self {
let mut v = self.placed_player_sessions.unwrap_or_default();
v.push(input.into());
self.placed_player_sessions = Some(v);
self
}
pub fn set_placed_player_sessions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::PlacedPlayerSession>>,
) -> Self {
self.placed_player_sessions = input;
self
}
/// <p>A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the
/// <a>GameSession</a> object with a request to start a new game session (see <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession">Start a Game Session</a>).</p>
pub fn game_session_data(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_data = Some(input.into());
self
}
pub fn set_game_session_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_data = input;
self
}
/// <p>Information on the matchmaking process for this game. Data is in JSON syntax,
/// formatted as a string. It identifies the matchmaking configuration used to create the
/// match, and contains data on all players assigned to the match, including player
/// attributes and team assignments. For more details on matchmaker data, see <a href="https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-server.html#match-server-data">Match
/// Data</a>.</p>
pub fn matchmaker_data(mut self, input: impl Into<std::string::String>) -> Self {
self.matchmaker_data = Some(input.into());
self
}
pub fn set_matchmaker_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.matchmaker_data = input;
self
}
/// Consumes the builder and constructs a [`GameSessionPlacement`](crate::model::GameSessionPlacement)
pub fn build(self) -> crate::model::GameSessionPlacement {
crate::model::GameSessionPlacement {
placement_id: self.placement_id,
game_session_queue_name: self.game_session_queue_name,
status: self.status,
game_properties: self.game_properties,
maximum_player_session_count: self.maximum_player_session_count,
game_session_name: self.game_session_name,
game_session_id: self.game_session_id,
game_session_arn: self.game_session_arn,
game_session_region: self.game_session_region,
player_latencies: self.player_latencies,
start_time: self.start_time,
end_time: self.end_time,
ip_address: self.ip_address,
dns_name: self.dns_name,
port: self.port,
placed_player_sessions: self.placed_player_sessions,
game_session_data: self.game_session_data,
matchmaker_data: self.matchmaker_data,
}
}
}
}
impl GameSessionPlacement {
/// Creates a new builder-style object to manufacture [`GameSessionPlacement`](crate::model::GameSessionPlacement)
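    ///
    /// A minimal construction sketch, assuming the crate is published as `aws_sdk_gamelift`.
    /// In normal use this type is returned by the service rather than built by hand, and the
    /// values below are illustrative only:
    /// ```no_run
    /// use aws_sdk_gamelift::model::{GameSessionPlacement, GameSessionPlacementState};
    ///
    /// // Illustrative identifiers and status.
    /// let placement = GameSessionPlacement::builder()
    ///     .placement_id("placement-1234")
    ///     .game_session_queue_name("my-queue")
    ///     .status(GameSessionPlacementState::Pending)
    ///     .maximum_player_session_count(10)
    ///     .build();
    /// ```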
pub fn builder() -> crate::model::game_session_placement::Builder {
crate::model::game_session_placement::Builder::default()
}
}
/// <p>Information about a player session that was created as part of a <a>StartGameSessionPlacement</a> request. This object contains only the player
/// ID and player session ID. To retrieve full details on a player session, call <a>DescribePlayerSessions</a> with the player session ID.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreatePlayerSession</a> |
/// <a>CreatePlayerSessions</a> |
/// <a>DescribePlayerSessions</a> |
/// <a>StartGameSessionPlacement</a> |
/// <a>DescribeGameSessionPlacement</a> |
/// <a>StopGameSessionPlacement</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PlacedPlayerSession {
/// <p>A unique identifier for a player that is associated with this player session.</p>
pub player_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for a player session.</p>
pub player_session_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for PlacedPlayerSession {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PlacedPlayerSession");
formatter.field("player_id", &self.player_id);
formatter.field("player_session_id", &self.player_session_id);
formatter.finish()
}
}
/// See [`PlacedPlayerSession`](crate::model::PlacedPlayerSession)
pub mod placed_player_session {
/// A builder for [`PlacedPlayerSession`](crate::model::PlacedPlayerSession)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) player_id: std::option::Option<std::string::String>,
pub(crate) player_session_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for a player that is associated with this player session.</p>
pub fn player_id(mut self, input: impl Into<std::string::String>) -> Self {
self.player_id = Some(input.into());
self
}
pub fn set_player_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.player_id = input;
self
}
/// <p>A unique identifier for a player session.</p>
pub fn player_session_id(mut self, input: impl Into<std::string::String>) -> Self {
self.player_session_id = Some(input.into());
self
}
pub fn set_player_session_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.player_session_id = input;
self
}
/// Consumes the builder and constructs a [`PlacedPlayerSession`](crate::model::PlacedPlayerSession)
pub fn build(self) -> crate::model::PlacedPlayerSession {
crate::model::PlacedPlayerSession {
player_id: self.player_id,
player_session_id: self.player_session_id,
}
}
}
}
impl PlacedPlayerSession {
/// Creates a new builder-style object to manufacture [`PlacedPlayerSession`](crate::model::PlacedPlayerSession)
pub fn builder() -> crate::model::placed_player_session::Builder {
crate::model::placed_player_session::Builder::default()
}
}
/// <p>Regional latency information for a player, used when requesting a new game session
/// with <a>StartGameSessionPlacement</a>. This value indicates the amount of
/// time lag that exists when the player is connected to a fleet in the specified Region.
/// The relative difference between a player's latency values for multiple Regions is used
/// to determine which fleets are best suited to place a new game session for the player.
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PlayerLatency {
/// <p>A unique identifier for a player associated with the latency data.</p>
pub player_id: std::option::Option<std::string::String>,
/// <p>Name of the Region that is associated with the latency value.</p>
pub region_identifier: std::option::Option<std::string::String>,
/// <p>Amount of time that represents the time lag experienced by the player when
/// connected to the specified Region.</p>
pub latency_in_milliseconds: f32,
}
impl std::fmt::Debug for PlayerLatency {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PlayerLatency");
formatter.field("player_id", &self.player_id);
formatter.field("region_identifier", &self.region_identifier);
formatter.field("latency_in_milliseconds", &self.latency_in_milliseconds);
formatter.finish()
}
}
/// See [`PlayerLatency`](crate::model::PlayerLatency)
pub mod player_latency {
/// A builder for [`PlayerLatency`](crate::model::PlayerLatency)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) player_id: std::option::Option<std::string::String>,
pub(crate) region_identifier: std::option::Option<std::string::String>,
pub(crate) latency_in_milliseconds: std::option::Option<f32>,
}
impl Builder {
/// <p>A unique identifier for a player associated with the latency data.</p>
pub fn player_id(mut self, input: impl Into<std::string::String>) -> Self {
self.player_id = Some(input.into());
self
}
pub fn set_player_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.player_id = input;
self
}
/// <p>Name of the Region that is associated with the latency value.</p>
pub fn region_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.region_identifier = Some(input.into());
self
}
pub fn set_region_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.region_identifier = input;
self
}
/// <p>Amount of time that represents the time lag experienced by the player when
/// connected to the specified Region.</p>
pub fn latency_in_milliseconds(mut self, input: f32) -> Self {
self.latency_in_milliseconds = Some(input);
self
}
pub fn set_latency_in_milliseconds(mut self, input: std::option::Option<f32>) -> Self {
self.latency_in_milliseconds = input;
self
}
/// Consumes the builder and constructs a [`PlayerLatency`](crate::model::PlayerLatency)
pub fn build(self) -> crate::model::PlayerLatency {
crate::model::PlayerLatency {
player_id: self.player_id,
region_identifier: self.region_identifier,
latency_in_milliseconds: self.latency_in_milliseconds.unwrap_or_default(),
}
}
}
}
impl PlayerLatency {
/// Creates a new builder-style object to manufacture [`PlayerLatency`](crate::model::PlayerLatency)
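    ///
    /// A minimal construction sketch, assuming the crate is published as `aws_sdk_gamelift`
    /// (the values are illustrative only):
    /// ```no_run
    /// use aws_sdk_gamelift::model::PlayerLatency;
    ///
    /// // Illustrative latency report for one player in one Region.
    /// let latency = PlayerLatency::builder()
    ///     .player_id("player-1")
    ///     .region_identifier("us-west-2")
    ///     .latency_in_milliseconds(60.0)
    ///     .build();
    /// ```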
pub fn builder() -> crate::model::player_latency::Builder {
crate::model::player_latency::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameSessionPlacementState {
Cancelled,
Failed,
Fulfilled,
Pending,
TimedOut,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameSessionPlacementState {
fn from(s: &str) -> Self {
match s {
"CANCELLED" => GameSessionPlacementState::Cancelled,
"FAILED" => GameSessionPlacementState::Failed,
"FULFILLED" => GameSessionPlacementState::Fulfilled,
"PENDING" => GameSessionPlacementState::Pending,
"TIMED_OUT" => GameSessionPlacementState::TimedOut,
other => GameSessionPlacementState::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameSessionPlacementState {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameSessionPlacementState::from(s))
}
}
impl GameSessionPlacementState {
pub fn as_str(&self) -> &str {
match self {
GameSessionPlacementState::Cancelled => "CANCELLED",
GameSessionPlacementState::Failed => "FAILED",
GameSessionPlacementState::Fulfilled => "FULFILLED",
GameSessionPlacementState::Pending => "PENDING",
GameSessionPlacementState::TimedOut => "TIMED_OUT",
GameSessionPlacementState::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["CANCELLED", "FAILED", "FULFILLED", "PENDING", "TIMED_OUT"]
}
}
impl AsRef<str> for GameSessionPlacementState {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum FleetAction {
AutoScaling,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for FleetAction {
fn from(s: &str) -> Self {
match s {
"AUTO_SCALING" => FleetAction::AutoScaling,
other => FleetAction::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for FleetAction {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(FleetAction::from(s))
}
}
impl FleetAction {
pub fn as_str(&self) -> &str {
match self {
FleetAction::AutoScaling => "AUTO_SCALING",
FleetAction::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["AUTO_SCALING"]
}
}
impl AsRef<str> for FleetAction {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Ticket generated to track the progress of a matchmaking request. Each ticket is
/// uniquely identified by a ticket ID, supplied by the requester, when creating a
/// matchmaking request with <a>StartMatchmaking</a>. Tickets can be retrieved by
/// calling <a>DescribeMatchmaking</a> with the ticket ID.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct MatchmakingTicket {
/// <p>A unique identifier for a matchmaking ticket.</p>
pub ticket_id: std::option::Option<std::string::String>,
/// <p>Name of the <a>MatchmakingConfiguration</a> that is used with this
/// ticket. Matchmaking configurations determine how players are grouped into a match and
/// how a new game session is created for the match.</p>
pub configuration_name: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift matchmaking configuration resource that is used with this ticket.</p>
pub configuration_arn: std::option::Option<std::string::String>,
/// <p>Current status of the matchmaking request.</p>
/// <ul>
/// <li>
/// <p>
/// <b>QUEUED</b> -- The matchmaking request has been
/// received and is currently waiting to be processed.</p>
/// </li>
/// <li>
/// <p>
/// <b>SEARCHING</b> -- The matchmaking request is
/// currently being processed. </p>
/// </li>
/// <li>
/// <p>
/// <b>REQUIRES_ACCEPTANCE</b> -- A match has been
/// proposed and the players must accept the match (see <a>AcceptMatch</a>). This status is used only with requests that use a matchmaking configuration
/// with a player acceptance requirement.</p>
/// </li>
/// <li>
/// <p>
/// <b>PLACING</b> -- The FlexMatch engine has matched
/// players and is in the process of placing a new game session for the
/// match.</p>
/// </li>
/// <li>
/// <p>
/// <b>COMPLETED</b> -- Players have been matched and a
/// game session is ready to host the players. A ticket in this state contains the
/// necessary connection information for players.</p>
/// </li>
/// <li>
/// <p>
/// <b>FAILED</b> -- The matchmaking request was not
/// completed.</p>
/// </li>
/// <li>
/// <p>
/// <b>CANCELLED</b> -- The matchmaking request was
/// canceled. This may be the result of a call to <a>StopMatchmaking</a>
/// or a proposed match that one or more players failed to accept.</p>
/// </li>
/// <li>
/// <p>
/// <b>TIMED_OUT</b> -- The matchmaking request was not
/// successful within the duration specified in the matchmaking configuration.
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Matchmaking requests that fail to successfully complete (statuses FAILED,
/// CANCELLED, TIMED_OUT) can be resubmitted as new requests with new ticket
/// IDs.</p>
/// </note>
pub status: std::option::Option<crate::model::MatchmakingConfigurationStatus>,
/// <p>Code to explain the current status. For example, a status reason may indicate when
/// a ticket has returned to <code>SEARCHING</code> status after a proposed match fails to
/// receive player acceptances.</p>
pub status_reason: std::option::Option<std::string::String>,
/// <p>Additional information about the current status.</p>
pub status_message: std::option::Option<std::string::String>,
/// <p>Time stamp indicating when this matchmaking request was received. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub start_time: std::option::Option<smithy_types::Instant>,
/// <p>Time stamp indicating when this matchmaking request stopped being processed due to
/// success, failure, or cancellation. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub end_time: std::option::Option<smithy_types::Instant>,
/// <p>A set of <code>Player</code> objects, each representing a player to find matches
/// for. Players are identified by a unique player ID and may include latency data for use
/// during matchmaking. If the ticket is in status <code>COMPLETED</code>, the
/// <code>Player</code> objects include the team the players were assigned to in the
/// resulting match.</p>
pub players: std::option::Option<std::vec::Vec<crate::model::Player>>,
/// <p>Identifier and connection information of the game session created for the match. This
/// information is added to the ticket only after the matchmaking request has been
/// successfully completed. This parameter is not set when FlexMatch is being used without
/// GameLift hosting.</p>
pub game_session_connection_info: std::option::Option<crate::model::GameSessionConnectionInfo>,
/// <p>Average amount of time (in seconds) that players are currently waiting for a match.
/// If there is not enough recent data, this property may be empty.</p>
pub estimated_wait_time: std::option::Option<i32>,
}
impl std::fmt::Debug for MatchmakingTicket {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("MatchmakingTicket");
formatter.field("ticket_id", &self.ticket_id);
formatter.field("configuration_name", &self.configuration_name);
formatter.field("configuration_arn", &self.configuration_arn);
formatter.field("status", &self.status);
formatter.field("status_reason", &self.status_reason);
formatter.field("status_message", &self.status_message);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.field("players", &self.players);
formatter.field(
"game_session_connection_info",
&self.game_session_connection_info,
);
formatter.field("estimated_wait_time", &self.estimated_wait_time);
formatter.finish()
}
}
/// See [`MatchmakingTicket`](crate::model::MatchmakingTicket)
pub mod matchmaking_ticket {
/// A builder for [`MatchmakingTicket`](crate::model::MatchmakingTicket)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) ticket_id: std::option::Option<std::string::String>,
pub(crate) configuration_name: std::option::Option<std::string::String>,
pub(crate) configuration_arn: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::MatchmakingConfigurationStatus>,
pub(crate) status_reason: std::option::Option<std::string::String>,
pub(crate) status_message: std::option::Option<std::string::String>,
pub(crate) start_time: std::option::Option<smithy_types::Instant>,
pub(crate) end_time: std::option::Option<smithy_types::Instant>,
pub(crate) players: std::option::Option<std::vec::Vec<crate::model::Player>>,
pub(crate) game_session_connection_info:
std::option::Option<crate::model::GameSessionConnectionInfo>,
pub(crate) estimated_wait_time: std::option::Option<i32>,
}
impl Builder {
/// <p>A unique identifier for a matchmaking ticket.</p>
pub fn ticket_id(mut self, input: impl Into<std::string::String>) -> Self {
self.ticket_id = Some(input.into());
self
}
pub fn set_ticket_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ticket_id = input;
self
}
/// <p>Name of the <a>MatchmakingConfiguration</a> that is used with this
/// ticket. Matchmaking configurations determine how players are grouped into a match and
/// how a new game session is created for the match.</p>
pub fn configuration_name(mut self, input: impl Into<std::string::String>) -> Self {
self.configuration_name = Some(input.into());
self
}
pub fn set_configuration_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.configuration_name = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift matchmaking configuration resource that is used with this ticket.</p>
pub fn configuration_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.configuration_arn = Some(input.into());
self
}
pub fn set_configuration_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.configuration_arn = input;
self
}
/// <p>Current status of the matchmaking request.</p>
/// <ul>
/// <li>
/// <p>
/// <b>QUEUED</b> -- The matchmaking request has been
/// received and is currently waiting to be processed.</p>
/// </li>
/// <li>
/// <p>
/// <b>SEARCHING</b> -- The matchmaking request is
/// currently being processed. </p>
/// </li>
/// <li>
/// <p>
/// <b>REQUIRES_ACCEPTANCE</b> -- A match has been
/// proposed and the players must accept the match (see <a>AcceptMatch</a>). This status is used only with requests that use a matchmaking configuration
/// with a player acceptance requirement.</p>
/// </li>
/// <li>
/// <p>
/// <b>PLACING</b> -- The FlexMatch engine has matched
/// players and is in the process of placing a new game session for the
/// match.</p>
/// </li>
/// <li>
/// <p>
/// <b>COMPLETED</b> -- Players have been matched and a
/// game session is ready to host the players. A ticket in this state contains the
/// necessary connection information for players.</p>
/// </li>
/// <li>
/// <p>
/// <b>FAILED</b> -- The matchmaking request was not
/// completed.</p>
/// </li>
/// <li>
/// <p>
/// <b>CANCELLED</b> -- The matchmaking request was
/// canceled. This may be the result of a call to <a>StopMatchmaking</a>
/// or a proposed match that one or more players failed to accept.</p>
/// </li>
/// <li>
/// <p>
/// <b>TIMED_OUT</b> -- The matchmaking request was not
/// successful within the duration specified in the matchmaking configuration.
/// </p>
/// </li>
/// </ul>
/// <note>
/// <p>Matchmaking requests that fail to successfully complete (statuses FAILED,
/// CANCELLED, TIMED_OUT) can be resubmitted as new requests with new ticket
/// IDs.</p>
/// </note>
pub fn status(mut self, input: crate::model::MatchmakingConfigurationStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::MatchmakingConfigurationStatus>,
) -> Self {
self.status = input;
self
}
/// <p>Code to explain the current status. For example, a status reason may indicate when
/// a ticket has returned to <code>SEARCHING</code> status after a proposed match fails to
/// receive player acceptances.</p>
pub fn status_reason(mut self, input: impl Into<std::string::String>) -> Self {
self.status_reason = Some(input.into());
self
}
pub fn set_status_reason(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.status_reason = input;
self
}
/// <p>Additional information about the current status.</p>
pub fn status_message(mut self, input: impl Into<std::string::String>) -> Self {
self.status_message = Some(input.into());
self
}
pub fn set_status_message(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.status_message = input;
self
}
/// <p>Time stamp indicating when this matchmaking request was received. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn start_time(mut self, input: smithy_types::Instant) -> Self {
self.start_time = Some(input);
self
}
pub fn set_start_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.start_time = input;
self
}
/// <p>Time stamp indicating when this matchmaking request stopped being processed due to
/// success, failure, or cancellation. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn end_time(mut self, input: smithy_types::Instant) -> Self {
self.end_time = Some(input);
self
}
pub fn set_end_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.end_time = input;
self
}
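        /// Appends an item to `players`; to replace the whole collection, use
        /// [`set_players`](Self::set_players).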
pub fn players(mut self, input: impl Into<crate::model::Player>) -> Self {
let mut v = self.players.unwrap_or_default();
v.push(input.into());
self.players = Some(v);
self
}
pub fn set_players(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Player>>,
) -> Self {
self.players = input;
self
}
/// <p>Identifier and connection information of the game session created for the match. This
/// information is added to the ticket only after the matchmaking request has been
/// successfully completed. This parameter is not set when FlexMatch is being used without
/// GameLift hosting.</p>
pub fn game_session_connection_info(
mut self,
input: crate::model::GameSessionConnectionInfo,
) -> Self {
self.game_session_connection_info = Some(input);
self
}
pub fn set_game_session_connection_info(
mut self,
input: std::option::Option<crate::model::GameSessionConnectionInfo>,
) -> Self {
self.game_session_connection_info = input;
self
}
/// <p>Average amount of time (in seconds) that players are currently waiting for a match.
/// If there is not enough recent data, this property may be empty.</p>
pub fn estimated_wait_time(mut self, input: i32) -> Self {
self.estimated_wait_time = Some(input);
self
}
pub fn set_estimated_wait_time(mut self, input: std::option::Option<i32>) -> Self {
self.estimated_wait_time = input;
self
}
/// Consumes the builder and constructs a [`MatchmakingTicket`](crate::model::MatchmakingTicket)
pub fn build(self) -> crate::model::MatchmakingTicket {
crate::model::MatchmakingTicket {
ticket_id: self.ticket_id,
configuration_name: self.configuration_name,
configuration_arn: self.configuration_arn,
status: self.status,
status_reason: self.status_reason,
status_message: self.status_message,
start_time: self.start_time,
end_time: self.end_time,
players: self.players,
game_session_connection_info: self.game_session_connection_info,
estimated_wait_time: self.estimated_wait_time,
}
}
}
}
impl MatchmakingTicket {
/// Creates a new builder-style object to manufacture [`MatchmakingTicket`](crate::model::MatchmakingTicket)
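    ///
    /// A minimal construction sketch, assuming the crate is published as `aws_sdk_gamelift`.
    /// Tickets are normally returned by the service (for example, from `DescribeMatchmaking`),
    /// and the values below are illustrative only:
    /// ```no_run
    /// use aws_sdk_gamelift::model::MatchmakingTicket;
    ///
    /// // Illustrative ticket fields.
    /// let ticket = MatchmakingTicket::builder()
    ///     .ticket_id("ticket-1234")
    ///     .configuration_name("my-matchmaking-config")
    ///     .estimated_wait_time(30)
    ///     .build();
    /// ```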
pub fn builder() -> crate::model::matchmaking_ticket::Builder {
crate::model::matchmaking_ticket::Builder::default()
}
}
/// <p>Connection information for a new game session that is created in response to a <a>StartMatchmaking</a> request. Once a match is made, the FlexMatch engine
/// creates a new game session for it. This information, including the game session endpoint
/// and player sessions for each player in the original matchmaking request, is added to the
/// <a>MatchmakingTicket</a>, which can be retrieved by calling <a>DescribeMatchmaking</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameSessionConnectionInfo {
/// <p>A unique identifier for the game session. Use the game session ID.</p>
pub game_session_arn: std::option::Option<std::string::String>,
/// <p>The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub ip_address: std::option::Option<std::string::String>,
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub dns_name: std::option::Option<std::string::String>,
/// <p>The port number for the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub port: std::option::Option<i32>,
/// <p>A collection of player session IDs, one for each player ID that was included in the
/// original matchmaking request. </p>
pub matched_player_sessions:
std::option::Option<std::vec::Vec<crate::model::MatchedPlayerSession>>,
}
impl std::fmt::Debug for GameSessionConnectionInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameSessionConnectionInfo");
formatter.field("game_session_arn", &self.game_session_arn);
formatter.field("ip_address", &self.ip_address);
formatter.field("dns_name", &self.dns_name);
formatter.field("port", &self.port);
formatter.field("matched_player_sessions", &self.matched_player_sessions);
formatter.finish()
}
}
/// See [`GameSessionConnectionInfo`](crate::model::GameSessionConnectionInfo)
pub mod game_session_connection_info {
/// A builder for [`GameSessionConnectionInfo`](crate::model::GameSessionConnectionInfo)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) game_session_arn: std::option::Option<std::string::String>,
pub(crate) ip_address: std::option::Option<std::string::String>,
pub(crate) dns_name: std::option::Option<std::string::String>,
pub(crate) port: std::option::Option<i32>,
pub(crate) matched_player_sessions:
std::option::Option<std::vec::Vec<crate::model::MatchedPlayerSession>>,
}
impl Builder {
/// <p>A unique identifier for the game session. Use the game session ID.</p>
pub fn game_session_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_arn = Some(input.into());
self
}
pub fn set_game_session_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_arn = input;
self
}
/// <p>The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub fn ip_address(mut self, input: impl Into<std::string::String>) -> Self {
self.ip_address = Some(input.into());
self
}
pub fn set_ip_address(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ip_address = input;
self
}
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub fn dns_name(mut self, input: impl Into<std::string::String>) -> Self {
self.dns_name = Some(input.into());
self
}
pub fn set_dns_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.dns_name = input;
self
}
/// <p>The port number for the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub fn port(mut self, input: i32) -> Self {
self.port = Some(input);
self
}
pub fn set_port(mut self, input: std::option::Option<i32>) -> Self {
self.port = input;
self
}
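        /// Appends an item to `matched_player_sessions`; to replace the whole collection, use
        /// [`set_matched_player_sessions`](Self::set_matched_player_sessions).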
pub fn matched_player_sessions(
mut self,
input: impl Into<crate::model::MatchedPlayerSession>,
) -> Self {
let mut v = self.matched_player_sessions.unwrap_or_default();
v.push(input.into());
self.matched_player_sessions = Some(v);
self
}
pub fn set_matched_player_sessions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MatchedPlayerSession>>,
) -> Self {
self.matched_player_sessions = input;
self
}
/// Consumes the builder and constructs a [`GameSessionConnectionInfo`](crate::model::GameSessionConnectionInfo)
pub fn build(self) -> crate::model::GameSessionConnectionInfo {
crate::model::GameSessionConnectionInfo {
game_session_arn: self.game_session_arn,
ip_address: self.ip_address,
dns_name: self.dns_name,
port: self.port,
matched_player_sessions: self.matched_player_sessions,
}
}
}
}
impl GameSessionConnectionInfo {
/// Creates a new builder-style object to manufacture [`GameSessionConnectionInfo`](crate::model::GameSessionConnectionInfo)
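    ///
    /// A minimal construction sketch, assuming the crate is published as `aws_sdk_gamelift`
    /// (the endpoint values are illustrative only):
    /// ```no_run
    /// use aws_sdk_gamelift::model::GameSessionConnectionInfo;
    ///
    /// // Illustrative connection endpoint.
    /// let info = GameSessionConnectionInfo::builder()
    ///     .game_session_arn("arn:aws:gamelift:us-west-2::gamesession/fleet-1234/gsess-5678")
    ///     .ip_address("10.1.2.3")
    ///     .port(7777)
    ///     .build();
    /// ```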
pub fn builder() -> crate::model::game_session_connection_info::Builder {
crate::model::game_session_connection_info::Builder::default()
}
}
/// <p>Represents a new player session that is created as a result of a successful FlexMatch
/// match. A successful match automatically creates new player sessions for every player ID
/// in the original matchmaking request. </p>
/// <p>When players connect to the match's game session, they must include both player ID
/// and player session ID in order to claim their assigned player slot.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct MatchedPlayerSession {
    /// <p>A unique identifier for a player.</p>
pub player_id: std::option::Option<std::string::String>,
    /// <p>A unique identifier for a player session.</p>
pub player_session_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for MatchedPlayerSession {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("MatchedPlayerSession");
formatter.field("player_id", &self.player_id);
formatter.field("player_session_id", &self.player_session_id);
formatter.finish()
}
}
/// See [`MatchedPlayerSession`](crate::model::MatchedPlayerSession)
pub mod matched_player_session {
/// A builder for [`MatchedPlayerSession`](crate::model::MatchedPlayerSession)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) player_id: std::option::Option<std::string::String>,
pub(crate) player_session_id: std::option::Option<std::string::String>,
}
impl Builder {
        /// <p>A unique identifier for a player.</p>
pub fn player_id(mut self, input: impl Into<std::string::String>) -> Self {
self.player_id = Some(input.into());
self
}
pub fn set_player_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.player_id = input;
self
}
        /// <p>A unique identifier for a player session.</p>
pub fn player_session_id(mut self, input: impl Into<std::string::String>) -> Self {
self.player_session_id = Some(input.into());
self
}
pub fn set_player_session_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.player_session_id = input;
self
}
/// Consumes the builder and constructs a [`MatchedPlayerSession`](crate::model::MatchedPlayerSession)
pub fn build(self) -> crate::model::MatchedPlayerSession {
crate::model::MatchedPlayerSession {
player_id: self.player_id,
player_session_id: self.player_session_id,
}
}
}
}
impl MatchedPlayerSession {
/// Creates a new builder-style object to manufacture [`MatchedPlayerSession`](crate::model::MatchedPlayerSession)
pub fn builder() -> crate::model::matched_player_session::Builder {
crate::model::matched_player_session::Builder::default()
}
}
/// <p>Represents a player in matchmaking. When starting a matchmaking request, a player
/// has a player ID, attributes, and may have latency data. Team information is added after
/// a match has been successfully completed.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Player {
    /// <p>A unique identifier for a player.</p>
pub player_id: std::option::Option<std::string::String>,
/// <p>A collection of key:value pairs containing player information for use in matchmaking.
/// Player attribute keys must match the <i>playerAttributes</i> used in a
/// matchmaking rule set. Example: <code>"PlayerAttributes": {"skill": {"N": "23"},
/// "gameMode": {"S": "deathmatch"}}</code>.</p>
pub player_attributes: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
/// <p>Name of the team that the player is assigned to in a match. Team names are defined
/// in a matchmaking rule set.</p>
pub team: std::option::Option<std::string::String>,
/// <p>A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to AWS Regions. If this property is present, FlexMatch considers placing the match only
/// in Regions for which latency is reported. </p>
/// <p>If a matchmaker has a rule that evaluates player latency, players must report
/// latency in order to be matched. If no latency is reported in this scenario, FlexMatch
/// assumes that no Regions are available to the player and the ticket is not matchable.
/// </p>
pub latency_in_ms: std::option::Option<std::collections::HashMap<std::string::String, i32>>,
}
impl std::fmt::Debug for Player {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Player");
formatter.field("player_id", &self.player_id);
formatter.field("player_attributes", &self.player_attributes);
formatter.field("team", &self.team);
formatter.field("latency_in_ms", &self.latency_in_ms);
formatter.finish()
}
}
/// See [`Player`](crate::model::Player)
pub mod player {
/// A builder for [`Player`](crate::model::Player)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) player_id: std::option::Option<std::string::String>,
pub(crate) player_attributes: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
pub(crate) team: std::option::Option<std::string::String>,
pub(crate) latency_in_ms:
std::option::Option<std::collections::HashMap<std::string::String, i32>>,
}
impl Builder {
        /// <p>A unique identifier for a player.</p>
pub fn player_id(mut self, input: impl Into<std::string::String>) -> Self {
self.player_id = Some(input.into());
self
}
pub fn set_player_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.player_id = input;
self
}
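        /// <p>Adds a key-value pair to <code>player_attributes</code>; repeated calls build up
        /// the map, and a duplicate key overwrites the earlier value. To replace the entire map,
        /// use <code>set_player_attributes</code>.</p>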
pub fn player_attributes(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::AttributeValue>,
) -> Self {
let mut hash_map = self.player_attributes.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.player_attributes = Some(hash_map);
self
}
pub fn set_player_attributes(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::AttributeValue>,
>,
) -> Self {
self.player_attributes = input;
self
}
/// <p>Name of the team that the player is assigned to in a match. Team names are defined
/// in a matchmaking rule set.</p>
pub fn team(mut self, input: impl Into<std::string::String>) -> Self {
self.team = Some(input.into());
self
}
pub fn set_team(mut self, input: std::option::Option<std::string::String>) -> Self {
self.team = input;
self
}
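        /// <p>Adds a key-value pair to <code>latency_in_ms</code>, keyed by Region name; a
        /// duplicate key overwrites the earlier value. To replace the entire map, use
        /// <code>set_latency_in_ms</code>.</p>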
pub fn latency_in_ms(
mut self,
k: impl Into<std::string::String>,
v: impl Into<i32>,
) -> Self {
let mut hash_map = self.latency_in_ms.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.latency_in_ms = Some(hash_map);
self
}
pub fn set_latency_in_ms(
mut self,
input: std::option::Option<std::collections::HashMap<std::string::String, i32>>,
) -> Self {
self.latency_in_ms = input;
self
}
/// Consumes the builder and constructs a [`Player`](crate::model::Player)
pub fn build(self) -> crate::model::Player {
crate::model::Player {
player_id: self.player_id,
player_attributes: self.player_attributes,
team: self.team,
latency_in_ms: self.latency_in_ms,
}
}
}
}
impl Player {
/// Creates a new builder-style object to manufacture [`Player`](crate::model::Player)
pub fn builder() -> crate::model::player::Builder {
crate::model::player::Builder::default()
}
}
/// <p>Values for use in <a>Player</a> attribute key-value pairs. This object lets
/// you specify an attribute value using any of the valid data types: string, number, string
/// array, or data map. Each <code>AttributeValue</code> object can use only one of the
/// available properties.</p>
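/// <p>Only one of the properties should be set on a given value. A minimal, illustrative
/// sketch (attribute names and values are hypothetical):</p>
/// ```ignore
/// // A numeric attribute, e.g. a skill rating of 23.
/// let skill = crate::model::AttributeValue::builder().n(23.0).build();
/// // A string-list attribute built by repeated calls to `sl`.
/// let modes = crate::model::AttributeValue::builder()
///     .sl("deathmatch")
///     .sl("capture-the-flag")
///     .build();
/// ```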
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AttributeValue {
/// <p>For single string values. Maximum string length is 100 characters.</p>
pub s: std::option::Option<std::string::String>,
/// <p>For number values, expressed as double.</p>
pub n: std::option::Option<f64>,
    /// <p>For a list of up to 10 strings. Maximum length for each string is 100 characters.
    /// Duplicate values are not recognized; all occurrences of a repeated value after the
    /// first are ignored.</p>
pub sl: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>For a map of up to 10 data type:value pairs. Maximum length for each string value
/// is 100 characters. </p>
pub sdm: std::option::Option<std::collections::HashMap<std::string::String, f64>>,
}
impl std::fmt::Debug for AttributeValue {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AttributeValue");
formatter.field("s", &self.s);
formatter.field("n", &self.n);
formatter.field("sl", &self.sl);
formatter.field("sdm", &self.sdm);
formatter.finish()
}
}
/// See [`AttributeValue`](crate::model::AttributeValue)
pub mod attribute_value {
/// A builder for [`AttributeValue`](crate::model::AttributeValue)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) s: std::option::Option<std::string::String>,
pub(crate) n: std::option::Option<f64>,
pub(crate) sl: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) sdm: std::option::Option<std::collections::HashMap<std::string::String, f64>>,
}
impl Builder {
/// <p>For single string values. Maximum string length is 100 characters.</p>
pub fn s(mut self, input: impl Into<std::string::String>) -> Self {
self.s = Some(input.into());
self
}
pub fn set_s(mut self, input: std::option::Option<std::string::String>) -> Self {
self.s = input;
self
}
/// <p>For number values, expressed as double.</p>
pub fn n(mut self, input: f64) -> Self {
self.n = Some(input);
self
}
pub fn set_n(mut self, input: std::option::Option<f64>) -> Self {
self.n = input;
self
}
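        /// <p>Appends a single string to <code>sl</code>; call repeatedly to build up the list.
        /// To replace the entire list, use <code>set_sl</code>.</p>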
pub fn sl(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.sl.unwrap_or_default();
v.push(input.into());
self.sl = Some(v);
self
}
pub fn set_sl(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.sl = input;
self
}
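        /// <p>Adds a key-value pair to <code>sdm</code>; a duplicate key overwrites the earlier
        /// value. To replace the entire map, use <code>set_sdm</code>.</p>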
pub fn sdm(mut self, k: impl Into<std::string::String>, v: impl Into<f64>) -> Self {
let mut hash_map = self.sdm.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.sdm = Some(hash_map);
self
}
pub fn set_sdm(
mut self,
input: std::option::Option<std::collections::HashMap<std::string::String, f64>>,
) -> Self {
self.sdm = input;
self
}
/// Consumes the builder and constructs a [`AttributeValue`](crate::model::AttributeValue)
pub fn build(self) -> crate::model::AttributeValue {
crate::model::AttributeValue {
s: self.s,
n: self.n,
sl: self.sl,
sdm: self.sdm,
}
}
}
}
impl AttributeValue {
/// Creates a new builder-style object to manufacture [`AttributeValue`](crate::model::AttributeValue)
pub fn builder() -> crate::model::attribute_value::Builder {
crate::model::attribute_value::Builder::default()
}
}
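/// <p>Possible status values reported for a matchmaking request. The <code>Unknown</code>
/// variant preserves any new status string added to the service after this code was
/// generated.</p>
///
/// <p>Conversions round-trip through the wire string; an illustrative sketch:</p>
/// ```ignore
/// use std::str::FromStr;
/// let status = crate::model::MatchmakingConfigurationStatus::from_str("SEARCHING").unwrap();
/// assert_eq!(status.as_str(), "SEARCHING");
/// // Unrecognized strings are preserved in the `Unknown` variant.
/// let other = crate::model::MatchmakingConfigurationStatus::from("NEW_STATUS");
/// assert_eq!(other.as_str(), "NEW_STATUS");
/// ```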
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum MatchmakingConfigurationStatus {
Cancelled,
Completed,
Failed,
Placing,
Queued,
RequiresAcceptance,
Searching,
TimedOut,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for MatchmakingConfigurationStatus {
fn from(s: &str) -> Self {
match s {
"CANCELLED" => MatchmakingConfigurationStatus::Cancelled,
"COMPLETED" => MatchmakingConfigurationStatus::Completed,
"FAILED" => MatchmakingConfigurationStatus::Failed,
"PLACING" => MatchmakingConfigurationStatus::Placing,
"QUEUED" => MatchmakingConfigurationStatus::Queued,
"REQUIRES_ACCEPTANCE" => MatchmakingConfigurationStatus::RequiresAcceptance,
"SEARCHING" => MatchmakingConfigurationStatus::Searching,
"TIMED_OUT" => MatchmakingConfigurationStatus::TimedOut,
other => MatchmakingConfigurationStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for MatchmakingConfigurationStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(MatchmakingConfigurationStatus::from(s))
}
}
impl MatchmakingConfigurationStatus {
pub fn as_str(&self) -> &str {
match self {
MatchmakingConfigurationStatus::Cancelled => "CANCELLED",
MatchmakingConfigurationStatus::Completed => "COMPLETED",
MatchmakingConfigurationStatus::Failed => "FAILED",
MatchmakingConfigurationStatus::Placing => "PLACING",
MatchmakingConfigurationStatus::Queued => "QUEUED",
MatchmakingConfigurationStatus::RequiresAcceptance => "REQUIRES_ACCEPTANCE",
MatchmakingConfigurationStatus::Searching => "SEARCHING",
MatchmakingConfigurationStatus::TimedOut => "TIMED_OUT",
MatchmakingConfigurationStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"CANCELLED",
"COMPLETED",
"FAILED",
"PLACING",
"QUEUED",
"REQUIRES_ACCEPTANCE",
"SEARCHING",
"TIMED_OUT",
]
}
}
impl AsRef<str> for MatchmakingConfigurationStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Player information for use when creating player sessions using a game session
/// placement request with <a>StartGameSessionPlacement</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DesiredPlayerSession {
/// <p>A unique identifier for a player to associate with the player session.</p>
pub player_id: std::option::Option<std::string::String>,
/// <p>Developer-defined information related to a player. GameLift does not use this data, so it can be formatted as needed for use in the game.</p>
pub player_data: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DesiredPlayerSession {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DesiredPlayerSession");
formatter.field("player_id", &self.player_id);
formatter.field("player_data", &self.player_data);
formatter.finish()
}
}
/// See [`DesiredPlayerSession`](crate::model::DesiredPlayerSession)
pub mod desired_player_session {
/// A builder for [`DesiredPlayerSession`](crate::model::DesiredPlayerSession)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) player_id: std::option::Option<std::string::String>,
pub(crate) player_data: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for a player to associate with the player session.</p>
pub fn player_id(mut self, input: impl Into<std::string::String>) -> Self {
self.player_id = Some(input.into());
self
}
pub fn set_player_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.player_id = input;
self
}
/// <p>Developer-defined information related to a player. GameLift does not use this data, so it can be formatted as needed for use in the game.</p>
pub fn player_data(mut self, input: impl Into<std::string::String>) -> Self {
self.player_data = Some(input.into());
self
}
pub fn set_player_data(mut self, input: std::option::Option<std::string::String>) -> Self {
self.player_data = input;
self
}
/// Consumes the builder and constructs a [`DesiredPlayerSession`](crate::model::DesiredPlayerSession)
pub fn build(self) -> crate::model::DesiredPlayerSession {
crate::model::DesiredPlayerSession {
player_id: self.player_id,
player_data: self.player_data,
}
}
}
}
impl DesiredPlayerSession {
/// Creates a new builder-style object to manufacture [`DesiredPlayerSession`](crate::model::DesiredPlayerSession)
pub fn builder() -> crate::model::desired_player_session::Builder {
crate::model::desired_player_session::Builder::default()
}
}
/// <p>Temporary access credentials used for uploading game build files to Amazon GameLift. They
/// are valid for a limited time. If they expire before you upload your game build, get a
/// new set by calling <a>RequestUploadCredentials</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AwsCredentials {
/// <p>Temporary key allowing access to the Amazon GameLift S3 account.</p>
pub access_key_id: std::option::Option<std::string::String>,
/// <p>Temporary secret key allowing access to the Amazon GameLift S3 account.</p>
pub secret_access_key: std::option::Option<std::string::String>,
/// <p>Token used to associate a specific build ID with the files uploaded using these
/// credentials.</p>
pub session_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for AwsCredentials {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AwsCredentials");
formatter.field("access_key_id", &self.access_key_id);
formatter.field("secret_access_key", &self.secret_access_key);
formatter.field("session_token", &self.session_token);
formatter.finish()
}
}
/// See [`AwsCredentials`](crate::model::AwsCredentials)
pub mod aws_credentials {
/// A builder for [`AwsCredentials`](crate::model::AwsCredentials)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) access_key_id: std::option::Option<std::string::String>,
pub(crate) secret_access_key: std::option::Option<std::string::String>,
pub(crate) session_token: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>Temporary key allowing access to the Amazon GameLift S3 account.</p>
pub fn access_key_id(mut self, input: impl Into<std::string::String>) -> Self {
self.access_key_id = Some(input.into());
self
}
pub fn set_access_key_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.access_key_id = input;
self
}
/// <p>Temporary secret key allowing access to the Amazon GameLift S3 account.</p>
pub fn secret_access_key(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_access_key = Some(input.into());
self
}
pub fn set_secret_access_key(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.secret_access_key = input;
self
}
/// <p>Token used to associate a specific build ID with the files uploaded using these
/// credentials.</p>
pub fn session_token(mut self, input: impl Into<std::string::String>) -> Self {
self.session_token = Some(input.into());
self
}
pub fn set_session_token(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.session_token = input;
self
}
/// Consumes the builder and constructs a [`AwsCredentials`](crate::model::AwsCredentials)
pub fn build(self) -> crate::model::AwsCredentials {
crate::model::AwsCredentials {
access_key_id: self.access_key_id,
secret_access_key: self.secret_access_key,
session_token: self.session_token,
}
}
}
}
impl AwsCredentials {
/// Creates a new builder-style object to manufacture [`AwsCredentials`](crate::model::AwsCredentials)
pub fn builder() -> crate::model::aws_credentials::Builder {
crate::model::aws_credentials::Builder::default()
}
}
/// <p>Settings for a target-based scaling policy (see <a>ScalingPolicy</a>). A
/// target-based policy tracks a particular fleet metric and specifies a target value for the
/// metric. As player usage changes, the policy triggers Amazon GameLift to adjust capacity so
/// that the metric returns to the target value. The target configuration specifies settings
/// as needed for the target-based policy, including the target value. </p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeFleetCapacity</a> |
/// <a>UpdateFleetCapacity</a> |
/// <a>DescribeEC2InstanceLimits</a> |
/// <a>PutScalingPolicy</a> |
/// <a>DescribeScalingPolicies</a> |
/// <a>DeleteScalingPolicy</a> |
/// <a>StopFleetActions</a> |
/// <a>StartFleetActions</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
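/// <p>A minimal, illustrative sketch of a target configuration that aims to keep 10 percent of
/// capacity idle (the value shown is hypothetical):</p>
/// ```ignore
/// let target = crate::model::TargetConfiguration::builder()
///     .target_value(10.0)
///     .build();
/// assert_eq!(target.target_value, 10.0);
/// ```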
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct TargetConfiguration {
/// <p>Desired value to use with a target-based scaling policy. The value must be relevant
/// for whatever metric the scaling policy is using. For example, in a policy using the
/// metric PercentAvailableGameSessions, the target value should be the preferred size of
/// the fleet's buffer (the percent of capacity that should be idle and ready for new game
/// sessions).</p>
pub target_value: f64,
}
impl std::fmt::Debug for TargetConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("TargetConfiguration");
formatter.field("target_value", &self.target_value);
formatter.finish()
}
}
/// See [`TargetConfiguration`](crate::model::TargetConfiguration)
pub mod target_configuration {
/// A builder for [`TargetConfiguration`](crate::model::TargetConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) target_value: std::option::Option<f64>,
}
impl Builder {
/// <p>Desired value to use with a target-based scaling policy. The value must be relevant
/// for whatever metric the scaling policy is using. For example, in a policy using the
/// metric PercentAvailableGameSessions, the target value should be the preferred size of
/// the fleet's buffer (the percent of capacity that should be idle and ready for new game
/// sessions).</p>
pub fn target_value(mut self, input: f64) -> Self {
self.target_value = Some(input);
self
}
pub fn set_target_value(mut self, input: std::option::Option<f64>) -> Self {
self.target_value = input;
self
}
/// Consumes the builder and constructs a [`TargetConfiguration`](crate::model::TargetConfiguration)
pub fn build(self) -> crate::model::TargetConfiguration {
crate::model::TargetConfiguration {
target_value: self.target_value.unwrap_or_default(),
}
}
}
}
impl TargetConfiguration {
/// Creates a new builder-style object to manufacture [`TargetConfiguration`](crate::model::TargetConfiguration)
pub fn builder() -> crate::model::target_configuration::Builder {
crate::model::target_configuration::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum PolicyType {
RuleBased,
TargetBased,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for PolicyType {
fn from(s: &str) -> Self {
match s {
"RuleBased" => PolicyType::RuleBased,
"TargetBased" => PolicyType::TargetBased,
other => PolicyType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for PolicyType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(PolicyType::from(s))
}
}
impl PolicyType {
pub fn as_str(&self) -> &str {
match self {
PolicyType::RuleBased => "RuleBased",
PolicyType::TargetBased => "TargetBased",
PolicyType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["RuleBased", "TargetBased"]
}
}
impl AsRef<str> for PolicyType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum MetricName {
ActivatingGameSessions,
ActiveGameSessions,
ActiveInstances,
AvailableGameSessions,
AvailablePlayerSessions,
CurrentPlayerSessions,
IdleInstances,
PercentAvailableGameSessions,
PercentIdleInstances,
QueueDepth,
WaitTime,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for MetricName {
fn from(s: &str) -> Self {
match s {
"ActivatingGameSessions" => MetricName::ActivatingGameSessions,
"ActiveGameSessions" => MetricName::ActiveGameSessions,
"ActiveInstances" => MetricName::ActiveInstances,
"AvailableGameSessions" => MetricName::AvailableGameSessions,
"AvailablePlayerSessions" => MetricName::AvailablePlayerSessions,
"CurrentPlayerSessions" => MetricName::CurrentPlayerSessions,
"IdleInstances" => MetricName::IdleInstances,
"PercentAvailableGameSessions" => MetricName::PercentAvailableGameSessions,
"PercentIdleInstances" => MetricName::PercentIdleInstances,
"QueueDepth" => MetricName::QueueDepth,
"WaitTime" => MetricName::WaitTime,
other => MetricName::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for MetricName {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(MetricName::from(s))
}
}
impl MetricName {
pub fn as_str(&self) -> &str {
match self {
MetricName::ActivatingGameSessions => "ActivatingGameSessions",
MetricName::ActiveGameSessions => "ActiveGameSessions",
MetricName::ActiveInstances => "ActiveInstances",
MetricName::AvailableGameSessions => "AvailableGameSessions",
MetricName::AvailablePlayerSessions => "AvailablePlayerSessions",
MetricName::CurrentPlayerSessions => "CurrentPlayerSessions",
MetricName::IdleInstances => "IdleInstances",
MetricName::PercentAvailableGameSessions => "PercentAvailableGameSessions",
MetricName::PercentIdleInstances => "PercentIdleInstances",
MetricName::QueueDepth => "QueueDepth",
MetricName::WaitTime => "WaitTime",
MetricName::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"ActivatingGameSessions",
"ActiveGameSessions",
"ActiveInstances",
"AvailableGameSessions",
"AvailablePlayerSessions",
"CurrentPlayerSessions",
"IdleInstances",
"PercentAvailableGameSessions",
"PercentIdleInstances",
"QueueDepth",
"WaitTime",
]
}
}
impl AsRef<str> for MetricName {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ComparisonOperatorType {
GreaterThanOrEqualToThreshold,
GreaterThanThreshold,
LessThanOrEqualToThreshold,
LessThanThreshold,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ComparisonOperatorType {
fn from(s: &str) -> Self {
match s {
"GreaterThanOrEqualToThreshold" => {
ComparisonOperatorType::GreaterThanOrEqualToThreshold
}
"GreaterThanThreshold" => ComparisonOperatorType::GreaterThanThreshold,
"LessThanOrEqualToThreshold" => ComparisonOperatorType::LessThanOrEqualToThreshold,
"LessThanThreshold" => ComparisonOperatorType::LessThanThreshold,
other => ComparisonOperatorType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ComparisonOperatorType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ComparisonOperatorType::from(s))
}
}
impl ComparisonOperatorType {
pub fn as_str(&self) -> &str {
match self {
ComparisonOperatorType::GreaterThanOrEqualToThreshold => {
"GreaterThanOrEqualToThreshold"
}
ComparisonOperatorType::GreaterThanThreshold => "GreaterThanThreshold",
ComparisonOperatorType::LessThanOrEqualToThreshold => "LessThanOrEqualToThreshold",
ComparisonOperatorType::LessThanThreshold => "LessThanThreshold",
ComparisonOperatorType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"GreaterThanOrEqualToThreshold",
"GreaterThanThreshold",
"LessThanOrEqualToThreshold",
"LessThanThreshold",
]
}
}
impl AsRef<str> for ComparisonOperatorType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ScalingAdjustmentType {
ChangeInCapacity,
ExactCapacity,
PercentChangeInCapacity,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ScalingAdjustmentType {
fn from(s: &str) -> Self {
match s {
"ChangeInCapacity" => ScalingAdjustmentType::ChangeInCapacity,
"ExactCapacity" => ScalingAdjustmentType::ExactCapacity,
"PercentChangeInCapacity" => ScalingAdjustmentType::PercentChangeInCapacity,
other => ScalingAdjustmentType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ScalingAdjustmentType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ScalingAdjustmentType::from(s))
}
}
impl ScalingAdjustmentType {
pub fn as_str(&self) -> &str {
match self {
ScalingAdjustmentType::ChangeInCapacity => "ChangeInCapacity",
ScalingAdjustmentType::ExactCapacity => "ExactCapacity",
ScalingAdjustmentType::PercentChangeInCapacity => "PercentChangeInCapacity",
ScalingAdjustmentType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"ChangeInCapacity",
"ExactCapacity",
"PercentChangeInCapacity",
]
}
}
impl AsRef<str> for ScalingAdjustmentType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum SortOrder {
Ascending,
Descending,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for SortOrder {
fn from(s: &str) -> Self {
match s {
"ASCENDING" => SortOrder::Ascending,
"DESCENDING" => SortOrder::Descending,
other => SortOrder::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for SortOrder {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(SortOrder::from(s))
}
}
impl SortOrder {
pub fn as_str(&self) -> &str {
match self {
SortOrder::Ascending => "ASCENDING",
SortOrder::Descending => "DESCENDING",
SortOrder::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["ASCENDING", "DESCENDING"]
}
}
impl AsRef<str> for SortOrder {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Information required to remotely connect to a fleet instance. Access is requested
/// by calling <a>GetInstanceAccess</a>. </p>
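/// <p>The <code>Debug</code> implementation redacts the <code>credentials</code> field. A
/// minimal, illustrative sketch (all identifiers are hypothetical):</p>
/// ```ignore
/// let access = crate::model::InstanceAccess::builder()
///     .fleet_id("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
///     .instance_id("i-0123456789abcdef0")
///     .credentials(
///         crate::model::InstanceCredentials::builder()
///             .user_name("remote-user")
///             .secret("example-secret")
///             .build(),
///     )
///     .build();
/// // Prints "*** Sensitive Data Redacted ***" in place of the credentials.
/// println!("{:?}", access);
/// ```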
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceAccess {
/// <p>A unique identifier for the fleet containing the instance being accessed.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for the instance being accessed.</p>
pub instance_id: std::option::Option<std::string::String>,
/// <p>IP address that is assigned to the instance.</p>
pub ip_address: std::option::Option<std::string::String>,
/// <p>Operating system that is running on the instance.</p>
pub operating_system: std::option::Option<crate::model::OperatingSystem>,
/// <p>Credentials required to access the instance.</p>
pub credentials: std::option::Option<crate::model::InstanceCredentials>,
}
impl std::fmt::Debug for InstanceAccess {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceAccess");
formatter.field("fleet_id", &self.fleet_id);
formatter.field("instance_id", &self.instance_id);
formatter.field("ip_address", &self.ip_address);
formatter.field("operating_system", &self.operating_system);
formatter.field("credentials", &"*** Sensitive Data Redacted ***");
formatter.finish()
}
}
/// See [`InstanceAccess`](crate::model::InstanceAccess)
pub mod instance_access {
/// A builder for [`InstanceAccess`](crate::model::InstanceAccess)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) instance_id: std::option::Option<std::string::String>,
pub(crate) ip_address: std::option::Option<std::string::String>,
pub(crate) operating_system: std::option::Option<crate::model::OperatingSystem>,
pub(crate) credentials: std::option::Option<crate::model::InstanceCredentials>,
}
impl Builder {
/// <p>A unique identifier for the fleet containing the instance being accessed.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>A unique identifier for the instance being accessed.</p>
pub fn instance_id(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_id = Some(input.into());
self
}
pub fn set_instance_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.instance_id = input;
self
}
/// <p>IP address that is assigned to the instance.</p>
pub fn ip_address(mut self, input: impl Into<std::string::String>) -> Self {
self.ip_address = Some(input.into());
self
}
pub fn set_ip_address(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ip_address = input;
self
}
/// <p>Operating system that is running on the instance.</p>
pub fn operating_system(mut self, input: crate::model::OperatingSystem) -> Self {
self.operating_system = Some(input);
self
}
pub fn set_operating_system(
mut self,
input: std::option::Option<crate::model::OperatingSystem>,
) -> Self {
self.operating_system = input;
self
}
/// <p>Credentials required to access the instance.</p>
pub fn credentials(mut self, input: crate::model::InstanceCredentials) -> Self {
self.credentials = Some(input);
self
}
pub fn set_credentials(
mut self,
input: std::option::Option<crate::model::InstanceCredentials>,
) -> Self {
self.credentials = input;
self
}
/// Consumes the builder and constructs a [`InstanceAccess`](crate::model::InstanceAccess)
pub fn build(self) -> crate::model::InstanceAccess {
crate::model::InstanceAccess {
fleet_id: self.fleet_id,
instance_id: self.instance_id,
ip_address: self.ip_address,
operating_system: self.operating_system,
credentials: self.credentials,
}
}
}
}
impl InstanceAccess {
/// Creates a new builder-style object to manufacture [`InstanceAccess`](crate::model::InstanceAccess)
pub fn builder() -> crate::model::instance_access::Builder {
crate::model::instance_access::Builder::default()
}
}
/// <p>Set of credentials required to remotely access a fleet instance. Access credentials
/// are requested by calling <a>GetInstanceAccess</a> and returned in an <a>InstanceAccess</a> object.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct InstanceCredentials {
/// <p>User login string.</p>
pub user_name: std::option::Option<std::string::String>,
/// <p>Secret string. For Windows instances, the secret is a password for use with Windows
/// Remote Desktop. For Linux instances, it is a private key (which must be saved as a
/// <code>.pem</code> file) for use with SSH.</p>
pub secret: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for InstanceCredentials {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("InstanceCredentials");
formatter.field("user_name", &self.user_name);
formatter.field("secret", &self.secret);
formatter.finish()
}
}
/// See [`InstanceCredentials`](crate::model::InstanceCredentials)
pub mod instance_credentials {
/// A builder for [`InstanceCredentials`](crate::model::InstanceCredentials)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) user_name: std::option::Option<std::string::String>,
pub(crate) secret: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>User login string.</p>
pub fn user_name(mut self, input: impl Into<std::string::String>) -> Self {
self.user_name = Some(input.into());
self
}
pub fn set_user_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.user_name = input;
self
}
/// <p>Secret string. For Windows instances, the secret is a password for use with Windows
/// Remote Desktop. For Linux instances, it is a private key (which must be saved as a
/// <code>.pem</code> file) for use with SSH.</p>
pub fn secret(mut self, input: impl Into<std::string::String>) -> Self {
self.secret = Some(input.into());
self
}
pub fn set_secret(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret = input;
self
}
/// Consumes the builder and constructs a [`InstanceCredentials`](crate::model::InstanceCredentials)
pub fn build(self) -> crate::model::InstanceCredentials {
crate::model::InstanceCredentials {
user_name: self.user_name,
secret: self.secret,
}
}
}
}
impl InstanceCredentials {
/// Creates a new builder-style object to manufacture [`InstanceCredentials`](crate::model::InstanceCredentials)
pub fn builder() -> crate::model::instance_credentials::Builder {
crate::model::instance_credentials::Builder::default()
}
}
/// <p>Represents a peering connection between a VPC on one of your AWS accounts and the
/// VPC for your Amazon GameLift fleets. This record may be for an active peering connection or a
/// pending connection that has not yet been established.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateVpcPeeringAuthorization</a> |
/// <a>DescribeVpcPeeringAuthorizations</a> |
/// <a>DeleteVpcPeeringAuthorization</a> |
/// <a>CreateVpcPeeringConnection</a> |
/// <a>DescribeVpcPeeringConnections</a> |
/// <a>DeleteVpcPeeringConnection</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct VpcPeeringConnection {
/// <p>A unique identifier for the fleet. This ID determines the ID of the Amazon GameLift VPC for your fleet.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>
/// The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift fleet resource for this connection.
/// </p>
pub fleet_arn: std::option::Option<std::string::String>,
/// <p>CIDR block of IPv4 addresses assigned to the VPC peering connection for the
/// GameLift VPC. The peered VPC also has an IPv4 CIDR block associated with it; these
/// blocks cannot overlap or the peering connection cannot be created. </p>
pub ip_v4_cidr_block: std::option::Option<std::string::String>,
/// <p>A unique identifier that is automatically assigned to the connection record. This ID
/// is referenced in VPC peering connection events, and is used when deleting a connection
/// with <a>DeleteVpcPeeringConnection</a>. </p>
pub vpc_peering_connection_id: std::option::Option<std::string::String>,
/// <p>The status information about the connection. Status indicates if a
/// connection is pending, successful, or failed.</p>
pub status: std::option::Option<crate::model::VpcPeeringConnectionStatus>,
/// <p>A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The
/// VPC must be in the same Region as your fleet. To look up a VPC ID, use the
/// <a href="https://console.aws.amazon.com/vpc/">VPC Dashboard</a> in the AWS Management Console.
/// Learn more about VPC peering in <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html">VPC Peering with GameLift Fleets</a>.</p>
pub peer_vpc_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for the VPC that contains the Amazon GameLift fleet for this
/// connection. This VPC is managed by Amazon GameLift and does not appear in your AWS account.
/// </p>
pub game_lift_vpc_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for VpcPeeringConnection {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("VpcPeeringConnection");
formatter.field("fleet_id", &self.fleet_id);
formatter.field("fleet_arn", &self.fleet_arn);
formatter.field("ip_v4_cidr_block", &self.ip_v4_cidr_block);
formatter.field("vpc_peering_connection_id", &self.vpc_peering_connection_id);
formatter.field("status", &self.status);
formatter.field("peer_vpc_id", &self.peer_vpc_id);
formatter.field("game_lift_vpc_id", &self.game_lift_vpc_id);
formatter.finish()
}
}
/// See [`VpcPeeringConnection`](crate::model::VpcPeeringConnection)
pub mod vpc_peering_connection {
/// A builder for [`VpcPeeringConnection`](crate::model::VpcPeeringConnection)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) fleet_arn: std::option::Option<std::string::String>,
pub(crate) ip_v4_cidr_block: std::option::Option<std::string::String>,
pub(crate) vpc_peering_connection_id: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::VpcPeeringConnectionStatus>,
pub(crate) peer_vpc_id: std::option::Option<std::string::String>,
pub(crate) game_lift_vpc_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for the fleet. This ID determines the ID of the Amazon GameLift VPC for your fleet.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>
/// The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift fleet resource for this connection.
/// </p>
pub fn fleet_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_arn = Some(input.into());
self
}
pub fn set_fleet_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_arn = input;
self
}
/// <p>CIDR block of IPv4 addresses assigned to the VPC peering connection for the
/// GameLift VPC. The peered VPC also has an IPv4 CIDR block associated with it; these
/// blocks cannot overlap or the peering connection cannot be created. </p>
pub fn ip_v4_cidr_block(mut self, input: impl Into<std::string::String>) -> Self {
self.ip_v4_cidr_block = Some(input.into());
self
}
pub fn set_ip_v4_cidr_block(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.ip_v4_cidr_block = input;
self
}
/// <p>A unique identifier that is automatically assigned to the connection record. This ID
/// is referenced in VPC peering connection events, and is used when deleting a connection
/// with <a>DeleteVpcPeeringConnection</a>. </p>
pub fn vpc_peering_connection_id(mut self, input: impl Into<std::string::String>) -> Self {
self.vpc_peering_connection_id = Some(input.into());
self
}
pub fn set_vpc_peering_connection_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.vpc_peering_connection_id = input;
self
}
/// <p>The status information about the connection. Status indicates if a
/// connection is pending, successful, or failed.</p>
pub fn status(mut self, input: crate::model::VpcPeeringConnectionStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::VpcPeeringConnectionStatus>,
) -> Self {
self.status = input;
self
}
/// <p>A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The
/// VPC must be in the same Region as your fleet. To look up a VPC ID, use the
/// <a href="https://console.aws.amazon.com/vpc/">VPC Dashboard</a> in the AWS Management Console.
/// Learn more about VPC peering in <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html">VPC Peering with GameLift Fleets</a>.</p>
pub fn peer_vpc_id(mut self, input: impl Into<std::string::String>) -> Self {
self.peer_vpc_id = Some(input.into());
self
}
pub fn set_peer_vpc_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.peer_vpc_id = input;
self
}
/// <p>A unique identifier for the VPC that contains the Amazon GameLift fleet for this
/// connection. This VPC is managed by Amazon GameLift and does not appear in your AWS account.
/// </p>
pub fn game_lift_vpc_id(mut self, input: impl Into<std::string::String>) -> Self {
self.game_lift_vpc_id = Some(input.into());
self
}
pub fn set_game_lift_vpc_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_lift_vpc_id = input;
self
}
/// Consumes the builder and constructs a [`VpcPeeringConnection`](crate::model::VpcPeeringConnection)
pub fn build(self) -> crate::model::VpcPeeringConnection {
crate::model::VpcPeeringConnection {
fleet_id: self.fleet_id,
fleet_arn: self.fleet_arn,
ip_v4_cidr_block: self.ip_v4_cidr_block,
vpc_peering_connection_id: self.vpc_peering_connection_id,
status: self.status,
peer_vpc_id: self.peer_vpc_id,
game_lift_vpc_id: self.game_lift_vpc_id,
}
}
}
}
impl VpcPeeringConnection {
/// Creates a new builder-style object to manufacture [`VpcPeeringConnection`](crate::model::VpcPeeringConnection)
pub fn builder() -> crate::model::vpc_peering_connection::Builder {
crate::model::vpc_peering_connection::Builder::default()
}
}
/// <p>Represents status information for a VPC peering connection. Status is associated
/// with a <a>VpcPeeringConnection</a> object. Status codes and messages are
/// provided from EC2 (see <a href="https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VpcPeeringConnectionStateReason.html">VpcPeeringConnectionStateReason</a>). Connection status information is also
/// communicated as a fleet <a>Event</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct VpcPeeringConnectionStatus {
/// <p>Code indicating the status of a VPC peering connection.</p>
pub code: std::option::Option<std::string::String>,
/// <p>Additional messaging associated with the connection status. </p>
pub message: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for VpcPeeringConnectionStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("VpcPeeringConnectionStatus");
formatter.field("code", &self.code);
formatter.field("message", &self.message);
formatter.finish()
}
}
/// See [`VpcPeeringConnectionStatus`](crate::model::VpcPeeringConnectionStatus)
pub mod vpc_peering_connection_status {
/// A builder for [`VpcPeeringConnectionStatus`](crate::model::VpcPeeringConnectionStatus)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) code: std::option::Option<std::string::String>,
pub(crate) message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>Code indicating the status of a VPC peering connection.</p>
pub fn code(mut self, input: impl Into<std::string::String>) -> Self {
self.code = Some(input.into());
self
}
pub fn set_code(mut self, input: std::option::Option<std::string::String>) -> Self {
self.code = input;
self
}
/// <p>Additional messaging associated with the connection status. </p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// Consumes the builder and constructs a [`VpcPeeringConnectionStatus`](crate::model::VpcPeeringConnectionStatus)
pub fn build(self) -> crate::model::VpcPeeringConnectionStatus {
crate::model::VpcPeeringConnectionStatus {
code: self.code,
message: self.message,
}
}
}
}
impl VpcPeeringConnectionStatus {
/// Creates a new builder-style object to manufacture [`VpcPeeringConnectionStatus`](crate::model::VpcPeeringConnectionStatus)
pub fn builder() -> crate::model::vpc_peering_connection_status::Builder {
crate::model::vpc_peering_connection_status::Builder::default()
}
}
/// <p>Represents an authorization for a VPC peering connection between the VPC for an
/// Amazon GameLift fleet and another VPC on an account you have access to. This authorization
/// must exist and be valid for the peering connection to be established. Authorizations are
/// valid for 24 hours after they are issued.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateVpcPeeringAuthorization</a> |
/// <a>DescribeVpcPeeringAuthorizations</a> |
/// <a>DeleteVpcPeeringAuthorization</a> |
/// <a>CreateVpcPeeringConnection</a> |
/// <a>DescribeVpcPeeringConnections</a> |
/// <a>DeleteVpcPeeringConnection</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct VpcPeeringAuthorization {
/// <p>A unique identifier for the AWS account that you use to manage your GameLift fleet.
/// You can find your Account ID in the AWS Management Console under account settings.</p>
pub game_lift_aws_account_id: std::option::Option<std::string::String>,
/// <p></p>
pub peer_vpc_aws_account_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The
/// VPC must be in the same Region as your fleet. To look up a VPC ID, use the
/// <a href="https://console.aws.amazon.com/vpc/">VPC Dashboard</a> in the AWS Management Console.
/// Learn more about VPC peering in <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html">VPC Peering with GameLift Fleets</a>.</p>
pub peer_vpc_id: std::option::Option<std::string::String>,
/// <p>Time stamp indicating when this authorization was issued. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
/// <p>Time stamp indicating when this authorization expires (24 hours after issuance).
/// Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub expiration_time: std::option::Option<smithy_types::Instant>,
}
impl std::fmt::Debug for VpcPeeringAuthorization {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("VpcPeeringAuthorization");
formatter.field("game_lift_aws_account_id", &self.game_lift_aws_account_id);
formatter.field("peer_vpc_aws_account_id", &self.peer_vpc_aws_account_id);
formatter.field("peer_vpc_id", &self.peer_vpc_id);
formatter.field("creation_time", &self.creation_time);
formatter.field("expiration_time", &self.expiration_time);
formatter.finish()
}
}
/// See [`VpcPeeringAuthorization`](crate::model::VpcPeeringAuthorization)
pub mod vpc_peering_authorization {
/// A builder for [`VpcPeeringAuthorization`](crate::model::VpcPeeringAuthorization)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) game_lift_aws_account_id: std::option::Option<std::string::String>,
pub(crate) peer_vpc_aws_account_id: std::option::Option<std::string::String>,
pub(crate) peer_vpc_id: std::option::Option<std::string::String>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
pub(crate) expiration_time: std::option::Option<smithy_types::Instant>,
}
impl Builder {
/// <p>A unique identifier for the AWS account that you use to manage your GameLift fleet.
/// You can find your Account ID in the AWS Management Console under account settings.</p>
pub fn game_lift_aws_account_id(mut self, input: impl Into<std::string::String>) -> Self {
self.game_lift_aws_account_id = Some(input.into());
self
}
pub fn set_game_lift_aws_account_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_lift_aws_account_id = input;
self
}
/// <p></p>
pub fn peer_vpc_aws_account_id(mut self, input: impl Into<std::string::String>) -> Self {
self.peer_vpc_aws_account_id = Some(input.into());
self
}
pub fn set_peer_vpc_aws_account_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.peer_vpc_aws_account_id = input;
self
}
/// <p>A unique identifier for a VPC with resources to be accessed by your GameLift fleet. The
/// VPC must be in the same Region as your fleet. To look up a VPC ID, use the
/// <a href="https://console.aws.amazon.com/vpc/">VPC Dashboard</a> in the AWS Management Console.
/// Learn more about VPC peering in <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html">VPC Peering with GameLift Fleets</a>.</p>
pub fn peer_vpc_id(mut self, input: impl Into<std::string::String>) -> Self {
self.peer_vpc_id = Some(input.into());
self
}
pub fn set_peer_vpc_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.peer_vpc_id = input;
self
}
/// <p>Time stamp indicating when this authorization was issued. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// <p>Time stamp indicating when this authorization expires (24 hours after issuance).
/// Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn expiration_time(mut self, input: smithy_types::Instant) -> Self {
self.expiration_time = Some(input);
self
}
pub fn set_expiration_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.expiration_time = input;
self
}
/// Consumes the builder and constructs a [`VpcPeeringAuthorization`](crate::model::VpcPeeringAuthorization)
pub fn build(self) -> crate::model::VpcPeeringAuthorization {
crate::model::VpcPeeringAuthorization {
game_lift_aws_account_id: self.game_lift_aws_account_id,
peer_vpc_aws_account_id: self.peer_vpc_aws_account_id,
peer_vpc_id: self.peer_vpc_id,
creation_time: self.creation_time,
expiration_time: self.expiration_time,
}
}
}
}
impl VpcPeeringAuthorization {
/// Creates a new builder-style object to manufacture [`VpcPeeringAuthorization`](crate::model::VpcPeeringAuthorization)
pub fn builder() -> crate::model::vpc_peering_authorization::Builder {
crate::model::vpc_peering_authorization::Builder::default()
}
}
/// <p>Rule that controls how a fleet is scaled. Scaling policies are uniquely identified
/// by the combination of name and fleet ID.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeFleetCapacity</a> |
/// <a>UpdateFleetCapacity</a> |
/// <a>DescribeEC2InstanceLimits</a> |
/// <a>PutScalingPolicy</a> |
/// <a>DescribeScalingPolicies</a> |
/// <a>DeleteScalingPolicy</a> |
/// <a>StopFleetActions</a> |
/// <a>StartFleetActions</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
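/// <p>An illustrative sketch of a rule-based policy description (values are hypothetical; the
/// builder setters follow the generated pattern used throughout this module): add two
/// instances whenever <code>PercentIdleInstances</code> stays below 10 for five consecutive
/// minutes.</p>
/// ```ignore
/// let policy = crate::model::ScalingPolicy::builder()
///     .name("scale-up-on-low-idle")
///     .policy_type(crate::model::PolicyType::RuleBased)
///     .metric_name(crate::model::MetricName::PercentIdleInstances)
///     .comparison_operator(crate::model::ComparisonOperatorType::LessThanThreshold)
///     .threshold(10.0)
///     .evaluation_periods(5)
///     .scaling_adjustment_type(crate::model::ScalingAdjustmentType::ChangeInCapacity)
///     .scaling_adjustment(2)
///     .build();
/// ```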
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ScalingPolicy {
/// <p>A unique identifier for the fleet that is associated with this scaling policy.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>.</p>
pub fleet_arn: std::option::Option<std::string::String>,
/// <p>A descriptive label that is associated with a fleet's scaling policy. Policy names do not need to be unique.</p>
pub name: std::option::Option<std::string::String>,
    /// <p>Current status of the scaling policy. The scaling policy can be in force only when
    /// in an <code>ACTIVE</code> status. Scaling policies can be suspended for individual
    /// fleets (see <a>StopFleetActions</a>); if suspended for a fleet, the policy
    /// status does not change. View a fleet's stopped actions by calling <a>DescribeFleetCapacity</a>.</p>
/// <ul>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- The scaling policy can be used for
/// auto-scaling a fleet.</p>
/// </li>
/// <li>
/// <p>
/// <b>UPDATE_REQUESTED</b> -- A request to update the
/// scaling policy has been received.</p>
/// </li>
/// <li>
/// <p>
/// <b>UPDATING</b> -- A change is being made to the
/// scaling policy.</p>
/// </li>
/// <li>
/// <p>
/// <b>DELETE_REQUESTED</b> -- A request to delete the
/// scaling policy has been received.</p>
/// </li>
/// <li>
/// <p>
/// <b>DELETING</b> -- The scaling policy is being
/// deleted.</p>
/// </li>
/// <li>
/// <p>
/// <b>DELETED</b> -- The scaling policy has been
/// deleted.</p>
/// </li>
/// <li>
/// <p>
/// <b>ERROR</b> -- An error occurred in creating the
/// policy. It should be removed and recreated.</p>
/// </li>
/// </ul>
pub status: std::option::Option<crate::model::ScalingStatusType>,
/// <p>Amount of adjustment to make, based on the scaling adjustment type.</p>
pub scaling_adjustment: i32,
/// <p>The type of adjustment to make to a fleet's instance count (see <a>FleetCapacity</a>):</p>
/// <ul>
/// <li>
/// <p>
/// <b>ChangeInCapacity</b> -- add (or subtract) the
/// scaling adjustment value from the current instance count. Positive values scale
/// up while negative values scale down.</p>
/// </li>
/// <li>
/// <p>
/// <b>ExactCapacity</b> -- set the instance count to the
/// scaling adjustment value.</p>
/// </li>
/// <li>
/// <p>
/// <b>PercentChangeInCapacity</b> -- increase or reduce
/// the current instance count by the scaling adjustment, read as a percentage.
/// Positive values scale up while negative values scale down.</p>
/// </li>
/// </ul>
pub scaling_adjustment_type: std::option::Option<crate::model::ScalingAdjustmentType>,
/// <p>Comparison operator to use when measuring a metric against the threshold
/// value.</p>
pub comparison_operator: std::option::Option<crate::model::ComparisonOperatorType>,
/// <p>Metric value used to trigger a scaling event.</p>
pub threshold: f64,
/// <p>Length of time (in minutes) the metric must be at or beyond the threshold before a
/// scaling event is triggered.</p>
pub evaluation_periods: std::option::Option<i32>,
/// <p>Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For
/// detailed descriptions of fleet metrics, see <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html">Monitor Amazon GameLift
/// with Amazon CloudWatch</a>. </p>
/// <ul>
/// <li>
/// <p>
/// <b>ActivatingGameSessions</b> -- Game sessions in
/// the process of being created.</p>
/// </li>
/// <li>
/// <p>
/// <b>ActiveGameSessions</b> -- Game sessions that
/// are currently running.</p>
/// </li>
/// <li>
/// <p>
/// <b>ActiveInstances</b> -- Fleet instances that
/// are currently running at least one game session.</p>
/// </li>
/// <li>
/// <p>
/// <b>AvailableGameSessions</b> -- Additional game
/// sessions that fleet could host simultaneously, given current capacity.</p>
/// </li>
/// <li>
/// <p>
/// <b>AvailablePlayerSessions</b> -- Empty player
/// slots in currently active game sessions. This includes game sessions that are
/// not currently accepting players. Reserved player slots are not
/// included.</p>
/// </li>
/// <li>
/// <p>
/// <b>CurrentPlayerSessions</b> -- Player slots in
/// active game sessions that are being used by a player or are reserved for a
/// player. </p>
/// </li>
/// <li>
/// <p>
/// <b>IdleInstances</b> -- Active instances that are
/// currently hosting zero game sessions. </p>
/// </li>
/// <li>
/// <p>
/// <b>PercentAvailableGameSessions</b> -- Unused
/// percentage of the total number of game sessions that a fleet could host
/// simultaneously, given current capacity. Use this metric for a target-based
/// scaling policy.</p>
/// </li>
/// <li>
/// <p>
/// <b>PercentIdleInstances</b> -- Percentage of the
/// total number of active instances that are hosting zero game sessions.</p>
/// </li>
/// <li>
/// <p>
/// <b>QueueDepth</b> -- Pending game session
/// placement requests, in any queue, where the current fleet is the top-priority
/// destination.</p>
/// </li>
/// <li>
/// <p>
/// <b>WaitTime</b> -- Current wait time for pending
/// game session placement requests, in any queue, where the current fleet is the
/// top-priority destination. </p>
/// </li>
/// </ul>
pub metric_name: std::option::Option<crate::model::MetricName>,
/// <p>The type of scaling policy to create. For a target-based policy, set the parameter
/// <i>MetricName</i> to 'PercentAvailableGameSessions' and specify a
/// <i>TargetConfiguration</i>. For a rule-based policy set the following
/// parameters: <i>MetricName</i>, <i>ComparisonOperator</i>,
/// <i>Threshold</i>, <i>EvaluationPeriods</i>,
/// <i>ScalingAdjustmentType</i>, and
/// <i>ScalingAdjustment</i>.</p>
pub policy_type: std::option::Option<crate::model::PolicyType>,
/// <p>An object that contains settings for a target-based scaling policy.</p>
pub target_configuration: std::option::Option<crate::model::TargetConfiguration>,
/// <p>The current status of the fleet's scaling policies in a requested fleet location. The
/// status <code>PENDING_UPDATE</code> indicates that an update was requested for the fleet
/// but has not yet been completed for the location.</p>
pub update_status: std::option::Option<crate::model::LocationUpdateStatus>,
/// <p>
/// </p>
pub location: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ScalingPolicy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ScalingPolicy");
formatter.field("fleet_id", &self.fleet_id);
formatter.field("fleet_arn", &self.fleet_arn);
formatter.field("name", &self.name);
formatter.field("status", &self.status);
formatter.field("scaling_adjustment", &self.scaling_adjustment);
formatter.field("scaling_adjustment_type", &self.scaling_adjustment_type);
formatter.field("comparison_operator", &self.comparison_operator);
formatter.field("threshold", &self.threshold);
formatter.field("evaluation_periods", &self.evaluation_periods);
formatter.field("metric_name", &self.metric_name);
formatter.field("policy_type", &self.policy_type);
formatter.field("target_configuration", &self.target_configuration);
formatter.field("update_status", &self.update_status);
formatter.field("location", &self.location);
formatter.finish()
}
}
/// See [`ScalingPolicy`](crate::model::ScalingPolicy)
pub mod scaling_policy {
/// A builder for [`ScalingPolicy`](crate::model::ScalingPolicy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) fleet_arn: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::ScalingStatusType>,
pub(crate) scaling_adjustment: std::option::Option<i32>,
pub(crate) scaling_adjustment_type:
std::option::Option<crate::model::ScalingAdjustmentType>,
pub(crate) comparison_operator: std::option::Option<crate::model::ComparisonOperatorType>,
pub(crate) threshold: std::option::Option<f64>,
pub(crate) evaluation_periods: std::option::Option<i32>,
pub(crate) metric_name: std::option::Option<crate::model::MetricName>,
pub(crate) policy_type: std::option::Option<crate::model::PolicyType>,
pub(crate) target_configuration: std::option::Option<crate::model::TargetConfiguration>,
pub(crate) update_status: std::option::Option<crate::model::LocationUpdateStatus>,
pub(crate) location: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for the fleet that is associated with this scaling policy.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>.</p>
pub fn fleet_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_arn = Some(input.into());
self
}
pub fn set_fleet_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_arn = input;
self
}
/// <p>A descriptive label that is associated with a fleet's scaling policy. Policy names do not need to be unique.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>Current status of the scaling policy. The scaling policy can be in force only when
/// in an <code>ACTIVE</code> status. Scaling policies can be suspended for individual
        /// fleets (see <a>StopFleetActions</a>); if suspended for a fleet, the policy
/// status does not change. View a fleet's stopped actions by calling <a>DescribeFleetCapacity</a>.</p>
/// <ul>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- The scaling policy can be used for
/// auto-scaling a fleet.</p>
/// </li>
/// <li>
/// <p>
/// <b>UPDATE_REQUESTED</b> -- A request to update the
/// scaling policy has been received.</p>
/// </li>
/// <li>
/// <p>
/// <b>UPDATING</b> -- A change is being made to the
/// scaling policy.</p>
/// </li>
/// <li>
/// <p>
/// <b>DELETE_REQUESTED</b> -- A request to delete the
/// scaling policy has been received.</p>
/// </li>
/// <li>
/// <p>
/// <b>DELETING</b> -- The scaling policy is being
/// deleted.</p>
/// </li>
/// <li>
/// <p>
/// <b>DELETED</b> -- The scaling policy has been
/// deleted.</p>
/// </li>
/// <li>
/// <p>
/// <b>ERROR</b> -- An error occurred in creating the
/// policy. It should be removed and recreated.</p>
/// </li>
/// </ul>
pub fn status(mut self, input: crate::model::ScalingStatusType) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::ScalingStatusType>,
) -> Self {
self.status = input;
self
}
/// <p>Amount of adjustment to make, based on the scaling adjustment type.</p>
pub fn scaling_adjustment(mut self, input: i32) -> Self {
self.scaling_adjustment = Some(input);
self
}
pub fn set_scaling_adjustment(mut self, input: std::option::Option<i32>) -> Self {
self.scaling_adjustment = input;
self
}
/// <p>The type of adjustment to make to a fleet's instance count (see <a>FleetCapacity</a>):</p>
/// <ul>
/// <li>
/// <p>
/// <b>ChangeInCapacity</b> -- add (or subtract) the
/// scaling adjustment value from the current instance count. Positive values scale
/// up while negative values scale down.</p>
/// </li>
/// <li>
/// <p>
/// <b>ExactCapacity</b> -- set the instance count to the
/// scaling adjustment value.</p>
/// </li>
/// <li>
/// <p>
/// <b>PercentChangeInCapacity</b> -- increase or reduce
/// the current instance count by the scaling adjustment, read as a percentage.
/// Positive values scale up while negative values scale down.</p>
/// </li>
/// </ul>
pub fn scaling_adjustment_type(
mut self,
input: crate::model::ScalingAdjustmentType,
) -> Self {
self.scaling_adjustment_type = Some(input);
self
}
pub fn set_scaling_adjustment_type(
mut self,
input: std::option::Option<crate::model::ScalingAdjustmentType>,
) -> Self {
self.scaling_adjustment_type = input;
self
}
/// <p>Comparison operator to use when measuring a metric against the threshold
/// value.</p>
pub fn comparison_operator(mut self, input: crate::model::ComparisonOperatorType) -> Self {
self.comparison_operator = Some(input);
self
}
pub fn set_comparison_operator(
mut self,
input: std::option::Option<crate::model::ComparisonOperatorType>,
) -> Self {
self.comparison_operator = input;
self
}
/// <p>Metric value used to trigger a scaling event.</p>
pub fn threshold(mut self, input: f64) -> Self {
self.threshold = Some(input);
self
}
pub fn set_threshold(mut self, input: std::option::Option<f64>) -> Self {
self.threshold = input;
self
}
/// <p>Length of time (in minutes) the metric must be at or beyond the threshold before a
/// scaling event is triggered.</p>
pub fn evaluation_periods(mut self, input: i32) -> Self {
self.evaluation_periods = Some(input);
self
}
pub fn set_evaluation_periods(mut self, input: std::option::Option<i32>) -> Self {
self.evaluation_periods = input;
self
}
/// <p>Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For
/// detailed descriptions of fleet metrics, see <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html">Monitor Amazon GameLift
/// with Amazon CloudWatch</a>. </p>
/// <ul>
/// <li>
/// <p>
/// <b>ActivatingGameSessions</b> -- Game sessions in
/// the process of being created.</p>
/// </li>
/// <li>
/// <p>
/// <b>ActiveGameSessions</b> -- Game sessions that
/// are currently running.</p>
/// </li>
/// <li>
/// <p>
/// <b>ActiveInstances</b> -- Fleet instances that
/// are currently running at least one game session.</p>
/// </li>
/// <li>
/// <p>
/// <b>AvailableGameSessions</b> -- Additional game
        /// sessions that the fleet could host simultaneously, given current capacity.</p>
/// </li>
/// <li>
/// <p>
/// <b>AvailablePlayerSessions</b> -- Empty player
/// slots in currently active game sessions. This includes game sessions that are
/// not currently accepting players. Reserved player slots are not
/// included.</p>
/// </li>
/// <li>
/// <p>
/// <b>CurrentPlayerSessions</b> -- Player slots in
/// active game sessions that are being used by a player or are reserved for a
/// player. </p>
/// </li>
/// <li>
/// <p>
/// <b>IdleInstances</b> -- Active instances that are
/// currently hosting zero game sessions. </p>
/// </li>
/// <li>
/// <p>
/// <b>PercentAvailableGameSessions</b> -- Unused
/// percentage of the total number of game sessions that a fleet could host
/// simultaneously, given current capacity. Use this metric for a target-based
/// scaling policy.</p>
/// </li>
/// <li>
/// <p>
/// <b>PercentIdleInstances</b> -- Percentage of the
/// total number of active instances that are hosting zero game sessions.</p>
/// </li>
/// <li>
/// <p>
/// <b>QueueDepth</b> -- Pending game session
/// placement requests, in any queue, where the current fleet is the top-priority
/// destination.</p>
/// </li>
/// <li>
/// <p>
/// <b>WaitTime</b> -- Current wait time for pending
/// game session placement requests, in any queue, where the current fleet is the
/// top-priority destination. </p>
/// </li>
/// </ul>
pub fn metric_name(mut self, input: crate::model::MetricName) -> Self {
self.metric_name = Some(input);
self
}
pub fn set_metric_name(
mut self,
input: std::option::Option<crate::model::MetricName>,
) -> Self {
self.metric_name = input;
self
}
/// <p>The type of scaling policy to create. For a target-based policy, set the parameter
/// <i>MetricName</i> to 'PercentAvailableGameSessions' and specify a
/// <i>TargetConfiguration</i>. For a rule-based policy set the following
/// parameters: <i>MetricName</i>, <i>ComparisonOperator</i>,
/// <i>Threshold</i>, <i>EvaluationPeriods</i>,
/// <i>ScalingAdjustmentType</i>, and
/// <i>ScalingAdjustment</i>.</p>
pub fn policy_type(mut self, input: crate::model::PolicyType) -> Self {
self.policy_type = Some(input);
self
}
pub fn set_policy_type(
mut self,
input: std::option::Option<crate::model::PolicyType>,
) -> Self {
self.policy_type = input;
self
}
/// <p>An object that contains settings for a target-based scaling policy.</p>
pub fn target_configuration(mut self, input: crate::model::TargetConfiguration) -> Self {
self.target_configuration = Some(input);
self
}
pub fn set_target_configuration(
mut self,
input: std::option::Option<crate::model::TargetConfiguration>,
) -> Self {
self.target_configuration = input;
self
}
/// <p>The current status of the fleet's scaling policies in a requested fleet location. The
/// status <code>PENDING_UPDATE</code> indicates that an update was requested for the fleet
/// but has not yet been completed for the location.</p>
pub fn update_status(mut self, input: crate::model::LocationUpdateStatus) -> Self {
self.update_status = Some(input);
self
}
pub fn set_update_status(
mut self,
input: std::option::Option<crate::model::LocationUpdateStatus>,
) -> Self {
self.update_status = input;
self
}
        /// <p>The fleet location.</p>
pub fn location(mut self, input: impl Into<std::string::String>) -> Self {
self.location = Some(input.into());
self
}
pub fn set_location(mut self, input: std::option::Option<std::string::String>) -> Self {
self.location = input;
self
}
/// Consumes the builder and constructs a [`ScalingPolicy`](crate::model::ScalingPolicy)
pub fn build(self) -> crate::model::ScalingPolicy {
crate::model::ScalingPolicy {
fleet_id: self.fleet_id,
fleet_arn: self.fleet_arn,
name: self.name,
status: self.status,
scaling_adjustment: self.scaling_adjustment.unwrap_or_default(),
scaling_adjustment_type: self.scaling_adjustment_type,
comparison_operator: self.comparison_operator,
threshold: self.threshold.unwrap_or_default(),
evaluation_periods: self.evaluation_periods,
metric_name: self.metric_name,
policy_type: self.policy_type,
target_configuration: self.target_configuration,
update_status: self.update_status,
location: self.location,
}
}
}
}
impl ScalingPolicy {
/// Creates a new builder-style object to manufacture [`ScalingPolicy`](crate::model::ScalingPolicy)
pub fn builder() -> crate::model::scaling_policy::Builder {
crate::model::scaling_policy::Builder::default()
}
}
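// ---------------------------------------------------------------------------
// A minimal usage sketch (not part of the generated code) showing how the
// builder above assembles a `ScalingPolicy`. Only methods and enum variants
// defined in this module are used; the fleet identifier and policy name are
// made up for illustration, and a real policy would also set `metric_name`,
// `policy_type`, and related fields.
#[cfg(test)]
mod scaling_policy_builder_example {
    #[test]
    fn builds_a_scaling_policy_from_parts() {
        let policy = crate::model::ScalingPolicy::builder()
            .fleet_id("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
            .name("scale-on-queue-depth")
            .status(crate::model::ScalingStatusType::Active)
            .scaling_adjustment(2)
            .threshold(10.0)
            .evaluation_periods(5)
            .update_status(crate::model::LocationUpdateStatus::PendingUpdate)
            .location("us-west-2")
            .build();

        // String-typed fields are stored as `Option<String>`.
        assert_eq!(policy.name.as_deref(), Some("scale-on-queue-depth"));
        assert_eq!(policy.evaluation_periods, Some(5));
    }
}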
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum LocationUpdateStatus {
PendingUpdate,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for LocationUpdateStatus {
fn from(s: &str) -> Self {
match s {
"PENDING_UPDATE" => LocationUpdateStatus::PendingUpdate,
other => LocationUpdateStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for LocationUpdateStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(LocationUpdateStatus::from(s))
}
}
impl LocationUpdateStatus {
pub fn as_str(&self) -> &str {
match self {
LocationUpdateStatus::PendingUpdate => "PENDING_UPDATE",
LocationUpdateStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["PENDING_UPDATE"]
}
}
impl AsRef<str> for LocationUpdateStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
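// A small sketch (not part of the generated code) of the forward-compatible
// enum pattern used throughout this module: known strings map to named
// variants, while anything else is preserved in `Unknown` rather than failing.
#[cfg(test)]
mod location_update_status_example {
    use std::str::FromStr;

    #[test]
    fn round_trips_known_and_unknown_values() {
        // A known value parses to the named variant and back to the same string.
        let known = crate::model::LocationUpdateStatus::from_str("PENDING_UPDATE").unwrap();
        assert_eq!(known.as_str(), "PENDING_UPDATE");

        // A value added after code generation (hypothetical here) is kept verbatim.
        let newer = crate::model::LocationUpdateStatus::from("SOME_FUTURE_STATUS");
        assert!(matches!(newer, crate::model::LocationUpdateStatus::Unknown(_)));
        assert_eq!(newer.as_str(), "SOME_FUTURE_STATUS");
    }
}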
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ScalingStatusType {
Active,
Deleted,
DeleteRequested,
Deleting,
Error,
UpdateRequested,
Updating,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ScalingStatusType {
fn from(s: &str) -> Self {
match s {
"ACTIVE" => ScalingStatusType::Active,
"DELETED" => ScalingStatusType::Deleted,
"DELETE_REQUESTED" => ScalingStatusType::DeleteRequested,
"DELETING" => ScalingStatusType::Deleting,
"ERROR" => ScalingStatusType::Error,
"UPDATE_REQUESTED" => ScalingStatusType::UpdateRequested,
"UPDATING" => ScalingStatusType::Updating,
other => ScalingStatusType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ScalingStatusType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ScalingStatusType::from(s))
}
}
impl ScalingStatusType {
pub fn as_str(&self) -> &str {
match self {
ScalingStatusType::Active => "ACTIVE",
ScalingStatusType::Deleted => "DELETED",
ScalingStatusType::DeleteRequested => "DELETE_REQUESTED",
ScalingStatusType::Deleting => "DELETING",
ScalingStatusType::Error => "ERROR",
ScalingStatusType::UpdateRequested => "UPDATE_REQUESTED",
ScalingStatusType::Updating => "UPDATING",
ScalingStatusType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"ACTIVE",
"DELETED",
"DELETE_REQUESTED",
"DELETING",
"ERROR",
"UPDATE_REQUESTED",
"UPDATING",
]
}
}
impl AsRef<str> for ScalingStatusType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Represents a player session. Player sessions are created either for a specific game
/// session, or as part of a game session placement or matchmaking request. A player session
/// can represent a reserved player slot in a game session (when status is
/// <code>RESERVED</code>) or actual player activity in a game session (when status is
/// <code>ACTIVE</code>). A player session object, including player data, is
/// automatically passed to a game session when the player connects to the game session and
/// is validated. After the game session ends, player session information is retained for
/// 30 days and then removed.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreatePlayerSession</a> |
/// <a>CreatePlayerSessions</a> |
/// <a>DescribePlayerSessions</a> |
/// <a>StartGameSessionPlacement</a> |
/// <a>DescribeGameSessionPlacement</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html#reference-awssdk-resources-fleets">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PlayerSession {
/// <p>A unique identifier for a player session.</p>
pub player_session_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for a player that is associated with this player session.</p>
pub player_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for the game session that the player session is connected to.</p>
pub game_session_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for the fleet that the player's game session is running on.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>
/// The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift fleet that the player's game session is running on.
/// </p>
pub fleet_arn: std::option::Option<std::string::String>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
/// <p>A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub termination_time: std::option::Option<smithy_types::Instant>,
/// <p>Current status of the player session.</p>
/// <p>Possible player session statuses include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>RESERVED</b> -- The player session request has been
/// received, but the player has not yet connected to the server process and/or been
/// validated. </p>
/// </li>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- The player has been validated by the
/// server process and is currently connected.</p>
/// </li>
/// <li>
/// <p>
/// <b>COMPLETED</b> -- The player connection has been
/// dropped.</p>
/// </li>
/// <li>
/// <p>
/// <b>TIMEDOUT</b> -- A player session request was
/// received, but the player did not connect and/or was not validated within the
/// timeout limit (60 seconds).</p>
/// </li>
/// </ul>
pub status: std::option::Option<crate::model::PlayerSessionStatus>,
/// <p>The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub ip_address: std::option::Option<std::string::String>,
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub dns_name: std::option::Option<std::string::String>,
    /// <p>Port number for the game session. To connect to an Amazon GameLift server process, an app
/// needs both the IP address and port number.</p>
pub port: std::option::Option<i32>,
/// <p>Developer-defined information related to a player. GameLift does not use this data, so it can be formatted as needed for use in the game. </p>
pub player_data: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for PlayerSession {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PlayerSession");
formatter.field("player_session_id", &self.player_session_id);
formatter.field("player_id", &self.player_id);
formatter.field("game_session_id", &self.game_session_id);
formatter.field("fleet_id", &self.fleet_id);
formatter.field("fleet_arn", &self.fleet_arn);
formatter.field("creation_time", &self.creation_time);
formatter.field("termination_time", &self.termination_time);
formatter.field("status", &self.status);
formatter.field("ip_address", &self.ip_address);
formatter.field("dns_name", &self.dns_name);
formatter.field("port", &self.port);
formatter.field("player_data", &self.player_data);
formatter.finish()
}
}
/// See [`PlayerSession`](crate::model::PlayerSession)
pub mod player_session {
/// A builder for [`PlayerSession`](crate::model::PlayerSession)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) player_session_id: std::option::Option<std::string::String>,
pub(crate) player_id: std::option::Option<std::string::String>,
pub(crate) game_session_id: std::option::Option<std::string::String>,
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) fleet_arn: std::option::Option<std::string::String>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
pub(crate) termination_time: std::option::Option<smithy_types::Instant>,
pub(crate) status: std::option::Option<crate::model::PlayerSessionStatus>,
pub(crate) ip_address: std::option::Option<std::string::String>,
pub(crate) dns_name: std::option::Option<std::string::String>,
pub(crate) port: std::option::Option<i32>,
pub(crate) player_data: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for a player session.</p>
pub fn player_session_id(mut self, input: impl Into<std::string::String>) -> Self {
self.player_session_id = Some(input.into());
self
}
pub fn set_player_session_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.player_session_id = input;
self
}
/// <p>A unique identifier for a player that is associated with this player session.</p>
pub fn player_id(mut self, input: impl Into<std::string::String>) -> Self {
self.player_id = Some(input.into());
self
}
pub fn set_player_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.player_id = input;
self
}
/// <p>A unique identifier for the game session that the player session is connected to.</p>
pub fn game_session_id(mut self, input: impl Into<std::string::String>) -> Self {
self.game_session_id = Some(input.into());
self
}
pub fn set_game_session_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_session_id = input;
self
}
/// <p>A unique identifier for the fleet that the player's game session is running on.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>
/// The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift fleet that the player's game session is running on.
/// </p>
pub fn fleet_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_arn = Some(input.into());
self
}
pub fn set_fleet_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_arn = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// <p>A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn termination_time(mut self, input: smithy_types::Instant) -> Self {
self.termination_time = Some(input);
self
}
pub fn set_termination_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.termination_time = input;
self
}
/// <p>Current status of the player session.</p>
/// <p>Possible player session statuses include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>RESERVED</b> -- The player session request has been
/// received, but the player has not yet connected to the server process and/or been
/// validated. </p>
/// </li>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- The player has been validated by the
/// server process and is currently connected.</p>
/// </li>
/// <li>
/// <p>
/// <b>COMPLETED</b> -- The player connection has been
/// dropped.</p>
/// </li>
/// <li>
/// <p>
/// <b>TIMEDOUT</b> -- A player session request was
/// received, but the player did not connect and/or was not validated within the
/// timeout limit (60 seconds).</p>
/// </li>
/// </ul>
pub fn status(mut self, input: crate::model::PlayerSessionStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::PlayerSessionStatus>,
) -> Self {
self.status = input;
self
}
/// <p>The IP address of the game session. To connect to a GameLift game server, an app needs both the IP address and port number.</p>
pub fn ip_address(mut self, input: impl Into<std::string::String>) -> Self {
self.ip_address = Some(input.into());
self
}
pub fn set_ip_address(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ip_address = input;
self
}
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub fn dns_name(mut self, input: impl Into<std::string::String>) -> Self {
self.dns_name = Some(input.into());
self
}
pub fn set_dns_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.dns_name = input;
self
}
        /// <p>Port number for the game session. To connect to an Amazon GameLift server process, an app
/// needs both the IP address and port number.</p>
pub fn port(mut self, input: i32) -> Self {
self.port = Some(input);
self
}
pub fn set_port(mut self, input: std::option::Option<i32>) -> Self {
self.port = input;
self
}
/// <p>Developer-defined information related to a player. GameLift does not use this data, so it can be formatted as needed for use in the game. </p>
pub fn player_data(mut self, input: impl Into<std::string::String>) -> Self {
self.player_data = Some(input.into());
self
}
pub fn set_player_data(mut self, input: std::option::Option<std::string::String>) -> Self {
self.player_data = input;
self
}
/// Consumes the builder and constructs a [`PlayerSession`](crate::model::PlayerSession)
pub fn build(self) -> crate::model::PlayerSession {
crate::model::PlayerSession {
player_session_id: self.player_session_id,
player_id: self.player_id,
game_session_id: self.game_session_id,
fleet_id: self.fleet_id,
fleet_arn: self.fleet_arn,
creation_time: self.creation_time,
termination_time: self.termination_time,
status: self.status,
ip_address: self.ip_address,
dns_name: self.dns_name,
port: self.port,
player_data: self.player_data,
}
}
}
}
impl PlayerSession {
/// Creates a new builder-style object to manufacture [`PlayerSession`](crate::model::PlayerSession)
pub fn builder() -> crate::model::player_session::Builder {
crate::model::player_session::Builder::default()
}
}
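// A minimal usage sketch (not part of the generated code). It builds a
// `PlayerSession` with the builder above and applies the documented rule of
// preferring `dns_name` over `ip_address` for TLS-enabled fleets. All
// identifiers and addresses are made up for illustration.
#[cfg(test)]
mod player_session_builder_example {
    #[test]
    fn builds_a_reserved_player_session() {
        let session = crate::model::PlayerSession::builder()
            .player_session_id("psess-1234")
            .player_id("player-42")
            .game_session_id("gsess-5678")
            .fleet_id("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
            .status(crate::model::PlayerSessionStatus::Reserved)
            .ip_address("203.0.113.10")
            .dns_name("abcdef.us-west-2.amazongamelift.com")
            .port(7777)
            .build();

        // For TLS-enabled fleets the DNS name, not the IP address, should be
        // used to connect (see the field documentation above).
        let endpoint = session
            .dns_name
            .clone()
            .or(session.ip_address.clone())
            .expect("a connectable address");
        assert_eq!(endpoint, "abcdef.us-west-2.amazongamelift.com");
        assert_eq!(session.port, Some(7777));
    }
}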
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum PlayerSessionStatus {
Active,
Completed,
Reserved,
Timedout,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for PlayerSessionStatus {
fn from(s: &str) -> Self {
match s {
"ACTIVE" => PlayerSessionStatus::Active,
"COMPLETED" => PlayerSessionStatus::Completed,
"RESERVED" => PlayerSessionStatus::Reserved,
"TIMEDOUT" => PlayerSessionStatus::Timedout,
other => PlayerSessionStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for PlayerSessionStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(PlayerSessionStatus::from(s))
}
}
impl PlayerSessionStatus {
pub fn as_str(&self) -> &str {
match self {
PlayerSessionStatus::Active => "ACTIVE",
PlayerSessionStatus::Completed => "COMPLETED",
PlayerSessionStatus::Reserved => "RESERVED",
PlayerSessionStatus::Timedout => "TIMEDOUT",
PlayerSessionStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["ACTIVE", "COMPLETED", "RESERVED", "TIMEDOUT"]
}
}
impl AsRef<str> for PlayerSessionStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Set of rule statements, used with FlexMatch, that determine how to build your player
/// matches. Each rule set describes a type of group to be created and defines the
/// parameters for acceptable player matches. Rule sets are used in <a>MatchmakingConfiguration</a> objects.</p>
/// <p>A rule set may define the following elements for a match. For detailed information
/// and examples showing how to construct a rule set, see <a href="https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-rulesets.html">Build a FlexMatch rule
/// set</a>. </p>
/// <ul>
/// <li>
/// <p>Teams -- Required. A rule set must define one or multiple teams for the
/// match and set minimum and maximum team sizes. For example, a rule set might
/// describe a 4x4 match that requires all eight slots to be filled. </p>
/// </li>
/// <li>
/// <p>Player attributes -- Optional. These attributes specify a set of player
/// characteristics to evaluate when looking for a match. Matchmaking requests that
/// use a rule set with player attributes must provide the corresponding attribute
/// values. For example, an attribute might specify a player's skill or
/// level.</p>
/// </li>
/// <li>
/// <p>Rules -- Optional. Rules define how to evaluate potential players for a
/// match based on player attributes. A rule might specify minimum requirements for
/// individual players, teams, or entire matches. For example, a rule might require
/// each player to meet a certain skill level, each team to have at least one player
/// in a certain role, or the match to have a minimum average skill level. A rule may
/// also describe an entire group, such as requiring all teams to be evenly matched or
/// to have at least one player in a certain role.</p>
/// </li>
/// <li>
/// <p>Expansions -- Optional. Expansions allow you to relax the rules after a
/// period of time when no acceptable matches are found. This feature lets you
/// balance getting players into games in a reasonable amount of time instead of
/// making them wait indefinitely for the best possible match. For example, you
/// might use an expansion to increase the maximum skill variance between players
/// after 30 seconds.</p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct MatchmakingRuleSet {
    /// <p>A unique identifier for the matchmaking rule set.</p>
pub rule_set_name: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift matchmaking rule set resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::matchmakingruleset/<ruleset name></code>. In a GameLift rule set ARN, the resource ID matches the
/// <i>RuleSetName</i> value.</p>
pub rule_set_arn: std::option::Option<std::string::String>,
/// <p>A collection of matchmaking rules, formatted as a JSON string. Comments are not
/// allowed in JSON, but most elements support a description field.</p>
pub rule_set_body: std::option::Option<std::string::String>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
}
impl std::fmt::Debug for MatchmakingRuleSet {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("MatchmakingRuleSet");
formatter.field("rule_set_name", &self.rule_set_name);
formatter.field("rule_set_arn", &self.rule_set_arn);
formatter.field("rule_set_body", &self.rule_set_body);
formatter.field("creation_time", &self.creation_time);
formatter.finish()
}
}
/// See [`MatchmakingRuleSet`](crate::model::MatchmakingRuleSet)
pub mod matchmaking_rule_set {
/// A builder for [`MatchmakingRuleSet`](crate::model::MatchmakingRuleSet)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) rule_set_name: std::option::Option<std::string::String>,
pub(crate) rule_set_arn: std::option::Option<std::string::String>,
pub(crate) rule_set_body: std::option::Option<std::string::String>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
}
impl Builder {
        /// <p>A unique identifier for the matchmaking rule set.</p>
pub fn rule_set_name(mut self, input: impl Into<std::string::String>) -> Self {
self.rule_set_name = Some(input.into());
self
}
pub fn set_rule_set_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.rule_set_name = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift matchmaking rule set resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::matchmakingruleset/<ruleset name></code>. In a GameLift rule set ARN, the resource ID matches the
/// <i>RuleSetName</i> value.</p>
pub fn rule_set_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.rule_set_arn = Some(input.into());
self
}
pub fn set_rule_set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.rule_set_arn = input;
self
}
/// <p>A collection of matchmaking rules, formatted as a JSON string. Comments are not
/// allowed in JSON, but most elements support a description field.</p>
pub fn rule_set_body(mut self, input: impl Into<std::string::String>) -> Self {
self.rule_set_body = Some(input.into());
self
}
pub fn set_rule_set_body(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.rule_set_body = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// Consumes the builder and constructs a [`MatchmakingRuleSet`](crate::model::MatchmakingRuleSet)
pub fn build(self) -> crate::model::MatchmakingRuleSet {
crate::model::MatchmakingRuleSet {
rule_set_name: self.rule_set_name,
rule_set_arn: self.rule_set_arn,
rule_set_body: self.rule_set_body,
creation_time: self.creation_time,
}
}
}
}
impl MatchmakingRuleSet {
/// Creates a new builder-style object to manufacture [`MatchmakingRuleSet`](crate::model::MatchmakingRuleSet)
pub fn builder() -> crate::model::matchmaking_rule_set::Builder {
crate::model::matchmaking_rule_set::Builder::default()
}
}
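// A minimal usage sketch (not part of the generated code). The JSON body below
// is a made-up, abbreviated placeholder; see the FlexMatch rule set
// documentation linked above for the real rule set schema.
#[cfg(test)]
mod matchmaking_rule_set_builder_example {
    #[test]
    fn builds_a_rule_set_with_a_json_body() {
        // Hypothetical rule set body: real bodies define teams, player
        // attributes, rules, and optional expansions as described above.
        let body = r#"{"name":"example-rule-set","teams":[{"name":"players","minPlayers":4,"maxPlayers":8}]}"#;

        let rule_set = crate::model::MatchmakingRuleSet::builder()
            .rule_set_name("example-rule-set")
            .rule_set_body(body)
            .build();

        assert_eq!(rule_set.rule_set_name.as_deref(), Some("example-rule-set"));
        assert!(rule_set.rule_set_body.as_deref().unwrap_or("").contains("teams"));
    }
}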
/// <p>Represents an EC2 instance of virtual computing resources that hosts one or more game
/// servers. In GameLift, a fleet can contain zero or more instances.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeInstances</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Instance {
/// <p>A unique identifier for the fleet that the instance is in.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>.</p>
pub fleet_arn: std::option::Option<std::string::String>,
/// <p>A unique identifier for the instance.</p>
pub instance_id: std::option::Option<std::string::String>,
/// <p>IP address that is assigned to the instance.</p>
pub ip_address: std::option::Option<std::string::String>,
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub dns_name: std::option::Option<std::string::String>,
/// <p>Operating system that is running on this instance. </p>
pub operating_system: std::option::Option<crate::model::OperatingSystem>,
/// <p>EC2 instance type that defines the computing resources of this instance.
/// </p>
pub r#type: std::option::Option<crate::model::Ec2InstanceType>,
/// <p>Current status of the instance. Possible statuses include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>PENDING</b> -- The instance is in the process of
/// being created and launching server processes as defined in the fleet's run-time
/// configuration. </p>
/// </li>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- The instance has been successfully
/// created and at least one server process has successfully launched and reported
/// back to GameLift that it is ready to host a game session. The instance is now
/// considered ready to host game sessions. </p>
/// </li>
/// <li>
/// <p>
/// <b>TERMINATING</b> -- The instance is in the process
/// of shutting down. This may happen to reduce capacity during a scaling down event
/// or to recycle resources in the event of a problem.</p>
/// </li>
/// </ul>
pub status: std::option::Option<crate::model::InstanceStatus>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
/// <p>The fleet location of the instance, expressed as an AWS Region
/// code, such as <code>us-west-2</code>. </p>
pub location: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for Instance {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Instance");
formatter.field("fleet_id", &self.fleet_id);
formatter.field("fleet_arn", &self.fleet_arn);
formatter.field("instance_id", &self.instance_id);
formatter.field("ip_address", &self.ip_address);
formatter.field("dns_name", &self.dns_name);
formatter.field("operating_system", &self.operating_system);
formatter.field("r#type", &self.r#type);
formatter.field("status", &self.status);
formatter.field("creation_time", &self.creation_time);
formatter.field("location", &self.location);
formatter.finish()
}
}
/// See [`Instance`](crate::model::Instance)
pub mod instance {
/// A builder for [`Instance`](crate::model::Instance)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) fleet_arn: std::option::Option<std::string::String>,
pub(crate) instance_id: std::option::Option<std::string::String>,
pub(crate) ip_address: std::option::Option<std::string::String>,
pub(crate) dns_name: std::option::Option<std::string::String>,
pub(crate) operating_system: std::option::Option<crate::model::OperatingSystem>,
pub(crate) r#type: std::option::Option<crate::model::Ec2InstanceType>,
pub(crate) status: std::option::Option<crate::model::InstanceStatus>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
pub(crate) location: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for the fleet that the instance is in.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>.</p>
pub fn fleet_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_arn = Some(input.into());
self
}
pub fn set_fleet_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_arn = input;
self
}
/// <p>A unique identifier for the instance.</p>
pub fn instance_id(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_id = Some(input.into());
self
}
pub fn set_instance_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.instance_id = input;
self
}
/// <p>IP address that is assigned to the instance.</p>
pub fn ip_address(mut self, input: impl Into<std::string::String>) -> Self {
self.ip_address = Some(input.into());
self
}
pub fn set_ip_address(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ip_address = input;
self
}
/// <p>The DNS identifier assigned to the instance that is running the game session. Values have
/// the following format:</p>
/// <ul>
/// <li>
/// <p>TLS-enabled fleets: <code><unique identifier>.<region identifier>.amazongamelift.com</code>.</p>
/// </li>
/// <li>
/// <p>Non-TLS-enabled fleets: <code>ec2-<unique identifier>.compute.amazonaws.com</code>. (See
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses">Amazon EC2 Instance IP Addressing</a>.)</p>
/// </li>
/// </ul>
/// <p>When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.</p>
pub fn dns_name(mut self, input: impl Into<std::string::String>) -> Self {
self.dns_name = Some(input.into());
self
}
pub fn set_dns_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.dns_name = input;
self
}
/// <p>Operating system that is running on this instance. </p>
pub fn operating_system(mut self, input: crate::model::OperatingSystem) -> Self {
self.operating_system = Some(input);
self
}
pub fn set_operating_system(
mut self,
input: std::option::Option<crate::model::OperatingSystem>,
) -> Self {
self.operating_system = input;
self
}
/// <p>EC2 instance type that defines the computing resources of this instance.
/// </p>
pub fn r#type(mut self, input: crate::model::Ec2InstanceType) -> Self {
self.r#type = Some(input);
self
}
pub fn set_type(
mut self,
input: std::option::Option<crate::model::Ec2InstanceType>,
) -> Self {
self.r#type = input;
self
}
/// <p>Current status of the instance. Possible statuses include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>PENDING</b> -- The instance is in the process of
/// being created and launching server processes as defined in the fleet's run-time
/// configuration. </p>
/// </li>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- The instance has been successfully
/// created and at least one server process has successfully launched and reported
/// back to GameLift that it is ready to host a game session. The instance is now
/// considered ready to host game sessions. </p>
/// </li>
/// <li>
/// <p>
/// <b>TERMINATING</b> -- The instance is in the process
/// of shutting down. This may happen to reduce capacity during a scaling down event
/// or to recycle resources in the event of a problem.</p>
/// </li>
/// </ul>
pub fn status(mut self, input: crate::model::InstanceStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::InstanceStatus>,
) -> Self {
self.status = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// <p>The fleet location of the instance, expressed as an AWS Region
/// code, such as <code>us-west-2</code>. </p>
pub fn location(mut self, input: impl Into<std::string::String>) -> Self {
self.location = Some(input.into());
self
}
pub fn set_location(mut self, input: std::option::Option<std::string::String>) -> Self {
self.location = input;
self
}
/// Consumes the builder and constructs a [`Instance`](crate::model::Instance)
pub fn build(self) -> crate::model::Instance {
crate::model::Instance {
fleet_id: self.fleet_id,
fleet_arn: self.fleet_arn,
instance_id: self.instance_id,
ip_address: self.ip_address,
dns_name: self.dns_name,
operating_system: self.operating_system,
r#type: self.r#type,
status: self.status,
creation_time: self.creation_time,
location: self.location,
}
}
}
}
impl Instance {
/// Creates a new builder-style object to manufacture [`Instance`](crate::model::Instance)
pub fn builder() -> crate::model::instance::Builder {
crate::model::instance::Builder::default()
}
}
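// A minimal usage sketch (not part of the generated code) that assembles an
// `Instance` and branches on its status using variants defined in this module.
// The fleet, instance, and address values are made up for illustration.
#[cfg(test)]
mod instance_builder_example {
    #[test]
    fn builds_an_active_instance() {
        let instance = crate::model::Instance::builder()
            .fleet_id("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
            .instance_id("i-0123456789abcdef0")
            .ip_address("203.0.113.20")
            .r#type(crate::model::Ec2InstanceType::C5Large)
            .status(crate::model::InstanceStatus::Active)
            .location("us-west-2")
            .build();

        // Only ACTIVE instances are ready to host game sessions (see the
        // status documentation above).
        let ready = matches!(instance.status, Some(crate::model::InstanceStatus::Active));
        assert!(ready);
    }
}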
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum InstanceStatus {
Active,
Pending,
Terminating,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for InstanceStatus {
fn from(s: &str) -> Self {
match s {
"ACTIVE" => InstanceStatus::Active,
"PENDING" => InstanceStatus::Pending,
"TERMINATING" => InstanceStatus::Terminating,
other => InstanceStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for InstanceStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(InstanceStatus::from(s))
}
}
impl InstanceStatus {
pub fn as_str(&self) -> &str {
match self {
InstanceStatus::Active => "ACTIVE",
InstanceStatus::Pending => "PENDING",
InstanceStatus::Terminating => "TERMINATING",
InstanceStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["ACTIVE", "PENDING", "TERMINATING"]
}
}
impl AsRef<str> for InstanceStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum Ec2InstanceType {
C32xlarge,
C34xlarge,
C38xlarge,
C3Large,
C3Xlarge,
C42xlarge,
C44xlarge,
C48xlarge,
C4Large,
C4Xlarge,
C512xlarge,
C518xlarge,
C524xlarge,
C52xlarge,
C54xlarge,
C59xlarge,
C5Large,
C5Xlarge,
C5a12xlarge,
C5a16xlarge,
C5a24xlarge,
C5a2xlarge,
C5a4xlarge,
C5a8xlarge,
C5aLarge,
C5aXlarge,
M32xlarge,
M3Large,
M3Medium,
M3Xlarge,
M410xlarge,
M42xlarge,
M44xlarge,
M4Large,
M4Xlarge,
M512xlarge,
M516xlarge,
M524xlarge,
M52xlarge,
M54xlarge,
M58xlarge,
M5Large,
M5Xlarge,
M5a12xlarge,
M5a16xlarge,
M5a24xlarge,
M5a2xlarge,
M5a4xlarge,
M5a8xlarge,
M5aLarge,
M5aXlarge,
R32xlarge,
R34xlarge,
R38xlarge,
R3Large,
R3Xlarge,
R416xlarge,
R42xlarge,
R44xlarge,
R48xlarge,
R4Large,
R4Xlarge,
R512xlarge,
R516xlarge,
R524xlarge,
R52xlarge,
R54xlarge,
R58xlarge,
R5Large,
R5Xlarge,
R5a12xlarge,
R5a16xlarge,
R5a24xlarge,
R5a2xlarge,
R5a4xlarge,
R5a8xlarge,
R5aLarge,
R5aXlarge,
T2Large,
T2Medium,
T2Micro,
T2Small,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for Ec2InstanceType {
fn from(s: &str) -> Self {
match s {
"c3.2xlarge" => Ec2InstanceType::C32xlarge,
"c3.4xlarge" => Ec2InstanceType::C34xlarge,
"c3.8xlarge" => Ec2InstanceType::C38xlarge,
"c3.large" => Ec2InstanceType::C3Large,
"c3.xlarge" => Ec2InstanceType::C3Xlarge,
"c4.2xlarge" => Ec2InstanceType::C42xlarge,
"c4.4xlarge" => Ec2InstanceType::C44xlarge,
"c4.8xlarge" => Ec2InstanceType::C48xlarge,
"c4.large" => Ec2InstanceType::C4Large,
"c4.xlarge" => Ec2InstanceType::C4Xlarge,
"c5.12xlarge" => Ec2InstanceType::C512xlarge,
"c5.18xlarge" => Ec2InstanceType::C518xlarge,
"c5.24xlarge" => Ec2InstanceType::C524xlarge,
"c5.2xlarge" => Ec2InstanceType::C52xlarge,
"c5.4xlarge" => Ec2InstanceType::C54xlarge,
"c5.9xlarge" => Ec2InstanceType::C59xlarge,
"c5.large" => Ec2InstanceType::C5Large,
"c5.xlarge" => Ec2InstanceType::C5Xlarge,
"c5a.12xlarge" => Ec2InstanceType::C5a12xlarge,
"c5a.16xlarge" => Ec2InstanceType::C5a16xlarge,
"c5a.24xlarge" => Ec2InstanceType::C5a24xlarge,
"c5a.2xlarge" => Ec2InstanceType::C5a2xlarge,
"c5a.4xlarge" => Ec2InstanceType::C5a4xlarge,
"c5a.8xlarge" => Ec2InstanceType::C5a8xlarge,
"c5a.large" => Ec2InstanceType::C5aLarge,
"c5a.xlarge" => Ec2InstanceType::C5aXlarge,
"m3.2xlarge" => Ec2InstanceType::M32xlarge,
"m3.large" => Ec2InstanceType::M3Large,
"m3.medium" => Ec2InstanceType::M3Medium,
"m3.xlarge" => Ec2InstanceType::M3Xlarge,
"m4.10xlarge" => Ec2InstanceType::M410xlarge,
"m4.2xlarge" => Ec2InstanceType::M42xlarge,
"m4.4xlarge" => Ec2InstanceType::M44xlarge,
"m4.large" => Ec2InstanceType::M4Large,
"m4.xlarge" => Ec2InstanceType::M4Xlarge,
"m5.12xlarge" => Ec2InstanceType::M512xlarge,
"m5.16xlarge" => Ec2InstanceType::M516xlarge,
"m5.24xlarge" => Ec2InstanceType::M524xlarge,
"m5.2xlarge" => Ec2InstanceType::M52xlarge,
"m5.4xlarge" => Ec2InstanceType::M54xlarge,
"m5.8xlarge" => Ec2InstanceType::M58xlarge,
"m5.large" => Ec2InstanceType::M5Large,
"m5.xlarge" => Ec2InstanceType::M5Xlarge,
"m5a.12xlarge" => Ec2InstanceType::M5a12xlarge,
"m5a.16xlarge" => Ec2InstanceType::M5a16xlarge,
"m5a.24xlarge" => Ec2InstanceType::M5a24xlarge,
"m5a.2xlarge" => Ec2InstanceType::M5a2xlarge,
"m5a.4xlarge" => Ec2InstanceType::M5a4xlarge,
"m5a.8xlarge" => Ec2InstanceType::M5a8xlarge,
"m5a.large" => Ec2InstanceType::M5aLarge,
"m5a.xlarge" => Ec2InstanceType::M5aXlarge,
"r3.2xlarge" => Ec2InstanceType::R32xlarge,
"r3.4xlarge" => Ec2InstanceType::R34xlarge,
"r3.8xlarge" => Ec2InstanceType::R38xlarge,
"r3.large" => Ec2InstanceType::R3Large,
"r3.xlarge" => Ec2InstanceType::R3Xlarge,
"r4.16xlarge" => Ec2InstanceType::R416xlarge,
"r4.2xlarge" => Ec2InstanceType::R42xlarge,
"r4.4xlarge" => Ec2InstanceType::R44xlarge,
"r4.8xlarge" => Ec2InstanceType::R48xlarge,
"r4.large" => Ec2InstanceType::R4Large,
"r4.xlarge" => Ec2InstanceType::R4Xlarge,
"r5.12xlarge" => Ec2InstanceType::R512xlarge,
"r5.16xlarge" => Ec2InstanceType::R516xlarge,
"r5.24xlarge" => Ec2InstanceType::R524xlarge,
"r5.2xlarge" => Ec2InstanceType::R52xlarge,
"r5.4xlarge" => Ec2InstanceType::R54xlarge,
"r5.8xlarge" => Ec2InstanceType::R58xlarge,
"r5.large" => Ec2InstanceType::R5Large,
"r5.xlarge" => Ec2InstanceType::R5Xlarge,
"r5a.12xlarge" => Ec2InstanceType::R5a12xlarge,
"r5a.16xlarge" => Ec2InstanceType::R5a16xlarge,
"r5a.24xlarge" => Ec2InstanceType::R5a24xlarge,
"r5a.2xlarge" => Ec2InstanceType::R5a2xlarge,
"r5a.4xlarge" => Ec2InstanceType::R5a4xlarge,
"r5a.8xlarge" => Ec2InstanceType::R5a8xlarge,
"r5a.large" => Ec2InstanceType::R5aLarge,
"r5a.xlarge" => Ec2InstanceType::R5aXlarge,
"t2.large" => Ec2InstanceType::T2Large,
"t2.medium" => Ec2InstanceType::T2Medium,
"t2.micro" => Ec2InstanceType::T2Micro,
"t2.small" => Ec2InstanceType::T2Small,
other => Ec2InstanceType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for Ec2InstanceType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(Ec2InstanceType::from(s))
}
}
impl Ec2InstanceType {
pub fn as_str(&self) -> &str {
match self {
Ec2InstanceType::C32xlarge => "c3.2xlarge",
Ec2InstanceType::C34xlarge => "c3.4xlarge",
Ec2InstanceType::C38xlarge => "c3.8xlarge",
Ec2InstanceType::C3Large => "c3.large",
Ec2InstanceType::C3Xlarge => "c3.xlarge",
Ec2InstanceType::C42xlarge => "c4.2xlarge",
Ec2InstanceType::C44xlarge => "c4.4xlarge",
Ec2InstanceType::C48xlarge => "c4.8xlarge",
Ec2InstanceType::C4Large => "c4.large",
Ec2InstanceType::C4Xlarge => "c4.xlarge",
Ec2InstanceType::C512xlarge => "c5.12xlarge",
Ec2InstanceType::C518xlarge => "c5.18xlarge",
Ec2InstanceType::C524xlarge => "c5.24xlarge",
Ec2InstanceType::C52xlarge => "c5.2xlarge",
Ec2InstanceType::C54xlarge => "c5.4xlarge",
Ec2InstanceType::C59xlarge => "c5.9xlarge",
Ec2InstanceType::C5Large => "c5.large",
Ec2InstanceType::C5Xlarge => "c5.xlarge",
Ec2InstanceType::C5a12xlarge => "c5a.12xlarge",
Ec2InstanceType::C5a16xlarge => "c5a.16xlarge",
Ec2InstanceType::C5a24xlarge => "c5a.24xlarge",
Ec2InstanceType::C5a2xlarge => "c5a.2xlarge",
Ec2InstanceType::C5a4xlarge => "c5a.4xlarge",
Ec2InstanceType::C5a8xlarge => "c5a.8xlarge",
Ec2InstanceType::C5aLarge => "c5a.large",
Ec2InstanceType::C5aXlarge => "c5a.xlarge",
Ec2InstanceType::M32xlarge => "m3.2xlarge",
Ec2InstanceType::M3Large => "m3.large",
Ec2InstanceType::M3Medium => "m3.medium",
Ec2InstanceType::M3Xlarge => "m3.xlarge",
Ec2InstanceType::M410xlarge => "m4.10xlarge",
Ec2InstanceType::M42xlarge => "m4.2xlarge",
Ec2InstanceType::M44xlarge => "m4.4xlarge",
Ec2InstanceType::M4Large => "m4.large",
Ec2InstanceType::M4Xlarge => "m4.xlarge",
Ec2InstanceType::M512xlarge => "m5.12xlarge",
Ec2InstanceType::M516xlarge => "m5.16xlarge",
Ec2InstanceType::M524xlarge => "m5.24xlarge",
Ec2InstanceType::M52xlarge => "m5.2xlarge",
Ec2InstanceType::M54xlarge => "m5.4xlarge",
Ec2InstanceType::M58xlarge => "m5.8xlarge",
Ec2InstanceType::M5Large => "m5.large",
Ec2InstanceType::M5Xlarge => "m5.xlarge",
Ec2InstanceType::M5a12xlarge => "m5a.12xlarge",
Ec2InstanceType::M5a16xlarge => "m5a.16xlarge",
Ec2InstanceType::M5a24xlarge => "m5a.24xlarge",
Ec2InstanceType::M5a2xlarge => "m5a.2xlarge",
Ec2InstanceType::M5a4xlarge => "m5a.4xlarge",
Ec2InstanceType::M5a8xlarge => "m5a.8xlarge",
Ec2InstanceType::M5aLarge => "m5a.large",
Ec2InstanceType::M5aXlarge => "m5a.xlarge",
Ec2InstanceType::R32xlarge => "r3.2xlarge",
Ec2InstanceType::R34xlarge => "r3.4xlarge",
Ec2InstanceType::R38xlarge => "r3.8xlarge",
Ec2InstanceType::R3Large => "r3.large",
Ec2InstanceType::R3Xlarge => "r3.xlarge",
Ec2InstanceType::R416xlarge => "r4.16xlarge",
Ec2InstanceType::R42xlarge => "r4.2xlarge",
Ec2InstanceType::R44xlarge => "r4.4xlarge",
Ec2InstanceType::R48xlarge => "r4.8xlarge",
Ec2InstanceType::R4Large => "r4.large",
Ec2InstanceType::R4Xlarge => "r4.xlarge",
Ec2InstanceType::R512xlarge => "r5.12xlarge",
Ec2InstanceType::R516xlarge => "r5.16xlarge",
Ec2InstanceType::R524xlarge => "r5.24xlarge",
Ec2InstanceType::R52xlarge => "r5.2xlarge",
Ec2InstanceType::R54xlarge => "r5.4xlarge",
Ec2InstanceType::R58xlarge => "r5.8xlarge",
Ec2InstanceType::R5Large => "r5.large",
Ec2InstanceType::R5Xlarge => "r5.xlarge",
Ec2InstanceType::R5a12xlarge => "r5a.12xlarge",
Ec2InstanceType::R5a16xlarge => "r5a.16xlarge",
Ec2InstanceType::R5a24xlarge => "r5a.24xlarge",
Ec2InstanceType::R5a2xlarge => "r5a.2xlarge",
Ec2InstanceType::R5a4xlarge => "r5a.4xlarge",
Ec2InstanceType::R5a8xlarge => "r5a.8xlarge",
Ec2InstanceType::R5aLarge => "r5a.large",
Ec2InstanceType::R5aXlarge => "r5a.xlarge",
Ec2InstanceType::T2Large => "t2.large",
Ec2InstanceType::T2Medium => "t2.medium",
Ec2InstanceType::T2Micro => "t2.micro",
Ec2InstanceType::T2Small => "t2.small",
Ec2InstanceType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c3.large",
"c3.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"c4.large",
"c4.xlarge",
"c5.12xlarge",
"c5.18xlarge",
"c5.24xlarge",
"c5.2xlarge",
"c5.4xlarge",
"c5.9xlarge",
"c5.large",
"c5.xlarge",
"c5a.12xlarge",
"c5a.16xlarge",
"c5a.24xlarge",
"c5a.2xlarge",
"c5a.4xlarge",
"c5a.8xlarge",
"c5a.large",
"c5a.xlarge",
"m3.2xlarge",
"m3.large",
"m3.medium",
"m3.xlarge",
"m4.10xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.large",
"m4.xlarge",
"m5.12xlarge",
"m5.16xlarge",
"m5.24xlarge",
"m5.2xlarge",
"m5.4xlarge",
"m5.8xlarge",
"m5.large",
"m5.xlarge",
"m5a.12xlarge",
"m5a.16xlarge",
"m5a.24xlarge",
"m5a.2xlarge",
"m5a.4xlarge",
"m5a.8xlarge",
"m5a.large",
"m5a.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"r3.large",
"r3.xlarge",
"r4.16xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r4.large",
"r4.xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.large",
"r5.xlarge",
"r5a.12xlarge",
"r5a.16xlarge",
"r5a.24xlarge",
"r5a.2xlarge",
"r5a.4xlarge",
"r5a.8xlarge",
"r5a.large",
"r5a.xlarge",
"t2.large",
"t2.medium",
"t2.micro",
"t2.small",
]
}
}
impl AsRef<str> for Ec2InstanceType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
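// Illustrative sketch (not part of the generated API surface): `Ec2InstanceType`
// round-trips between variants and the wire strings listed in `values()`. The
// `From<&str>` conversion is assumed to follow the same pattern as the other enums in
// this module, carrying unrecognized strings through as `Unknown` rather than failing.
//
//     assert_eq!(Ec2InstanceType::M5Large.as_str(), "m5.large");
//     let other = Ec2InstanceType::from("m6i.large"); // hypothetical newer type
//     assert_eq!(other.as_str(), "m6i.large");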
/// <p>A game session's properties plus the protection policy currently in
/// force.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameSessionDetail {
/// <p>Object that describes a game session.</p>
pub game_session: std::option::Option<crate::model::GameSession>,
/// <p>Current status of protection for the game session.</p>
/// <ul>
/// <li>
/// <p>
/// <b>NoProtection</b> -- The game session can be
/// terminated during a scale-down event.</p>
/// </li>
/// <li>
/// <p>
/// <b>FullProtection</b> -- If the game session is in an
/// <code>ACTIVE</code> status, it cannot be terminated during a scale-down
/// event.</p>
/// </li>
/// </ul>
pub protection_policy: std::option::Option<crate::model::ProtectionPolicy>,
}
impl std::fmt::Debug for GameSessionDetail {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameSessionDetail");
formatter.field("game_session", &self.game_session);
formatter.field("protection_policy", &self.protection_policy);
formatter.finish()
}
}
/// See [`GameSessionDetail`](crate::model::GameSessionDetail)
pub mod game_session_detail {
/// A builder for [`GameSessionDetail`](crate::model::GameSessionDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) game_session: std::option::Option<crate::model::GameSession>,
pub(crate) protection_policy: std::option::Option<crate::model::ProtectionPolicy>,
}
impl Builder {
/// <p>Object that describes a game session.</p>
pub fn game_session(mut self, input: crate::model::GameSession) -> Self {
self.game_session = Some(input);
self
}
pub fn set_game_session(
mut self,
input: std::option::Option<crate::model::GameSession>,
) -> Self {
self.game_session = input;
self
}
/// <p>Current status of protection for the game session.</p>
/// <ul>
/// <li>
/// <p>
/// <b>NoProtection</b> -- The game session can be
/// terminated during a scale-down event.</p>
/// </li>
/// <li>
/// <p>
/// <b>FullProtection</b> -- If the game session is in an
/// <code>ACTIVE</code> status, it cannot be terminated during a scale-down
/// event.</p>
/// </li>
/// </ul>
pub fn protection_policy(mut self, input: crate::model::ProtectionPolicy) -> Self {
self.protection_policy = Some(input);
self
}
pub fn set_protection_policy(
mut self,
input: std::option::Option<crate::model::ProtectionPolicy>,
) -> Self {
self.protection_policy = input;
self
}
/// Consumes the builder and constructs a [`GameSessionDetail`](crate::model::GameSessionDetail)
pub fn build(self) -> crate::model::GameSessionDetail {
crate::model::GameSessionDetail {
game_session: self.game_session,
protection_policy: self.protection_policy,
}
}
}
}
impl GameSessionDetail {
/// Creates a new builder-style object to manufacture [`GameSessionDetail`](crate::model::GameSessionDetail)
pub fn builder() -> crate::model::game_session_detail::Builder {
crate::model::game_session_detail::Builder::default()
}
}
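// Illustrative sketch: building a `GameSessionDetail` by hand, e.g. when stubbing a
// response in tests. The `ProtectionPolicy::FullProtection` variant name is assumed
// from the documented "FullProtection" value; `game_session` is left unset because a
// `GameSession` normally comes back from the service.
//
//     let detail = crate::model::GameSessionDetail::builder()
//         .protection_policy(crate::model::ProtectionPolicy::FullProtection)
//         .build();
//     assert!(detail.game_session.is_none());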
/// <p>
/// <b>This data type is used with the GameLift FleetIQ and game server groups.</b>
/// </p>
/// <p> Additional properties, including status, that describe an EC2 instance in a game
/// server group. Instance configurations are set with game server group properties (see
/// <code>DescribeGameServerGroup</code>) and with the EC2 launch template that was used
/// when creating the game server group. </p>
/// <p>Retrieve game server instances for a game server group by calling
/// <code>DescribeGameServerInstances</code>. </p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateGameServerGroup</a> |
/// <a>ListGameServerGroups</a> |
/// <a>DescribeGameServerGroup</a> |
/// <a>UpdateGameServerGroup</a> |
/// <a>DeleteGameServerGroup</a> |
/// <a>ResumeGameServerGroup</a> |
/// <a>SuspendGameServerGroup</a> |
/// <a>DescribeGameServerInstances</a> |
/// <a href="https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/reference-awssdk-fleetiq.html">All APIs by task</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameServerInstance {
/// <p>A developer-defined identifier for the game server group that includes the game
/// server instance. The name is unique for each Region in each AWS account.</p>
pub game_server_group_name: std::option::Option<std::string::String>,
/// <p>A generated unique identifier for the game server group that includes the game
/// server instance. </p>
pub game_server_group_arn: std::option::Option<std::string::String>,
/// <p>The unique identifier for the instance where the game server is running. This ID is
/// available in the instance metadata. EC2 instance IDs
/// use a 17-character format, for example: <code>i-1234567890abcdef0</code>.</p>
pub instance_id: std::option::Option<std::string::String>,
/// <p>
/// Current status of the game server instance.
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- The instance is viable for hosting
/// game servers. </p>
/// </li>
/// <li>
/// <p>
/// <b>DRAINING</b> -- The instance is not viable for
/// hosting game servers. Existing game servers are in the process of ending, and
/// new game servers are not started on this instance unless no other resources are
/// available. When the instance is put in DRAINING, a new instance is started up to
/// replace it. Once the instance has no UTILIZED game servers, it will be terminated
/// in favor of the new instance.</p>
/// </li>
/// <li>
/// <p>
/// <b>SPOT_TERMINATING</b> -- The instance is in the
/// process of shutting down due to a Spot instance interruption. No new game
/// servers are started on this instance.</p>
/// </li>
/// </ul>
pub instance_status: std::option::Option<crate::model::GameServerInstanceStatus>,
}
impl std::fmt::Debug for GameServerInstance {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameServerInstance");
formatter.field("game_server_group_name", &self.game_server_group_name);
formatter.field("game_server_group_arn", &self.game_server_group_arn);
formatter.field("instance_id", &self.instance_id);
formatter.field("instance_status", &self.instance_status);
formatter.finish()
}
}
/// See [`GameServerInstance`](crate::model::GameServerInstance)
pub mod game_server_instance {
/// A builder for [`GameServerInstance`](crate::model::GameServerInstance)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) game_server_group_name: std::option::Option<std::string::String>,
pub(crate) game_server_group_arn: std::option::Option<std::string::String>,
pub(crate) instance_id: std::option::Option<std::string::String>,
pub(crate) instance_status: std::option::Option<crate::model::GameServerInstanceStatus>,
}
impl Builder {
/// <p>A developer-defined identifier for the game server group that includes the game
/// server instance. The name is unique for each Region in each AWS account.</p>
pub fn game_server_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.game_server_group_name = Some(input.into());
self
}
pub fn set_game_server_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_server_group_name = input;
self
}
/// <p>A generated unique identifier for the game server group that includes the game
/// server instance. </p>
pub fn game_server_group_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.game_server_group_arn = Some(input.into());
self
}
pub fn set_game_server_group_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.game_server_group_arn = input;
self
}
/// <p>The unique identifier for the instance where the game server is running. This ID is
/// available in the instance metadata. EC2 instance IDs
/// use a 17-character format, for example: <code>i-1234567890abcdef0</code>.</p>
pub fn instance_id(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_id = Some(input.into());
self
}
pub fn set_instance_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.instance_id = input;
self
}
/// <p>
/// Current status of the game server instance.
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- The instance is viable for hosting
/// game servers. </p>
/// </li>
/// <li>
/// <p>
/// <b>DRAINING</b> -- The instance is not viable for
/// hosting game servers. Existing game servers are in the process of ending, and
/// new game servers are not started on this instance unless no other resources are
/// available. When the instance is put in DRAINING, a new instance is started up to
/// replace it. Once the instance has no UTILIZED game servers, it will be terminated
/// in favor of the new instance.</p>
/// </li>
/// <li>
/// <p>
/// <b>SPOT_TERMINATING</b> -- The instance is in the
/// process of shutting down due to a Spot instance interruption. No new game
/// servers are started on this instance.</p>
/// </li>
/// </ul>
pub fn instance_status(mut self, input: crate::model::GameServerInstanceStatus) -> Self {
self.instance_status = Some(input);
self
}
pub fn set_instance_status(
mut self,
input: std::option::Option<crate::model::GameServerInstanceStatus>,
) -> Self {
self.instance_status = input;
self
}
/// Consumes the builder and constructs a [`GameServerInstance`](crate::model::GameServerInstance)
pub fn build(self) -> crate::model::GameServerInstance {
crate::model::GameServerInstance {
game_server_group_name: self.game_server_group_name,
game_server_group_arn: self.game_server_group_arn,
instance_id: self.instance_id,
instance_status: self.instance_status,
}
}
}
}
impl GameServerInstance {
/// Creates a new builder-style object to manufacture [`GameServerInstance`](crate::model::GameServerInstance)
pub fn builder() -> crate::model::game_server_instance::Builder {
crate::model::game_server_instance::Builder::default()
}
}
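// Illustrative sketch: the builder mirrors the struct fields one-to-one, so a
// `GameServerInstance` can be assembled field by field (identifiers below are made up).
//
//     let instance = crate::model::GameServerInstance::builder()
//         .game_server_group_name("my-game-server-group")
//         .instance_id("i-1234567890abcdef0")
//         .instance_status(crate::model::GameServerInstanceStatus::Active)
//         .build();
//     assert_eq!(instance.instance_id.as_deref(), Some("i-1234567890abcdef0"));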
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameServerInstanceStatus {
Active,
Draining,
SpotTerminating,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameServerInstanceStatus {
fn from(s: &str) -> Self {
match s {
"ACTIVE" => GameServerInstanceStatus::Active,
"DRAINING" => GameServerInstanceStatus::Draining,
"SPOT_TERMINATING" => GameServerInstanceStatus::SpotTerminating,
other => GameServerInstanceStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameServerInstanceStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameServerInstanceStatus::from(s))
}
}
impl GameServerInstanceStatus {
pub fn as_str(&self) -> &str {
match self {
GameServerInstanceStatus::Active => "ACTIVE",
GameServerInstanceStatus::Draining => "DRAINING",
GameServerInstanceStatus::SpotTerminating => "SPOT_TERMINATING",
GameServerInstanceStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["ACTIVE", "DRAINING", "SPOT_TERMINATING"]
}
}
impl AsRef<str> for GameServerInstanceStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
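// Illustrative sketch: like the other enums in this module, `GameServerInstanceStatus`
// parses infallibly and preserves unknown wire values instead of rejecting them.
//
//     let status: GameServerInstanceStatus = "DRAINING".parse().unwrap();
//     assert_eq!(status, GameServerInstanceStatus::Draining);
//     assert_eq!(
//         GameServerInstanceStatus::from("SOME_FUTURE_STATUS").as_str(),
//         "SOME_FUTURE_STATUS"
//     );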
/// <p>Current resource utilization statistics in a specified fleet or location. The location
/// value might refer to a fleet's remote location or its home Region.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeFleetUtilization</a> | <a>DescribeFleetLocationUtilization</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct FleetUtilization {
/// <p>A unique identifier for the fleet associated with the location.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>.</p>
pub fleet_arn: std::option::Option<std::string::String>,
/// <p>The number of server processes in <code>ACTIVE</code> status that are currently
/// running across all instances in the fleet location. </p>
pub active_server_process_count: std::option::Option<i32>,
/// <p>The number of active game sessions that are currently being hosted across all
/// instances in the fleet location.</p>
pub active_game_session_count: std::option::Option<i32>,
/// <p>The number of active player sessions that are currently being hosted across all
/// instances in the fleet location.</p>
pub current_player_session_count: std::option::Option<i32>,
/// <p>The maximum number of players allowed across all game sessions that are currently
/// being hosted across all instances in the fleet location.</p>
pub maximum_player_session_count: std::option::Option<i32>,
/// <p>The fleet location for the fleet utilization information, expressed as an AWS Region
/// code, such as <code>us-west-2</code>. </p>
pub location: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for FleetUtilization {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("FleetUtilization");
formatter.field("fleet_id", &self.fleet_id);
formatter.field("fleet_arn", &self.fleet_arn);
formatter.field(
"active_server_process_count",
&self.active_server_process_count,
);
formatter.field("active_game_session_count", &self.active_game_session_count);
formatter.field(
"current_player_session_count",
&self.current_player_session_count,
);
formatter.field(
"maximum_player_session_count",
&self.maximum_player_session_count,
);
formatter.field("location", &self.location);
formatter.finish()
}
}
/// See [`FleetUtilization`](crate::model::FleetUtilization)
pub mod fleet_utilization {
/// A builder for [`FleetUtilization`](crate::model::FleetUtilization)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) fleet_arn: std::option::Option<std::string::String>,
pub(crate) active_server_process_count: std::option::Option<i32>,
pub(crate) active_game_session_count: std::option::Option<i32>,
pub(crate) current_player_session_count: std::option::Option<i32>,
pub(crate) maximum_player_session_count: std::option::Option<i32>,
pub(crate) location: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for the fleet associated with the location.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>.</p>
pub fn fleet_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_arn = Some(input.into());
self
}
pub fn set_fleet_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_arn = input;
self
}
/// <p>The number of server processes in <code>ACTIVE</code> status that are currently
/// running across all instances in the fleet location. </p>
pub fn active_server_process_count(mut self, input: i32) -> Self {
self.active_server_process_count = Some(input);
self
}
pub fn set_active_server_process_count(mut self, input: std::option::Option<i32>) -> Self {
self.active_server_process_count = input;
self
}
/// <p>The number of active game sessions that are currently being hosted across all
/// instances in the fleet location.</p>
pub fn active_game_session_count(mut self, input: i32) -> Self {
self.active_game_session_count = Some(input);
self
}
pub fn set_active_game_session_count(mut self, input: std::option::Option<i32>) -> Self {
self.active_game_session_count = input;
self
}
/// <p>The number of active player sessions that are currently being hosted across all
/// instances in the fleet location.</p>
pub fn current_player_session_count(mut self, input: i32) -> Self {
self.current_player_session_count = Some(input);
self
}
pub fn set_current_player_session_count(mut self, input: std::option::Option<i32>) -> Self {
self.current_player_session_count = input;
self
}
/// <p>The maximum number of players allowed across all game sessions that are currently
/// being hosted across all instances in the fleet location.</p>
pub fn maximum_player_session_count(mut self, input: i32) -> Self {
self.maximum_player_session_count = Some(input);
self
}
pub fn set_maximum_player_session_count(mut self, input: std::option::Option<i32>) -> Self {
self.maximum_player_session_count = input;
self
}
/// <p>The fleet location for the fleet utilization information, expressed as an AWS Region
/// code, such as <code>us-west-2</code>. </p>
pub fn location(mut self, input: impl Into<std::string::String>) -> Self {
self.location = Some(input.into());
self
}
pub fn set_location(mut self, input: std::option::Option<std::string::String>) -> Self {
self.location = input;
self
}
/// Consumes the builder and constructs a [`FleetUtilization`](crate::model::FleetUtilization)
pub fn build(self) -> crate::model::FleetUtilization {
crate::model::FleetUtilization {
fleet_id: self.fleet_id,
fleet_arn: self.fleet_arn,
active_server_process_count: self.active_server_process_count,
active_game_session_count: self.active_game_session_count,
current_player_session_count: self.current_player_session_count,
maximum_player_session_count: self.maximum_player_session_count,
location: self.location,
}
}
}
}
impl FleetUtilization {
/// Creates a new builder-style object to manufacture [`FleetUtilization`](crate::model::FleetUtilization)
pub fn builder() -> crate::model::fleet_utilization::Builder {
crate::model::fleet_utilization::Builder::default()
}
}
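// Illustrative sketch: `FleetUtilization` is a plain data carrier, so the builder is
// mainly useful for tests or stubbed responses (all values below are made up).
//
//     let utilization = crate::model::FleetUtilization::builder()
//         .fleet_id("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
//         .active_game_session_count(12)
//         .current_player_session_count(87)
//         .maximum_player_session_count(200)
//         .location("us-west-2")
//         .build();
//     assert_eq!(utilization.active_game_session_count, Some(12));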
/// <p>Current resource capacity settings in a specified fleet or location. The location
/// value might refer to a fleet's remote location or its home Region. </p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeFleetCapacity</a> |
/// <a>DescribeFleetLocationCapacity</a> |
/// <a>UpdateFleetCapacity</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct FleetCapacity {
/// <p>A unique identifier for the fleet associated with the location.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>.</p>
pub fleet_arn: std::option::Option<std::string::String>,
/// <p>The EC2 instance type that is used for all instances in a fleet. The instance type
/// determines the computing resources in use, including CPU, memory, storage, and
/// networking capacity. See <a href="http://aws.amazon.com/ec2/instance-types/">Amazon EC2
/// Instance Types</a> for detailed descriptions.</p>
pub instance_type: std::option::Option<crate::model::Ec2InstanceType>,
/// <p>The current instance count and capacity settings for the fleet location. </p>
pub instance_counts: std::option::Option<crate::model::Ec2InstanceCounts>,
/// <p>The fleet location for the instance count information, expressed as an AWS Region
/// code, such as <code>us-west-2</code>. </p>
pub location: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for FleetCapacity {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("FleetCapacity");
formatter.field("fleet_id", &self.fleet_id);
formatter.field("fleet_arn", &self.fleet_arn);
formatter.field("instance_type", &self.instance_type);
formatter.field("instance_counts", &self.instance_counts);
formatter.field("location", &self.location);
formatter.finish()
}
}
/// See [`FleetCapacity`](crate::model::FleetCapacity)
pub mod fleet_capacity {
/// A builder for [`FleetCapacity`](crate::model::FleetCapacity)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) fleet_arn: std::option::Option<std::string::String>,
pub(crate) instance_type: std::option::Option<crate::model::Ec2InstanceType>,
pub(crate) instance_counts: std::option::Option<crate::model::Ec2InstanceCounts>,
pub(crate) location: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for the fleet associated with the location.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>.</p>
pub fn fleet_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_arn = Some(input.into());
self
}
pub fn set_fleet_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_arn = input;
self
}
/// <p>The EC2 instance type that is used for all instances in a fleet. The instance type
/// determines the computing resources in use, including CPU, memory, storage, and
/// networking capacity. See <a href="http://aws.amazon.com/ec2/instance-types/">Amazon EC2
/// Instance Types</a> for detailed descriptions.</p>
pub fn instance_type(mut self, input: crate::model::Ec2InstanceType) -> Self {
self.instance_type = Some(input);
self
}
pub fn set_instance_type(
mut self,
input: std::option::Option<crate::model::Ec2InstanceType>,
) -> Self {
self.instance_type = input;
self
}
/// <p>The current instance count and capacity settings for the fleet location. </p>
pub fn instance_counts(mut self, input: crate::model::Ec2InstanceCounts) -> Self {
self.instance_counts = Some(input);
self
}
pub fn set_instance_counts(
mut self,
input: std::option::Option<crate::model::Ec2InstanceCounts>,
) -> Self {
self.instance_counts = input;
self
}
/// <p>The fleet location for the instance count information, expressed as an AWS Region
/// code, such as <code>us-west-2</code>. </p>
pub fn location(mut self, input: impl Into<std::string::String>) -> Self {
self.location = Some(input.into());
self
}
pub fn set_location(mut self, input: std::option::Option<std::string::String>) -> Self {
self.location = input;
self
}
/// Consumes the builder and constructs a [`FleetCapacity`](crate::model::FleetCapacity)
pub fn build(self) -> crate::model::FleetCapacity {
crate::model::FleetCapacity {
fleet_id: self.fleet_id,
fleet_arn: self.fleet_arn,
instance_type: self.instance_type,
instance_counts: self.instance_counts,
location: self.location,
}
}
}
}
impl FleetCapacity {
/// Creates a new builder-style object to manufacture [`FleetCapacity`](crate::model::FleetCapacity)
pub fn builder() -> crate::model::fleet_capacity::Builder {
crate::model::fleet_capacity::Builder::default()
}
}
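// Illustrative sketch: a minimal `FleetCapacity` using types defined in this module
// (the fleet ID below is the documentation example, not a real resource).
//
//     let capacity = crate::model::FleetCapacity::builder()
//         .fleet_id("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
//         .instance_type(crate::model::Ec2InstanceType::M5Large)
//         .location("us-west-2")
//         .build();
//     assert!(capacity.instance_counts.is_none());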
/// <p>Resource capacity settings. Fleet capacity is measured in EC2 instances. Pending and
/// terminating counts are non-zero when the fleet capacity is adjusting to a scaling event
/// or if access to resources is temporarily affected.</p>
/// <p>EC2 instance counts are part of <a>FleetCapacity</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Ec2InstanceCounts {
/// <p>Ideal number of active instances. GameLift will always try to maintain the desired
/// number of instances. Capacity is scaled up or down by changing the desired instances. </p>
pub desired: std::option::Option<i32>,
/// <p>The minimum instance count value allowed.</p>
pub minimum: std::option::Option<i32>,
/// <p>The maximum instance count value allowed.</p>
pub maximum: std::option::Option<i32>,
/// <p>Number of instances that are starting but not yet active.</p>
pub pending: std::option::Option<i32>,
/// <p>Actual number of instances that are ready to host game sessions.</p>
pub active: std::option::Option<i32>,
/// <p>Number of active instances that are not currently hosting a game session.</p>
pub idle: std::option::Option<i32>,
/// <p>Number of instances that are no longer active but haven't yet been terminated.</p>
pub terminating: std::option::Option<i32>,
}
impl std::fmt::Debug for Ec2InstanceCounts {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Ec2InstanceCounts");
formatter.field("desired", &self.desired);
formatter.field("minimum", &self.minimum);
formatter.field("maximum", &self.maximum);
formatter.field("pending", &self.pending);
formatter.field("active", &self.active);
formatter.field("idle", &self.idle);
formatter.field("terminating", &self.terminating);
formatter.finish()
}
}
/// See [`Ec2InstanceCounts`](crate::model::Ec2InstanceCounts)
pub mod ec2_instance_counts {
/// A builder for [`Ec2InstanceCounts`](crate::model::Ec2InstanceCounts)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) desired: std::option::Option<i32>,
pub(crate) minimum: std::option::Option<i32>,
pub(crate) maximum: std::option::Option<i32>,
pub(crate) pending: std::option::Option<i32>,
pub(crate) active: std::option::Option<i32>,
pub(crate) idle: std::option::Option<i32>,
pub(crate) terminating: std::option::Option<i32>,
}
impl Builder {
/// <p>Ideal number of active instances. GameLift will always try to maintain the desired
/// number of instances. Capacity is scaled up or down by changing the desired instances. </p>
pub fn desired(mut self, input: i32) -> Self {
self.desired = Some(input);
self
}
pub fn set_desired(mut self, input: std::option::Option<i32>) -> Self {
self.desired = input;
self
}
/// <p>The minimum instance count value allowed.</p>
pub fn minimum(mut self, input: i32) -> Self {
self.minimum = Some(input);
self
}
pub fn set_minimum(mut self, input: std::option::Option<i32>) -> Self {
self.minimum = input;
self
}
/// <p>The maximum instance count value allowed.</p>
pub fn maximum(mut self, input: i32) -> Self {
self.maximum = Some(input);
self
}
pub fn set_maximum(mut self, input: std::option::Option<i32>) -> Self {
self.maximum = input;
self
}
/// <p>Number of instances that are starting but not yet active.</p>
pub fn pending(mut self, input: i32) -> Self {
self.pending = Some(input);
self
}
pub fn set_pending(mut self, input: std::option::Option<i32>) -> Self {
self.pending = input;
self
}
/// <p>Actual number of instances that are ready to host game sessions.</p>
pub fn active(mut self, input: i32) -> Self {
self.active = Some(input);
self
}
pub fn set_active(mut self, input: std::option::Option<i32>) -> Self {
self.active = input;
self
}
/// <p>Number of active instances that are not currently hosting a game session.</p>
pub fn idle(mut self, input: i32) -> Self {
self.idle = Some(input);
self
}
pub fn set_idle(mut self, input: std::option::Option<i32>) -> Self {
self.idle = input;
self
}
/// <p>Number of instances that are no longer active but haven't yet been terminated.</p>
pub fn terminating(mut self, input: i32) -> Self {
self.terminating = Some(input);
self
}
pub fn set_terminating(mut self, input: std::option::Option<i32>) -> Self {
self.terminating = input;
self
}
/// Consumes the builder and constructs a [`Ec2InstanceCounts`](crate::model::Ec2InstanceCounts)
pub fn build(self) -> crate::model::Ec2InstanceCounts {
crate::model::Ec2InstanceCounts {
desired: self.desired,
minimum: self.minimum,
maximum: self.maximum,
pending: self.pending,
active: self.active,
idle: self.idle,
terminating: self.terminating,
}
}
}
}
impl Ec2InstanceCounts {
/// Creates a new builder-style object to manufacture [`Ec2InstanceCounts`](crate::model::Ec2InstanceCounts)
pub fn builder() -> crate::model::ec2_instance_counts::Builder {
crate::model::ec2_instance_counts::Builder::default()
}
}
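// Illustrative sketch: every count is an optional `i32`, so a partially populated
// value is fine when stubbing capacity data (numbers below are made up). A built
// `Ec2InstanceCounts` would typically be attached to a `FleetCapacity` via its
// `instance_counts` setter.
//
//     let counts = crate::model::Ec2InstanceCounts::builder()
//         .desired(10)
//         .minimum(1)
//         .maximum(20)
//         .active(8)
//         .idle(3)
//         .build();
//     assert_eq!(counts.desired, Some(10));
//     assert_eq!(counts.pending, None);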
/// <p>Represents a location in a multi-location fleet.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeFleetLocationAttributes</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct LocationAttributes {
/// <p>A fleet location and its current life-cycle state.</p>
pub location_state: std::option::Option<crate::model::LocationState>,
/// <p>A list of fleet actions that have been suspended in the fleet location.</p>
pub stopped_actions: std::option::Option<std::vec::Vec<crate::model::FleetAction>>,
/// <p>The status of fleet activity updates to the location. The status
/// <code>PENDING_UPDATE</code> indicates that <a>StopFleetActions</a> or <a>StartFleetActions</a>
/// has been requested but the update has not yet been completed for the location.</p>
pub update_status: std::option::Option<crate::model::LocationUpdateStatus>,
}
impl std::fmt::Debug for LocationAttributes {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("LocationAttributes");
formatter.field("location_state", &self.location_state);
formatter.field("stopped_actions", &self.stopped_actions);
formatter.field("update_status", &self.update_status);
formatter.finish()
}
}
/// See [`LocationAttributes`](crate::model::LocationAttributes)
pub mod location_attributes {
/// A builder for [`LocationAttributes`](crate::model::LocationAttributes)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) location_state: std::option::Option<crate::model::LocationState>,
pub(crate) stopped_actions: std::option::Option<std::vec::Vec<crate::model::FleetAction>>,
pub(crate) update_status: std::option::Option<crate::model::LocationUpdateStatus>,
}
impl Builder {
/// <p>A fleet location and its current life-cycle state.</p>
pub fn location_state(mut self, input: crate::model::LocationState) -> Self {
self.location_state = Some(input);
self
}
pub fn set_location_state(
mut self,
input: std::option::Option<crate::model::LocationState>,
) -> Self {
self.location_state = input;
self
}
pub fn stopped_actions(mut self, input: impl Into<crate::model::FleetAction>) -> Self {
let mut v = self.stopped_actions.unwrap_or_default();
v.push(input.into());
self.stopped_actions = Some(v);
self
}
pub fn set_stopped_actions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::FleetAction>>,
) -> Self {
self.stopped_actions = input;
self
}
/// <p>The status of fleet activity updates to the location. The status
/// <code>PENDING_UPDATE</code> indicates that <a>StopFleetActions</a> or <a>StartFleetActions</a>
/// has been requested but the update has not yet been completed for the location.</p>
pub fn update_status(mut self, input: crate::model::LocationUpdateStatus) -> Self {
self.update_status = Some(input);
self
}
pub fn set_update_status(
mut self,
input: std::option::Option<crate::model::LocationUpdateStatus>,
) -> Self {
self.update_status = input;
self
}
/// Consumes the builder and constructs a [`LocationAttributes`](crate::model::LocationAttributes)
pub fn build(self) -> crate::model::LocationAttributes {
crate::model::LocationAttributes {
location_state: self.location_state,
stopped_actions: self.stopped_actions,
update_status: self.update_status,
}
}
}
}
impl LocationAttributes {
/// Creates a new builder-style object to manufacture [`LocationAttributes`](crate::model::LocationAttributes)
pub fn builder() -> crate::model::location_attributes::Builder {
crate::model::location_attributes::Builder::default()
}
}
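// Illustrative sketch: nesting a `LocationState` (defined just below) inside
// `LocationAttributes`. The `stopped_actions` appender takes `FleetAction` values,
// which are defined elsewhere in this module, so it is omitted here.
//
//     let state = crate::model::LocationState::builder()
//         .location("us-west-2")
//         .status(crate::model::FleetStatus::Active)
//         .build();
//     let attributes = crate::model::LocationAttributes::builder()
//         .location_state(state)
//         .build();
//     assert!(attributes.stopped_actions.is_none());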
/// <p>A fleet location and its life-cycle state. A location state object might
/// be used to describe a fleet's remote location or home Region.
/// Life-cycle state tracks the progress of launching the first instance in a new
/// location and preparing it for game hosting, and then removing all instances and
/// deleting the location from the fleet.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateFleet</a> |
/// <a>CreateFleetLocations</a> |
/// <a>DeleteFleetLocations</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct LocationState {
/// <p>The fleet location, expressed as an AWS Region code such as <code>us-west-2</code>. </p>
pub location: std::option::Option<std::string::String>,
/// <p>The life-cycle status of a fleet location. </p>
pub status: std::option::Option<crate::model::FleetStatus>,
}
impl std::fmt::Debug for LocationState {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("LocationState");
formatter.field("location", &self.location);
formatter.field("status", &self.status);
formatter.finish()
}
}
/// See [`LocationState`](crate::model::LocationState)
pub mod location_state {
/// A builder for [`LocationState`](crate::model::LocationState)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) location: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::FleetStatus>,
}
impl Builder {
/// <p>The fleet location, expressed as an AWS Region code such as <code>us-west-2</code>. </p>
pub fn location(mut self, input: impl Into<std::string::String>) -> Self {
self.location = Some(input.into());
self
}
pub fn set_location(mut self, input: std::option::Option<std::string::String>) -> Self {
self.location = input;
self
}
/// <p>The life-cycle status of a fleet location. </p>
pub fn status(mut self, input: crate::model::FleetStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(mut self, input: std::option::Option<crate::model::FleetStatus>) -> Self {
self.status = input;
self
}
/// Consumes the builder and constructs a [`LocationState`](crate::model::LocationState)
pub fn build(self) -> crate::model::LocationState {
crate::model::LocationState {
location: self.location,
status: self.status,
}
}
}
}
impl LocationState {
/// Creates a new builder-style object to manufacture [`LocationState`](crate::model::LocationState)
pub fn builder() -> crate::model::location_state::Builder {
crate::model::location_state::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum FleetStatus {
Activating,
Active,
Building,
Deleting,
Downloading,
Error,
New,
Terminated,
Validating,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for FleetStatus {
fn from(s: &str) -> Self {
match s {
"ACTIVATING" => FleetStatus::Activating,
"ACTIVE" => FleetStatus::Active,
"BUILDING" => FleetStatus::Building,
"DELETING" => FleetStatus::Deleting,
"DOWNLOADING" => FleetStatus::Downloading,
"ERROR" => FleetStatus::Error,
"NEW" => FleetStatus::New,
"TERMINATED" => FleetStatus::Terminated,
"VALIDATING" => FleetStatus::Validating,
other => FleetStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for FleetStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(FleetStatus::from(s))
}
}
impl FleetStatus {
pub fn as_str(&self) -> &str {
match self {
FleetStatus::Activating => "ACTIVATING",
FleetStatus::Active => "ACTIVE",
FleetStatus::Building => "BUILDING",
FleetStatus::Deleting => "DELETING",
FleetStatus::Downloading => "DOWNLOADING",
FleetStatus::Error => "ERROR",
FleetStatus::New => "NEW",
FleetStatus::Terminated => "TERMINATED",
FleetStatus::Validating => "VALIDATING",
FleetStatus::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"ACTIVATING",
"ACTIVE",
"BUILDING",
"DELETING",
"DOWNLOADING",
"ERROR",
"NEW",
"TERMINATED",
"VALIDATING",
]
}
}
impl AsRef<str> for FleetStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
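// Illustrative sketch: `FleetStatus` follows the same string-conversion pattern as the
// other enums here; parsing never fails and unknown strings are carried through.
//
//     let status: FleetStatus = "DELETING".parse().unwrap();
//     assert_eq!(status.as_str(), "DELETING");
//     assert!(matches!(
//         FleetStatus::from("SOME_FUTURE_STATUS"),
//         FleetStatus::Unknown(_)
//     ));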
/// <p>Log entry describing an event that involves GameLift resources (such as a fleet). In
/// addition to tracking activity, event codes and messages can provide additional
/// information for troubleshooting and debugging problems.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeFleetEvents</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Event {
/// <p>A unique identifier for a fleet event.</p>
pub event_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for an event resource, such as a fleet ID.</p>
pub resource_id: std::option::Option<std::string::String>,
/// <p>The type of event being logged. </p>
/// <p>
/// <b>Fleet creation events (ordered by fleet creation activity):</b>
/// </p>
/// <ul>
/// <li>
/// <p>FLEET_CREATED -- A fleet resource was successfully created with a status of
/// <code>NEW</code>. Event messaging includes the fleet ID.</p>
/// </li>
/// <li>
/// <p>FLEET_STATE_DOWNLOADING -- Fleet status changed from <code>NEW</code> to
/// <code>DOWNLOADING</code>. The compressed build has started downloading to a
/// fleet instance for installation.</p>
/// </li>
/// <li>
/// <p> FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet
/// instance.</p>
/// </li>
/// <li>
/// <p>FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully
/// downloaded to an instance, and the build files are now being extracted from the
/// uploaded build and saved to an instance. Failure at this stage prevents a fleet
/// from moving to <code>ACTIVE</code> status. Logs for this stage display a list of
/// the files that are extracted and saved on the instance. Access the logs by using
/// the URL in <i>PreSignedLogUrl</i>.</p>
/// </li>
/// <li>
/// <p>FLEET_CREATION_RUNNING_INSTALLER – The game server build files were
/// successfully extracted, and GameLift is now running the build's install
/// script (if one is included). Failure in this stage prevents a fleet from moving
/// to <code>ACTIVE</code> status. Logs for this stage list the installation steps
/// and whether or not the install completed successfully. Access the logs by using
/// the URL in <i>PreSignedLogUrl</i>. </p>
/// </li>
/// <li>
/// <p>FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful,
/// and GameLift is now verifying that the game server launch paths, which are
/// specified in the fleet's runtime configuration, exist. If any listed launch path
/// exists, GameLift tries to launch a game server process and waits for the process
/// to report ready. Failures in this stage prevent a fleet from moving to
/// <code>ACTIVE</code> status. Logs for this stage list the launch paths in the
/// runtime configuration and indicate whether each is found. Access the logs by
/// using the URL in <i>PreSignedLogUrl</i>.
/// </p>
/// </li>
/// <li>
/// <p>FLEET_STATE_VALIDATING -- Fleet status changed from
/// <code>DOWNLOADING</code> to <code>VALIDATING</code>.</p>
/// </li>
/// <li>
/// <p> FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime
/// configuration failed because the executable specified in a launch path does not
/// exist on the instance.</p>
/// </li>
/// <li>
/// <p>FLEET_STATE_BUILDING -- Fleet status changed from <code>VALIDATING</code>
/// to <code>BUILDING</code>.</p>
/// </li>
/// <li>
/// <p>FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime
/// configuration failed because the executable specified in a launch path failed to
/// run on the fleet instance.</p>
/// </li>
/// <li>
/// <p>FLEET_STATE_ACTIVATING -- Fleet status changed from <code>BUILDING</code>
/// to <code>ACTIVATING</code>. </p>
/// </li>
/// <li>
/// <p> FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of
/// the steps in the fleet activation process. This event code indicates that the
/// game build was successfully downloaded to a fleet instance, built, and
/// validated, but was not able to start a server process. Learn more at
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation"> Debug Fleet
/// Creation Issues</a>
/// </p>
/// </li>
/// <li>
/// <p>FLEET_STATE_ACTIVE -- The fleet's status changed from
/// <code>ACTIVATING</code> to <code>ACTIVE</code>. The fleet is now ready to
/// host game sessions.</p>
/// </li>
/// </ul>
/// <p>
/// <b>VPC peering events:</b>
/// </p>
/// <ul>
/// <li>
/// <p>FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been
/// established between the VPC for a GameLift fleet and a VPC in your AWS
/// account.</p>
/// </li>
/// <li>
/// <p>FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed.
/// Event details and status information (see <a>DescribeVpcPeeringConnections</a>) provide additional detail. A
/// common reason for peering failure is that the two VPCs have overlapping CIDR
/// blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in
/// your AWS account. For more information on VPC peering failures, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html">https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html</a>
/// </p>
/// </li>
/// <li>
/// <p>FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully
/// deleted.</p>
/// </li>
/// </ul>
/// <p>
/// <b>Spot instance events:</b>
/// </p>
/// <ul>
/// <li>
/// <p> INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a
/// two-minute notification.</p>
/// </li>
/// </ul>
/// <p>
/// <b>Other fleet events:</b>
/// </p>
/// <ul>
/// <li>
/// <p>FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings
/// (desired instances, minimum/maximum scaling limits). Event messaging includes
/// the new capacity settings.</p>
/// </li>
/// <li>
/// <p>FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to
/// the fleet's game session protection policy setting. Event messaging includes
/// both the old and new policy setting. </p>
/// </li>
/// <li>
/// <p>FLEET_DELETED -- A request to delete a fleet was initiated.</p>
/// </li>
/// <li>
/// <p> GENERIC_EVENT -- An unspecified event has occurred.</p>
/// </li>
/// </ul>
pub event_code: std::option::Option<crate::model::EventCode>,
/// <p>Additional information related to the event.</p>
pub message: std::option::Option<std::string::String>,
/// <p>Time stamp indicating when this event occurred. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub event_time: std::option::Option<smithy_types::Instant>,
/// <p>Location of stored logs with additional detail that is related to the event. This
/// is useful for debugging issues. The URL is valid for 15 minutes. You can also access
/// fleet creation logs through the GameLift console.</p>
pub pre_signed_log_url: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for Event {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Event");
formatter.field("event_id", &self.event_id);
formatter.field("resource_id", &self.resource_id);
formatter.field("event_code", &self.event_code);
formatter.field("message", &self.message);
formatter.field("event_time", &self.event_time);
formatter.field("pre_signed_log_url", &self.pre_signed_log_url);
formatter.finish()
}
}
/// See [`Event`](crate::model::Event)
pub mod event {
/// A builder for [`Event`](crate::model::Event)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) event_id: std::option::Option<std::string::String>,
pub(crate) resource_id: std::option::Option<std::string::String>,
pub(crate) event_code: std::option::Option<crate::model::EventCode>,
pub(crate) message: std::option::Option<std::string::String>,
pub(crate) event_time: std::option::Option<smithy_types::Instant>,
pub(crate) pre_signed_log_url: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for a fleet event.</p>
pub fn event_id(mut self, input: impl Into<std::string::String>) -> Self {
self.event_id = Some(input.into());
self
}
pub fn set_event_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.event_id = input;
self
}
/// <p>A unique identifier for an event resource, such as a fleet ID.</p>
pub fn resource_id(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_id = Some(input.into());
self
}
pub fn set_resource_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.resource_id = input;
self
}
/// <p>The type of event being logged. </p>
/// <p>
/// <b>Fleet creation events (ordered by fleet creation activity):</b>
/// </p>
/// <ul>
/// <li>
/// <p>FLEET_CREATED -- A fleet resource was successfully created with a status of
/// <code>NEW</code>. Event messaging includes the fleet ID.</p>
/// </li>
/// <li>
/// <p>FLEET_STATE_DOWNLOADING -- Fleet status changed from <code>NEW</code> to
/// <code>DOWNLOADING</code>. The compressed build has started downloading to a
/// fleet instance for installation.</p>
/// </li>
/// <li>
/// <p> FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet
/// instance.</p>
/// </li>
/// <li>
/// <p>FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully
/// downloaded to an instance, and the build files are now being extracted from the
/// uploaded build and saved to an instance. Failure at this stage prevents a fleet
/// from moving to <code>ACTIVE</code> status. Logs for this stage display a list of
/// the files that are extracted and saved on the instance. Access the logs by using
/// the URL in <i>PreSignedLogUrl</i>.</p>
/// </li>
/// <li>
/// <p>FLEET_CREATION_RUNNING_INSTALLER – The game server build files were
/// successfully extracted, and GameLift is now running the build's install
/// script (if one is included). Failure in this stage prevents a fleet from moving
/// to <code>ACTIVE</code> status. Logs for this stage list the installation steps
/// and whether or not the install completed successfully. Access the logs by using
/// the URL in <i>PreSignedLogUrl</i>. </p>
/// </li>
/// <li>
/// <p>FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful,
/// and GameLift is now verifying that the game server launch paths, which are
/// specified in the fleet's runtime configuration, exist. If any listed launch path
/// exists, GameLift tries to launch a game server process and waits for the process
/// to report ready. Failures in this stage prevent a fleet from moving to
/// <code>ACTIVE</code> status. Logs for this stage list the launch paths in the
/// runtime configuration and indicate whether each is found. Access the logs by
/// using the URL in <i>PreSignedLogUrl</i>.
/// </p>
/// </li>
/// <li>
/// <p>FLEET_STATE_VALIDATING -- Fleet status changed from
/// <code>DOWNLOADING</code> to <code>VALIDATING</code>.</p>
/// </li>
/// <li>
/// <p> FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime
/// configuration failed because the executable specified in a launch path does not
/// exist on the instance.</p>
/// </li>
/// <li>
/// <p>FLEET_STATE_BUILDING -- Fleet status changed from <code>VALIDATING</code>
/// to <code>BUILDING</code>.</p>
/// </li>
/// <li>
/// <p>FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime
/// configuration failed because the executable specified in a launch path failed to
/// run on the fleet instance.</p>
/// </li>
/// <li>
/// <p>FLEET_STATE_ACTIVATING -- Fleet status changed from <code>BUILDING</code>
/// to <code>ACTIVATING</code>. </p>
/// </li>
/// <li>
/// <p> FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of
/// the steps in the fleet activation process. This event code indicates that the
/// game build was successfully downloaded to a fleet instance, built, and
/// validated, but was not able to start a server process. Learn more at
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation"> Debug Fleet
/// Creation Issues</a>
/// </p>
/// </li>
/// <li>
/// <p>FLEET_STATE_ACTIVE -- The fleet's status changed from
/// <code>ACTIVATING</code> to <code>ACTIVE</code>. The fleet is now ready to
/// host game sessions.</p>
/// </li>
/// </ul>
/// <p>
/// <b>VPC peering events:</b>
/// </p>
/// <ul>
/// <li>
/// <p>FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been
/// established between the VPC for a GameLift fleet and a VPC in your AWS
/// account.</p>
/// </li>
/// <li>
/// <p>FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed.
/// Event details and status information (see <a>DescribeVpcPeeringConnections</a>) provide additional detail. A
/// common reason for peering failure is that the two VPCs have overlapping CIDR
/// blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in
/// your AWS account. For more information on VPC peering failures, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html">https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html</a>
/// </p>
/// </li>
/// <li>
/// <p>FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully
/// deleted.</p>
/// </li>
/// </ul>
/// <p>
/// <b>Spot instance events:</b>
/// </p>
/// <ul>
/// <li>
/// <p> INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a
/// two-minute notification.</p>
/// </li>
/// </ul>
/// <p>
/// <b>Other fleet events:</b>
/// </p>
/// <ul>
/// <li>
/// <p>FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings
/// (desired instances, minimum/maximum scaling limits). Event messaging includes
/// the new capacity settings.</p>
/// </li>
/// <li>
/// <p>FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to
/// the fleet's game session protection policy setting. Event messaging includes
/// both the old and new policy setting. </p>
/// </li>
/// <li>
/// <p>FLEET_DELETED -- A request to delete a fleet was initiated.</p>
/// </li>
/// <li>
/// <p> GENERIC_EVENT -- An unspecified event has occurred.</p>
/// </li>
/// </ul>
pub fn event_code(mut self, input: crate::model::EventCode) -> Self {
self.event_code = Some(input);
self
}
pub fn set_event_code(
mut self,
input: std::option::Option<crate::model::EventCode>,
) -> Self {
self.event_code = input;
self
}
/// <p>Additional information related to the event.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// <p>Time stamp indicating when this event occurred. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn event_time(mut self, input: smithy_types::Instant) -> Self {
self.event_time = Some(input);
self
}
pub fn set_event_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.event_time = input;
self
}
/// <p>Location of stored logs with additional detail that is related to the event. This
/// is useful for debugging issues. The URL is valid for 15 minutes. You can also access
/// fleet creation logs through the GameLift console.</p>
pub fn pre_signed_log_url(mut self, input: impl Into<std::string::String>) -> Self {
self.pre_signed_log_url = Some(input.into());
self
}
pub fn set_pre_signed_log_url(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.pre_signed_log_url = input;
self
}
/// Consumes the builder and constructs a [`Event`](crate::model::Event)
pub fn build(self) -> crate::model::Event {
crate::model::Event {
event_id: self.event_id,
resource_id: self.resource_id,
event_code: self.event_code,
message: self.message,
event_time: self.event_time,
pre_signed_log_url: self.pre_signed_log_url,
}
}
}
}
impl Event {
/// Creates a new builder-style object to manufacture [`Event`](crate::model::Event)
pub fn builder() -> crate::model::event::Builder {
crate::model::event::Builder::default()
}
}
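// Illustrative sketch: assembling an `Event` record by hand, e.g. for tests. The
// timestamp is omitted because constructing a `smithy_types::Instant` is out of scope
// for this file; the remaining fields use types defined in this module and the
// identifiers below are made up.
//
//     let event = crate::model::Event::builder()
//         .event_id("event-1234")
//         .resource_id("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
//         .event_code(crate::model::EventCode::FleetStateActive)
//         .message("Fleet changed state to ACTIVE")
//         .build();
//     assert_eq!(event.event_code, Some(crate::model::EventCode::FleetStateActive));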
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum EventCode {
FleetActivationFailed,
FleetActivationFailedNoInstances,
FleetBinaryDownloadFailed,
FleetCreated,
FleetCreationExtractingBuild,
FleetCreationRunningInstaller,
FleetCreationValidatingRuntimeConfig,
FleetDeleted,
FleetInitializationFailed,
FleetNewGameSessionProtectionPolicyUpdated,
FleetScalingEvent,
FleetStateActivating,
FleetStateActive,
FleetStateBuilding,
FleetStateDownloading,
FleetStateError,
FleetStateValidating,
FleetValidationExecutableRuntimeFailure,
FleetValidationLaunchPathNotFound,
FleetValidationTimedOut,
FleetVpcPeeringDeleted,
FleetVpcPeeringFailed,
FleetVpcPeeringSucceeded,
GameSessionActivationTimeout,
GenericEvent,
InstanceInterrupted,
ServerProcessCrashed,
ServerProcessForceTerminated,
ServerProcessInvalidPath,
ServerProcessProcessExitTimeout,
ServerProcessProcessReadyTimeout,
ServerProcessSdkInitializationTimeout,
ServerProcessTerminatedUnhealthy,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for EventCode {
fn from(s: &str) -> Self {
match s {
"FLEET_ACTIVATION_FAILED" => EventCode::FleetActivationFailed,
"FLEET_ACTIVATION_FAILED_NO_INSTANCES" => EventCode::FleetActivationFailedNoInstances,
"FLEET_BINARY_DOWNLOAD_FAILED" => EventCode::FleetBinaryDownloadFailed,
"FLEET_CREATED" => EventCode::FleetCreated,
"FLEET_CREATION_EXTRACTING_BUILD" => EventCode::FleetCreationExtractingBuild,
"FLEET_CREATION_RUNNING_INSTALLER" => EventCode::FleetCreationRunningInstaller,
"FLEET_CREATION_VALIDATING_RUNTIME_CONFIG" => {
EventCode::FleetCreationValidatingRuntimeConfig
}
"FLEET_DELETED" => EventCode::FleetDeleted,
"FLEET_INITIALIZATION_FAILED" => EventCode::FleetInitializationFailed,
"FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED" => {
EventCode::FleetNewGameSessionProtectionPolicyUpdated
}
"FLEET_SCALING_EVENT" => EventCode::FleetScalingEvent,
"FLEET_STATE_ACTIVATING" => EventCode::FleetStateActivating,
"FLEET_STATE_ACTIVE" => EventCode::FleetStateActive,
"FLEET_STATE_BUILDING" => EventCode::FleetStateBuilding,
"FLEET_STATE_DOWNLOADING" => EventCode::FleetStateDownloading,
"FLEET_STATE_ERROR" => EventCode::FleetStateError,
"FLEET_STATE_VALIDATING" => EventCode::FleetStateValidating,
"FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE" => {
EventCode::FleetValidationExecutableRuntimeFailure
}
"FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND" => {
EventCode::FleetValidationLaunchPathNotFound
}
"FLEET_VALIDATION_TIMED_OUT" => EventCode::FleetValidationTimedOut,
"FLEET_VPC_PEERING_DELETED" => EventCode::FleetVpcPeeringDeleted,
"FLEET_VPC_PEERING_FAILED" => EventCode::FleetVpcPeeringFailed,
"FLEET_VPC_PEERING_SUCCEEDED" => EventCode::FleetVpcPeeringSucceeded,
"GAME_SESSION_ACTIVATION_TIMEOUT" => EventCode::GameSessionActivationTimeout,
"GENERIC_EVENT" => EventCode::GenericEvent,
"INSTANCE_INTERRUPTED" => EventCode::InstanceInterrupted,
"SERVER_PROCESS_CRASHED" => EventCode::ServerProcessCrashed,
"SERVER_PROCESS_FORCE_TERMINATED" => EventCode::ServerProcessForceTerminated,
"SERVER_PROCESS_INVALID_PATH" => EventCode::ServerProcessInvalidPath,
"SERVER_PROCESS_PROCESS_EXIT_TIMEOUT" => EventCode::ServerProcessProcessExitTimeout,
"SERVER_PROCESS_PROCESS_READY_TIMEOUT" => EventCode::ServerProcessProcessReadyTimeout,
"SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT" => {
EventCode::ServerProcessSdkInitializationTimeout
}
"SERVER_PROCESS_TERMINATED_UNHEALTHY" => EventCode::ServerProcessTerminatedUnhealthy,
other => EventCode::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for EventCode {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(EventCode::from(s))
}
}
impl EventCode {
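    /// Returns the wire-format string for this event code.
    ///
    /// A small sketch of the string round trip; the unrecognized value below
    /// is made up to show the `Unknown` fallback for codes added after this
    /// code was generated:
    ///
    /// ```ignore
    /// assert_eq!(EventCode::from("FLEET_CREATED").as_str(), "FLEET_CREATED");
    /// assert_eq!(
    ///     EventCode::from("SOME_FUTURE_CODE"),
    ///     EventCode::Unknown("SOME_FUTURE_CODE".to_string())
    /// );
    /// ```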
pub fn as_str(&self) -> &str {
match self {
EventCode::FleetActivationFailed => "FLEET_ACTIVATION_FAILED",
EventCode::FleetActivationFailedNoInstances => "FLEET_ACTIVATION_FAILED_NO_INSTANCES",
EventCode::FleetBinaryDownloadFailed => "FLEET_BINARY_DOWNLOAD_FAILED",
EventCode::FleetCreated => "FLEET_CREATED",
EventCode::FleetCreationExtractingBuild => "FLEET_CREATION_EXTRACTING_BUILD",
EventCode::FleetCreationRunningInstaller => "FLEET_CREATION_RUNNING_INSTALLER",
EventCode::FleetCreationValidatingRuntimeConfig => {
"FLEET_CREATION_VALIDATING_RUNTIME_CONFIG"
}
EventCode::FleetDeleted => "FLEET_DELETED",
EventCode::FleetInitializationFailed => "FLEET_INITIALIZATION_FAILED",
EventCode::FleetNewGameSessionProtectionPolicyUpdated => {
"FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED"
}
EventCode::FleetScalingEvent => "FLEET_SCALING_EVENT",
EventCode::FleetStateActivating => "FLEET_STATE_ACTIVATING",
EventCode::FleetStateActive => "FLEET_STATE_ACTIVE",
EventCode::FleetStateBuilding => "FLEET_STATE_BUILDING",
EventCode::FleetStateDownloading => "FLEET_STATE_DOWNLOADING",
EventCode::FleetStateError => "FLEET_STATE_ERROR",
EventCode::FleetStateValidating => "FLEET_STATE_VALIDATING",
EventCode::FleetValidationExecutableRuntimeFailure => {
"FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE"
}
EventCode::FleetValidationLaunchPathNotFound => {
"FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND"
}
EventCode::FleetValidationTimedOut => "FLEET_VALIDATION_TIMED_OUT",
EventCode::FleetVpcPeeringDeleted => "FLEET_VPC_PEERING_DELETED",
EventCode::FleetVpcPeeringFailed => "FLEET_VPC_PEERING_FAILED",
EventCode::FleetVpcPeeringSucceeded => "FLEET_VPC_PEERING_SUCCEEDED",
EventCode::GameSessionActivationTimeout => "GAME_SESSION_ACTIVATION_TIMEOUT",
EventCode::GenericEvent => "GENERIC_EVENT",
EventCode::InstanceInterrupted => "INSTANCE_INTERRUPTED",
EventCode::ServerProcessCrashed => "SERVER_PROCESS_CRASHED",
EventCode::ServerProcessForceTerminated => "SERVER_PROCESS_FORCE_TERMINATED",
EventCode::ServerProcessInvalidPath => "SERVER_PROCESS_INVALID_PATH",
EventCode::ServerProcessProcessExitTimeout => "SERVER_PROCESS_PROCESS_EXIT_TIMEOUT",
EventCode::ServerProcessProcessReadyTimeout => "SERVER_PROCESS_PROCESS_READY_TIMEOUT",
EventCode::ServerProcessSdkInitializationTimeout => {
"SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT"
}
EventCode::ServerProcessTerminatedUnhealthy => "SERVER_PROCESS_TERMINATED_UNHEALTHY",
EventCode::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&[
"FLEET_ACTIVATION_FAILED",
"FLEET_ACTIVATION_FAILED_NO_INSTANCES",
"FLEET_BINARY_DOWNLOAD_FAILED",
"FLEET_CREATED",
"FLEET_CREATION_EXTRACTING_BUILD",
"FLEET_CREATION_RUNNING_INSTALLER",
"FLEET_CREATION_VALIDATING_RUNTIME_CONFIG",
"FLEET_DELETED",
"FLEET_INITIALIZATION_FAILED",
"FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED",
"FLEET_SCALING_EVENT",
"FLEET_STATE_ACTIVATING",
"FLEET_STATE_ACTIVE",
"FLEET_STATE_BUILDING",
"FLEET_STATE_DOWNLOADING",
"FLEET_STATE_ERROR",
"FLEET_STATE_VALIDATING",
"FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE",
"FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND",
"FLEET_VALIDATION_TIMED_OUT",
"FLEET_VPC_PEERING_DELETED",
"FLEET_VPC_PEERING_FAILED",
"FLEET_VPC_PEERING_SUCCEEDED",
"GAME_SESSION_ACTIVATION_TIMEOUT",
"GENERIC_EVENT",
"INSTANCE_INTERRUPTED",
"SERVER_PROCESS_CRASHED",
"SERVER_PROCESS_FORCE_TERMINATED",
"SERVER_PROCESS_INVALID_PATH",
"SERVER_PROCESS_PROCESS_EXIT_TIMEOUT",
"SERVER_PROCESS_PROCESS_READY_TIMEOUT",
"SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT",
"SERVER_PROCESS_TERMINATED_UNHEALTHY",
]
}
}
impl AsRef<str> for EventCode {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>Describes a GameLift fleet of game hosting resources.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateFleet</a> | <a>DescribeFleetAttributes</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct FleetAttributes {
/// <p>A unique identifier for the fleet.</p>
pub fleet_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>. In a GameLift fleet ARN, the resource ID matches the <code>FleetId</code>
/// value.</p>
pub fleet_arn: std::option::Option<std::string::String>,
/// <p>The kind of instances, On-Demand or Spot, that this fleet uses.</p>
pub fleet_type: std::option::Option<crate::model::FleetType>,
/// <p>The EC2 instance type that determines the computing resources of each instance in
/// the fleet. Instance type defines the CPU, memory, storage, and networking capacity. See
/// <a href="http://aws.amazon.com/ec2/instance-types/">Amazon EC2 Instance Types</a>
/// for detailed descriptions.</p>
pub instance_type: std::option::Option<crate::model::Ec2InstanceType>,
/// <p>A human-readable description of the fleet.</p>
pub description: std::option::Option<std::string::String>,
/// <p>A descriptive label that is associated with a fleet. Fleet names do not need to be unique.</p>
pub name: std::option::Option<std::string::String>,
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub creation_time: std::option::Option<smithy_types::Instant>,
/// <p>A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub termination_time: std::option::Option<smithy_types::Instant>,
/// <p>Current status of the fleet. Possible fleet statuses include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>NEW</b> -- A new fleet has been defined and desired
/// instances is set to 1. </p>
/// </li>
/// <li>
/// <p>
/// <b>DOWNLOADING/VALIDATING/BUILDING/ACTIVATING</b> --
/// GameLift is setting up the new fleet, creating new instances with the game build
/// or Realtime script and starting server processes.</p>
/// </li>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- Hosts can now accept game
/// sessions.</p>
/// </li>
/// <li>
/// <p>
/// <b>ERROR</b> -- An error occurred when downloading,
/// validating, building, or activating the fleet.</p>
/// </li>
/// <li>
/// <p>
/// <b>DELETING</b> -- Hosts are responding to a delete
/// fleet request.</p>
/// </li>
/// <li>
/// <p>
/// <b>TERMINATED</b> -- The fleet no longer
/// exists.</p>
/// </li>
/// </ul>
pub status: std::option::Option<crate::model::FleetStatus>,
/// <p>A unique identifier for the build resource that is deployed on instances in this fleet.</p>
pub build_id: std::option::Option<std::string::String>,
/// <p> The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift build resource that is deployed on instances in
/// this fleet. In a GameLift build ARN, the resource ID matches the <code>BuildId</code>
/// value.</p>
pub build_arn: std::option::Option<std::string::String>,
/// <p>A unique identifier for the Realtime script resource that is deployed on instances in this fleet.</p>
pub script_id: std::option::Option<std::string::String>,
/// <p> The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift script resource that is deployed on instances
/// in this fleet. In a GameLift script ARN, the resource ID matches the
/// <code>ScriptId</code> value.</p>
pub script_arn: std::option::Option<std::string::String>,
/// <p>
/// <b>This parameter is no longer used.</b> Server launch paths
/// are now defined using the fleet's <a>RuntimeConfiguration</a> parameter.
/// Requests that use this parameter instead continue to be valid.</p>
pub server_launch_path: std::option::Option<std::string::String>,
/// <p>
/// <b>This parameter is no longer used.</b> Server launch
/// parameters are now defined using the fleet's <a>RuntimeConfiguration</a>
/// parameter. Requests that use this parameter instead continue to be valid.</p>
pub server_launch_parameters: std::option::Option<std::string::String>,
/// <p>
/// <b>This parameter is no longer used.</b> Game session log
/// paths are now defined using the GameLift server API <code>ProcessReady()</code>
/// <code>logParameters</code>. See more information in the <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api-ref.html#gamelift-sdk-server-api-ref-dataypes-process">Server API Reference</a>. </p>
pub log_paths: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The type of game session protection to set on all new instances that are started in
/// the fleet.</p>
/// <ul>
/// <li>
/// <p>
/// <b>NoProtection</b> -- The game session can be
/// terminated during a scale-down event.</p>
/// </li>
/// <li>
/// <p>
/// <b>FullProtection</b> -- If the game session is in an
/// <code>ACTIVE</code> status, it cannot be terminated during a scale-down
/// event.</p>
/// </li>
/// </ul>
pub new_game_session_protection_policy: std::option::Option<crate::model::ProtectionPolicy>,
/// <p>The operating system of the fleet's computing resources. A fleet's operating system is
/// determined by the OS of the build or script that is deployed on this fleet.</p>
pub operating_system: std::option::Option<crate::model::OperatingSystem>,
/// <p>The fleet policy that limits the number of game sessions an individual player can
/// create over a span of time.</p>
pub resource_creation_limit_policy:
std::option::Option<crate::model::ResourceCreationLimitPolicy>,
/// <p>Name of a metric group that metrics for this fleet are added to. In Amazon CloudWatch,
/// you can view aggregated metrics for fleets that are in a metric group. A fleet can be
/// included in only one metric group at a time.</p>
pub metric_groups: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>A list of fleet activity that has been suspended using <a>StopFleetActions</a>. This includes fleet auto-scaling.</p>
pub stopped_actions: std::option::Option<std::vec::Vec<crate::model::FleetAction>>,
/// <p>A unique identifier for an AWS IAM role that manages access to your AWS services.
/// With an instance role ARN set, any application that runs on an instance in this fleet can assume the role,
/// including install scripts, server processes, and daemons (background processes). Create a role or look up a role's
/// ARN by using the <a href="https://console.aws.amazon.com/iam/">IAM dashboard</a> in the AWS Management Console.
/// Learn more about using on-box credentials for your game servers at
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html">
/// Access external resources from a game server</a>.</p>
pub instance_role_arn: std::option::Option<std::string::String>,
/// <p>Indicates whether a TLS/SSL certificate was generated for the fleet. </p>
pub certificate_configuration: std::option::Option<crate::model::CertificateConfiguration>,
}
impl std::fmt::Debug for FleetAttributes {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("FleetAttributes");
formatter.field("fleet_id", &self.fleet_id);
formatter.field("fleet_arn", &self.fleet_arn);
formatter.field("fleet_type", &self.fleet_type);
formatter.field("instance_type", &self.instance_type);
formatter.field("description", &self.description);
formatter.field("name", &self.name);
formatter.field("creation_time", &self.creation_time);
formatter.field("termination_time", &self.termination_time);
formatter.field("status", &self.status);
formatter.field("build_id", &self.build_id);
formatter.field("build_arn", &self.build_arn);
formatter.field("script_id", &self.script_id);
formatter.field("script_arn", &self.script_arn);
formatter.field("server_launch_path", &self.server_launch_path);
formatter.field("server_launch_parameters", &self.server_launch_parameters);
formatter.field("log_paths", &self.log_paths);
formatter.field(
"new_game_session_protection_policy",
&self.new_game_session_protection_policy,
);
formatter.field("operating_system", &self.operating_system);
formatter.field(
"resource_creation_limit_policy",
&self.resource_creation_limit_policy,
);
formatter.field("metric_groups", &self.metric_groups);
formatter.field("stopped_actions", &self.stopped_actions);
formatter.field("instance_role_arn", &self.instance_role_arn);
formatter.field("certificate_configuration", &self.certificate_configuration);
formatter.finish()
}
}
/// See [`FleetAttributes`](crate::model::FleetAttributes)
pub mod fleet_attributes {
/// A builder for [`FleetAttributes`](crate::model::FleetAttributes)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) fleet_id: std::option::Option<std::string::String>,
pub(crate) fleet_arn: std::option::Option<std::string::String>,
pub(crate) fleet_type: std::option::Option<crate::model::FleetType>,
pub(crate) instance_type: std::option::Option<crate::model::Ec2InstanceType>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) creation_time: std::option::Option<smithy_types::Instant>,
pub(crate) termination_time: std::option::Option<smithy_types::Instant>,
pub(crate) status: std::option::Option<crate::model::FleetStatus>,
pub(crate) build_id: std::option::Option<std::string::String>,
pub(crate) build_arn: std::option::Option<std::string::String>,
pub(crate) script_id: std::option::Option<std::string::String>,
pub(crate) script_arn: std::option::Option<std::string::String>,
pub(crate) server_launch_path: std::option::Option<std::string::String>,
pub(crate) server_launch_parameters: std::option::Option<std::string::String>,
pub(crate) log_paths: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) new_game_session_protection_policy:
std::option::Option<crate::model::ProtectionPolicy>,
pub(crate) operating_system: std::option::Option<crate::model::OperatingSystem>,
pub(crate) resource_creation_limit_policy:
std::option::Option<crate::model::ResourceCreationLimitPolicy>,
pub(crate) metric_groups: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) stopped_actions: std::option::Option<std::vec::Vec<crate::model::FleetAction>>,
pub(crate) instance_role_arn: std::option::Option<std::string::String>,
pub(crate) certificate_configuration:
std::option::Option<crate::model::CertificateConfiguration>,
}
impl Builder {
/// <p>A unique identifier for the fleet.</p>
pub fn fleet_id(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_id = Some(input.into());
self
}
pub fn set_fleet_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_id = input;
self
}
/// <p>The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is <code>arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912</code>. In a GameLift fleet ARN, the resource ID matches the <code>FleetId</code>
/// value.</p>
pub fn fleet_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.fleet_arn = Some(input.into());
self
}
pub fn set_fleet_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.fleet_arn = input;
self
}
/// <p>The kind of instances, On-Demand or Spot, that this fleet uses.</p>
pub fn fleet_type(mut self, input: crate::model::FleetType) -> Self {
self.fleet_type = Some(input);
self
}
pub fn set_fleet_type(
mut self,
input: std::option::Option<crate::model::FleetType>,
) -> Self {
self.fleet_type = input;
self
}
/// <p>The EC2 instance type that determines the computing resources of each instance in
/// the fleet. Instance type defines the CPU, memory, storage, and networking capacity. See
/// <a href="http://aws.amazon.com/ec2/instance-types/">Amazon EC2 Instance Types</a>
/// for detailed descriptions.</p>
pub fn instance_type(mut self, input: crate::model::Ec2InstanceType) -> Self {
self.instance_type = Some(input);
self
}
pub fn set_instance_type(
mut self,
input: std::option::Option<crate::model::Ec2InstanceType>,
) -> Self {
self.instance_type = input;
self
}
/// <p>A human-readable description of the fleet.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
/// <p>A descriptive label that is associated with a fleet. Fleet names do not need to be unique.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn creation_time(mut self, input: smithy_types::Instant) -> Self {
self.creation_time = Some(input);
self
}
pub fn set_creation_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.creation_time = input;
self
}
/// <p>A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example <code>"1469498468.057"</code>).</p>
pub fn termination_time(mut self, input: smithy_types::Instant) -> Self {
self.termination_time = Some(input);
self
}
pub fn set_termination_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.termination_time = input;
self
}
/// <p>Current status of the fleet. Possible fleet statuses include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <b>NEW</b> -- A new fleet has been defined and desired
/// instances is set to 1. </p>
/// </li>
/// <li>
/// <p>
/// <b>DOWNLOADING/VALIDATING/BUILDING/ACTIVATING</b> --
/// GameLift is setting up the new fleet, creating new instances with the game build
/// or Realtime script and starting server processes.</p>
/// </li>
/// <li>
/// <p>
/// <b>ACTIVE</b> -- Hosts can now accept game
/// sessions.</p>
/// </li>
/// <li>
/// <p>
/// <b>ERROR</b> -- An error occurred when downloading,
/// validating, building, or activating the fleet.</p>
/// </li>
/// <li>
/// <p>
/// <b>DELETING</b> -- Hosts are responding to a delete
/// fleet request.</p>
/// </li>
/// <li>
/// <p>
/// <b>TERMINATED</b> -- The fleet no longer
/// exists.</p>
/// </li>
/// </ul>
pub fn status(mut self, input: crate::model::FleetStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(mut self, input: std::option::Option<crate::model::FleetStatus>) -> Self {
self.status = input;
self
}
/// <p>A unique identifier for the build resource that is deployed on instances in this fleet.</p>
pub fn build_id(mut self, input: impl Into<std::string::String>) -> Self {
self.build_id = Some(input.into());
self
}
pub fn set_build_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.build_id = input;
self
}
/// <p> The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift build resource that is deployed on instances in
/// this fleet. In a GameLift build ARN, the resource ID matches the <code>BuildId</code>
/// value.</p>
pub fn build_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.build_arn = Some(input.into());
self
}
pub fn set_build_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.build_arn = input;
self
}
/// <p>A unique identifier for the Realtime script resource that is deployed on instances in this fleet.</p>
pub fn script_id(mut self, input: impl Into<std::string::String>) -> Self {
self.script_id = Some(input.into());
self
}
pub fn set_script_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.script_id = input;
self
}
/// <p> The Amazon Resource Name (<a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">ARN</a>) associated with the GameLift script resource that is deployed on instances
/// in this fleet. In a GameLift script ARN, the resource ID matches the
/// <code>ScriptId</code> value.</p>
pub fn script_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.script_arn = Some(input.into());
self
}
pub fn set_script_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.script_arn = input;
self
}
/// <p>
/// <b>This parameter is no longer used.</b> Server launch paths
/// are now defined using the fleet's <a>RuntimeConfiguration</a> parameter.
/// Requests that use this parameter instead continue to be valid.</p>
pub fn server_launch_path(mut self, input: impl Into<std::string::String>) -> Self {
self.server_launch_path = Some(input.into());
self
}
pub fn set_server_launch_path(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.server_launch_path = input;
self
}
/// <p>
/// <b>This parameter is no longer used.</b> Server launch
/// parameters are now defined using the fleet's <a>RuntimeConfiguration</a>
/// parameter. Requests that use this parameter instead continue to be valid.</p>
pub fn server_launch_parameters(mut self, input: impl Into<std::string::String>) -> Self {
self.server_launch_parameters = Some(input.into());
self
}
pub fn set_server_launch_parameters(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.server_launch_parameters = input;
self
}
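        /// Appends a single value to the `log_paths` collection; use
        /// `set_log_paths` to replace the whole list at once.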
pub fn log_paths(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.log_paths.unwrap_or_default();
v.push(input.into());
self.log_paths = Some(v);
self
}
pub fn set_log_paths(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.log_paths = input;
self
}
/// <p>The type of game session protection to set on all new instances that are started in
/// the fleet.</p>
/// <ul>
/// <li>
/// <p>
/// <b>NoProtection</b> -- The game session can be
/// terminated during a scale-down event.</p>
/// </li>
/// <li>
/// <p>
/// <b>FullProtection</b> -- If the game session is in an
/// <code>ACTIVE</code> status, it cannot be terminated during a scale-down
/// event.</p>
/// </li>
/// </ul>
pub fn new_game_session_protection_policy(
mut self,
input: crate::model::ProtectionPolicy,
) -> Self {
self.new_game_session_protection_policy = Some(input);
self
}
pub fn set_new_game_session_protection_policy(
mut self,
input: std::option::Option<crate::model::ProtectionPolicy>,
) -> Self {
self.new_game_session_protection_policy = input;
self
}
/// <p>The operating system of the fleet's computing resources. A fleet's operating system is
/// determined by the OS of the build or script that is deployed on this fleet.</p>
pub fn operating_system(mut self, input: crate::model::OperatingSystem) -> Self {
self.operating_system = Some(input);
self
}
pub fn set_operating_system(
mut self,
input: std::option::Option<crate::model::OperatingSystem>,
) -> Self {
self.operating_system = input;
self
}
/// <p>The fleet policy that limits the number of game sessions an individual player can
/// create over a span of time.</p>
pub fn resource_creation_limit_policy(
mut self,
input: crate::model::ResourceCreationLimitPolicy,
) -> Self {
self.resource_creation_limit_policy = Some(input);
self
}
pub fn set_resource_creation_limit_policy(
mut self,
input: std::option::Option<crate::model::ResourceCreationLimitPolicy>,
) -> Self {
self.resource_creation_limit_policy = input;
self
}
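        /// Appends a single value to the `metric_groups` collection; use
        /// `set_metric_groups` to replace the whole list at once.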
pub fn metric_groups(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.metric_groups.unwrap_or_default();
v.push(input.into());
self.metric_groups = Some(v);
self
}
pub fn set_metric_groups(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.metric_groups = input;
self
}
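        /// Appends a single value to the `stopped_actions` collection; use
        /// `set_stopped_actions` to replace the whole list at once.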
pub fn stopped_actions(mut self, input: impl Into<crate::model::FleetAction>) -> Self {
let mut v = self.stopped_actions.unwrap_or_default();
v.push(input.into());
self.stopped_actions = Some(v);
self
}
pub fn set_stopped_actions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::FleetAction>>,
) -> Self {
self.stopped_actions = input;
self
}
/// <p>A unique identifier for an AWS IAM role that manages access to your AWS services.
/// With an instance role ARN set, any application that runs on an instance in this fleet can assume the role,
/// including install scripts, server processes, and daemons (background processes). Create a role or look up a role's
/// ARN by using the <a href="https://console.aws.amazon.com/iam/">IAM dashboard</a> in the AWS Management Console.
/// Learn more about using on-box credentials for your game servers at
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html">
/// Access external resources from a game server</a>.</p>
pub fn instance_role_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_role_arn = Some(input.into());
self
}
pub fn set_instance_role_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_role_arn = input;
self
}
/// <p>Indicates whether a TLS/SSL certificate was generated for the fleet. </p>
pub fn certificate_configuration(
mut self,
input: crate::model::CertificateConfiguration,
) -> Self {
self.certificate_configuration = Some(input);
self
}
pub fn set_certificate_configuration(
mut self,
input: std::option::Option<crate::model::CertificateConfiguration>,
) -> Self {
self.certificate_configuration = input;
self
}
/// Consumes the builder and constructs a [`FleetAttributes`](crate::model::FleetAttributes)
pub fn build(self) -> crate::model::FleetAttributes {
crate::model::FleetAttributes {
fleet_id: self.fleet_id,
fleet_arn: self.fleet_arn,
fleet_type: self.fleet_type,
instance_type: self.instance_type,
description: self.description,
name: self.name,
creation_time: self.creation_time,
termination_time: self.termination_time,
status: self.status,
build_id: self.build_id,
build_arn: self.build_arn,
script_id: self.script_id,
script_arn: self.script_arn,
server_launch_path: self.server_launch_path,
server_launch_parameters: self.server_launch_parameters,
log_paths: self.log_paths,
new_game_session_protection_policy: self.new_game_session_protection_policy,
operating_system: self.operating_system,
resource_creation_limit_policy: self.resource_creation_limit_policy,
metric_groups: self.metric_groups,
stopped_actions: self.stopped_actions,
instance_role_arn: self.instance_role_arn,
certificate_configuration: self.certificate_configuration,
}
}
}
}
impl FleetAttributes {
/// Creates a new builder-style object to manufacture [`FleetAttributes`](crate::model::FleetAttributes)
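    ///
    /// A minimal usage sketch; the field values are illustrative, not real
    /// resources:
    ///
    /// ```ignore
    /// let attrs = FleetAttributes::builder()
    ///     .fleet_id("fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912")
    ///     .name("my-fleet")
    ///     .fleet_type(FleetType::OnDemand)
    ///     .build();
    /// assert_eq!(attrs.name.as_deref(), Some("my-fleet"));
    /// ```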
pub fn builder() -> crate::model::fleet_attributes::Builder {
crate::model::fleet_attributes::Builder::default()
}
}
/// <p>Determines whether a TLS/SSL certificate is generated for a fleet. This feature must be
/// enabled when creating the fleet. All instances in a fleet share the same
/// certificate. The certificate can be retrieved by calling the
/// <a href="https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-serversdk.html">GameLift Server
/// SDK</a> operation <code>GetInstanceCertificate</code>. </p>
/// <p>A fleet's certificate configuration is part of <a>FleetAttributes</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CertificateConfiguration {
/// <p>Indicates whether a TLS/SSL certificate is generated for a fleet. </p>
/// <p>Valid values include: </p>
/// <ul>
/// <li>
/// <p>
/// <b>GENERATED</b> - Generate a TLS/SSL certificate
/// for this fleet.</p>
/// </li>
/// <li>
/// <p>
/// <b>DISABLED</b> - (default) Do not generate a
/// TLS/SSL certificate for this fleet. </p>
/// </li>
/// </ul>
/// <p> </p>
pub certificate_type: std::option::Option<crate::model::CertificateType>,
}
impl std::fmt::Debug for CertificateConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CertificateConfiguration");
formatter.field("certificate_type", &self.certificate_type);
formatter.finish()
}
}
/// See [`CertificateConfiguration`](crate::model::CertificateConfiguration)
pub mod certificate_configuration {
/// A builder for [`CertificateConfiguration`](crate::model::CertificateConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) certificate_type: std::option::Option<crate::model::CertificateType>,
}
impl Builder {
/// <p>Indicates whether a TLS/SSL certificate is generated for a fleet. </p>
/// <p>Valid values include: </p>
/// <ul>
/// <li>
/// <p>
/// <b>GENERATED</b> - Generate a TLS/SSL certificate
/// for this fleet.</p>
/// </li>
/// <li>
/// <p>
/// <b>DISABLED</b> - (default) Do not generate a
/// TLS/SSL certificate for this fleet. </p>
/// </li>
/// </ul>
/// <p> </p>
pub fn certificate_type(mut self, input: crate::model::CertificateType) -> Self {
self.certificate_type = Some(input);
self
}
pub fn set_certificate_type(
mut self,
input: std::option::Option<crate::model::CertificateType>,
) -> Self {
self.certificate_type = input;
self
}
/// Consumes the builder and constructs a [`CertificateConfiguration`](crate::model::CertificateConfiguration)
pub fn build(self) -> crate::model::CertificateConfiguration {
crate::model::CertificateConfiguration {
certificate_type: self.certificate_type,
}
}
}
}
impl CertificateConfiguration {
/// Creates a new builder-style object to manufacture [`CertificateConfiguration`](crate::model::CertificateConfiguration)
pub fn builder() -> crate::model::certificate_configuration::Builder {
crate::model::certificate_configuration::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum CertificateType {
Disabled,
Generated,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for CertificateType {
fn from(s: &str) -> Self {
match s {
"DISABLED" => CertificateType::Disabled,
"GENERATED" => CertificateType::Generated,
other => CertificateType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for CertificateType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(CertificateType::from(s))
}
}
impl CertificateType {
pub fn as_str(&self) -> &str {
match self {
CertificateType::Disabled => "DISABLED",
CertificateType::Generated => "GENERATED",
CertificateType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["DISABLED", "GENERATED"]
}
}
impl AsRef<str> for CertificateType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum FleetType {
OnDemand,
Spot,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for FleetType {
fn from(s: &str) -> Self {
match s {
"ON_DEMAND" => FleetType::OnDemand,
"SPOT" => FleetType::Spot,
other => FleetType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for FleetType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(FleetType::from(s))
}
}
impl FleetType {
pub fn as_str(&self) -> &str {
match self {
FleetType::OnDemand => "ON_DEMAND",
FleetType::Spot => "SPOT",
FleetType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["ON_DEMAND", "SPOT"]
}
}
impl AsRef<str> for FleetType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>The GameLift service limits for an EC2 instance type and current utilization. GameLift
/// allows AWS accounts a maximum number of instances, per instance type, per AWS Region or
/// location, for use with GameLift. You can request a limit increase for your account by
/// using the <b>Service limits</b> page in the GameLift
/// console.</p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>DescribeEC2InstanceLimits</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Ec2InstanceLimit {
/// <p>The name of an EC2 instance type. See <a href="http://aws.amazon.com/ec2/instance-types/">Amazon EC2 Instance Types</a> for detailed
/// descriptions. </p>
pub ec2_instance_type: std::option::Option<crate::model::Ec2InstanceType>,
/// <p>The number of instances for the specified type and location that are currently being
/// used by the AWS account. </p>
pub current_instances: std::option::Option<i32>,
/// <p>The number of instances that is allowed for the specified instance type and
/// location.</p>
pub instance_limit: std::option::Option<i32>,
/// <p>An AWS Region code, such as <code>us-west-2</code>. </p>
pub location: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for Ec2InstanceLimit {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Ec2InstanceLimit");
formatter.field("ec2_instance_type", &self.ec2_instance_type);
formatter.field("current_instances", &self.current_instances);
formatter.field("instance_limit", &self.instance_limit);
formatter.field("location", &self.location);
formatter.finish()
}
}
/// See [`Ec2InstanceLimit`](crate::model::Ec2InstanceLimit)
pub mod ec2_instance_limit {
/// A builder for [`Ec2InstanceLimit`](crate::model::Ec2InstanceLimit)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) ec2_instance_type: std::option::Option<crate::model::Ec2InstanceType>,
pub(crate) current_instances: std::option::Option<i32>,
pub(crate) instance_limit: std::option::Option<i32>,
pub(crate) location: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of an EC2 instance type. See <a href="http://aws.amazon.com/ec2/instance-types/">Amazon EC2 Instance Types</a> for detailed
/// descriptions. </p>
pub fn ec2_instance_type(mut self, input: crate::model::Ec2InstanceType) -> Self {
self.ec2_instance_type = Some(input);
self
}
pub fn set_ec2_instance_type(
mut self,
input: std::option::Option<crate::model::Ec2InstanceType>,
) -> Self {
self.ec2_instance_type = input;
self
}
/// <p>The number of instances for the specified type and location that are currently being
/// used by the AWS account. </p>
pub fn current_instances(mut self, input: i32) -> Self {
self.current_instances = Some(input);
self
}
pub fn set_current_instances(mut self, input: std::option::Option<i32>) -> Self {
self.current_instances = input;
self
}
/// <p>The number of instances that is allowed for the specified instance type and
/// location.</p>
pub fn instance_limit(mut self, input: i32) -> Self {
self.instance_limit = Some(input);
self
}
pub fn set_instance_limit(mut self, input: std::option::Option<i32>) -> Self {
self.instance_limit = input;
self
}
/// <p>An AWS Region code, such as <code>us-west-2</code>. </p>
pub fn location(mut self, input: impl Into<std::string::String>) -> Self {
self.location = Some(input.into());
self
}
pub fn set_location(mut self, input: std::option::Option<std::string::String>) -> Self {
self.location = input;
self
}
/// Consumes the builder and constructs a [`Ec2InstanceLimit`](crate::model::Ec2InstanceLimit)
pub fn build(self) -> crate::model::Ec2InstanceLimit {
crate::model::Ec2InstanceLimit {
ec2_instance_type: self.ec2_instance_type,
current_instances: self.current_instances,
instance_limit: self.instance_limit,
location: self.location,
}
}
}
}
impl Ec2InstanceLimit {
/// Creates a new builder-style object to manufacture [`Ec2InstanceLimit`](crate::model::Ec2InstanceLimit)
pub fn builder() -> crate::model::ec2_instance_limit::Builder {
crate::model::ec2_instance_limit::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum GameServerGroupDeleteOption {
ForceDelete,
Retain,
SafeDelete,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for GameServerGroupDeleteOption {
fn from(s: &str) -> Self {
match s {
"FORCE_DELETE" => GameServerGroupDeleteOption::ForceDelete,
"RETAIN" => GameServerGroupDeleteOption::Retain,
"SAFE_DELETE" => GameServerGroupDeleteOption::SafeDelete,
other => GameServerGroupDeleteOption::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for GameServerGroupDeleteOption {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(GameServerGroupDeleteOption::from(s))
}
}
impl GameServerGroupDeleteOption {
pub fn as_str(&self) -> &str {
match self {
GameServerGroupDeleteOption::ForceDelete => "FORCE_DELETE",
GameServerGroupDeleteOption::Retain => "RETAIN",
GameServerGroupDeleteOption::SafeDelete => "SAFE_DELETE",
GameServerGroupDeleteOption::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["FORCE_DELETE", "RETAIN", "SAFE_DELETE"]
}
}
impl AsRef<str> for GameServerGroupDeleteOption {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>
/// <b>This data type is used with the GameLift FleetIQ and game server groups.</b>
/// </p>
/// <p>Configuration settings for intelligent automatic scaling that uses target tracking.
/// These settings are used to add an Auto Scaling policy when creating the corresponding
/// Auto Scaling group with <a>CreateGameServerGroup</a>. After the Auto Scaling
/// group is created, all updates to Auto Scaling policies, including changing this policy
/// and adding or removing other policies, are done directly on the Auto Scaling group. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GameServerGroupAutoScalingPolicy {
/// <p>Length of time, in seconds, it takes for a new instance to start new game server
/// processes and register with GameLift FleetIQ. Specifying a warm-up time can be useful, particularly
/// with game servers that take a long time to start up, because it avoids prematurely
/// starting new instances. </p>
pub estimated_instance_warmup: std::option::Option<i32>,
/// <p>Settings for a target-based scaling policy applied to Auto Scaling group. These
/// settings are used to create a target-based policy that tracks the GameLift FleetIQ metric
/// <code>"PercentUtilizedGameServers"</code> and specifies a target value for the
/// metric. As player usage changes, the policy triggers to adjust the game server group
/// capacity so that the metric returns to the target value. </p>
pub target_tracking_configuration:
std::option::Option<crate::model::TargetTrackingConfiguration>,
}
impl std::fmt::Debug for GameServerGroupAutoScalingPolicy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GameServerGroupAutoScalingPolicy");
formatter.field("estimated_instance_warmup", &self.estimated_instance_warmup);
formatter.field(
"target_tracking_configuration",
&self.target_tracking_configuration,
);
formatter.finish()
}
}
/// See [`GameServerGroupAutoScalingPolicy`](crate::model::GameServerGroupAutoScalingPolicy)
pub mod game_server_group_auto_scaling_policy {
/// A builder for [`GameServerGroupAutoScalingPolicy`](crate::model::GameServerGroupAutoScalingPolicy)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) estimated_instance_warmup: std::option::Option<i32>,
pub(crate) target_tracking_configuration:
std::option::Option<crate::model::TargetTrackingConfiguration>,
}
impl Builder {
/// <p>Length of time, in seconds, it takes for a new instance to start new game server
/// processes and register with GameLift FleetIQ. Specifying a warm-up time can be useful, particularly
/// with game servers that take a long time to start up, because it avoids prematurely
/// starting new instances. </p>
pub fn estimated_instance_warmup(mut self, input: i32) -> Self {
self.estimated_instance_warmup = Some(input);
self
}
pub fn set_estimated_instance_warmup(mut self, input: std::option::Option<i32>) -> Self {
self.estimated_instance_warmup = input;
self
}
/// <p>Settings for a target-based scaling policy applied to Auto Scaling group. These
/// settings are used to create a target-based policy that tracks the GameLift FleetIQ metric
/// <code>"PercentUtilizedGameServers"</code> and specifies a target value for the
/// metric. As player usage changes, the policy triggers to adjust the game server group
/// capacity so that the metric returns to the target value. </p>
pub fn target_tracking_configuration(
mut self,
input: crate::model::TargetTrackingConfiguration,
) -> Self {
self.target_tracking_configuration = Some(input);
self
}
pub fn set_target_tracking_configuration(
mut self,
input: std::option::Option<crate::model::TargetTrackingConfiguration>,
) -> Self {
self.target_tracking_configuration = input;
self
}
/// Consumes the builder and constructs a [`GameServerGroupAutoScalingPolicy`](crate::model::GameServerGroupAutoScalingPolicy)
pub fn build(self) -> crate::model::GameServerGroupAutoScalingPolicy {
crate::model::GameServerGroupAutoScalingPolicy {
estimated_instance_warmup: self.estimated_instance_warmup,
target_tracking_configuration: self.target_tracking_configuration,
}
}
}
}
impl GameServerGroupAutoScalingPolicy {
/// Creates a new builder-style object to manufacture [`GameServerGroupAutoScalingPolicy`](crate::model::GameServerGroupAutoScalingPolicy)
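    ///
    /// A minimal sketch of assembling a policy; the warm-up and target values
    /// are illustrative:
    ///
    /// ```ignore
    /// let policy = GameServerGroupAutoScalingPolicy::builder()
    ///     .estimated_instance_warmup(60)
    ///     .target_tracking_configuration(
    ///         TargetTrackingConfiguration::builder().target_value(75.0).build(),
    ///     )
    ///     .build();
    /// ```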
pub fn builder() -> crate::model::game_server_group_auto_scaling_policy::Builder {
crate::model::game_server_group_auto_scaling_policy::Builder::default()
}
}
/// <p>
/// <b>This data type is used with the GameLift FleetIQ and game server groups.</b>
/// </p>
/// <p>Settings for a target-based scaling policy as part of a <a>GameServerGroupAutoScalingPolicy</a>.
/// These settings are used to
/// create a target-based policy that tracks the GameLift FleetIQ metric
/// <code>"PercentUtilizedGameServers"</code> and specifies a target value for the
/// metric. As player usage changes, the policy triggers to adjust the game server group
/// capacity so that the metric returns to the target value. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct TargetTrackingConfiguration {
/// <p>Desired value to use with a game server group target-based scaling policy. </p>
pub target_value: std::option::Option<f64>,
}
impl std::fmt::Debug for TargetTrackingConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("TargetTrackingConfiguration");
formatter.field("target_value", &self.target_value);
formatter.finish()
}
}
/// See [`TargetTrackingConfiguration`](crate::model::TargetTrackingConfiguration)
pub mod target_tracking_configuration {
/// A builder for [`TargetTrackingConfiguration`](crate::model::TargetTrackingConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) target_value: std::option::Option<f64>,
}
impl Builder {
/// <p>Desired value to use with a game server group target-based scaling policy. </p>
pub fn target_value(mut self, input: f64) -> Self {
self.target_value = Some(input);
self
}
pub fn set_target_value(mut self, input: std::option::Option<f64>) -> Self {
self.target_value = input;
self
}
/// Consumes the builder and constructs a [`TargetTrackingConfiguration`](crate::model::TargetTrackingConfiguration)
pub fn build(self) -> crate::model::TargetTrackingConfiguration {
crate::model::TargetTrackingConfiguration {
target_value: self.target_value,
}
}
}
}
impl TargetTrackingConfiguration {
/// Creates a new builder-style object to manufacture [`TargetTrackingConfiguration`](crate::model::TargetTrackingConfiguration)
pub fn builder() -> crate::model::target_tracking_configuration::Builder {
crate::model::target_tracking_configuration::Builder::default()
}
}
/// <p>
/// <b>This data type is used with the GameLift FleetIQ and game server groups.</b>
/// </p>
/// <p>An EC2 launch template that contains configuration settings and game server code to
/// be deployed to all instances in a game server group. The launch template is specified
/// when creating a new game server group with <a>CreateGameServerGroup</a>. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct LaunchTemplateSpecification {
/// <p>A unique identifier for an existing EC2 launch template.</p>
pub launch_template_id: std::option::Option<std::string::String>,
/// <p>A readable identifier for an existing EC2 launch template. </p>
pub launch_template_name: std::option::Option<std::string::String>,
/// <p>The version of the EC2 launch template to use. If no version is specified, the
/// default version will be used. With Amazon EC2, you can specify a default version for a
/// launch template. If none is set, the default is the first version created.</p>
pub version: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for LaunchTemplateSpecification {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("LaunchTemplateSpecification");
formatter.field("launch_template_id", &self.launch_template_id);
formatter.field("launch_template_name", &self.launch_template_name);
formatter.field("version", &self.version);
formatter.finish()
}
}
/// See [`LaunchTemplateSpecification`](crate::model::LaunchTemplateSpecification)
pub mod launch_template_specification {
/// A builder for [`LaunchTemplateSpecification`](crate::model::LaunchTemplateSpecification)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) launch_template_id: std::option::Option<std::string::String>,
pub(crate) launch_template_name: std::option::Option<std::string::String>,
pub(crate) version: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A unique identifier for an existing EC2 launch template.</p>
pub fn launch_template_id(mut self, input: impl Into<std::string::String>) -> Self {
self.launch_template_id = Some(input.into());
self
}
pub fn set_launch_template_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.launch_template_id = input;
self
}
/// <p>A readable identifier for an existing EC2 launch template. </p>
pub fn launch_template_name(mut self, input: impl Into<std::string::String>) -> Self {
self.launch_template_name = Some(input.into());
self
}
pub fn set_launch_template_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.launch_template_name = input;
self
}
/// <p>The version of the EC2 launch template to use. If no version is specified, the
/// default version will be used. With Amazon EC2, you can specify a default version for a
/// launch template. If none is set, the default is the first version created.</p>
pub fn version(mut self, input: impl Into<std::string::String>) -> Self {
self.version = Some(input.into());
self
}
pub fn set_version(mut self, input: std::option::Option<std::string::String>) -> Self {
self.version = input;
self
}
/// Consumes the builder and constructs a [`LaunchTemplateSpecification`](crate::model::LaunchTemplateSpecification)
pub fn build(self) -> crate::model::LaunchTemplateSpecification {
crate::model::LaunchTemplateSpecification {
launch_template_id: self.launch_template_id,
launch_template_name: self.launch_template_name,
version: self.version,
}
}
}
}
impl LaunchTemplateSpecification {
/// Creates a new builder-style object to manufacture [`LaunchTemplateSpecification`](crate::model::LaunchTemplateSpecification)
pub fn builder() -> crate::model::launch_template_specification::Builder {
crate::model::launch_template_specification::Builder::default()
}
}
/// <p>A remote location where a multi-location fleet can deploy EC2 instances for game
/// hosting. </p>
/// <p>
/// <b>Related actions</b>
/// </p>
/// <p>
/// <a>CreateFleet</a>
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct LocationConfiguration {
/// <p>An AWS Region code, such as <code>us-west-2</code>. </p>
pub location: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for LocationConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("LocationConfiguration");
formatter.field("location", &self.location);
formatter.finish()
}
}
/// See [`LocationConfiguration`](crate::model::LocationConfiguration)
pub mod location_configuration {
/// A builder for [`LocationConfiguration`](crate::model::LocationConfiguration)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) location: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>An AWS Region code, such as <code>us-west-2</code>. </p>
pub fn location(mut self, input: impl Into<std::string::String>) -> Self {
self.location = Some(input.into());
self
}
pub fn set_location(mut self, input: std::option::Option<std::string::String>) -> Self {
self.location = input;
self
}
/// Consumes the builder and constructs a [`LocationConfiguration`](crate::model::LocationConfiguration)
pub fn build(self) -> crate::model::LocationConfiguration {
crate::model::LocationConfiguration {
location: self.location,
}
}
}
}
impl LocationConfiguration {
/// Creates a new builder-style object to manufacture [`LocationConfiguration`](crate::model::LocationConfiguration)
pub fn builder() -> crate::model::location_configuration::Builder {
crate::model::location_configuration::Builder::default()
}
}
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum AcceptanceType {
Accept,
Reject,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for AcceptanceType {
fn from(s: &str) -> Self {
match s {
"ACCEPT" => AcceptanceType::Accept,
"REJECT" => AcceptanceType::Reject,
other => AcceptanceType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for AcceptanceType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(AcceptanceType::from(s))
}
}
impl AcceptanceType {
pub fn as_str(&self) -> &str {
match self {
AcceptanceType::Accept => "ACCEPT",
AcceptanceType::Reject => "REJECT",
AcceptanceType::Unknown(s) => s.as_ref(),
}
}
pub fn values() -> &'static [&'static str] {
&["ACCEPT", "REJECT"]
}
}
impl AsRef<str> for AcceptanceType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
| 44.773586 | 426 | 0.621786 |
d65578ff3e87d8eeff056e8bd59cb80d7cab85a9
| 3,685 |
//! Defines aggregation operations over a taxon tree.
pub mod lineage;
pub mod rank;
use std::collections::HashMap;
use crate::taxon;
use crate::taxon::TaxonId;
/// Allows aggregating over a taxon tree.
pub trait Aggregator {
/// Aggregates a set of scored taxons into a resulting taxon id.
fn aggregate(&self, taxons: &HashMap<TaxonId, f32>) -> Result<TaxonId>;
/// Aggregates a list of taxons into a resulting taxon id.
fn counting_aggregate(&self, taxons: &[TaxonId]) -> Result<TaxonId> {
let taxons = taxons.iter().map(|&t| (t, 1.0));
self.aggregate(&count(taxons))
}
}
/// Sums the scores per taxon over an iterator of scored taxons. With a score
/// of 1.0 per item, this yields how many times each taxon occurs.
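///
/// A short sketch (with scores of 1.0 this is plain occurrence counting):
///
/// ```ignore
/// let counts = count(vec![(1, 1.0), (2, 1.0), (1, 1.0)].into_iter());
/// assert_eq!(counts[&1], 2.0);
/// assert_eq!(counts[&2], 1.0);
/// ```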
pub fn count<T>(taxons: T) -> HashMap<TaxonId, f32>
where
T: Iterator<Item = (TaxonId, f32)>,
{
let mut counts = HashMap::new();
for (taxon, count) in taxons {
*counts.entry(taxon).or_insert(0.0) += count;
}
counts
}
/// Removes from a frequency table any taxon whose frequency is below the given lower bound.
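///
/// A short sketch of thresholding the output of `count`:
///
/// ```ignore
/// let frequent = filter(count(vec![(1, 1.0), (1, 1.0), (2, 1.0)].into_iter()), 2.0);
/// assert!(frequent.contains_key(&1));
/// assert!(!frequent.contains_key(&2));
/// ```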
pub fn filter(freq_table: HashMap<TaxonId, f32>, lower_bound: f32) -> HashMap<TaxonId, f32> {
freq_table
.into_iter()
.filter(|&(_, freq)| freq >= lower_bound)
.collect()
}
error_chain! {
links {
Taxon(taxon::Error, taxon::ErrorKind) #[doc = "Taxon"];
}
errors {
/// Aggregation called on an empty list
EmptyInput {
description("Aggregration called on an empty list")
display("Aggregration called on an empty list")
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::fixtures;
use crate::rmq;
use crate::taxon::TaxonList;
use crate::tree;
fn aggregators(by_id: &TaxonList) -> Vec<Box<dyn Aggregator>> {
vec![
Box::new(rmq::lca::LCACalculator::new(fixtures::tree())),
Box::new(rmq::rtl::RTLCalculator::new(fixtures::ROOT, by_id)),
Box::new(rmq::mix::MixCalculator::new(fixtures::tree(), 0.0)),
Box::new(rmq::mix::MixCalculator::new(fixtures::tree(), 1.0)),
Box::new(rmq::mix::MixCalculator::new(fixtures::tree(), 0.5)),
Box::new(tree::lca::LCACalculator::new(fixtures::ROOT, by_id)),
Box::new(tree::mix::MixCalculator::new(fixtures::ROOT, by_id, 0.0)),
Box::new(tree::mix::MixCalculator::new(fixtures::ROOT, by_id, 1.0)),
Box::new(tree::mix::MixCalculator::new(fixtures::ROOT, by_id, 0.5)),
]
}
#[test]
fn test_empty_query() {
for aggregator in aggregators(&fixtures::by_id()) {
assert_matches!(
*aggregator
.counting_aggregate(&Vec::new())
.unwrap_err()
.kind(),
ErrorKind::EmptyInput
);
}
}
#[test]
fn test_singleton_is_singleton() {
for aggregator in aggregators(&fixtures::by_id()) {
for taxon in fixtures::taxon_list() {
assert_matches!(aggregator.counting_aggregate(&vec![taxon.id]), Ok(tid) if tid == taxon.id);
}
}
}
#[test]
fn test_invalid_taxa() {
for aggregator in aggregators(&fixtures::by_id()) {
assert_matches!(
*aggregator.counting_aggregate(&vec![5]).unwrap_err().kind(),
ErrorKind::Taxon(taxon::ErrorKind::UnknownTaxon(5))
);
assert_matches!(
*aggregator
.counting_aggregate(&vec![1, 2, 5, 1])
.unwrap_err()
.kind(),
ErrorKind::Taxon(taxon::ErrorKind::UnknownTaxon(5))
);
}
}
}
| 31.495726 | 108 | 0.56635 |
89c84e1f06e2cefb1c87f77c4e0fc83477054277
| 1,467 |
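/// Builds a three-level flex tree (a 100x100 root containing a child with
/// `justify_content: Center`, which wraps a growing node around a 20x20 leaf)
/// and runs sprawl's layout computation over it.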
pub fn compute() {
let mut sprawl = sprawl::Sprawl::new();
let node000 = sprawl
.new_node(
sprawl::style::Style {
size: sprawl::geometry::Size {
width: sprawl::style::Dimension::Points(20f32),
height: sprawl::style::Dimension::Points(20f32),
..Default::default()
},
..Default::default()
},
&[],
)
.unwrap();
let node00 = sprawl
.new_node(sprawl::style::Style { flex_grow: 1f32, flex_shrink: 1f32, ..Default::default() }, &[node000])
.unwrap();
let node0 = sprawl
.new_node(
sprawl::style::Style {
justify_content: sprawl::style::JustifyContent::Center,
flex_grow: 0f32,
flex_shrink: 1f32,
..Default::default()
},
&[node00],
)
.unwrap();
let node = sprawl
.new_node(
sprawl::style::Style {
size: sprawl::geometry::Size {
width: sprawl::style::Dimension::Points(100f32),
height: sprawl::style::Dimension::Points(100f32),
..Default::default()
},
..Default::default()
},
&[node0],
)
.unwrap();
sprawl.compute_layout(node, sprawl::geometry::Size::undefined()).unwrap();
}
| 32.6 | 112 | 0.456714 |
72868edef71b9e5ce2db5129edc04f4b1bb09af4
| 20,322 |
//! Hubcaps provides a set of building blocks for interacting with the Github API
//!
//! # Examples
//!
//! Typical use will require instantiation of a Github client, which requires
//! a user agent string and a set of `hubcaps::Credentials`.
//!
//! ```no_run
//! extern crate hubcaps;
//! extern crate hyper;
//!
//! use hubcaps::{Credentials, Github};
//!
//! fn main() {
//! let github = Github::new(
//! String::from("user-agent-name"),
//! Credentials::Token(
//! String::from("personal-access-token")
//! ),
//! );
//! }
//! ```
//!
//! Github enterprise users will want to create a client with the
//! [Github#host](struct.Github.html#method.host) method.
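//!
//! A sketch for a Github Enterprise installation; the host URL is
//! illustrative, and the argument order of host, agent, then credentials is
//! assumed to mirror `new` (check the method docs):
//!
//! ```ignore
//! use hubcaps::{Credentials, Github};
//!
//! let github = Github::host(
//!     "https://github.example.com/api/v3",
//!     "user-agent-name",
//!     Credentials::Token(String::from("personal-access-token")),
//! );
//! ```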
//!
//! Access to various services are provided via methods on instances of the `Github` type.
//!
//! The convention for executing operations typically looks like
//! `github.repo(.., ..).service().operation(OperationOptions)` where operation may be `create`,
//! `delete`, etc.
//!
//! Services and their types are packaged under their own module namespace.
//! A service interface provides access to operations, and operations may accept options types
//! that define the various parameters available for the operation. Most operation option
//! types expose `builder()` methods for a builder-oriented style of constructing options.
//!
//! ## Entity listings
//!
//! Many of Github's APIs return a collection of entities with a common interface for supporting pagination.
//! Hubcaps supports two types of interfaces for working with listings. `list(...)` interfaces return the first
//! (often sufficient) page of entities. Alternatively, for listings that require > 30 items you may wish to
//! use the `iter(..)` variant, which returns a `futures::Stream` over all entities in a paginated set.
//!
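//! For instance, a paginated listing can be drained into a single collection roughly as
//! follows (a sketch only: `some_listing` and `options` are placeholders, and the concrete
//! listing interfaces, method names, and option types live in the individual service modules):
//!
//! ```ignore
//! use futures::{Future, Stream};
//!
//! // `iter(..)`-style listings yield a `hubcaps::Stream` of entities
//! let all_entities = some_listing.iter(&options).collect().wait()?;
//! ```
//!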
//! # Errors
//!
//! Operations typically result in a `hubcaps::Future` with an error type pinned to
//! [hubcaps::Error](errors/struct.Error.html).
//!
//! ## Rate Limiting
//!
//! A special note should be taken when accounting for Github's
//! [API Rate Limiting](https://developer.github.com/v3/rate_limit/).
//! The special case
//! [hubcaps::ErrorKind::RateLimit](errors/enum.ErrorKind.html#variant.RateLimit)
//! will be returned from API operations when the rate limit
//! associated with the credentials has been exhausted. This variant includes a reset
//! `Duration` to wait before making further requests.
//!
//! This crate uses the `log` crate's debug log interface to log x-rate-limit
//! headers received from Github.
//! If you are attempting to test your access patterns against
//! Github's rate limits, enable debug logging and look for "x-rate-limit"
//! log patterns sourced from this crate.
//!
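//! For example, exhaustion can be detected along these lines (a sketch that relies on the
//! error-chain tuple layout used elsewhere in this crate; `reset` is the wait `Duration`
//! mentioned above):
//!
//! ```no_run
//! use hubcaps::{Error, ErrorKind};
//!
//! fn handle(err: Error) {
//!     match err {
//!         Error(ErrorKind::RateLimit { reset }, _) => {
//!             println!("rate limited, retry in {} seconds", reset.as_secs())
//!         }
//!         other => println!("err {}", other),
//!     }
//! }
//! ```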
#![allow(missing_docs)] // todo: make this a deny eventually
#[macro_use]
extern crate error_chain;
extern crate futures;
extern crate http;
extern crate hyper;
#[cfg(feature = "tls")]
extern crate hyper_tls;
extern crate hyperx;
#[macro_use]
extern crate log;
extern crate mime;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate url;
use std::fmt;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use futures::{future, stream, Future as StdFuture, IntoFuture, Stream as StdStream};
use hyper::client::connect::Connect;
use hyper::client::HttpConnector;
use hyper::header::{ACCEPT, AUTHORIZATION, LINK, LOCATION, USER_AGENT};
use hyper::{Body, Client, Method, Request, StatusCode, Uri};
#[cfg(feature = "tls")]
use hyper_tls::HttpsConnector;
use hyperx::header::{qitem, Link, RelationType};
use mime::Mime;
use serde::de::DeserializeOwned;
use url::Url;
#[macro_use]
mod macros; // expose json! macro to child modules
pub mod activity;
pub mod branches;
pub mod comments;
pub mod deployments;
pub mod errors;
pub mod gists;
pub mod git;
pub mod hooks;
pub mod issues;
pub mod keys;
pub mod labels;
pub mod notifications;
pub mod organizations;
pub mod pull_commits;
pub mod pulls;
pub mod rate_limit;
pub mod releases;
pub mod repositories;
pub mod review_comments;
pub mod search;
pub mod stars;
pub mod statuses;
pub mod teams;
pub mod traffic;
pub mod users;
pub use errors::{Error, ErrorKind, Result};
use activity::Activity;
use gists::{Gists, UserGists};
use organizations::{Organization, Organizations, UserOrganizations};
use rate_limit::RateLimit;
use repositories::{OrganizationRepositories, Repositories, Repository, UserRepositories};
use search::Search;
use users::Users;
const DEFAULT_HOST: &str = "https://api.github.com";
/// A type alias for `Futures` that may return `hubcaps::Errors`
pub type Future<T> = Box<StdFuture<Item = T, Error = Error> + Send>;
/// A type alias for `Streams` that may result in `hubcaps::Errors`
pub type Stream<T> = Box<StdStream<Item = T, Error = Error> + Send>;
const X_GITHUB_REQUEST_ID: &str = "x-github-request-id";
const X_RATELIMIT_LIMIT: &str = "x-ratelimit-limit";
const X_RATELIMIT_REMAINING: &str = "x-ratelimit-remaining";
const X_RATELIMIT_RESET: &str = "x-ratelimit-reset";
/// Github defined Media types
/// See [this doc](https://developer.github.com/v3/media/) for more information
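/// For example (a sketch using the preview codename for the reactions API):
///
/// ```no_run
/// # extern crate mime;
/// # extern crate hubcaps;
/// use hubcaps::MediaType;
///
/// let media: mime::Mime = MediaType::Preview("squirrel-girl").into();
/// assert_eq!(media.to_string(), "application/vnd.github.squirrel-girl-preview+json");
/// ```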
#[derive(Clone, Copy)]
pub enum MediaType {
/// Return json (the default)
Json,
/// Return json in preview form
Preview(&'static str),
}
impl Default for MediaType {
fn default() -> MediaType {
MediaType::Json
}
}
impl From<MediaType> for Mime {
fn from(media: MediaType) -> Mime {
match media {
MediaType::Json => "application/vnd.github.v3+json".parse().unwrap(),
MediaType::Preview(codename) => {
format!("application/vnd.github.{}-preview+json", codename)
.parse()
.unwrap_or_else(|_| panic!("could not parse media type for preview {}", codename))
}
}
}
}
/// enum representation of Github list sorting options
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SortDirection {
/// Sort in ascending order (the default)
Asc,
/// Sort in descending order
Desc,
}
impl fmt::Display for SortDirection {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
SortDirection::Asc => "asc",
SortDirection::Desc => "desc",
}.fmt(f)
}
}
impl Default for SortDirection {
fn default() -> SortDirection {
SortDirection::Asc
}
}
/// Various forms of authentication credentials supported by Github
#[derive(Debug, PartialEq, Clone)]
pub enum Credentials {
/// Oauth token string
/// https://developer.github.com/v3/#oauth2-token-sent-in-a-header
Token(String),
/// Oauth client id and secret
/// https://developer.github.com/v3/#oauth2-keysecret
Client(String, String),
}
/// Entry point interface for interacting with Github API
#[derive(Clone, Debug)]
pub struct Github<C>
where
C: Clone + Connect + 'static,
{
host: String,
agent: String,
client: Client<C>,
credentials: Option<Credentials>,
}
#[cfg(feature = "tls")]
impl Github<HttpsConnector<HttpConnector>> {
pub fn new<A, C>(agent: A, credentials: C) -> Self
where
A: Into<String>,
C: Into<Option<Credentials>>,
{
Self::host(DEFAULT_HOST, agent, credentials)
}
pub fn host<H, A, C>(host: H, agent: A, credentials: C) -> Self
where
H: Into<String>,
A: Into<String>,
C: Into<Option<Credentials>>,
{
let connector = HttpsConnector::new(4).unwrap();
let http = Client::builder()
.keep_alive(true)
.build(connector);
Self::custom(host, agent, credentials, http)
}
}
impl<C> Github<C>
where
C: Clone + Connect + 'static,
{
pub fn custom<H, A, CR>(host: H, agent: A, credentials: CR, http: Client<C>) -> Self
where
H: Into<String>,
A: Into<String>,
CR: Into<Option<Credentials>>,
{
Self {
host: host.into(),
agent: agent.into(),
client: http,
credentials: credentials.into(),
}
}
pub fn rate_limit(&self) -> RateLimit<C> {
RateLimit::new(self.clone())
}
/// Return a reference to user activity
pub fn activity(&self) -> Activity<C> {
Activity::new(self.clone())
}
/// Return a reference to a Github repository
pub fn repo<O, R>(&self, owner: O, repo: R) -> Repository<C>
where
O: Into<String>,
R: Into<String>,
{
Repository::new(self.clone(), owner, repo)
}
/// Return a reference to the collection of repositories owned by and
/// associated with an owner
pub fn user_repos<S>(&self, owner: S) -> UserRepositories<C>
where
S: Into<String>,
{
UserRepositories::new(self.clone(), owner)
}
/// Return a reference to the collection of repositories owned by the user
/// associated with the current authentication credentials
pub fn repos(&self) -> Repositories<C> {
Repositories::new(self.clone())
}
pub fn org<O>(&self, org: O) -> Organization<C>
where
O: Into<String>,
{
Organization::new(self.clone(), org)
}
/// Return a reference to the collection of organizations that the user
/// associated with the current authentication credentials is in
pub fn orgs(&self) -> Organizations<C> {
Organizations::new(self.clone())
}
/// Return a reference to an interface that provides access
/// to user information.
pub fn users(&self) -> Users<C> {
Users::new(self.clone())
}
/// Return a reference to the collection of organizations a user
/// is publicly associated with
pub fn user_orgs<U>(&self, user: U) -> UserOrganizations<C>
where
U: Into<String>,
{
UserOrganizations::new(self.clone(), user)
}
/// Return a reference to an interface that provides access to a user's gists
pub fn user_gists<O>(&self, owner: O) -> UserGists<C>
where
O: Into<String>,
{
UserGists::new(self.clone(), owner)
}
/// Return a reference to an interface that provides access to the
/// gists belonging to the owner of the token used to configure this client
pub fn gists(&self) -> Gists<C> {
Gists::new(self.clone())
}
/// Return a reference to an interface that provides access to search operations
pub fn search(&self) -> Search<C> {
Search::new(self.clone())
}
/// Return a reference to the collection of repositories owned by and
/// associated with an organization
pub fn org_repos<O>(&self, org: O) -> OrganizationRepositories<C>
where
O: Into<String>,
{
OrganizationRepositories::new(self.clone(), org)
}
fn request<Out>(
&self,
method: Method,
uri: &str,
body: Option<Vec<u8>>,
media_type: MediaType,
) -> Future<(Option<Link>, Out)>
where
Out: DeserializeOwned + 'static + Send,
{
let url = if let Some(Credentials::Client(ref id, ref secret)) = self.credentials {
let mut parsed = Url::parse(&uri).unwrap();
parsed
.query_pairs_mut()
.append_pair("client_id", id)
.append_pair("client_secret", secret);
parsed.to_string().parse::<Uri>().into_future()
} else {
uri.parse().into_future()
};
let instance = self.clone();
let body2 = body.clone();
let method2 = method.clone();
let response = url.map_err(Error::from).and_then(move |url| {
let mut req = Request::builder();
req.method(method2).uri(url);
req.header(USER_AGENT, &*instance.agent);
req.header(ACCEPT, &*format!("{}", qitem::<Mime>(From::from(media_type))));
if let Some(Credentials::Token(ref token)) = instance.credentials {
req.header(AUTHORIZATION, &*format!("token {}", token));
}
let req = match body2 {
Some(body) => req.body(Body::from(body)),
None => req.body(Body::empty()),
};
req.map_err(Error::from)
.into_future()
.and_then(move |req| instance.client.request(req).map_err(Error::from))
});
let instance2 = self.clone();
Box::new(response.and_then(move |response| {
if let Some(value) = response.headers().get(X_GITHUB_REQUEST_ID) {
debug!("x-github-request-id: {:?}", value)
}
if let Some(value) = response.headers().get(X_RATELIMIT_LIMIT) {
debug!("x-rate-limit-limit: {:?}", value)
}
let remaining = response
.headers()
.get(X_RATELIMIT_REMAINING)
.and_then(|val| val.to_str().ok())
.and_then(|val| val.parse::<u32>().ok());
let reset = response
.headers()
.get(X_RATELIMIT_RESET)
.and_then(|val| val.to_str().ok())
.and_then(|val| val.parse::<u32>().ok());
if let Some(value) = remaining {
debug!("x-rate-limit-remaining: {}", value)
}
if let Some(value) = reset {
debug!("x-rate-limit-reset: {}", value)
}
let status = response.status();
// handle redirect common with renamed repos
if StatusCode::MOVED_PERMANENTLY == status || StatusCode::TEMPORARY_REDIRECT == status {
let location = response.headers().get(LOCATION)
.and_then(|l| l.to_str().ok());
if let Some(location) = location {
debug!("redirect location {:?}", location);
return instance2.request(method, &location.to_string(), body, media_type);
}
}
let link = response
.headers()
.get(LINK)
.and_then(|l| l.to_str().ok())
.and_then(|l| l.parse().ok());
Box::new(response.into_body().concat2().map_err(Error::from).and_then(
move |response_body| {
if status.is_success() {
debug!(
"response payload {}",
String::from_utf8_lossy(&response_body)
);
serde_json::from_slice::<Out>(&response_body)
.map(|out| (link, out))
.map_err(|error| ErrorKind::Codec(error).into())
} else {
let error = match (remaining, reset) {
(Some(remaining), Some(reset)) if remaining == 0 => {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
ErrorKind::RateLimit {
reset: Duration::from_secs(u64::from(reset) - now),
}
}
_ => ErrorKind::Fault {
code: status,
error: serde_json::from_slice(&response_body)?,
},
};
Err(error.into())
}
},
))
}))
}
fn request_entity<D>(
&self,
method: Method,
uri: &str,
body: Option<Vec<u8>>,
media_type: MediaType,
) -> Future<D>
where
D: DeserializeOwned + 'static + Send,
{
Box::new(
self.request(method, uri, body, media_type)
.map(|(_, entity)| entity),
)
}
fn get<D>(&self, uri: &str) -> Future<D>
where
D: DeserializeOwned + 'static + Send,
{
self.get_media(uri, MediaType::Json)
}
fn get_media<D>(&self, uri: &str, media: MediaType) -> Future<D>
where
D: DeserializeOwned + 'static + Send,
{
self.request_entity(Method::GET, &(self.host.clone() + uri), None, media)
}
fn get_pages<D>(&self, uri: &str) -> Future<(Option<Link>, D)>
where
D: DeserializeOwned + 'static + Send,
{
self.request(Method::GET, &(self.host.clone() + uri), None, MediaType::Json)
}
fn delete(&self, uri: &str) -> Future<()> {
Box::new(self.request_entity::<()>(
Method::DELETE,
&(self.host.clone() + uri),
None,
MediaType::Json,
).or_else(|err| match err {
Error(ErrorKind::Codec(_), _) => Ok(()),
otherwise => Err(otherwise),
}))
}
fn post<D>(&self, uri: &str, message: Vec<u8>) -> Future<D>
where
D: DeserializeOwned + 'static + Send,
{
self.request_entity(
Method::POST,
&(self.host.clone() + uri),
Some(message),
MediaType::Json,
)
}
fn patch_no_response(&self, uri: &str, message: Vec<u8>) -> Future<()> {
Box::new(self.patch(uri, message).or_else(|err| match err {
Error(ErrorKind::Codec(_), _) => Ok(()),
err => Err(err),
}))
}
fn patch_media<D>(&self, uri: &str, message: Vec<u8>, media: MediaType) -> Future<D>
where
D: DeserializeOwned + 'static + Send,
{
self.request_entity(Method::PATCH, &(self.host.clone() + uri), Some(message), media)
}
fn patch<D>(&self, uri: &str, message: Vec<u8>) -> Future<D>
where
D: DeserializeOwned + 'static + Send,
{
self.patch_media(uri, message, MediaType::Json)
}
fn put_no_response(&self, uri: &str, message: Vec<u8>) -> Future<()> {
Box::new(self.put(uri, message).or_else(|err| match err {
Error(ErrorKind::Codec(_), _) => Ok(()),
err => Err(err),
}))
}
fn put<D>(&self, uri: &str, message: Vec<u8>) -> Future<D>
where
D: DeserializeOwned + 'static + Send,
{
self.request_entity(
Method::PUT,
&(self.host.clone() + uri),
Some(message),
MediaType::Json,
)
}
}
fn next_link(l: &Link) -> Option<String> {
l.values()
.into_iter()
.find(|v| v.rel().unwrap_or(&[]).get(0) == Some(&RelationType::Next))
.map(|v| v.link().to_owned())
}
/// "unfold" paginated results of a list of github entities
fn unfold<C, D, I>(
github: Github<C>,
first: Future<(Option<Link>, D)>,
into_items: fn(D) -> Vec<I>,
) -> Stream<I>
where
D: DeserializeOwned + 'static + Send,
I: 'static + Send,
C: Clone + Connect + 'static,
{
Box::new(
first
.map(move |(link, payload)| {
let mut items = into_items(payload);
items.reverse();
stream::unfold::<_, _, Future<(I, (Option<Link>, Vec<I>))>, _>(
(link, items),
move |(link, mut items)| match items.pop() {
Some(item) => Some(Box::new(future::ok((item, (link, items))))),
_ => link.and_then(|l| next_link(&l)).map(|url| {
let url = Url::parse(&url).unwrap();
let uri = [url.path(), url.query().unwrap_or_default()].join("?");
Box::new(github.get_pages(uri.as_ref()).map(move |(link, payload)| {
let mut items = into_items(payload);
items.reverse();
(items.remove(0), (link, items))
})) as Future<(I, (Option<Link>, Vec<I>))>
}),
},
)
})
.into_stream()
.flatten(),
)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn default_sort_direction() {
let default: SortDirection = Default::default();
assert_eq!(default, SortDirection::Asc)
}
}
| 32.206022 | 111 | 0.565102 |
115262733faf0f65c798e68e0bbd7ff22a986566
| 4,630 |
use std::ops::{Add, Sub, Mul, AddAssign, SubAssign, MulAssign, Div, Neg};
use std::cmp::{Ord, Eq, PartialEq, PartialOrd, Ordering};
use std::marker::PhantomData;
use std::fmt;
use std::iter::Sum;
#[derive(Debug)]
pub struct Length<U> {
value: f64,
_m: PhantomData<U>
}
impl<U> Length<U> {
pub fn zero() -> Self {
Length { value: 0.0, _m: PhantomData }
}
pub fn is_zero(&self) -> bool {
self.value == 0.0
}
    pub fn new(value: impl Into<f64>, _unit: U) -> Self {
Length { value: value.into(), _m: PhantomData }
}
}
impl<U> Clone for Length<U> {
fn clone(&self) -> Self {
Length { value: self.value, _m: PhantomData }
}
}
impl<U> Copy for Length<U> {}
impl<U> Div<U> for Length<U> {
type Output = f64;
fn div(self, rhs: U) -> f64 {
self.value
}
}
impl<U> Default for Length<U> {
fn default() -> Self {
Length { value: 0.0, _m: PhantomData }
}
}
impl<U> PartialEq for Length<U> {
fn eq(&self, other: &Self) -> bool {
self.value.eq(&other.value)
}
}
impl<U> PartialOrd for Length<U> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.value.partial_cmp(&other.value)
}
}
impl<U> Eq for Length<U> {}
impl<U> Ord for Length<U> {
fn cmp(&self, rhs: &Self) -> Ordering {
self.value.partial_cmp(&rhs.value).unwrap()
}
}
impl<U> Add for Length<U> {
type Output = Length<U>;
fn add(self, rhs: Length<U>) -> Length<U> {
Length { value: self.value + rhs.value, _m: PhantomData }
}
}
impl<U> Sub for Length<U> {
type Output = Length<U>;
fn sub(self, rhs: Length<U>) -> Length<U> {
Length { value: self.value - rhs.value, _m: PhantomData }
}
}
impl<U, T: Into<f64>> Mul<T> for Length<U> {
type Output = Length<U>;
fn mul(self, rhs: T) -> Length<U> {
Length { value: self.value * rhs.into(), _m: PhantomData }
}
}
impl<U> AddAssign for Length<U> {
fn add_assign(&mut self, rhs: Length<U>) {
self.value += rhs.value;
}
}
impl<U> SubAssign for Length<U> {
fn sub_assign(&mut self, rhs: Length<U>) {
self.value -= rhs.value;
}
}
impl<U> Neg for Length<U> {
type Output = Self;
fn neg(self) -> Self {
Length { value: -self.value, _m: PhantomData }
}
}
impl<U> Sum for Length<U> {
fn sum<I>(iter: I) -> Self where I: Iterator<Item = Self> {
Length { value: iter.map(|l| l.value).sum(), _m: PhantomData }
}
}
pub struct Font;
pub struct Px;
pub struct Em;
macro_rules! impl_length {
($($unit:ty),*) => {
$(
impl fmt::Debug for Length<$unit> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, concat!("Length {{ value: {:?}, unit: ", stringify!($unit), " }}"), self.value)
}
}
impl fmt::Display for Length<$unit> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, concat!("{} ", stringify!($unit)), self.value)
}
}
)*
};
}
impl_length!(Font, Em, Px);
/// A conversion factor between units: multiplying a `Length<U>` by a `Scale<T, U>`
/// yields a `Length<T>`, i.e. the factor is expressed in `T` per `U`.
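///
/// A sketch of a conversion, using the units defined below (imports omitted):
///
/// ```ignore
/// // 16 px per em
/// let px_per_em = Scale::new(16.0, Px, Em);
/// let width: Length<Px> = Length::new(2.0, Em) * px_per_em;
/// assert_eq!(width, Length::new(32.0, Px));
/// ```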
pub struct Scale<T, U> {
pub factor: f64,
_t: PhantomData<T>,
_u: PhantomData<U>,
}
impl<T, U> Scale<T, U> {
pub fn new(factor: f64, _t: T, _u: U) -> Self {
Scale { factor, _t: PhantomData, _u: PhantomData }
}
pub fn inv(self) -> Scale<U, T> {
Scale { factor: 1.0 / self.factor, _t: PhantomData, _u: PhantomData }
}
}
impl<T, U> Clone for Scale<T, U> {
fn clone(&self) -> Self {
Scale { factor: self.factor, _t: PhantomData, _u: PhantomData }
}
}
impl<T, U> Copy for Scale<T, U> {}
impl<T, U> Mul<Scale<T, U>> for Length<U> {
type Output = Length<T>;
fn mul(self, rhs: Scale<T, U>) -> Length<T> {
Length { value: self.value * rhs.factor, _m: PhantomData }
}
}
impl<T, U> Div<Scale<T, U>> for Length<T> {
type Output = Length<U>;
fn div(self, rhs: Scale<T, U>) -> Length<U> {
Length { value: self.value / rhs.factor, _m: PhantomData }
}
}
impl<T, U, V> Mul<Scale<U, V>> for Scale<T, U> {
type Output = Scale<T, V>;
fn mul(self, rhs: Scale<U, V>) -> Scale<T, V> {
Scale { factor: self.factor * rhs.factor, _t: PhantomData, _u: PhantomData }
}
}
impl<T, U, V> Div<Scale<V, U>> for Scale<T, U> {
type Output = Scale<T, V>;
fn div(self, rhs: Scale<V, U>) -> Scale<T, V> {
Scale { factor: self.factor / rhs.factor, _t: PhantomData, _u: PhantomData }
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Unit {
Em(f64),
Px(f64)
}
| 26.457143 | 109 | 0.547516 |
91f357815aa0b5e50eccd9e46fdec0dd5ff4d64e
| 1,312 |
// Copyright Materialize, Inc. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use std::collections::HashMap;
use anyhow::bail;
use hyper::{header, Body, Request, Response, StatusCode};
use url::form_urlencoded;
use crate::http::util;
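/// Handles a simple SQL request: the body is expected to be form-urlencoded and to carry
/// the statement in a `sql` field, and the results are returned as JSON.
///
/// A rough sketch of a request (the route and port depend on how the HTTP server is
/// mounted, so both are assumptions here):
///
/// ```text
/// curl -X POST --data-urlencode 'sql=SELECT 1' http://localhost:6875/sql
/// ```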
pub async fn handle_sql(
req: Request<Body>,
coord_client: &mut coord::SessionClient,
) -> Result<Response<Body>, anyhow::Error> {
let res = async {
let body = hyper::body::to_bytes(req).await?;
let body: HashMap<_, _> = form_urlencoded::parse(&body).collect();
let sql = match body.get("sql") {
Some(sql) => sql,
None => bail!("expected `sql` parameter"),
};
let res = coord_client.simple_execute(sql).await?;
Ok(Response::builder()
.header(header::CONTENT_TYPE, "application/json")
.body(Body::from(serde_json::to_string(&res)?))
.unwrap())
}
.await;
match res {
Ok(res) => Ok(res),
Err(e) => Ok(util::error_response(StatusCode::BAD_REQUEST, e.to_string())),
}
}
| 32 | 83 | 0.630335 |
8add8759c082643794958a00fb7d48f80daf361b
| 5,791 |
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
use super::dispatch_json::{Deserialize, JsonOp, Value};
use crate::colors;
use crate::fs as deno_fs;
use crate::ops::json_op;
use crate::state::ThreadSafeState;
use crate::version;
use atty;
use deno_core::*;
use std::collections::HashMap;
use std::env;
use std::io::{Error, ErrorKind};
use sys_info;
use url::Url;
/// BUILD_OS and BUILD_ARCH match the values in Deno.build. See js/build.ts.
#[cfg(target_os = "macos")]
static BUILD_OS: &str = "mac";
#[cfg(target_os = "linux")]
static BUILD_OS: &str = "linux";
#[cfg(target_os = "windows")]
static BUILD_OS: &str = "win";
#[cfg(target_arch = "x86_64")]
static BUILD_ARCH: &str = "x64";
pub fn init(i: &mut Isolate, s: &ThreadSafeState) {
i.register_op("exit", s.core_op(json_op(s.stateful_op(op_exit))));
i.register_op("is_tty", s.core_op(json_op(s.stateful_op(op_is_tty))));
i.register_op("env", s.core_op(json_op(s.stateful_op(op_env))));
i.register_op("exec_path", s.core_op(json_op(s.stateful_op(op_exec_path))));
i.register_op("set_env", s.core_op(json_op(s.stateful_op(op_set_env))));
i.register_op("get_env", s.core_op(json_op(s.stateful_op(op_get_env))));
i.register_op("get_dir", s.core_op(json_op(s.stateful_op(op_get_dir))));
i.register_op("hostname", s.core_op(json_op(s.stateful_op(op_hostname))));
i.register_op("start", s.core_op(json_op(s.stateful_op(op_start))));
}
fn op_start(
state: &ThreadSafeState,
_args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
let gs = &state.global_state;
let script_args = if gs.flags.argv.len() >= 2 {
gs.flags.argv.clone().split_off(2)
} else {
vec![]
};
Ok(JsonOp::Sync(json!({
"cwd": deno_fs::normalize_path(&env::current_dir().unwrap()),
"pid": std::process::id(),
"argv": script_args,
"mainModule": gs.main_module.as_ref().map(|x| x.to_string()),
"debugFlag": gs.flags.log_level.map_or(false, |l| l == log::Level::Debug),
"versionFlag": gs.flags.version,
"v8Version": version::v8(),
"denoVersion": version::DENO,
"tsVersion": version::TYPESCRIPT,
"noColor": !colors::use_color(),
"os": BUILD_OS,
"arch": BUILD_ARCH,
})))
}
#[derive(Deserialize)]
struct GetDirArgs {
kind: std::string::String,
}
fn op_get_dir(
state: &ThreadSafeState,
args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
state.check_env()?;
let args: GetDirArgs = serde_json::from_value(args)?;
let path = match args.kind.as_str() {
"home" => dirs::home_dir(),
"config" => dirs::config_dir(),
"cache" => dirs::cache_dir(),
"executable" => dirs::executable_dir(),
"data" => dirs::data_dir(),
"data_local" => dirs::data_local_dir(),
"audio" => dirs::audio_dir(),
"desktop" => dirs::desktop_dir(),
"document" => dirs::document_dir(),
"download" => dirs::download_dir(),
"font" => dirs::font_dir(),
"picture" => dirs::picture_dir(),
"public" => dirs::public_dir(),
"template" => dirs::template_dir(),
"video" => dirs::video_dir(),
_ => {
return Err(ErrBox::from(Error::new(
ErrorKind::InvalidInput,
format!("Invalid dir type `{}`", args.kind.as_str()),
)))
}
};
if path == None {
Err(ErrBox::from(Error::new(
ErrorKind::NotFound,
format!("Could not get user {} directory.", args.kind.as_str()),
)))
} else {
Ok(JsonOp::Sync(json!(path
.unwrap_or_default()
.into_os_string()
.into_string()
.unwrap_or_default())))
}
}
fn op_exec_path(
state: &ThreadSafeState,
_args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
state.check_env()?;
let current_exe = env::current_exe().unwrap();
// Now apply URL parser to current exe to get fully resolved path, otherwise
// we might get `./` and `../` bits in `exec_path`
let exe_url = Url::from_file_path(current_exe).unwrap();
let path = exe_url.to_file_path().unwrap();
Ok(JsonOp::Sync(json!(path)))
}
#[derive(Deserialize)]
struct SetEnv {
key: String,
value: String,
}
fn op_set_env(
state: &ThreadSafeState,
args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
let args: SetEnv = serde_json::from_value(args)?;
state.check_env()?;
env::set_var(args.key, args.value);
Ok(JsonOp::Sync(json!({})))
}
fn op_env(
state: &ThreadSafeState,
_args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
state.check_env()?;
let v = env::vars().collect::<HashMap<String, String>>();
Ok(JsonOp::Sync(json!(v)))
}
#[derive(Deserialize)]
struct GetEnv {
key: String,
}
fn op_get_env(
state: &ThreadSafeState,
args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
let args: GetEnv = serde_json::from_value(args)?;
state.check_env()?;
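  // An unset variable is reported as an empty array; a set one as a single-element array.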
let r = match env::var(args.key) {
Err(env::VarError::NotPresent) => json!([]),
v => json!([v?]),
};
Ok(JsonOp::Sync(r))
}
#[derive(Deserialize)]
struct Exit {
code: i32,
}
fn op_exit(
_s: &ThreadSafeState,
args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
let args: Exit = serde_json::from_value(args)?;
std::process::exit(args.code)
}
fn op_is_tty(
_s: &ThreadSafeState,
_args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
Ok(JsonOp::Sync(json!({
"stdin": atty::is(atty::Stream::Stdin),
"stdout": atty::is(atty::Stream::Stdout),
"stderr": atty::is(atty::Stream::Stderr),
})))
}
fn op_hostname(
state: &ThreadSafeState,
_args: Value,
_zero_copy: Option<PinnedBuf>,
) -> Result<JsonOp, ErrBox> {
state.check_env()?;
let hostname = sys_info::hostname().unwrap_or_else(|_| "".to_owned());
Ok(JsonOp::Sync(json!(hostname)))
}
| 27.445498 | 78 | 0.650492 |
db71e235df51aad2bf08011bf27354b06fe008fc
| 40,362 |
//! Postgres schema description.
use super::*;
use crate::{getters::Getter, parsers::Parser};
use enumflags2::BitFlags;
use indoc::indoc;
use native_types::{NativeType, PostgresType};
use quaint::{connector::ResultRow, prelude::Queryable, single::Quaint};
use regex::Regex;
use serde_json::from_str;
use std::{borrow::Cow, collections::HashMap, convert::TryInto};
use tracing::trace;
#[enumflags2::bitflags]
#[derive(Clone, Copy, Debug)]
#[repr(u8)]
pub enum Circumstances {
Cockroach,
}
#[derive(Debug)]
pub struct SqlSchemaDescriber {
conn: Quaint,
circumstances: BitFlags<Circumstances>,
}
#[async_trait::async_trait]
impl super::SqlSchemaDescriberBackend for SqlSchemaDescriber {
async fn list_databases(&self) -> DescriberResult<Vec<String>> {
Ok(self.get_databases().await?)
}
async fn get_metadata(&self, schema: &str) -> DescriberResult<SqlMetadata> {
let table_count = self.get_table_names(&schema).await?.len();
let size_in_bytes = self.get_size(&schema).await?;
Ok(SqlMetadata {
table_count,
size_in_bytes,
})
}
async fn describe(&self, schema: &str) -> DescriberResult<SqlSchema> {
let sequences = self.get_sequences(schema).await?;
let enums = self.get_enums(schema).await?;
let mut columns = self.get_columns(schema, &enums, &sequences).await?;
let mut foreign_keys = self.get_foreign_keys(schema).await?;
let mut indexes = self.get_indices(schema, &sequences).await?;
let table_names = self.get_table_names(schema).await?;
let mut tables = Vec::with_capacity(table_names.len());
for table_name in &table_names {
tables.push(self.get_table(&table_name, &mut columns, &mut foreign_keys, &mut indexes));
}
let views = self.get_views(schema).await?;
let procedures = self.get_procedures(schema).await?;
Ok(SqlSchema {
enums,
sequences,
tables,
views,
procedures,
user_defined_types: vec![],
})
}
#[tracing::instrument]
async fn version(&self, _schema: &str) -> crate::DescriberResult<Option<String>> {
Ok(self.conn.version().await?)
}
}
static PG_RE_NUM: Lazy<Regex> = Lazy::new(|| Regex::new(r"^'?(-?\d+)('::.*)?$").expect("compile regex"));
static PG_RE_FLOAT: Lazy<Regex> = Lazy::new(|| Regex::new(r"^'?([^']+)('::.*)?$").expect("compile regex"));
impl Parser for SqlSchemaDescriber {
fn re_num() -> &'static Regex {
&PG_RE_NUM
}
fn re_float() -> &'static Regex {
&PG_RE_FLOAT
}
}
impl SqlSchemaDescriber {
/// Constructor.
pub fn new(conn: Quaint, circumstances: BitFlags<Circumstances>) -> SqlSchemaDescriber {
SqlSchemaDescriber { conn, circumstances }
}
fn is_cockroach(&self) -> bool {
self.circumstances.contains(Circumstances::Cockroach)
}
#[tracing::instrument]
async fn get_databases(&self) -> DescriberResult<Vec<String>> {
let sql = "select schema_name from information_schema.schemata;";
let rows = self.conn.query_raw(sql, &[]).await?;
let names = rows
.into_iter()
.map(|row| row.get_expect_string("schema_name"))
.collect();
trace!("Found schema names: {:?}", names);
Ok(names)
}
#[tracing::instrument]
async fn get_procedures(&self, schema: &str) -> DescriberResult<Vec<Procedure>> {
if self.is_cockroach() {
return Ok(Vec::new());
}
let sql = r#"
SELECT p.proname AS name,
CASE WHEN l.lanname = 'internal' THEN p.prosrc
ELSE pg_get_functiondef(p.oid)
END as definition
FROM pg_proc p
LEFT JOIN pg_namespace n ON p.pronamespace = n.oid
LEFT JOIN pg_language l ON p.prolang = l.oid
WHERE n.nspname = $1
"#;
let rows = self.conn.query_raw(sql, &[schema.into()]).await?;
let mut procedures = Vec::with_capacity(rows.len());
for row in rows.into_iter() {
procedures.push(Procedure {
name: row.get_expect_string("name"),
definition: row.get_string("definition"),
});
}
Ok(procedures)
}
#[tracing::instrument]
async fn get_table_names(&self, schema: &str) -> DescriberResult<Vec<String>> {
let sql = "
SELECT table_name as table_name FROM information_schema.tables
WHERE table_schema = $1
-- Views are not supported yet
AND table_type = 'BASE TABLE'
ORDER BY table_name";
let rows = self.conn.query_raw(sql, &[schema.into()]).await?;
let names = rows
.into_iter()
.map(|row| row.get_expect_string("table_name"))
.collect();
trace!("Found table names: {:?}", names);
Ok(names)
}
#[tracing::instrument]
async fn get_size(&self, schema: &str) -> DescriberResult<usize> {
if self.circumstances.contains(Circumstances::Cockroach) {
return Ok(0); // TODO
}
let sql =
"SELECT SUM(pg_total_relation_size(quote_ident(schemaname) || '.' || quote_ident(tablename)))::BIGINT as size
FROM pg_tables
WHERE schemaname = $1::text";
let mut result_iter = self.conn.query_raw(sql, &[schema.into()]).await?.into_iter();
let size: i64 = result_iter.next().and_then(|row| row.get_i64("size")).unwrap_or(0);
trace!("Found db size: {:?}", size);
Ok(size.try_into().expect("size is not a valid usize"))
}
#[tracing::instrument(skip(columns, foreign_keys, indices))]
fn get_table(
&self,
name: &str,
columns: &mut HashMap<String, Vec<Column>>,
foreign_keys: &mut HashMap<String, Vec<ForeignKey>>,
indices: &mut HashMap<String, (Vec<Index>, Option<PrimaryKey>)>,
) -> Table {
let (indices, primary_key) = indices.remove(name).unwrap_or_else(|| (Vec::new(), None));
let foreign_keys = foreign_keys.remove(name).unwrap_or_else(Vec::new);
let columns = columns.remove(name).unwrap_or_default();
Table {
name: name.to_string(),
columns,
foreign_keys,
indices,
primary_key,
}
}
#[tracing::instrument]
async fn get_views(&self, schema: &str) -> DescriberResult<Vec<View>> {
let sql = indoc! {r#"
SELECT viewname AS view_name, definition AS view_sql
FROM pg_catalog.pg_views
WHERE schemaname = $1
"#};
let result_set = self.conn.query_raw(sql, &[schema.into()]).await?;
let mut views = Vec::with_capacity(result_set.len());
for row in result_set.into_iter() {
views.push(View {
name: row.get_expect_string("view_name"),
definition: row.get_string("view_sql"),
})
}
Ok(views)
}
async fn get_columns(
&self,
schema: &str,
enums: &[Enum],
sequences: &[Sequence],
) -> DescriberResult<HashMap<String, Vec<Column>>> {
let mut columns: HashMap<String, Vec<Column>> = HashMap::new();
let sql = r#"
SELECT
info.table_name,
info.column_name,
format_type(att.atttypid, att.atttypmod) as formatted_type,
info.numeric_precision,
info.numeric_scale,
info.numeric_precision_radix,
info.datetime_precision,
info.data_type,
info.udt_name as full_data_type,
info.column_default,
info.is_nullable,
info.is_identity,
info.data_type,
info.character_maximum_length
FROM information_schema.columns info
JOIN pg_attribute att on att.attname = info.column_name
And att.attrelid = (
SELECT pg_class.oid
FROM pg_class
JOIN pg_namespace on pg_namespace.oid = pg_class.relnamespace
WHERE relname = info.table_name
AND pg_namespace.nspname = $1
)
WHERE table_schema = $1
ORDER BY ordinal_position;
"#;
let rows = self.conn.query_raw(&sql, &[schema.into()]).await?;
for col in rows {
trace!("Got column: {:?}", col);
let table_name = col.get_expect_string("table_name");
let name = col.get_expect_string("column_name");
let is_identity = match col.get_string("is_identity") {
Some(is_id) if is_id.eq_ignore_ascii_case("yes") => true,
Some(is_id) if is_id.eq_ignore_ascii_case("no") => false,
Some(is_identity_str) => panic!("unrecognized is_identity variant '{}'", is_identity_str),
None => false,
};
let data_type = col.get_expect_string("data_type");
let tpe = get_column_type(&col, enums);
let default = Self::get_default_value(&col, &data_type, &tpe, sequences, schema);
let auto_increment = is_identity
|| matches!(default.as_ref().map(|d| d.kind()), Some(DefaultKind::Sequence(_)))
|| (self.is_cockroach()
&& matches!(
default.as_ref().map(|d| d.kind()),
Some(DefaultKind::DbGenerated(s)) if s == "unique_rowid()"
));
let col = Column {
name,
tpe,
default,
auto_increment,
};
columns.entry(table_name).or_default().push(col);
}
trace!("Found table columns: {:?}", columns);
Ok(columns)
}
fn get_precision(col: &ResultRow) -> Precision {
let (character_maximum_length, numeric_precision, numeric_scale, time_precision) =
if matches!(col.get_expect_string("data_type").as_str(), "ARRAY") {
fn get_single(formatted_type: &str) -> Option<u32> {
static SINGLE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r#".*\(([0-9]*)\).*\[\]$"#).unwrap());
SINGLE_REGEX
.captures(formatted_type)
.and_then(|cap| cap.get(1).map(|precision| from_str::<u32>(precision.as_str()).unwrap()))
}
fn get_dual(formatted_type: &str) -> (Option<u32>, Option<u32>) {
static DUAL_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"numeric\(([0-9]*),([0-9]*)\)\[\]$"#).unwrap());
let first = DUAL_REGEX
.captures(formatted_type)
.and_then(|cap| cap.get(1).map(|precision| from_str::<u32>(precision.as_str()).unwrap()));
let second = DUAL_REGEX
.captures(formatted_type)
.and_then(|cap| cap.get(2).map(|precision| from_str::<u32>(precision.as_str()).unwrap()));
(first, second)
}
let formatted_type = col.get_expect_string("formatted_type");
let fdt = col.get_expect_string("full_data_type");
let char_max_length = match fdt.as_str() {
"_bpchar" | "_varchar" | "_bit" | "_varbit" => get_single(&formatted_type),
_ => None,
};
let (num_precision, num_scale) = match fdt.as_str() {
"_numeric" => get_dual(&formatted_type),
_ => (None, None),
};
let time = match fdt.as_str() {
"_timestamptz" | "_timestamp" | "_timetz" | "_time" | "_interval" => get_single(&formatted_type),
_ => None,
};
(char_max_length, num_precision, num_scale, time)
} else {
(
col.get_u32("character_maximum_length"),
col.get_u32("numeric_precision"),
col.get_u32("numeric_scale"),
col.get_u32("datetime_precision"),
)
};
Precision {
character_maximum_length,
numeric_precision,
numeric_scale,
time_precision,
}
}
/// Returns a map from table name to foreign keys.
async fn get_foreign_keys(&self, schema: &str) -> DescriberResult<HashMap<String, Vec<ForeignKey>>> {
// The `generate_subscripts` in the inner select is needed because the optimizer is free to reorganize the unnested rows if not explicitly ordered.
let sql = r#"
SELECT con.oid as "con_id",
att2.attname as "child_column",
cl.relname as "parent_table",
att.attname as "parent_column",
con.confdeltype,
con.confupdtype,
rel_ns.nspname as "referenced_schema_name",
conname as constraint_name,
child,
parent,
table_name
FROM (SELECT unnest(con1.conkey) as "parent",
unnest(con1.confkey) as "child",
cl.relname AS table_name,
ns.nspname AS schema_name,
generate_subscripts(con1.conkey, 1) AS colidx,
con1.oid,
con1.confrelid,
con1.conrelid,
con1.conname,
con1.confdeltype,
con1.confupdtype
FROM pg_class cl
join pg_constraint con1 on con1.conrelid = cl.oid
join pg_namespace ns on cl.relnamespace = ns.oid
WHERE
ns.nspname = $1
and con1.contype = 'f'
ORDER BY colidx
) con
JOIN pg_attribute att on att.attrelid = con.confrelid and att.attnum = con.child
JOIN pg_class cl on cl.oid = con.confrelid
JOIN pg_attribute att2 on att2.attrelid = con.conrelid and att2.attnum = con.parent
JOIN pg_class rel_cl on con.confrelid = rel_cl.oid
JOIN pg_namespace rel_ns on rel_cl.relnamespace = rel_ns.oid
ORDER BY con_id, con.colidx;
"#;
// One foreign key with multiple columns will be represented here as several
// rows with the same ID, which we will have to combine into corresponding foreign key
// objects.
let result_set = self.conn.query_raw(&sql, &[schema.into()]).await?;
let mut intermediate_fks: HashMap<i64, (String, ForeignKey)> = HashMap::new();
for row in result_set.into_iter() {
trace!("Got description FK row {:?}", row);
let id = row.get_expect_i64("con_id");
let column = row.get_expect_string("child_column");
let referenced_table = row.get_expect_string("parent_table");
let referenced_column = row.get_expect_string("parent_column");
let table_name = row.get_expect_string("table_name");
let confdeltype = row
.get_char("confdeltype")
.unwrap_or_else(|| row.get_expect_string("confdeltype").chars().next().unwrap());
let confupdtype = row
.get_char("confupdtype")
.unwrap_or_else(|| row.get_expect_string("confupdtype").chars().next().unwrap());
let constraint_name = row.get_expect_string("constraint_name");
let referenced_schema_name = row.get_expect_string("referenced_schema_name");
if schema != referenced_schema_name {
return Err(DescriberError::from(DescriberErrorKind::CrossSchemaReference {
from: format!("{}.{}", schema, table_name),
to: format!("{}.{}", referenced_schema_name, referenced_table),
constraint: constraint_name,
}));
}
let on_delete_action = match confdeltype {
'a' => ForeignKeyAction::NoAction,
'r' => ForeignKeyAction::Restrict,
'c' => ForeignKeyAction::Cascade,
'n' => ForeignKeyAction::SetNull,
'd' => ForeignKeyAction::SetDefault,
_ => panic!("unrecognized foreign key action (on delete) '{}'", confdeltype),
};
let on_update_action = match confupdtype {
'a' => ForeignKeyAction::NoAction,
'r' => ForeignKeyAction::Restrict,
'c' => ForeignKeyAction::Cascade,
'n' => ForeignKeyAction::SetNull,
'd' => ForeignKeyAction::SetDefault,
_ => panic!("unrecognized foreign key action (on update) '{}'", confupdtype),
};
match intermediate_fks.get_mut(&id) {
Some((_, fk)) => {
fk.columns.push(column);
fk.referenced_columns.push(referenced_column);
}
None => {
let fk = ForeignKey {
constraint_name: Some(constraint_name),
columns: vec![column],
referenced_table,
referenced_columns: vec![referenced_column],
on_delete_action,
on_update_action,
};
intermediate_fks.insert(id, (table_name, fk));
}
};
}
let mut fks = HashMap::new();
for (table_name, fk) in intermediate_fks.into_iter().map(|(_k, v)| v) {
let entry = fks.entry(table_name).or_insert_with(Vec::new);
trace!(
"Found foreign key - column(s): {:?}, to table: '{}', to column(s): {:?}",
fk.columns,
fk.referenced_table,
fk.referenced_columns
);
entry.push(fk);
}
for fks in fks.values_mut() {
fks.sort_unstable_by_key(|fk| fk.columns.clone());
}
Ok(fks)
}
/// Returns a map from table name to indexes and (optional) primary key.
async fn get_indices(
&self,
schema: &str,
sequences: &[Sequence],
) -> DescriberResult<HashMap<String, (Vec<Index>, Option<PrimaryKey>)>> {
let mut indexes_map = HashMap::new();
let sql = r#"
SELECT
indexInfos.relname as name,
columnInfos.attname AS column_name,
rawIndex.indisunique AS is_unique,
rawIndex.indisprimary AS is_primary_key,
tableInfos.relname AS table_name,
rawIndex.indkeyidx,
pg_get_serial_sequence('"' || $1 || '"."' || tableInfos.relname || '"', columnInfos.attname) AS sequence_name
FROM
-- pg_class stores infos about tables, indices etc: https://www.postgresql.org/docs/current/catalog-pg-class.html
pg_class tableInfos,
pg_class indexInfos,
-- pg_index stores indices: https://www.postgresql.org/docs/current/catalog-pg-index.html
(
SELECT
indrelid,
indexrelid,
indisunique,
indisprimary,
pg_index.indkey AS indkey,
generate_subscripts(pg_index.indkey, 1) AS indkeyidx
FROM pg_index
-- ignores partial indexes
Where indpred is Null
GROUP BY indrelid, indexrelid, indisunique, indisprimary, indkeyidx, indkey
ORDER BY indrelid, indexrelid, indkeyidx
) rawIndex,
-- pg_attribute stores infos about columns: https://www.postgresql.org/docs/current/catalog-pg-attribute.html
pg_attribute columnInfos,
-- pg_namespace stores info about the schema
pg_namespace schemaInfo
WHERE
-- find table info for index
tableInfos.oid = rawIndex.indrelid
-- find index info
AND indexInfos.oid = rawIndex.indexrelid
-- find table columns
AND columnInfos.attrelid = tableInfos.oid
AND columnInfos.attnum = rawIndex.indkey[rawIndex.indkeyidx]
-- we only consider ordinary tables
AND tableInfos.relkind = 'r'
-- we only consider stuff out of one specific schema
AND tableInfos.relnamespace = schemaInfo.oid
AND schemaInfo.nspname = $1
GROUP BY tableInfos.relname, indexInfos.relname, rawIndex.indisunique, rawIndex.indisprimary, columnInfos.attname, rawIndex.indkeyidx
ORDER BY rawIndex.indkeyidx
"#;
let rows = self.conn.query_raw(&sql, &[schema.into()]).await?;
for row in rows {
trace!("Got index: {:?}", row);
let name = row.get_expect_string("name");
let column_name = row.get_expect_string("column_name");
let is_unique = row.get_expect_bool("is_unique");
let is_primary_key = row.get_expect_bool("is_primary_key");
let table_name = row.get_expect_string("table_name");
let sequence_name = row.get_string("sequence_name");
if is_primary_key {
let entry: &mut (Vec<_>, Option<PrimaryKey>) =
indexes_map.entry(table_name).or_insert_with(|| (Vec::new(), None));
match entry.1.as_mut() {
Some(pk) => {
pk.columns.push(column_name);
}
None => {
let sequence = sequence_name.and_then(|sequence_name| {
let captures = RE_SEQ.captures(&sequence_name).expect("get captures");
let sequence_name = captures.get(1).expect("get capture").as_str();
sequences.iter().find(|s| s.name == sequence_name).map(|sequence| {
trace!("Got sequence corresponding to primary key: {:#?}", sequence);
sequence.clone()
})
});
entry.1 = Some(PrimaryKey {
columns: vec![column_name],
sequence,
constraint_name: Some(name.clone()),
});
}
}
} else {
let entry: &mut (Vec<Index>, _) = indexes_map.entry(table_name).or_insert_with(|| (Vec::new(), None));
if let Some(existing_index) = entry.0.iter_mut().find(|idx| idx.name == name) {
existing_index.columns.push(column_name);
} else {
entry.0.push(Index {
name,
columns: vec![column_name],
tpe: match is_unique {
true => IndexType::Unique,
false => IndexType::Normal,
},
})
}
}
}
Ok(indexes_map)
}
#[tracing::instrument]
async fn get_sequences(&self, schema: &str) -> DescriberResult<Vec<Sequence>> {
let sql = "SELECT sequence_name
FROM information_schema.sequences
WHERE sequence_schema = $1";
let rows = self.conn.query_raw(&sql, &[schema.into()]).await?;
let sequences = rows
.into_iter()
.map(|seq| {
trace!("Got sequence: {:?}", seq);
Sequence {
name: seq.get_expect_string("sequence_name"),
}
})
.collect();
trace!("Found sequences: {:?}", sequences);
Ok(sequences)
}
#[tracing::instrument]
async fn get_enums(&self, schema: &str) -> DescriberResult<Vec<Enum>> {
let sql = "
SELECT t.typname as name, e.enumlabel as value
FROM pg_type t
JOIN pg_enum e ON t.oid = e.enumtypid
JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
WHERE n.nspname = $1
ORDER BY e.enumsortorder";
let rows = self.conn.query_raw(&sql, &[schema.into()]).await?;
let mut enum_values: HashMap<String, Vec<String>> = HashMap::new();
for row in rows.into_iter() {
trace!("Got enum row: {:?}", row);
let name = row.get_expect_string("name");
let value = row.get_expect_string("value");
let values = enum_values.entry(name).or_insert_with(Vec::new);
values.push(value);
}
let mut enums: Vec<Enum> = enum_values
.into_iter()
.map(|(k, v)| Enum { name: k, values: v })
.collect();
enums.sort_by(|a, b| Ord::cmp(&a.name, &b.name));
trace!("Found enums: {:?}", enums);
Ok(enums)
}
fn get_default_value(
col: &ResultRow,
data_type: &str,
tpe: &ColumnType,
sequences: &[Sequence],
schema: &str,
) -> Option<DefaultValue> {
match col.get("column_default") {
None => None,
Some(param_value) => match param_value.to_string() {
None => None,
Some(x) if x.starts_with("NULL") => None,
Some(default_string) => {
Some(match &tpe.family {
ColumnTypeFamily::Int | ColumnTypeFamily::BigInt => {
let default_expr = unsuffix_default_literal(
&default_string,
&[data_type, &tpe.full_data_type, "integer", "INT8", "INT4"],
)
.unwrap_or_else(|| default_string.as_str().into());
let default_expr = process_string_literal(&default_expr);
match default_expr.parse::<i64>().ok() {
Some(int_value) => DefaultValue::value(if tpe.family.is_int() {
PrismaValue::Int(int_value)
} else {
PrismaValue::BigInt(int_value)
}),
None => match is_autoincrement(&default_string, sequences) {
Some(seq) => DefaultValue::sequence(seq),
None => DefaultValue::db_generated(default_string),
},
}
}
ColumnTypeFamily::Float => match Self::parse_float(&default_string) {
Some(float_value) => DefaultValue::value(float_value),
None => DefaultValue::db_generated(default_string),
},
ColumnTypeFamily::Decimal => match Self::parse_float(&default_string) {
Some(float_value) => DefaultValue::value(float_value),
None => DefaultValue::db_generated(default_string),
},
ColumnTypeFamily::Boolean => match Self::parse_bool(&default_string) {
Some(bool_value) => DefaultValue::value(bool_value),
None => DefaultValue::db_generated(default_string),
},
ColumnTypeFamily::String => match fetch_dbgenerated(&default_string) {
Some(fun) => DefaultValue::db_generated(fun),
None => {
let literal = unsuffix_default_literal(
&default_string,
&[data_type, &tpe.full_data_type, "STRING"],
);
match literal {
Some(default_literal) => DefaultValue::value(
process_string_literal(default_literal.as_ref()).into_owned(),
),
None => DefaultValue::db_generated(default_string),
}
}
},
ColumnTypeFamily::DateTime => {
match default_string.to_lowercase().as_str() {
"now()" | "current_timestamp" => DefaultValue::now(),
_ => DefaultValue::db_generated(default_string), //todo parse values
}
}
ColumnTypeFamily::Binary => DefaultValue::db_generated(default_string),
// JSON/JSONB defaults come in the '{}'::jsonb form.
ColumnTypeFamily::Json => {
unsuffix_default_literal(&default_string, &[data_type, &tpe.full_data_type])
.map(|default| DefaultValue::value(PrismaValue::Json(unquote_string(&default))))
.unwrap_or_else(move || DefaultValue::db_generated(default_string))
}
ColumnTypeFamily::Uuid => DefaultValue::db_generated(default_string),
ColumnTypeFamily::Enum(enum_name) => {
let expected_suffixes: &[Cow<'_, str>] = &[
Cow::Borrowed(enum_name),
Cow::Owned(format!("\"{}\"", enum_name)),
Cow::Owned(format!("{}.{}", schema, enum_name)),
];
match unsuffix_default_literal(&default_string, expected_suffixes) {
Some(value) => DefaultValue::value(PrismaValue::Enum(Self::unquote_string(&value))),
None => DefaultValue::db_generated(default_string),
}
}
ColumnTypeFamily::Unsupported(_) => DefaultValue::db_generated(default_string),
})
}
},
}
}
}
fn get_column_type(row: &ResultRow, enums: &[Enum]) -> ColumnType {
use ColumnTypeFamily::*;
let data_type = row.get_expect_string("data_type");
let full_data_type = row.get_expect_string("full_data_type");
let is_required = match row.get_expect_string("is_nullable").to_lowercase().as_ref() {
"no" => true,
"yes" => false,
x => panic!("unrecognized is_nullable variant '{}'", x),
};
let arity = match matches!(data_type.as_str(), "ARRAY") {
true => ColumnArity::List,
false if is_required => ColumnArity::Required,
false => ColumnArity::Nullable,
};
let precision = SqlSchemaDescriber::get_precision(&row);
let unsupported_type = || (Unsupported(full_data_type.clone()), None);
let enum_exists = |name| enums.iter().any(|e| e.name == name);
let (family, native_type) = match full_data_type.as_str() {
name if data_type == "USER-DEFINED" && enum_exists(name) => (Enum(name.to_owned()), None),
name if data_type == "ARRAY" && name.starts_with('_') && enum_exists(name.trim_start_matches('_')) => {
(Enum(name.trim_start_matches('_').to_owned()), None)
}
"int2" | "_int2" => (Int, Some(PostgresType::SmallInt)),
"int4" | "_int4" => (Int, Some(PostgresType::Integer)),
"int8" | "_int8" => (BigInt, Some(PostgresType::BigInt)),
"oid" | "_oid" => (Int, Some(PostgresType::Oid)),
"float4" | "_float4" => (Float, Some(PostgresType::Real)),
"float8" | "_float8" => (Float, Some(PostgresType::DoublePrecision)),
"bool" | "_bool" => (Boolean, Some(PostgresType::Boolean)),
"text" | "_text" => (String, Some(PostgresType::Text)),
"citext" | "_citext" => (String, Some(PostgresType::Citext)),
"varchar" | "_varchar" => (String, Some(PostgresType::VarChar(precision.character_maximum_length))),
"bpchar" | "_bpchar" => (String, Some(PostgresType::Char(precision.character_maximum_length))),
"date" | "_date" => (DateTime, Some(PostgresType::Date)),
"bytea" | "_bytea" => (Binary, Some(PostgresType::ByteA)),
"json" | "_json" => (Json, Some(PostgresType::Json)),
"jsonb" | "_jsonb" => (Json, Some(PostgresType::JsonB)),
"uuid" | "_uuid" => (Uuid, Some(PostgresType::Uuid)),
"xml" | "_xml" => (String, Some(PostgresType::Xml)),
// bit and varbit should be binary, but are currently mapped to strings.
"bit" | "_bit" => (String, Some(PostgresType::Bit(precision.character_maximum_length))),
"varbit" | "_varbit" => (String, Some(PostgresType::VarBit(precision.character_maximum_length))),
"numeric" | "_numeric" => (
Decimal,
Some(PostgresType::Decimal(
match (precision.numeric_precision, precision.numeric_scale) {
(None, None) => None,
(Some(prec), Some(scale)) => Some((prec, scale)),
_ => None,
},
)),
),
"money" | "_money" => (Decimal, Some(PostgresType::Money)),
"pg_lsn" | "_pg_lsn" => unsupported_type(),
"time" | "_time" => (DateTime, Some(PostgresType::Time(precision.time_precision))),
"timetz" | "_timetz" => (DateTime, Some(PostgresType::Timetz(precision.time_precision))),
"timestamp" | "_timestamp" => (DateTime, Some(PostgresType::Timestamp(precision.time_precision))),
"timestamptz" | "_timestamptz" => (DateTime, Some(PostgresType::Timestamptz(precision.time_precision))),
"tsquery" | "_tsquery" => unsupported_type(),
"tsvector" | "_tsvector" => unsupported_type(),
"txid_snapshot" | "_txid_snapshot" => unsupported_type(),
"inet" | "_inet" => (String, Some(PostgresType::Inet)),
//geometric
"box" | "_box" => unsupported_type(),
"circle" | "_circle" => unsupported_type(),
"line" | "_line" => unsupported_type(),
"lseg" | "_lseg" => unsupported_type(),
"path" | "_path" => unsupported_type(),
"polygon" | "_polygon" => unsupported_type(),
name if enum_exists(name) => (Enum(name.to_owned()), None),
_ => unsupported_type(),
};
ColumnType {
full_data_type,
family,
arity,
native_type: native_type.map(|x| x.to_json()),
}
}
static RE_SEQ: Lazy<Regex> = Lazy::new(|| Regex::new("^(?:.+\\.)?\"?([^.\"]+)\"?").expect("compile regex"));
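// Matches default expressions such as `nextval('foo'::regclass)`, `nextval('"Foo"'::regclass)`
// or `nextval(('foo'::text)::regclass)`, capturing the sequence name.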
static AUTOINCREMENT_REGEX: Lazy<Regex> = Lazy::new(|| {
Regex::new(r#"nextval\((\(?)'((.+)\.)?(("(?P<sequence>.+)")|(?P<sequence2>.+))'(::text\))?::regclass\)"#)
.expect("compile autoincrement regex")
});
/// Returns the name of the sequence in the schema that the default value matches, if it is drawn from one of them.
fn is_autoincrement(value: &str, sequences: &[Sequence]) -> Option<String> {
AUTOINCREMENT_REGEX.captures(value).and_then(|captures| {
let sequence_name = captures.name("sequence").or_else(|| captures.name("sequence2"));
sequence_name.and_then(|name| {
sequences
.iter()
.find(|seq| seq.name == name.as_str())
.map(|x| x.name.clone())
})
})
}
fn fetch_dbgenerated(value: &str) -> Option<String> {
static POSTGRES_DB_GENERATED_RE: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"(^\((.*)\)):{2,3}(\\")?(.*)(\\")?$"#).unwrap());
if !POSTGRES_DB_GENERATED_RE.is_match(value) {
None
} else {
let captures = POSTGRES_DB_GENERATED_RE.captures(value)?;
let fun = captures.get(1).unwrap().as_str();
let suffix = captures.get(4).unwrap().as_str();
Some(format!("{}::{}", fun, suffix))
}
}
fn unsuffix_default_literal<'a, T: AsRef<str>>(literal: &'a str, expected_suffixes: &[T]) -> Option<Cow<'a, str>> {
// Tries to match expressions of the form <expr> or <expr>::<type> or <expr>:::<type>.
static POSTGRES_DATA_TYPE_SUFFIX_RE: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"(?ms)^(.*?):{2,3}(\\")?(.*)(\\")?$"#).unwrap());
let captures = POSTGRES_DATA_TYPE_SUFFIX_RE.captures(literal)?;
let suffix = captures.get(3).unwrap().as_str();
if !expected_suffixes.iter().any(|expected| expected.as_ref() == suffix) {
return None;
}
let first_capture = captures.get(1).unwrap().as_str();
Some(Cow::Borrowed(first_capture))
}
// See https://www.postgresql.org/docs/9.3/sql-syntax-lexical.html
fn process_string_literal(literal: &str) -> Cow<'_, str> {
// B'...' or e'...' or '...'
static POSTGRES_STRING_DEFAULT_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r#"(?ms)^(?:B|e)?'(.*)'$"#).unwrap());
static POSTGRES_DEFAULT_QUOTE_UNESCAPE_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r#"'(')"#).unwrap());
static POSTGRES_DEFAULT_BACKSLASH_UNESCAPE_RE: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"\\(["']|\\[^\\])"#).unwrap());
static COCKROACH_DEFAULT_BACKSLASH_UNESCAPE_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r#"\\\\(["']|\\)"#).unwrap());
static POSTGRES_STRING_DEFAULTS_PIPELINE: &[(&Lazy<Regex>, &str)] = &[
(&POSTGRES_STRING_DEFAULT_RE, "$1"),
(&POSTGRES_DEFAULT_QUOTE_UNESCAPE_RE, "$1"),
(&POSTGRES_DEFAULT_BACKSLASH_UNESCAPE_RE, "$1"),
(&COCKROACH_DEFAULT_BACKSLASH_UNESCAPE_RE, "$1"),
];
chain_replaces(literal, POSTGRES_STRING_DEFAULTS_PIPELINE)
}
fn chain_replaces<'a>(s: &'a str, replaces: &[(&Lazy<Regex>, &str)]) -> Cow<'a, str> {
let mut out = Cow::Borrowed(s);
for (re, replacement) in replaces.iter() {
if !re.is_match(out.as_ref()) {
continue;
}
let replaced = re.replace_all(out.as_ref(), *replacement);
out = Cow::Owned(replaced.into_owned())
}
out
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn postgres_is_autoincrement_works() {
let sequences = vec![
Sequence {
name: "first_sequence".to_string(),
},
Sequence {
name: "second_sequence".to_string(),
},
Sequence {
name: "third_Sequence".to_string(),
},
Sequence {
name: "fourth_Sequence".to_string(),
},
Sequence {
name: "fifth_sequence".to_string(),
},
];
let first_autoincrement = r#"nextval('first_sequence'::regclass)"#;
assert!(is_autoincrement(first_autoincrement, &sequences).is_some());
let second_autoincrement = r#"nextval('schema_name.second_sequence'::regclass)"#;
assert!(is_autoincrement(second_autoincrement, &sequences).is_some());
let third_autoincrement = r#"nextval('"third_Sequence"'::regclass)"#;
assert!(is_autoincrement(third_autoincrement, &sequences).is_some());
let fourth_autoincrement = r#"nextval('"schema_Name"."fourth_Sequence"'::regclass)"#;
assert!(is_autoincrement(fourth_autoincrement, &sequences).is_some());
let fifth_autoincrement = r#"nextval(('fifth_sequence'::text)::regclass)"#;
assert!(is_autoincrement(fifth_autoincrement, &sequences).is_some());
let non_autoincrement = r#"string_default_named_seq"#;
assert!(is_autoincrement(non_autoincrement, &sequences).is_none());
}
}
| 41.912773 | 155 | 0.532332 |
f49b7a4258a46a768401c04582c690e27548208e
| 2,329 |
use kitsune_p2p_types::box_fut;
use super::*;
/// A supertrait of KitsuneHost convenient for defining test handlers.
/// Allows specifying only the methods you care about, and letting all the rest
/// return errors if called.
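///
/// A minimal sketch (the handler type name here is made up for illustration):
///
/// ```ignore
/// struct TestHost;
///
/// impl KitsuneHostDefaultError for TestHost {
///     const NAME: &'static str = "TestHost";
///     // override only the methods the test cares about; everything else
///     // falls back to the error-returning defaults below
/// }
/// ```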
pub trait KitsuneHostDefaultError: KitsuneHost {
/// Name to be printed out on unimplemented error
const NAME: &'static str;
/// We need to get previously stored agent info.
fn get_agent_info_signed(
&self,
_input: GetAgentInfoSignedEvt,
) -> KitsuneHostResult<Option<crate::types::agent_store::AgentInfoSigned>> {
box_fut(Err(format!(
"error for unimplemented KitsuneHost test behavior: method {} of {}",
"get_agent_info_signed",
Self::NAME
)
.into()))
}
/// Extrapolated Peer Coverage
fn peer_extrapolated_coverage(
&self,
_space: Arc<KitsuneSpace>,
_dht_arc_set: DhtArcSet,
) -> KitsuneHostResult<Vec<f64>> {
box_fut(Err(format!(
"error for unimplemented KitsuneHost test behavior: method {} of {}",
"peer_extrapolated_coverage",
Self::NAME
)
.into()))
}
/// Record a set of metric records
fn record_metrics(
&self,
_space: Arc<KitsuneSpace>,
_records: Vec<MetricRecord>,
) -> KitsuneHostResult<()> {
box_fut(Err(format!(
"error for unimplemented KitsuneHost test behavior: method {} of {}",
"record_metrics",
Self::NAME
)
.into()))
}
}
impl<T: KitsuneHostDefaultError> KitsuneHost for T {
fn get_agent_info_signed(
&self,
input: GetAgentInfoSignedEvt,
) -> KitsuneHostResult<Option<crate::types::agent_store::AgentInfoSigned>> {
KitsuneHostDefaultError::get_agent_info_signed(self, input)
}
fn peer_extrapolated_coverage(
&self,
space: Arc<KitsuneSpace>,
dht_arc_set: DhtArcSet,
) -> KitsuneHostResult<Vec<f64>> {
KitsuneHostDefaultError::peer_extrapolated_coverage(self, space, dht_arc_set)
}
fn record_metrics(
&self,
space: Arc<KitsuneSpace>,
records: Vec<MetricRecord>,
) -> KitsuneHostResult<()> {
KitsuneHostDefaultError::record_metrics(self, space, records)
}
}
| 29.858974 | 85 | 0.623444 |
fb1ae6070a888abf8974d747e7f5bc35581cb702
| 3,675 |
//! TCP relay client implementation
use std::{
io,
pin::Pin,
task::{self, Poll},
};
use log::trace;
use pin_project::pin_project;
use shadowsocks::relay::socks5::{
self,
Address,
Command,
Error,
HandshakeRequest,
HandshakeResponse,
Reply,
TcpRequestHeader,
TcpResponseHeader,
};
use tokio::{
io::{AsyncRead, AsyncWrite, ReadBuf},
net::{TcpStream, ToSocketAddrs},
};
/// Socks5 proxy client
#[pin_project]
pub struct Socks5TcpClient {
#[pin]
stream: TcpStream,
}
impl Socks5TcpClient {
/// Connects to `addr` via `proxy`
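    ///
    /// A minimal usage sketch (the proxy and target addresses are hypothetical):
    ///
    /// ```ignore
    /// use shadowsocks::relay::socks5::Address;
    ///
    /// let target = Address::DomainNameAddress("example.com".to_owned(), 80);
    /// let mut stream = Socks5TcpClient::connect(target, "127.0.0.1:1080").await?;
    /// // `stream` implements AsyncRead + AsyncWrite and is tunnelled to `target`.
    /// ```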
pub async fn connect<A, P>(addr: A, proxy: P) -> Result<Socks5TcpClient, Error>
where
A: Into<Address>,
P: ToSocketAddrs,
{
let mut s = TcpStream::connect(proxy).await?;
// 1. Handshake
let hs = HandshakeRequest::new(vec![socks5::SOCKS5_AUTH_METHOD_NONE]);
trace!("client connected, going to send handshake: {:?}", hs);
hs.write_to(&mut s).await?;
let hsp = HandshakeResponse::read_from(&mut s).await?;
trace!("got handshake response: {:?}", hsp);
assert_eq!(hsp.chosen_method, socks5::SOCKS5_AUTH_METHOD_NONE);
// 2. Send request header
let h = TcpRequestHeader::new(Command::TcpConnect, addr.into());
trace!("going to connect, req: {:?}", h);
h.write_to(&mut s).await?;
let hp = TcpResponseHeader::read_from(&mut s).await?;
trace!("got response: {:?}", hp);
match hp.reply {
Reply::Succeeded => (),
r => return Err(Error::Reply(r)),
}
Ok(Socks5TcpClient { stream: s })
}
/// UDP Associate `addr` via `proxy`
///
    /// According to the RFC, `addr` is the address that your UDP socket binds to
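    ///
    /// A hedged sketch (addresses are hypothetical; `local_udp_addr` is wherever the
    /// local UDP socket is bound):
    ///
    /// ```ignore
    /// let (assoc, relay_addr) =
    ///     Socks5TcpClient::udp_associate(local_udp_addr, "127.0.0.1:1080").await?;
    /// // Send datagrams to `relay_addr`; keep `assoc` open for the whole association.
    /// ```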
pub async fn udp_associate<A, P>(addr: A, proxy: P) -> Result<(Socks5TcpClient, Address), Error>
where
A: Into<Address>,
P: ToSocketAddrs,
{
let mut s = TcpStream::connect(proxy).await?;
// 1. Handshake
let hs = HandshakeRequest::new(vec![socks5::SOCKS5_AUTH_METHOD_NONE]);
trace!("client connected, going to send handshake: {:?}", hs);
hs.write_to(&mut s).await?;
let hsp = HandshakeResponse::read_from(&mut s).await?;
trace!("got handshake response: {:?}", hsp);
assert_eq!(hsp.chosen_method, socks5::SOCKS5_AUTH_METHOD_NONE);
// 2. Send request header
let h = TcpRequestHeader::new(Command::UdpAssociate, addr.into());
trace!("going to connect, req: {:?}", h);
h.write_to(&mut s).await?;
let hp = TcpResponseHeader::read_from(&mut s).await?;
trace!("got response: {:?}", hp);
match hp.reply {
Reply::Succeeded => (),
r => return Err(Error::Reply(r)),
}
Ok((Socks5TcpClient { stream: s }, hp.address))
}
}
impl AsyncRead for Socks5TcpClient {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<Result<(), io::Error>> {
self.project().stream.poll_read(cx, buf)
}
}
impl AsyncWrite for Socks5TcpClient {
fn poll_write(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8]) -> Poll<Result<usize, io::Error>> {
self.project().stream.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().stream.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().stream.poll_shutdown(cx)
}
}
| 28.053435 | 115 | 0.58449 |
26f07574b86e737a4e24b83c9c3caea75b9d0b29
| 4,275 |
use crate::handler::Challenge;
use rlp::DecoderError;
use std::fmt;
#[derive(Debug)]
/// A general error that is used throughout the Discv5 library.
pub enum Discv5Error {
/// An invalid ENR was received.
InvalidEnr,
    /// The public key type is unknown.
UnknownPublicKey,
/// The ENR key used is not supported.
KeyTypeNotSupported(&'static str),
/// Failed to derive an ephemeral public key.
KeyDerivationFailed,
/// The remote's public key was invalid.
InvalidRemotePublicKey,
/// The secret key does not match the provided ENR.
InvalidSecretKey,
/// An invalid signature was received for a challenge.
InvalidChallengeSignature(Challenge),
/// The Service channel has been closed early.
ServiceChannelClosed,
/// The discv5 service is not running.
ServiceNotStarted,
    /// The service is already running.
ServiceAlreadyStarted,
/// A session could not be established with the remote.
SessionNotEstablished,
/// An RLP decoding error occurred.
RLPError(DecoderError),
/// Failed to encrypt a message.
EncryptionFail(String),
/// Failed to decrypt a message.
DecryptionFailed(String),
    /// A custom error occurred.
Custom(&'static str),
/// A generic dynamic error occurred.
Error(String),
/// An IO error occurred.
Io(std::io::Error),
}
impl From<std::io::Error> for Discv5Error {
fn from(err: std::io::Error) -> Discv5Error {
Discv5Error::Io(err)
}
}
#[derive(Debug, Clone, PartialEq)]
/// Types of packet errors.
pub enum PacketError {
/// The packet type is unknown.
UnknownPacket,
/// The packet size was larger than expected.
TooLarge,
/// The packet size was smaller than expected.
TooSmall,
/// The NodeId sent was invalid.
InvalidNodeId,
/// The header has an invalid length.
HeaderLengthInvalid(usize),
/// The header could not be decrypted.
HeaderDecryptionFailed,
/// The authdata size is too large.
InvalidAuthDataSize,
/// The handshake is of an invalid version.
InvalidVersion(u16),
/// The ENR sent was invalid.
InvalidEnr(DecoderError),
}
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub enum ResponseError {
/// The channel used to send the response has already been closed.
ChannelClosed,
}
impl fmt::Display for ResponseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ResponseError::ChannelClosed => {
write!(f, "response channel has already been closed")
}
}
}
}
impl std::error::Error for ResponseError {}
#[derive(Debug, Clone, PartialEq)]
pub enum RequestError {
/// The request timed out.
Timeout,
/// The discovery service has not been started.
ServiceNotStarted,
/// The request was sent to ourselves.
SelfRequest,
/// The channel to the underlying threads failed.
ChannelFailed(String),
/// An invalid ENR was provided.
InvalidEnr(String),
/// The remote's ENR is invalid.
InvalidRemoteEnr,
    /// The remote returned an invalid packet.
InvalidRemotePacket,
/// Failed attempting to encrypt the request.
EncryptionFailed(String),
/// The multiaddr provided is invalid.
InvalidMultiaddr(String),
/// Failure generating random numbers during request.
EntropyFailure(&'static str),
}
#[derive(Debug, Clone, PartialEq)]
pub enum QueryError {
/// The discv5 service is not currently running.
ServiceNotStarted,
/// The channel to the underlying threads failed.
ChannelFailed(String),
/// The ENR provided was invalid.
InvalidEnr(String),
/// Encrypting the message failed.
EncryptionFailed(String),
/// The multiaddr provided was invalid.
InvalidMultiaddr(String),
}
impl fmt::Display for Discv5Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl fmt::Display for RequestError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl fmt::Display for QueryError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
| 29.081633 | 70 | 0.656608 |
c138cceecab3a5c6ee902d5c4a46064c7b105f66
| 54,951 |
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::fmt::{Debug, Formatter, Result};
use std::ops::DerefMut;
use std::rc::Rc;
use log_derive::*;
use mirai_annotations::*;
use rustc_hir::def_id::DefId;
use rustc_index::vec::Idx;
use rustc_middle::mir;
use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, SubstsRef};
use rustc_middle::ty::{
AdtDef, Const, ConstKind, ExistentialPredicate, ExistentialProjection, ExistentialTraitRef,
FnSig, ParamTy, Term, Ty, TyCtxt, TyKind, TypeAndMut,
};
use rustc_target::abi::VariantIdx;
use crate::abstract_value::AbstractValue;
use crate::constant_domain::ConstantDomain;
use crate::environment::Environment;
use crate::expression::{Expression, ExpressionType};
use crate::path::{Path, PathEnum, PathRefinement, PathRoot, PathSelector};
use crate::rustc_middle::ty::DefIdTree;
use crate::{type_visitor, utils};
#[derive(Debug)]
pub struct TypeCache<'tcx> {
type_list: Vec<Ty<'tcx>>,
type_to_index_map: HashMap<Ty<'tcx>, usize>,
}
impl<'tcx> Default for type_visitor::TypeCache<'tcx> {
fn default() -> Self {
Self::new()
}
}
impl<'tcx> TypeCache<'tcx> {
    /// Provides a way to refer to a rustc_middle::ty::Ty via a handle that does not have
    /// a lifetime specifier.
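    ///
    /// A hedged usage sketch (`ty` stands for some interned `Ty<'tcx>` obtained from a
    /// `TyCtxt`; it is not constructed here):
    ///
    /// ```ignore
    /// let mut cache = TypeCache::new();
    /// let handle = cache.get_index(&ty); // handles start at 1
    /// assert_eq!(cache.get_type(handle), Some(ty));
    /// assert_eq!(cache.get_type(0), None); // index 0 never maps to a type
    /// ```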
pub fn new() -> TypeCache<'tcx> {
TypeCache {
type_list: Vec::with_capacity(10_000),
type_to_index_map: HashMap::with_capacity(10_000),
}
}
/// Returns a non zero index that can be used to retrieve ty via get_type.
pub fn get_index(&mut self, ty: &Ty<'tcx>) -> usize {
if let Some(index) = self.type_to_index_map.get(ty) {
*index
} else {
let index = self.type_list.len() + 1;
self.type_list.push(*ty);
self.type_to_index_map.insert(*ty, index);
index
}
}
/// Returns the type that was stored at this index, or None if index is zero
/// or greater than the length of the type list.
pub fn get_type(&self, index: usize) -> Option<Ty<'tcx>> {
if index == 0 {
return None;
}
self.type_list.get(index - 1).cloned()
}
}
pub struct TypeVisitor<'tcx> {
pub actual_argument_types: Vec<Ty<'tcx>>,
pub closures_being_specialized: RefCell<HashSet<DefId>>,
pub def_id: DefId,
pub generic_argument_map: Option<HashMap<rustc_span::Symbol, GenericArg<'tcx>>>,
pub generic_arguments: Option<SubstsRef<'tcx>>,
pub mir: &'tcx mir::Body<'tcx>,
path_ty_cache: HashMap<Rc<Path>, Ty<'tcx>>,
pub dummy_untagged_value_type: Ty<'tcx>,
tcx: TyCtxt<'tcx>,
type_cache: Rc<RefCell<TypeCache<'tcx>>>,
}
impl<'analysis, 'tcx> Debug for TypeVisitor<'tcx> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
"TypeVisitor".fmt(f)
}
}
impl<'analysis, 'compilation, 'tcx> TypeVisitor<'tcx> {
pub fn new(
def_id: DefId,
mir: &'tcx mir::Body<'tcx>,
tcx: TyCtxt<'tcx>,
cache: Rc<RefCell<TypeCache<'tcx>>>,
) -> TypeVisitor<'tcx> {
let dummy_untagged_value_type = tcx.types.i8;
TypeVisitor {
actual_argument_types: Vec::new(),
closures_being_specialized: RefCell::new(HashSet::new()),
def_id,
generic_argument_map: None,
generic_arguments: None,
mir,
path_ty_cache: HashMap::new(),
dummy_untagged_value_type,
tcx,
type_cache: cache,
}
}
/// Restores the method only state to its initial state.
#[logfn_inputs(TRACE)]
pub fn reset_visitor_state(&mut self) {
self.generic_arguments = None;
self.path_ty_cache
.retain(|p, _| p.is_rooted_by_non_local_structure());
}
/// Parameters that are closures bring enclosed variables with them that are effectively
/// additional parameters. We pre-populate the environment with entries for these because
/// there is no convenient way to look up their types later on. I.e. unlike ordinary parameters
/// whose types can be looked up in mir.local_decls, these extra parameters need their
/// types extracted from the closure type definitions via the tricky logic below.
#[logfn_inputs(TRACE)]
pub fn add_any_closure_fields_for(
&mut self,
mut path_ty: &Ty<'tcx>,
path: &Rc<Path>,
first_state: &mut Environment,
) {
let mut is_ref = false;
if let TyKind::Ref(_, t, _) = path_ty.kind() {
is_ref = true;
path_ty = t;
}
match path_ty.kind() {
TyKind::Closure(_, substs) => {
if utils::are_concrete(substs) {
for (i, ty) in substs.as_closure().upvar_tys().enumerate() {
let var_type = ExpressionType::from(ty.kind());
let mut qualifier = path.clone();
if is_ref {
qualifier = Path::new_deref(path.clone(), ExpressionType::NonPrimitive)
}
let closure_field_path = Path::new_field(qualifier, i);
self.set_path_rustc_type(closure_field_path.clone(), ty);
let closure_field_val =
AbstractValue::make_typed_unknown(var_type, closure_field_path.clone());
first_state
.value_map
.insert_mut(closure_field_path, closure_field_val);
}
}
}
TyKind::Generator(_, substs, _) => {
for (i, ty) in substs.as_generator().prefix_tys().enumerate() {
let var_type = ExpressionType::from(ty.kind());
let mut qualifier = path.clone();
if is_ref {
qualifier = Path::new_deref(path.clone(), ExpressionType::NonPrimitive)
}
let generator_field_path = Path::new_field(qualifier, i);
self.set_path_rustc_type(generator_field_path.clone(), ty);
let generator_field_val =
AbstractValue::make_typed_unknown(var_type, generator_field_path.clone());
first_state
.value_map
.insert_mut(generator_field_path, generator_field_val);
}
}
TyKind::Opaque(def_id, substs) => {
let map = self.get_generic_arguments_map(*def_id, substs, &[]);
let path_ty =
self.specialize_generic_argument_type(self.tcx.type_of(*def_id), &map);
self.add_any_closure_fields_for(&path_ty, path, first_state);
}
TyKind::Dynamic(..) | TyKind::FnDef(..) | TyKind::FnPtr(..) => {}
_ => {
info!("unexpected closure type {:?}", path_ty.kind());
}
}
}
/// Returns the size in bytes (including padding) of an element of the given collection type.
/// If the type is not a collection, it returns one.
pub fn get_elem_type_size(&self, ty: Ty<'tcx>) -> u64 {
match ty.kind() {
TyKind::Array(ty, _) | TyKind::Slice(ty) => self.get_type_size(*ty),
TyKind::RawPtr(t) => self.get_type_size(t.ty),
_ => 1,
}
}
/// Path is required to be rooted in a temporary used to track a checked operation result.
/// The result type of the local will be a tuple (t, bool).
/// The result of this function is the t part.
#[logfn_inputs(TRACE)]
pub fn get_first_part_of_target_path_type_tuple(
&self,
path: &Rc<Path>,
current_span: rustc_span::Span,
) -> ExpressionType {
match self.get_path_rustc_type(path, current_span).kind() {
TyKind::Tuple(types) => ExpressionType::from(types[0].kind()),
_ => assume_unreachable!(),
}
}
// Path is required to be rooted in a temporary used to track an operation result.
#[logfn_inputs(TRACE)]
pub fn get_target_path_type(
&self,
path: &Rc<Path>,
current_span: rustc_span::Span,
) -> ExpressionType {
ExpressionType::from(self.get_path_rustc_type(path, current_span).kind())
}
/// Returns a parameter environment for the current function.
pub fn get_param_env(&self) -> rustc_middle::ty::ParamEnv<'tcx> {
let env_def_id = if self.tcx.is_closure(self.def_id) {
self.tcx.typeck_root_def_id(self.def_id)
} else {
self.def_id
};
self.tcx.param_env(env_def_id)
}
/// Returns a shared reference to the path type cache of the visitor
pub fn get_path_type_cache(&self) -> &HashMap<Rc<Path>, Ty<'tcx>> {
&self.path_ty_cache
}
pub fn get_index_for(&self, ty: Ty<'tcx>) -> usize {
let mut cache = self.type_cache.borrow_mut();
cache.get_index(&ty)
}
pub fn get_type_from_index(&self, type_index: usize) -> Ty<'tcx> {
let cache = self.type_cache.borrow();
if let Some(ty) = cache.get_type(type_index) {
ty
} else {
self.tcx.types.never
}
}
    /// Returns true if the given type kind is function-like: a closure, trait object,
    /// function definition or pointer, foreign type, generator (or its witness), or an
    /// opaque type.
#[logfn_inputs(TRACE)]
pub fn is_function_like(&self, ty_kind: &TyKind<'tcx>) -> bool {
matches!(
ty_kind,
TyKind::Closure(..)
| TyKind::Dynamic(..)
| TyKind::FnDef(..)
| TyKind::FnPtr(_)
| TyKind::Foreign(..)
| TyKind::Generator(..)
| TyKind::GeneratorWitness(..)
| TyKind::Opaque(..)
)
}
/// Returns true if the given type is a reference (or raw pointer) to a collection type, in which
/// case the reference/pointer independently tracks the length of the collection, thus effectively
/// tracking a slice of the underlying collection.
#[logfn_inputs(TRACE)]
pub fn is_slice_pointer(&self, ty_kind: &TyKind<'tcx>) -> bool {
match ty_kind {
TyKind::RawPtr(TypeAndMut { ty: target, .. }) | TyKind::Ref(_, target, _) => {
trace!("target type {:?}", target.kind());
// Pointers to sized arrays are thin pointers.
matches!(target.kind(), TyKind::Slice(..) | TyKind::Str)
}
_ => false,
}
}
/// Returns true if the given type is a reference to the string type.
#[logfn_inputs(TRACE)]
pub fn is_string_pointer(&self, ty_kind: &TyKind<'tcx>) -> bool {
if let TyKind::Ref(_, target, _) = ty_kind {
matches!(target.kind(), TyKind::Str)
} else {
false
}
}
/// Returns true if the given type is a reference (or raw pointer) that is not a slice pointer
#[logfn_inputs(TRACE)]
pub fn is_thin_pointer(&self, ty_kind: &TyKind<'tcx>) -> bool {
match ty_kind {
TyKind::RawPtr(TypeAndMut { ty: target, .. }) | TyKind::Ref(_, target, _) => {
!matches!(target.kind(), TyKind::Slice(..) | TyKind::Str)
}
_ => false,
}
}
/// Updates the type cache of the visitor so that looking up the type of path returns ty.
#[logfn_inputs(TRACE)]
pub fn set_path_rustc_type(&mut self, path: Rc<Path>, ty: Ty<'tcx>) {
self.path_ty_cache.insert(path, ty);
}
/// This is a hacky and brittle way to navigate the Rust compiler's type system.
/// Eventually it should be replaced with a comprehensive and principled mapping.
#[logfn_inputs(TRACE)]
pub fn get_path_rustc_type(&self, path: &Rc<Path>, current_span: rustc_span::Span) -> Ty<'tcx> {
if let Some(ty) = self.path_ty_cache.get(path) {
return *ty;
}
match &path.value {
PathEnum::Computed { value } => match &value.expression {
Expression::ConditionalExpression { consequent: e, .. }
| Expression::Join { left: e, .. } => {
self.get_path_rustc_type(&Path::get_as_path(e.clone()), current_span)
}
Expression::CompileTimeConstant(c) => {
if let ConstantDomain::Function(fr) = c {
if let Some(def_id) = fr.def_id {
return self.tcx.type_of(def_id);
}
}
c.get_rustc_type(self.tcx)
}
Expression::Reference(path) => {
let target_type = self.get_path_rustc_type(path, current_span);
if target_type.is_never() {
target_type
} else {
self.tcx
.mk_imm_ref(self.tcx.lifetimes.re_erased, target_type)
}
}
Expression::InitialParameterValue { path, .. }
| Expression::Variable { path, .. } => self.get_path_rustc_type(path, current_span),
_ => value.expression.infer_type().as_rustc_type(self.tcx),
},
PathEnum::LocalVariable {
ordinal,
type_index,
} => {
if *ordinal > 0 && *ordinal < self.mir.local_decls.len() {
let t = self.get_type_from_index(*type_index);
if t.is_never() {
self.get_loc_ty(mir::Local::from(*ordinal))
} else {
t
}
} else {
trace!(
"local var path.value is {:?} at {:?}",
path.value,
current_span
);
self.get_type_from_index(*type_index)
}
}
PathEnum::HeapBlock { .. } => self.tcx.mk_ptr(rustc_middle::ty::TypeAndMut {
ty: self.tcx.types.u8,
mutbl: rustc_hir::Mutability::Not,
}),
PathEnum::Offset { value } => {
if let Expression::Offset { left, .. } = &value.expression {
let base_path = Path::get_as_path(left.clone());
self.get_path_rustc_type(&base_path, current_span)
} else {
unreachable!("an offset path, must contain an offset expression");
}
}
PathEnum::Parameter { ordinal } => {
if *ordinal > 0 && *ordinal < self.mir.local_decls.len() {
self.get_loc_ty(mir::Local::from(*ordinal))
} else {
info!(
"parameter path.value is {:?} at {:?}",
path.value, current_span
);
self.tcx.types.never
}
}
PathEnum::PhantomData => self.tcx.types.never,
PathEnum::Result => {
if self.mir.local_decls.is_empty() {
info!("result type wanted from function without result local");
self.tcx.types.never
} else {
self.specialize_generic_argument_type(
self.mir.local_decls[mir::Local::from(0usize)].ty,
&self.generic_argument_map,
)
}
}
PathEnum::QualifiedPath {
qualifier,
selector,
..
} => {
let mut t = self.get_path_rustc_type(qualifier, current_span);
if t.is_never() {
return t;
}
match t.kind() {
TyKind::Infer(..) => {
// The qualifier does not resolve to a useful rustc type.
// This can happen when the qualifier is a PathEnum::Computed where the value
// is TOP, or BOTTOM or a heap layout.
return self.tcx.types.never;
}
TyKind::Projection(..) => {
t = self.specialize_generic_argument_type(t, &self.generic_argument_map);
}
_ => {}
}
match &**selector {
PathSelector::ConstantSlice { .. } => {
return self.tcx.mk_imm_ref(self.tcx.lifetimes.re_erased, t);
}
PathSelector::Function => {
return t;
}
PathSelector::UnionField {
case_index: ordinal,
..
}
| PathSelector::Field(ordinal) => {
if let TyKind::Opaque(def_id, subs) = &t.kind() {
let map = self.get_generic_arguments_map(*def_id, subs, &[]);
t = self
.specialize_generic_argument_type(self.tcx.type_of(*def_id), &map);
trace!("opaque type_of {:?}", t.kind());
trace!("opaque type_of {:?}", t);
}
match t.kind() {
TyKind::Adt(def, substs) => {
return self.get_field_type(def, substs, *ordinal);
}
TyKind::Array(elem_ty, ..) | TyKind::Slice(elem_ty) => {
match *ordinal {
0 => {
// Field 0 of a sized array is a raw pointer to the array element type
return self.tcx.mk_ptr(rustc_middle::ty::TypeAndMut {
ty: *elem_ty,
mutbl: rustc_hir::Mutability::Not,
});
}
1 => {
return self.tcx.types.usize;
}
_ => {}
}
}
TyKind::Closure(def_id, substs) => {
let closure_substs = substs.as_closure();
if closure_substs.is_valid() {
return closure_substs
.upvar_tys()
.nth(*ordinal)
.unwrap_or_else(|| {
info!(
"closure field not found {:?} {:?}",
def_id, ordinal
);
self.tcx.types.never
});
}
}
TyKind::Generator(def_id, substs, _) => {
let mut tuple_types =
substs.as_generator().state_tys(*def_id, self.tcx);
if let Some(field_tys) = tuple_types.nth(*ordinal) {
return self.tcx.mk_tup(field_tys);
}
info!("generator field not found {:?} {:?}", def_id, ordinal);
return self.tcx.types.never;
}
TyKind::Ref(_, t, _) if matches!(t.kind(), TyKind::Closure(..)) => {
// todo: this seems to work around a more fundamental bug.
// why would getting a field from a closure not need a deref
// before the field access? I.e. is a reference to a closure
// a sort of fat pointer?
if let TyKind::Closure(def_id, substs) = t.kind() {
if utils::are_concrete(substs) {
return substs
.as_closure()
.upvar_tys()
.nth(*ordinal)
.unwrap_or_else(|| {
info!(
"closure field not found {:?} {:?}",
def_id, ordinal
);
self.tcx.types.never
});
}
} else {
unreachable!("t.kind is a closure because of the guard");
}
}
TyKind::Str => {
match *ordinal {
0 => {
// Field 0 of a str is a raw pointer to char
return self.tcx.mk_ptr(rustc_middle::ty::TypeAndMut {
ty: self.tcx.types.char,
mutbl: rustc_hir::Mutability::Not,
});
}
1 => {
return self.tcx.types.usize;
}
_ => {}
}
}
TyKind::Tuple(types) => {
if let Some(ty) = types.get(*ordinal as usize) {
return *ty;
}
if types.is_empty() {
return self.tcx.types.never;
}
}
_ => {
if self.is_slice_pointer(t.kind()) {
match *ordinal {
0 => {
// Field 0 of a slice pointer is a raw pointer to the slice element type
return self.tcx.mk_ptr(rustc_middle::ty::TypeAndMut {
ty: self.get_element_type(t),
mutbl: rustc_hir::Mutability::Mut,
});
}
1 => {
return self.tcx.types.usize;
}
_ => {}
}
} else {
// Taking the address of a struct returns the address of field 0
// and the type of the address is both &S and &F where S is the
                                    // struct type and F is the type of field 0. If we get here, it
// is because we tracked type F, whereas rustc used S.
match *ordinal {
0 => {
return t;
}
1 => {
// Assume &S is a slice pointer
return self.tcx.types.usize;
}
_ => {}
}
}
}
}
}
PathSelector::Deref => {
return self.get_dereferenced_type(t);
}
PathSelector::Discriminant => {
return self.tcx.types.i32;
}
PathSelector::Downcast(_, ordinal) => {
// Down casting to an enum variant
if t == self.tcx.types.usize {
// Down casting from an untyped pointer. This happens often enough
                            // that we don't want to log it as an informational message.
debug!("The qualifier of the downcast can't be typed");
return self.tcx.types.never;
}
while type_visitor::is_transparent_wrapper(t)
|| matches!(t.kind(), TyKind::Adt(..))
{
if let TyKind::Adt(def, substs) = t.kind() {
let substs =
self.specialize_substs(substs, &self.generic_argument_map);
if !def.is_enum() {
// Could be a *&S vs *&S.Field_0 confusion
t = self.get_field_type(def, substs, 0);
continue;
}
if *ordinal < def.variants().len() {
let variant = &def.variants()[VariantIdx::new(*ordinal)];
let field_tys =
variant.fields.iter().map(|fd| fd.ty(self.tcx, substs));
return self.tcx.mk_tup(field_tys);
}
if !type_visitor::is_transparent_wrapper(t) {
break;
}
}
t = self.remove_transparent_wrapper(t);
}
info!(
"illegally down casting to index {} of {:?} at {:?}",
*ordinal, t, current_span
);
return self.tcx.types.never;
}
PathSelector::Index(_) | PathSelector::ConstantIndex { .. } => {
return self.get_element_type(t);
}
PathSelector::Layout => {
return self.tcx.types.trait_object_dummy_self;
}
PathSelector::Slice(_) => {
return {
let slice_ty = self.tcx.mk_slice(self.get_element_type(t));
self.tcx.mk_mut_ref(self.tcx.lifetimes.re_static, slice_ty)
};
}
PathSelector::TagField => {
return self.dummy_untagged_value_type;
}
_ => {}
}
info!("current span is {:?}", current_span);
info!(
"cache key is {:?}",
utils::summary_key_str(self.tcx, self.def_id)
);
info!("path is {:?}", path);
info!("t is {:?}", t);
info!("qualifier is {:?}", qualifier);
info!("selector is {:?}", selector);
self.tcx.types.never
}
PathEnum::StaticVariable { def_id, .. } => {
if let Some(def_id) = def_id {
return self.tcx.type_of(*def_id);
}
info!(
"static variable path.value is {:?} at {:?}",
path.value, current_span
);
self.tcx.types.never
}
_ => {
info!("path.value is {:?} at {:?}", path.value, current_span);
info!("path_ty_cache {:?}", self.path_ty_cache);
self.tcx.types.never
}
}
}
/// Returns the target type of a reference type.
#[logfn_inputs(TRACE)]
pub fn get_dereferenced_type(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.kind() {
TyKind::RawPtr(ty_and_mut) => ty_and_mut.ty,
TyKind::Ref(_, t, _) => *t,
_ => {
if ty.is_box() {
ty.boxed_ty()
} else {
ty
}
}
}
}
/// Returns the element type of an array or slice type.
#[logfn_inputs(TRACE)]
pub fn get_element_type(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
match &ty.kind() {
TyKind::Array(t, _) => *t,
TyKind::RawPtr(TypeAndMut { ty: t, .. }) | TyKind::Ref(_, t, _) => match t.kind() {
TyKind::Array(t, _) => *t,
TyKind::Slice(t) => *t,
TyKind::Str => self.tcx.types.char,
_ => *t,
},
TyKind::Slice(t) => *t,
TyKind::Str => self.tcx.types.char,
_ => ty,
}
}
/// Returns the type of the field with the given ordinal.
#[logfn_inputs(TRACE)]
pub fn get_field_type(
&self,
def: &'tcx AdtDef,
substs: SubstsRef<'tcx>,
ordinal: usize,
) -> Ty<'tcx> {
for variant in def.variants().iter() {
if ordinal < variant.fields.len() {
let field = &variant.fields[ordinal];
let ft = field.ty(self.tcx, substs);
trace!("field {:?} type is {:?}", ordinal, ft);
return ft;
}
}
debug!("adt def does not have a field with ordinal {}", ordinal);
self.tcx.types.never
}
/// Returns a map from path to ADT type for any path rooted in an actual argument
/// and known to have a type that is a reference to an ADT. Since the rustc type of the
    /// corresponding field might be a trait, we prefer the type from the actual argument, which
/// is more likely to be concrete. By seeding the initial type cache of a called function
/// with this information, we can get resolution of trait calls where the receiver is a
/// field reachable from a parameter, rather than the parameter itself.
#[logfn_inputs(TRACE)]
pub fn get_adt_map(
&self,
actual_arguments: &[(Rc<Path>, Rc<AbstractValue>)],
environment: &Environment,
) -> Option<Rc<HashMap<Rc<Path>, Ty<'tcx>>>> {
let mut result: HashMap<Rc<Path>, Ty<'tcx>> = HashMap::new();
for (i, (arg_path, _)) in actual_arguments.iter().enumerate() {
for (p, v) in environment
.value_map
.iter()
.filter(|(p, _)| p.is_rooted_by(arg_path))
{
if let Expression::Reference(rp) = &v.expression {
if let Some(ty) = self.path_ty_cache.get(rp) {
if ty.is_adt() {
let param_path = p.replace_root(arg_path, Path::new_parameter(i + 1));
let ptr_ty = self.tcx.mk_ptr(rustc_middle::ty::TypeAndMut {
ty: *ty,
mutbl: rustc_hir::Mutability::Not,
});
result.insert(param_path, ptr_ty);
}
}
}
}
}
if result.is_empty() {
None
} else {
Some(Rc::new(result))
}
}
/// If Operand corresponds to a compile time constant function, return
/// the generic parameter substitutions (type arguments) that are used by
/// the call instruction whose operand this is.
#[logfn_inputs(TRACE)]
pub fn get_generic_arguments_map(
&self,
def_id: DefId,
generic_args: SubstsRef<'tcx>,
actual_argument_types: &[Ty<'tcx>],
) -> Option<HashMap<rustc_span::Symbol, GenericArg<'tcx>>> {
let mut substitution_map = self.generic_argument_map.clone();
let mut map: HashMap<rustc_span::Symbol, GenericArg<'tcx>> = HashMap::new();
// This iterates over the callee's generic parameter definitions.
// If the parent of the callee is generic, those definitions are iterated
// as well. This applies recursively. Note that a child cannot mask the
// generic parameters of its parent with one of its own, so each parameter
// definition in this iteration will have a unique name.
InternalSubsts::for_item(self.tcx, def_id, |param_def, _| {
if let Some(gen_arg) = generic_args.get(param_def.index as usize) {
let specialized_gen_arg =
self.specialize_generic_argument(*gen_arg, &substitution_map);
if let Some(substitution_map) = &mut substitution_map {
substitution_map.insert(param_def.name, specialized_gen_arg);
}
map.insert(param_def.name, specialized_gen_arg);
} else {
debug!("unmapped generic param def");
}
self.tcx.mk_param_from_def(param_def) // not used
});
// Add "Self" -> actual_argument_types[0]
if let Some(self_ty) = actual_argument_types.get(0) {
let self_ty = if let TyKind::Ref(_, ty, _) = self_ty.kind() {
*ty
} else {
*self_ty
};
let self_sym = rustc_span::Symbol::intern("Self");
map.entry(self_sym).or_insert_with(|| self_ty.into());
}
if map.is_empty() {
None
} else {
Some(map)
}
}
/// Returns the specialized type for the given local variable
#[logfn_inputs(TRACE)]
pub fn get_loc_ty(&self, local: mir::Local) -> Ty<'tcx> {
let i = local.as_usize();
let loc_ty = self.specialize_generic_argument_type(
self.mir.local_decls[local].ty,
&self.generic_argument_map,
);
if !utils::is_concrete(loc_ty.kind())
&& 0 < i
&& i <= self.mir.arg_count
&& i <= self.actual_argument_types.len()
{
let act_ty = self.actual_argument_types[i - 1];
if utils::is_concrete(act_ty.kind()) {
return act_ty;
}
}
loc_ty
}
/// Returns an ExpressionType value corresponding to the Rustc type of the place.
#[logfn_inputs(TRACE)]
pub fn get_place_type(
&self,
place: &mir::Place<'tcx>,
current_span: rustc_span::Span,
) -> ExpressionType {
ExpressionType::from(self.get_rustc_place_type(place, current_span).kind())
}
/// Returns the rustc Ty of the given place in memory.
#[logfn_inputs(TRACE)]
#[logfn(TRACE)]
pub fn get_rustc_place_type(
&self,
place: &mir::Place<'tcx>,
current_span: rustc_span::Span,
) -> Ty<'tcx> {
let result = {
let base_type = self.get_loc_ty(place.local);
self.get_type_for_projection_element(current_span, base_type, place.projection)
};
match result.kind() {
TyKind::Param(t_par) => {
if let Some(generic_args) = self.generic_arguments {
if let Some(ty) = generic_args.types().nth(t_par.index as usize) {
return ty;
}
if t_par.name.as_str() == "Self" && !self.actual_argument_types.is_empty() {
return self.actual_argument_types[0];
}
}
}
TyKind::Ref(region, ty, mutbl) => {
if let TyKind::Param(t_par) = ty.kind() {
if t_par.name.as_str() == "Self" && !self.actual_argument_types.is_empty() {
return self.tcx.mk_ref(
*region,
rustc_middle::ty::TypeAndMut {
ty: self.actual_argument_types[0],
mutbl: *mutbl,
},
);
}
}
}
_ => {}
}
result
}
/// Returns the rustc TyKind of the element selected by projection_elem.
#[logfn_inputs(TRACE)]
pub fn get_type_for_projection_element(
&self,
current_span: rustc_span::Span,
base_ty: Ty<'tcx>,
place_projection: &[rustc_middle::mir::PlaceElem<'tcx>],
) -> Ty<'tcx> {
place_projection
.iter()
.fold(base_ty, |base_ty, projection_elem| match projection_elem {
mir::ProjectionElem::Deref => match base_ty.kind() {
TyKind::Adt(..) => base_ty,
TyKind::RawPtr(ty_and_mut) => ty_and_mut.ty,
TyKind::Ref(_, ty, _) => *ty,
_ => {
info!(
"bad deref projection span: {:?}\nelem: {:?} type: {:?}",
current_span, projection_elem, base_ty
);
self.tcx.types.never
}
},
mir::ProjectionElem::Field(_, ty) => {
self.specialize_generic_argument_type(*ty, &self.generic_argument_map)
}
mir::ProjectionElem::Subslice { .. } => base_ty,
mir::ProjectionElem::Index(_) | mir::ProjectionElem::ConstantIndex { .. } => {
match base_ty.kind() {
TyKind::Adt(..) => base_ty,
TyKind::Array(ty, _) => *ty,
TyKind::Ref(_, ty, _) => self.get_element_type(*ty),
TyKind::Slice(ty) => *ty,
_ => {
debug!(
"span: {:?}\nelem: {:?} type: {:?}",
current_span, projection_elem, base_ty
);
assume_unreachable!();
}
}
}
mir::ProjectionElem::Downcast(_, ordinal) => {
if let TyKind::Adt(def, substs) = base_ty.kind() {
if ordinal.index() >= def.variants().len() {
debug!(
"illegally down casting to index {} of {:?} at {:?}",
ordinal.index(),
base_ty,
current_span
);
let variant = &def.variants().iter().last().unwrap();
let field_tys = variant.fields.iter().map(|fd| fd.ty(self.tcx, substs));
return self.tcx.mk_tup(field_tys);
}
let variant = &def.variants()[*ordinal];
let field_tys = variant.fields.iter().map(|fd| fd.ty(self.tcx, substs));
return self.tcx.mk_tup(field_tys);
} else if let TyKind::Generator(def_id, substs, ..) = base_ty.kind() {
let mut tuple_types = substs.as_generator().state_tys(*def_id, self.tcx);
if let Some(field_tys) = tuple_types.nth(ordinal.index()) {
return self.tcx.mk_tup(field_tys);
}
debug!(
"illegally down casting to index {} of {:?} at {:?}",
ordinal.index(),
base_ty,
current_span
);
} else {
info!("unexpected type for downcast {:?}", base_ty);
}
base_ty
}
})
}
/// Returns the size in bytes (including padding) of an instance of the given type.
pub fn get_type_size(&self, ty: Ty<'tcx>) -> u64 {
if let Ok(ty_and_layout) = self.layout_of(ty) {
ty_and_layout.layout.size().bytes()
} else {
0
}
}
/// Returns the size (including padding) and alignment, in bytes, of an instance of the given type.
pub fn get_type_size_and_alignment(&self, ty: Ty<'tcx>) -> (u128, u128) {
if let Ok(ty_and_layout) = self.layout_of(ty) {
(
ty_and_layout.layout.size().bytes() as u128,
ty_and_layout.align.pref.bytes() as u128,
)
} else {
(0, 8)
}
}
/// Returns a layout for the given type, if concrete.
pub fn layout_of(
&self,
ty: Ty<'tcx>,
) -> std::result::Result<
rustc_middle::ty::layout::TyAndLayout<'tcx>,
rustc_middle::ty::layout::LayoutError<'tcx>,
> {
let param_env = self.get_param_env();
if utils::is_concrete(ty.kind()) {
self.tcx.layout_of(param_env.and(ty))
} else {
Err(rustc_middle::ty::layout::LayoutError::Unknown(ty))
}
}
pub fn remove_transparent_wrapper(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
if let TyKind::Adt(def, substs) = ty.kind() {
if def.repr().transparent() {
let variant_0 = VariantIdx::from_u32(0);
let v = &def.variants()[variant_0];
let non_zst_field = v.fields.iter().find(|field| {
let field_ty = self.tcx.type_of(field.did);
let is_zst = self
.layout_of(field_ty)
.map_or(false, |layout| layout.is_zst());
!is_zst
});
if let Some(f) = non_zst_field {
return f.ty(self.tcx, substs);
}
}
}
ty
}
#[logfn_inputs(TRACE)]
fn specialize_const(
&self,
constant: Const<'tcx>,
map: &Option<HashMap<rustc_span::Symbol, GenericArg<'tcx>>>,
) -> Const<'tcx> {
if let ConstKind::Param(param_const) = constant.val() {
if let Some(gen_arg) = map.as_ref().unwrap().get(¶m_const.name) {
return gen_arg.expect_const();
}
}
constant
}
#[logfn_inputs(TRACE)]
fn specialize_generic_argument(
&self,
gen_arg: GenericArg<'tcx>,
map: &Option<HashMap<rustc_span::Symbol, GenericArg<'tcx>>>,
) -> GenericArg<'tcx> {
match gen_arg.unpack() {
GenericArgKind::Type(ty) => self.specialize_generic_argument_type(ty, map).into(),
GenericArgKind::Const(c) => self.specialize_const(c, map).into(),
_ => gen_arg,
}
}
#[logfn_inputs(TRACE)]
pub fn specialize_generic_argument_type(
&self,
gen_arg_type: Ty<'tcx>,
map: &Option<HashMap<rustc_span::Symbol, GenericArg<'tcx>>>,
) -> Ty<'tcx> {
// The projection of an associated type. For example,
// `<T as Trait<..>>::N`.
if let TyKind::Projection(projection) = gen_arg_type.kind() {
let specialized_substs = self.specialize_substs(projection.substs, map);
let item_def_id = projection.item_def_id;
return if utils::are_concrete(specialized_substs) {
let param_env = self
.tcx
.param_env(self.tcx.associated_item(item_def_id).container.id());
if let Ok(Some(instance)) = rustc_middle::ty::Instance::resolve(
self.tcx,
param_env,
item_def_id,
specialized_substs,
) {
let instance_item_def_id = instance.def.def_id();
if item_def_id == instance_item_def_id {
return self
.tcx
.mk_projection(projection.item_def_id, specialized_substs);
}
let item_type = self.tcx.type_of(instance_item_def_id);
let map =
self.get_generic_arguments_map(instance_item_def_id, instance.substs, &[]);
if item_type == gen_arg_type && map.is_none() {
// Can happen if the projection just adds a life time
item_type
} else {
self.specialize_generic_argument_type(item_type, &map)
}
} else {
let projection_trait = self.tcx.parent(item_def_id);
if projection_trait == self.tcx.lang_items().pointee_trait() {
assume!(!specialized_substs.is_empty());
if let GenericArgKind::Type(ty) = specialized_substs[0].unpack() {
return ty.ptr_metadata_ty(self.tcx, |ty| ty).0;
}
} else if projection_trait == self.tcx.lang_items().discriminant_kind_trait() {
assume!(!specialized_substs.is_empty());
if let GenericArgKind::Type(enum_ty) = specialized_substs[0].unpack() {
return enum_ty.discriminant_ty(self.tcx);
}
}
debug!("could not resolve an associated type with concrete type arguments");
gen_arg_type
}
} else {
self.tcx
.mk_projection(projection.item_def_id, specialized_substs)
};
}
if map.is_none() {
return gen_arg_type;
}
match gen_arg_type.kind() {
TyKind::Adt(def, substs) => self.tcx.mk_adt(*def, self.specialize_substs(substs, map)),
TyKind::Array(elem_ty, len) => {
let specialized_elem_ty = self.specialize_generic_argument_type(*elem_ty, map);
let specialized_len = self.specialize_const(*len, map);
self.tcx
.mk_ty(TyKind::Array(specialized_elem_ty, specialized_len))
}
TyKind::Slice(elem_ty) => {
let specialized_elem_ty = self.specialize_generic_argument_type(*elem_ty, map);
self.tcx.mk_slice(specialized_elem_ty)
}
TyKind::RawPtr(rustc_middle::ty::TypeAndMut { ty, mutbl }) => {
let specialized_ty = self.specialize_generic_argument_type(*ty, map);
self.tcx.mk_ptr(rustc_middle::ty::TypeAndMut {
ty: specialized_ty,
mutbl: *mutbl,
})
}
TyKind::Ref(region, ty, mutbl) => {
let specialized_ty = self.specialize_generic_argument_type(*ty, map);
self.tcx.mk_ref(
*region,
rustc_middle::ty::TypeAndMut {
ty: specialized_ty,
mutbl: *mutbl,
},
)
}
TyKind::FnDef(def_id, substs) => self
.tcx
.mk_fn_def(*def_id, self.specialize_substs(substs, map)),
TyKind::FnPtr(fn_sig) => {
let map_fn_sig = |fn_sig: FnSig<'tcx>| {
let specialized_inputs_and_output = self.tcx.mk_type_list(
fn_sig
.inputs_and_output
.iter()
.map(|ty| self.specialize_generic_argument_type(ty, map)),
);
FnSig {
inputs_and_output: specialized_inputs_and_output,
c_variadic: fn_sig.c_variadic,
unsafety: fn_sig.unsafety,
abi: fn_sig.abi,
}
};
let specialized_fn_sig = fn_sig.map_bound(map_fn_sig);
self.tcx.mk_fn_ptr(specialized_fn_sig)
}
TyKind::Dynamic(predicates, region) => {
let specialized_predicates = predicates.iter().map(
|bound_pred: rustc_middle::ty::Binder<'_, ExistentialPredicate<'tcx>>| {
bound_pred.map_bound(|pred| match pred {
ExistentialPredicate::Trait(ExistentialTraitRef { def_id, substs }) => {
ExistentialPredicate::Trait(ExistentialTraitRef {
def_id,
substs: self.specialize_substs(substs, map),
})
}
ExistentialPredicate::Projection(ExistentialProjection {
item_def_id,
substs,
term,
}) => {
if let Term::Ty(ty) = term {
ExistentialPredicate::Projection(ExistentialProjection {
item_def_id,
substs: self.specialize_substs(substs, map),
term: Term::Ty(
self.specialize_generic_argument_type(ty, map),
),
})
} else {
ExistentialPredicate::Projection(ExistentialProjection {
item_def_id,
substs: self.specialize_substs(substs, map),
term,
})
}
}
ExistentialPredicate::AutoTrait(_) => pred,
})
},
);
self.tcx.mk_dynamic(
self.tcx
.mk_poly_existential_predicates(specialized_predicates),
*region,
)
}
TyKind::Closure(def_id, substs) => {
// Closure types can be part of their own type parameters...
// so need to guard against endless recursion
{
let mut borrowed_closures_being_specialized =
self.closures_being_specialized.borrow_mut();
let closures_being_specialized =
borrowed_closures_being_specialized.deref_mut();
if !closures_being_specialized.insert(*def_id) {
return gen_arg_type;
}
}
let specialized_closure = self
.tcx
.mk_closure(*def_id, self.specialize_substs(substs, map));
let mut borrowed_closures_being_specialized =
self.closures_being_specialized.borrow_mut();
let closures_being_specialized = borrowed_closures_being_specialized.deref_mut();
closures_being_specialized.remove(def_id);
specialized_closure
}
TyKind::Generator(def_id, substs, movability) => {
self.tcx
.mk_generator(*def_id, self.specialize_substs(substs, map), *movability)
}
TyKind::GeneratorWitness(bound_types) => {
let map_types = |types: &rustc_middle::ty::List<Ty<'tcx>>| {
self.tcx.mk_type_list(
types
.iter()
.map(|ty| self.specialize_generic_argument_type(ty, map)),
)
};
let specialized_types = bound_types.map_bound(map_types);
self.tcx.mk_generator_witness(specialized_types)
}
TyKind::Tuple(types) => self.tcx.mk_tup(
types
.iter()
.map(|ty| self.specialize_generic_argument_type(ty, map)),
),
TyKind::Opaque(def_id, substs) => self
.tcx
.mk_opaque(*def_id, self.specialize_substs(substs, map)),
TyKind::Param(ParamTy { name, .. }) => {
if let Some(map) = map {
if let Some(gen_arg) = map.get(name) {
return gen_arg.expect_ty();
}
}
gen_arg_type
}
_ => gen_arg_type,
}
}
#[logfn_inputs(TRACE)]
pub fn specialize_substs(
&self,
substs: SubstsRef<'tcx>,
map: &Option<HashMap<rustc_span::Symbol, GenericArg<'tcx>>>,
) -> SubstsRef<'tcx> {
let specialized_generic_args: Vec<GenericArg<'_>> = substs
.iter()
.map(|gen_arg| self.specialize_generic_argument(gen_arg, map))
.collect();
self.tcx.intern_substs(&specialized_generic_args)
}
}
pub fn is_transparent_wrapper(ty: Ty) -> bool {
    if let TyKind::Adt(def, _) = ty.kind() {
        def.repr().transparent()
    } else {
        false
    }
}
| 43.68124 | 116 | 0.456279 |
711f35d841be31dde8068c8e91b2272aed3cd6e2
| 5,814 |
//! Transaction Protocol Manager facilitates the process of constructing a Mimblewimble transaction between two parties.
//!
//! The Transaction Protocol Manager implements a protocol to construct a Mimblewimble transaction between two parties
//! , a Sender and a Receiver. In this transaction the Sender is paying the Receiver from their inputs and also paying
//! to as many change outputs as they like. The Receiver will receive a single output from this transaction.
//! The module consists of three main components:
//! - A Builder for the initial Sender state data
//! - A SenderTransactionProtocolManager which manages the Sender's state machine
//! - A ReceiverTransactionProtocolManager which manages the Receiver's state machine.
//!
//! The two state machines run in parallel and will be managed by each respective party. Each state machine has methods
//! to construct and accept the public data messages that need to be transmitted between the parties. The diagram below
//! illustrates the progression of the two state machines and shows where the public data messages are constructed and
//! accepted in each state machine
//!
//! The sequence diagram for the single receiver protocol is:
//!
//! <div class="mermaid">
//! sequenceDiagram
//! participant Sender
//! participant Receiver
//! #
//! activate Sender
//! Sender-->>Sender: initialize transaction
//! deactivate Sender
//! #
//! activate Sender
//! Sender-->>+Receiver: partial tx info
//! Receiver-->>Receiver: validate tx info
//! Receiver-->>Receiver: create new output and sign
//! Receiver-->>-Sender: signed partial transaction
//! deactivate Sender
//! #
//! activate Sender
//! Sender-->>Sender: validate and sign
//! deactivate Sender
//! #
//! alt tx is valid
//! Sender-->>Network: Broadcast transaction
//! else tx is invalid
//! Sender--XSender: Failed
//! end
//! </div>
//!
//! If there are multiple recipients, the protocol is more involved and requires three rounds of communication:
//!
//! <div class="mermaid">
//! sequenceDiagram
//! participant Sender
//! participant Receivers
//! #
//! activate Sender
//! Sender-->>Sender: initialize
//! deactivate Sender
//! #
//! activate Sender
//! Sender-->>+Receivers: [tx_id, amount_i]
//! note left of Sender: CollectingPubKeys
//! note right of Receivers: Initialization
//! Receivers-->>-Sender: [tx_id, Pi, Ri]
//! deactivate Sender
//! #
//! alt invalid
//! Sender--XSender: failed
//! end
//! #
//! activate Sender
//! Sender-->>+Receivers: [tx_id, ΣR, ΣP]
//! note left of Sender: CollectingSignatures
//! note right of Receivers: Signing
//! Receivers-->>Receivers: create output and sign
//! Receivers-->>-Sender: [tx_id, Output_i, s_i]
//! deactivate Sender
//! #
//! note left of Sender: Finalizing
//! alt is_valid()
//! Sender-->>Sender: Finalized
//! else invalid
//! Sender--XSender: Failed
//! end
//! </div>
// #![allow(clippy::op_ref)]
use digest::Digest;
use serde::{Deserialize, Serialize};
use tari_common_types::types::{MessageHash, PrivateKey, PublicKey};
use tari_comms::types::Challenge;
use tari_crypto::{
range_proof::{RangeProofError, REWIND_USER_MESSAGE_LENGTH},
signatures::SchnorrSignatureError,
tari_utilities::byte_array::ByteArray,
};
use thiserror::Error;
use crate::transactions::{tari_amount::*, transaction_components::TransactionError};
pub mod proto;
pub mod recipient;
pub mod sender;
pub mod single_receiver;
pub mod transaction_initializer;
#[derive(Clone, Debug, PartialEq, Error, Deserialize, Serialize)]
pub enum TransactionProtocolError {
#[error("The current state is not yet completed, cannot transition to next state: `{0}`")]
IncompleteStateError(String),
#[error("Validation error: `{0}`")]
ValidationError(String),
#[error("Invalid state transition")]
InvalidTransitionError,
#[error("Invalid state")]
InvalidStateError,
#[error("An error occurred while performing a signature: `{0}`")]
SigningError(#[from] SchnorrSignatureError),
#[error("A signature verification failed: {0}")]
InvalidSignatureError(String),
#[error("An error occurred while building the final transaction: `{0}`")]
TransactionBuildError(#[from] TransactionError),
#[error("The transaction construction broke down due to communication failure")]
TimeoutError,
#[error("An error was produced while constructing a rangeproof: `{0}`")]
RangeProofError(#[from] RangeProofError),
#[error("This set of parameters is currently not supported: `{0}`")]
UnsupportedError(String),
#[error("There has been an error serializing or deserializing this structure")]
SerializationError,
#[error("Conversion error: `{0}`")]
ConversionError(String),
#[error("The script offset private key could not be found")]
ScriptOffsetPrivateKeyNotFound,
}
/// Transaction metadata, including the fee and lock height
#[derive(Debug, Clone, PartialEq, Eq, Default, Deserialize, Serialize)]
pub struct TransactionMetadata {
/// The absolute fee for the transaction
pub fee: MicroTari,
/// The earliest block this transaction can be mined
pub lock_height: u64,
}
#[derive(Debug, Clone)]
pub struct RewindData {
pub rewind_key: PrivateKey,
pub rewind_blinding_key: PrivateKey,
pub proof_message: [u8; REWIND_USER_MESSAGE_LENGTH],
}
/// Convenience function that calculates the challenge for the Schnorr signatures
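///
/// A hedged usage sketch (`sum_public_nonces` is whatever nonce sum the protocol has
/// accumulated so far; the fee value is arbitrary):
///
/// ```ignore
/// let metadata = TransactionMetadata { fee: MicroTari::from(25), lock_height: 0 };
/// let challenge = build_challenge(&sum_public_nonces, &metadata);
/// ```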
pub fn build_challenge(sum_public_nonces: &PublicKey, metadata: &TransactionMetadata) -> MessageHash {
Challenge::new()
.chain(sum_public_nonces.as_bytes())
.chain(&u64::from(metadata.fee).to_le_bytes())
.chain(&metadata.lock_height.to_le_bytes())
.finalize()
.to_vec()
}
| 36.566038 | 120 | 0.707602 |
7a376247de0c659f433dc48eea45d367ea58224f
| 716 |
// Copyright 2016, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use gdk_sys;
use glib::translate::*;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct EventProximity(::Event);
event_wrapper!(EventProximity, GdkEventProximity);
event_subtype!(
EventProximity,
gdk_sys::GDK_PROXIMITY_IN | gdk_sys::GDK_PROXIMITY_OUT
);
impl EventProximity {
pub fn get_time(&self) -> u32 {
self.as_ref().time
}
pub fn get_device(&self) -> Option<::Device> {
unsafe { from_glib_none(self.as_ref().device) }
}
}
| 27.538462 | 95 | 0.705307 |
7126751a1a71702359ee6fdbf69b140f8df9ff43
| 724 |
// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
// !! DO NOT EDIT !!
// To regenerate this file, run `fuzz/generate_proto_tests.py`.
#![no_main]
#![allow(non_snake_case)]
use libfuzzer_sys::fuzz_target;
use manticore::protocol::Command;
use manticore::protocol::wire::ToWire;
use manticore::protocol::borrowed::AsStatic;
use manticore::protocol::borrowed::Borrowed;
use manticore::protocol::get_host_state::GetHostState as C;
type Req<'a> = <C as Command<'a>>::Req;
fuzz_target!(|data: AsStatic<'static, Req<'static>>| {
let mut out = [0u8; 1024];
let _ = Req::borrow(&data).to_wire(&mut &mut out[..]);
});
| 27.846154 | 75 | 0.708564 |
28e2b4096f4b8e8fabb47349ff9c2cef9ad57994
| 730 |
use service;
use clap::App;
pub fn run() {
let matches = App::new("captain")
.version("0.0.1")
.author("kazami")
.arg(clap::Arg::new("input")
.about("the input wasm file")
.required(true)
.index(1))
.subcommand(App::new("run")
.about("run cli tool")
.version("0.0.1"))
.get_matches();
    if let Some(i) = matches.value_of("input") {
        println!("Value for input: {}", i);
    }
    // "input" is a required positional argument, so unwrap() cannot panic here.
    let input = matches.value_of("input").unwrap();
    println!("the path is {}", input);
    let path = String::from(input);
    if matches.subcommand_matches("run").is_some() {
        service::task(path);
    }
}
| 23.548387 | 66 | 0.515068 |
67aa6fed77b8796c9b8cbcace74f9097f8b3e0b2
| 5,515 |
use crate::kube;
use super::{Config, Region, Result};
use shipcat_definitions::Crd;
use std::process::Command;
fn git(args: &[&str]) -> Result<()> {
debug!("git {}", args.join(" "));
let s = Command::new("git").args(args).status()?;
if !s.success() {
bail!("Subprocess failure from git: {}", s.code().unwrap_or(1001))
}
Ok(())
}
/// Fast local git compare of the crd
///
/// Should be pretty safe. Stashes existing work, checks out master, compares,
/// then goes back to previous branch and pops the stash.
///
/// Because this fiddles with git state while running, it is not the default implementation.
pub fn values_vs_git(svc: &str, conf: &Config, region: &Region) -> Result<bool> {
let aftermf = shipcat_filebacked::load_manifest(&svc, conf, region)?;
let after = serde_yaml::to_string(&aftermf)?;
// move git to get before state:
git(&["checkout", "master", "--quiet"])?;
let needs_stash = git(&["diff", "--quiet", "--exit-code"]).is_err() || git(&["diff", "--cached", "--quiet", "--exit-code"]).is_err();
if needs_stash {
git(&["stash", "--quiet"])?;
}
// compute before state
let beforemf = shipcat_filebacked::load_manifest(&svc, conf, region)?;
let before = serde_yaml::to_string(&beforemf)?;
// move git back
if needs_stash {
git(&["stash", "pop", "--quiet"])?;
}
git(&["checkout", "-", "--quiet"])?;
// display diff
shell_diff(&before, &after)
}
/// Fast local git compare of shipcat template
///
/// Because this uses the template in master against local state,
/// we don't resolve secrets for this (would compare equal values anyway).
pub fn template_vs_git(svc: &str, conf: &Config, region: &Region) -> Result<bool> {
use crate::helm;
let mock = true; // both would be equivalent vault reads anyway
let afterpth = Path::new(".").join("after.shipcat.gen.yml");
let _after = helm::direct::template(&svc, ®ion, &conf, None, mock, Some(afterpth.clone()))?;
// move git to get before state:
git(&["checkout", "master", "--quiet"])?;
let needs_stash = git(&["diff", "--quiet", "--exit-code"]).is_err() || git(&["diff", "--cached", "--quiet", "--exit-code"]).is_err();
if needs_stash {
git(&["stash", "--quiet"])?;
}
// compute old state:
let beforepth = Path::new(".").join("before.shipcat.gen.yml");
let _before = helm::direct::template(&svc, ®ion, &conf, None, mock, Some(beforepth.clone()))?;
// move git back
if needs_stash {
git(&["stash", "pop", "--quiet"])?;
}
git(&["checkout", "-", "--quiet"])?;
// display diff
// doesn't reuse shell_diff because we already have files from direct::template
let args = ["-u", "before.shipcat.gen.yml", "after.shipcat.gen.yml"];
debug!("diff {}", args.join(" "));
let s = Command::new("diff").args(&args).status()?;
// cleanup
fs::remove_file(beforepth)?;
fs::remove_file(afterpth)?;
Ok(s.success())
}
use std::path::Path;
use std::fs::{self, File};
use std::io::Write;
/// Diff values using kubectl diff
///
/// Generate crd as we write it and pipe it to `kubectl diff -`
/// Only works on clusters with kubectl 1.13 on the server side, so not available everywhere
pub fn values_vs_kubectl(svc: &str, conf: &Config, region: &Region) -> Result<bool> {
// Generate crd in a temp file:
let mf = shipcat_filebacked::load_manifest(svc, conf, region)?;
let crd = Crd::from(mf);
let encoded = serde_yaml::to_string(&crd)?;
let cfile = format!("{}.shipcat.crd.gen.yml", svc);
let pth = Path::new(".").join(cfile);
debug!("Writing crd for {} to {}", svc, pth.display());
let mut f = File::create(&pth)?;
writeln!(f, "{}", encoded)?;
// shell out to kubectl:
let (out, success) = kube::diff(pth.clone(), ®ion.namespace)?;
println!("{}", out);
// cleanup:
fs::remove_file(pth)?;
Ok(success)
}
/// Diff using template kubectl diff
///
/// Generate template as we write it and pipe it to `kubectl diff -`
/// Only works on clusters with kubectl 1.13 on the server side, so not available everywhere
pub fn template_vs_kubectl(svc: &str, conf: &Config, region: &Region, mock: bool) -> Result<bool> {
use crate::helm;
// Generate template in a temp file:
let tfile = format!("{}.shipcat.tpl.gen.yml", svc);
let pth = Path::new(".").join(tfile);
let version = None; // TODO: override in rolling?
helm::direct::template(&svc, ®ion, &conf, version, mock, Some(pth.clone()))?;
let (out, success) = kube::diff(pth.clone(), ®ion.namespace)?;
println!("{}", out);
// cleanup:
fs::remove_file(pth)?;
Ok(success)
}
// Compare using diff(1)
// difference libraries all seemed to be lacking somewhat
fn shell_diff(before: &str, after: &str) -> Result<bool> {
let beforepth = Path::new(".").join("before.shipcat.gen.yml");
debug!("Writing before to {}", beforepth.display());
let mut f = File::create(&beforepth)?;
writeln!(f, "{}", before)?;
let afterpth = Path::new(".").join("after.shipcat.gen.yml");
debug!("Writing after to {}", afterpth.display());
let mut f = File::create(&afterpth)?;
writeln!(f, "{}", after)?;
let args = ["-u", "before.shipcat.gen.yml", "after.shipcat.gen.yml"];
debug!("diff {}", args.join(" "));
let s = Command::new("diff").args(&args).status()?;
// cleanup
fs::remove_file(beforepth)?;
fs::remove_file(afterpth)?;
Ok(s.success())
}
| 36.045752 | 137 | 0.612511 |
21c31b621835e37275d2eafb54da8bc183d05ea4
| 573 |
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct X { x: int }
pub fn main() {
let _x = match 0i {
_ => X {
x: 0
}
};
}
| 28.65 | 68 | 0.670157 |
fbf3278ad1f7c7b57cdf6ec7b009303f33c8e955
| 62,576 |
//! See `CompletionContext` structure.
use std::iter;
use base_db::SourceDatabaseExt;
use hir::{
HasAttrs, Local, Name, PathResolution, ScopeDef, Semantics, SemanticsScope, Type, TypeInfo,
};
use ide_db::{
active_parameter::ActiveParameter,
base_db::{FilePosition, SourceDatabase},
famous_defs::FamousDefs,
FxHashMap, FxHashSet, RootDatabase,
};
use syntax::{
algo::{find_node_at_offset, non_trivia_sibling},
ast::{self, AttrKind, HasArgList, HasName, NameOrNameRef},
match_ast, AstNode, AstToken, NodeOrToken,
SyntaxKind::{self, *},
SyntaxNode, SyntaxToken, TextRange, TextSize, T,
};
use text_edit::Indel;
use crate::{
patterns::{
determine_location, determine_prev_sibling, is_in_loop_body, is_in_token_of_for_loop,
previous_token, ImmediateLocation, ImmediatePrevSibling,
},
CompletionConfig,
};
const COMPLETION_MARKER: &str = "intellijRulezz";
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(crate) enum PatternRefutability {
Refutable,
Irrefutable,
}
pub(crate) enum Visible {
Yes,
Editable,
No,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(super) enum PathKind {
Expr {
in_block_expr: bool,
in_loop_body: bool,
in_functional_update: bool,
},
Type,
Attr {
kind: AttrKind,
annotated_item_kind: Option<SyntaxKind>,
},
Derive,
/// Path in item position, that is inside an (Assoc)ItemList
Item {
kind: ItemListKind,
},
Pat,
Vis {
has_in_token: bool,
},
Use,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(super) enum ItemListKind {
SourceFile,
Module,
Impl,
Trait,
ExternBlock,
}
#[derive(Debug)]
pub(crate) struct PathCompletionCtx {
/// If this is a call with () already there (or {} in case of record patterns)
pub(super) has_call_parens: bool,
/// If this has a macro call bang !
pub(super) has_macro_bang: bool,
    /// Whether this path starts with a `::`.
pub(super) is_absolute_path: bool,
/// The qualifier of the current path if it exists.
pub(super) qualifier: Option<PathQualifierCtx>,
#[allow(dead_code)]
// FIXME: use this
/// The parent of the path we are completing.
pub(super) parent: Option<ast::Path>,
pub(super) kind: PathKind,
/// Whether the path segment has type args or not.
pub(super) has_type_args: bool,
}
#[derive(Debug)]
pub(crate) struct PathQualifierCtx {
pub(crate) path: ast::Path,
pub(crate) resolution: Option<PathResolution>,
/// Whether this path consists solely of `super` segments
pub(crate) is_super_chain: bool,
/// Whether the qualifier comes from a use tree parent or not
pub(crate) use_tree_parent: bool,
    /// Whether the qualifier is a bare `<_>` (the infer type).
pub(crate) is_infer_qualifier: bool,
}
#[derive(Debug)]
pub(super) struct PatternContext {
pub(super) refutability: PatternRefutability,
pub(super) param_ctx: Option<(ast::ParamList, ast::Param, ParamKind)>,
pub(super) has_type_ascription: bool,
pub(super) parent_pat: Option<ast::Pat>,
pub(super) ref_token: Option<SyntaxToken>,
pub(super) mut_token: Option<SyntaxToken>,
}
#[derive(Debug)]
pub(super) struct LifetimeContext {
pub(super) lifetime: Option<ast::Lifetime>,
pub(super) kind: LifetimeKind,
}
#[derive(Debug)]
pub(super) enum LifetimeKind {
LifetimeParam { is_decl: bool, param: ast::LifetimeParam },
Lifetime,
LabelRef,
LabelDef,
}
#[derive(Debug)]
pub(super) struct NameContext {
#[allow(dead_code)]
pub(super) name: Option<ast::Name>,
pub(super) kind: NameKind,
}
#[derive(Debug)]
#[allow(dead_code)]
pub(super) enum NameKind {
Const,
ConstParam,
Enum,
Function,
IdentPat,
MacroDef,
MacroRules,
/// Fake node
Module(ast::Module),
RecordField,
Rename,
SelfParam,
Static,
Struct,
Trait,
TypeAlias,
TypeParam,
Union,
Variant,
}
#[derive(Debug)]
pub(super) struct NameRefContext {
/// NameRef syntax in the original file
pub(super) nameref: Option<ast::NameRef>,
pub(super) dot_access: Option<DotAccess>,
pub(super) path_ctx: Option<PathCompletionCtx>,
}
#[derive(Debug)]
pub(super) enum IdentContext {
Name(NameContext),
NameRef(NameRefContext),
Lifetime(LifetimeContext),
/// Original token, fake token
String {
original: ast::String,
expanded: Option<ast::String>,
},
UnexpandedAttrTT {
fake_attribute_under_caret: Option<ast::Attr>,
},
}
#[derive(Debug)]
pub(super) enum DotAccess {
Field {
receiver: Option<ast::Expr>,
/// True if the receiver is an integer and there is no ident in the original file after it yet
/// like `0.$0`
receiver_is_ambiguous_float_literal: bool,
},
Method {
receiver: Option<ast::Expr>,
has_parens: bool,
},
}
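// Example of the ambiguous float case (illustrative input): in `let _ = 1.$0;` the
// receiver text `1.` parses as a float literal ending in `.`, but the user most
// likely meant the integer `1` followed by a field or method access, so
// `receiver_is_ambiguous_float_literal` is set for `DotAccess::Field` here.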
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum ParamKind {
Function(ast::Fn),
Closure(ast::ClosureExpr),
}
/// `CompletionContext` is created early during completion to figure out where
/// exactly the cursor is, syntax-wise.
#[derive(Debug)]
pub(crate) struct CompletionContext<'a> {
pub(super) sema: Semantics<'a, RootDatabase>,
pub(super) scope: SemanticsScope<'a>,
pub(super) db: &'a RootDatabase,
pub(super) config: &'a CompletionConfig,
pub(super) position: FilePosition,
/// The token before the cursor, in the original file.
pub(super) original_token: SyntaxToken,
/// The token before the cursor, in the macro-expanded file.
pub(super) token: SyntaxToken,
/// The crate of the current file.
pub(super) krate: hir::Crate,
/// The module of the `scope`.
pub(super) module: hir::Module,
/// The expected name of what we are completing.
/// This is usually the parameter name of the function argument we are completing.
pub(super) expected_name: Option<NameOrNameRef>,
/// The expected type of what we are completing.
pub(super) expected_type: Option<Type>,
/// The parent function of the cursor position if it exists.
pub(super) function_def: Option<ast::Fn>,
/// The parent impl of the cursor position if it exists.
pub(super) impl_def: Option<ast::Impl>,
/// Are we completing inside a let statement with a missing semicolon?
pub(super) incomplete_let: bool,
pub(super) completion_location: Option<ImmediateLocation>,
pub(super) prev_sibling: Option<ImmediatePrevSibling>,
pub(super) previous_token: Option<SyntaxToken>,
pub(super) ident_ctx: IdentContext,
pub(super) pattern_ctx: Option<PatternContext>,
pub(super) existing_derives: FxHashSet<hir::Macro>,
pub(super) locals: FxHashMap<Name, Local>,
}
impl<'a> CompletionContext<'a> {
/// The range of the identifier that is being completed.
pub(crate) fn source_range(&self) -> TextRange {
// check kind of macro-expanded token, but use range of original token
let kind = self.token.kind();
match kind {
CHAR => {
// assume we are completing a lifetime but the user has only typed the '
cov_mark::hit!(completes_if_lifetime_without_idents);
TextRange::at(self.original_token.text_range().start(), TextSize::from(1))
}
IDENT | LIFETIME_IDENT | UNDERSCORE => self.original_token.text_range(),
_ if kind.is_keyword() => self.original_token.text_range(),
_ => TextRange::empty(self.position.offset),
}
}
pub(crate) fn previous_token_is(&self, kind: SyntaxKind) -> bool {
self.previous_token.as_ref().map_or(false, |tok| tok.kind() == kind)
}
pub(crate) fn famous_defs(&self) -> FamousDefs {
FamousDefs(&self.sema, self.krate)
}
pub(super) fn nameref_ctx(&self) -> Option<&NameRefContext> {
match &self.ident_ctx {
IdentContext::NameRef(it) => Some(it),
_ => None,
}
}
pub(super) fn name_ctx(&self) -> Option<&NameContext> {
match &self.ident_ctx {
IdentContext::Name(it) => Some(it),
_ => None,
}
}
pub(super) fn lifetime_ctx(&self) -> Option<&LifetimeContext> {
match &self.ident_ctx {
IdentContext::Lifetime(it) => Some(it),
_ => None,
}
}
pub(crate) fn dot_receiver(&self) -> Option<&ast::Expr> {
match self.nameref_ctx() {
Some(NameRefContext {
dot_access:
Some(DotAccess::Method { receiver, .. } | DotAccess::Field { receiver, .. }),
..
}) => receiver.as_ref(),
_ => None,
}
}
pub(crate) fn has_dot_receiver(&self) -> bool {
self.dot_receiver().is_some()
}
pub(crate) fn expects_assoc_item(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::Trait | ImmediateLocation::Impl))
}
pub(crate) fn expects_variant(&self) -> bool {
matches!(self.name_ctx(), Some(NameContext { kind: NameKind::Variant, .. }))
}
pub(crate) fn expects_non_trait_assoc_item(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::Impl))
}
pub(crate) fn expects_item(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::ItemList))
}
// FIXME: This shouldn't exist
pub(crate) fn expects_generic_arg(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::GenericArgList(_)))
}
pub(crate) fn has_block_expr_parent(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::StmtList))
}
pub(crate) fn expects_ident_ref_expr(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::RefExpr))
}
pub(crate) fn expect_field(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::TupleField))
|| matches!(self.name_ctx(), Some(NameContext { kind: NameKind::RecordField, .. }))
}
/// Whether the cursor is right after a trait or impl header.
/// trait Foo ident$0
// FIXME: This probably shouldn't exist
pub(crate) fn has_unfinished_impl_or_trait_prev_sibling(&self) -> bool {
matches!(
self.prev_sibling,
Some(ImmediatePrevSibling::ImplDefType | ImmediatePrevSibling::TraitDefName)
)
}
// FIXME: This probably shouldn't exist
pub(crate) fn has_impl_prev_sibling(&self) -> bool {
matches!(self.prev_sibling, Some(ImmediatePrevSibling::ImplDefType))
}
pub(crate) fn has_visibility_prev_sibling(&self) -> bool {
matches!(self.prev_sibling, Some(ImmediatePrevSibling::Visibility))
}
pub(crate) fn after_if(&self) -> bool {
matches!(self.prev_sibling, Some(ImmediatePrevSibling::IfExpr))
}
// FIXME: This shouldn't exist
pub(crate) fn is_path_disallowed(&self) -> bool {
self.previous_token_is(T![unsafe])
|| matches!(
self.prev_sibling,
Some(ImmediatePrevSibling::Attribute | ImmediatePrevSibling::Visibility)
)
|| matches!(
self.completion_location,
Some(ImmediateLocation::RecordPat(_) | ImmediateLocation::RecordExpr(_))
)
|| matches!(
self.name_ctx(),
Some(NameContext { kind: NameKind::Module(_) | NameKind::Rename, .. })
)
}
pub(crate) fn path_context(&self) -> Option<&PathCompletionCtx> {
self.nameref_ctx().and_then(|ctx| ctx.path_ctx.as_ref())
}
pub(crate) fn expects_expression(&self) -> bool {
matches!(self.path_context(), Some(PathCompletionCtx { kind: PathKind::Expr { .. }, .. }))
}
pub(crate) fn is_non_trivial_path(&self) -> bool {
matches!(
self.path_context(),
Some(
PathCompletionCtx { is_absolute_path: true, .. }
| PathCompletionCtx { qualifier: Some(_), .. }
)
)
}
pub(crate) fn path_qual(&self) -> Option<&ast::Path> {
self.path_context().and_then(|it| it.qualifier.as_ref().map(|it| &it.path))
}
pub(crate) fn path_kind(&self) -> Option<PathKind> {
self.path_context().map(|it| it.kind)
}
/// Checks if an item is visible and not `doc(hidden)` at the completion site.
pub(crate) fn is_visible<I>(&self, item: &I) -> Visible
where
I: hir::HasVisibility + hir::HasAttrs + hir::HasCrate + Copy,
{
self.is_visible_impl(&item.visibility(self.db), &item.attrs(self.db), item.krate(self.db))
}
pub(crate) fn is_scope_def_hidden(&self, scope_def: ScopeDef) -> bool {
if let (Some(attrs), Some(krate)) = (scope_def.attrs(self.db), scope_def.krate(self.db)) {
return self.is_doc_hidden(&attrs, krate);
}
false
}
/// Check if an item is `#[doc(hidden)]`.
pub(crate) fn is_item_hidden(&self, item: &hir::ItemInNs) -> bool {
let attrs = item.attrs(self.db);
let krate = item.krate(self.db);
match (attrs, krate) {
(Some(attrs), Some(krate)) => self.is_doc_hidden(&attrs, krate),
_ => false,
}
}
/// Whether the given trait is an operator trait or not.
pub(crate) fn is_ops_trait(&self, trait_: hir::Trait) -> bool {
match trait_.attrs(self.db).lang() {
Some(lang) => OP_TRAIT_LANG_NAMES.contains(&lang.as_str()),
None => false,
}
}
/// Returns the traits in scope, with the [`Drop`] trait removed.
pub(crate) fn traits_in_scope(&self) -> hir::VisibleTraits {
let mut traits_in_scope = self.scope.visible_traits();
if let Some(drop) = self.famous_defs().core_ops_Drop() {
traits_in_scope.0.remove(&drop.into());
}
traits_in_scope
}
/// A version of [`SemanticsScope::process_all_names`] that filters out `#[doc(hidden)]` items.
pub(crate) fn process_all_names(&self, f: &mut dyn FnMut(Name, ScopeDef)) {
let _p = profile::span("CompletionContext::process_all_names");
self.scope.process_all_names(&mut |name, def| {
if self.is_scope_def_hidden(def) {
return;
}
f(name, def);
});
}
pub(crate) fn process_all_names_raw(&self, f: &mut dyn FnMut(Name, ScopeDef)) {
let _p = profile::span("CompletionContext::process_all_names_raw");
self.scope.process_all_names(&mut |name, def| f(name, def));
}
fn is_visible_impl(
&self,
vis: &hir::Visibility,
attrs: &hir::Attrs,
defining_crate: hir::Crate,
) -> Visible {
if !vis.is_visible_from(self.db, self.module.into()) {
if !self.config.enable_private_editable {
return Visible::No;
}
// If the definition location is editable, also show private items
let root_file = defining_crate.root_file(self.db);
let source_root_id = self.db.file_source_root(root_file);
let is_editable = !self.db.source_root(source_root_id).is_library;
return if is_editable { Visible::Editable } else { Visible::No };
}
if self.is_doc_hidden(attrs, defining_crate) {
Visible::No
} else {
Visible::Yes
}
}
fn is_doc_hidden(&self, attrs: &hir::Attrs, defining_crate: hir::Crate) -> bool {
// `doc(hidden)` items are only completed within the defining crate.
self.krate != defining_crate && attrs.has_doc_hidden()
}
}
// CompletionContext construction
impl<'a> CompletionContext<'a> {
pub(super) fn new(
db: &'a RootDatabase,
position @ FilePosition { file_id, offset }: FilePosition,
config: &'a CompletionConfig,
) -> Option<CompletionContext<'a>> {
let _p = profile::span("CompletionContext::new");
let sema = Semantics::new(db);
let original_file = sema.parse(file_id);
// Insert a fake ident to get a valid parse tree. We will use this file
// to determine context, though the original_file will be used for
// actual completion.
let file_with_fake_ident = {
let parse = db.parse(file_id);
let edit = Indel::insert(offset, COMPLETION_MARKER.to_string());
parse.reparse(&edit).tree()
};
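        // Illustration (hypothetical input): for `fn f() { foo.$0 }` the speculative
        // file becomes `fn f() { foo.intellijRulezz }`, which parses as a normal
        // field access and gives the classification code below a well-formed tree,
        // while edits are still computed against the original file.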
let fake_ident_token =
file_with_fake_ident.syntax().token_at_offset(offset).right_biased()?;
let original_token = original_file.syntax().token_at_offset(offset).left_biased()?;
let token = sema.descend_into_macros_single(original_token.clone());
let scope = sema.scope_at_offset(&token.parent()?, offset)?;
let krate = scope.krate();
let module = scope.module();
let mut locals = FxHashMap::default();
scope.process_all_names(&mut |name, scope| {
if let ScopeDef::Local(local) = scope {
locals.insert(name, local);
}
});
let mut ctx = CompletionContext {
sema,
scope,
db,
config,
position,
original_token,
token,
krate,
module,
expected_name: None,
expected_type: None,
function_def: None,
impl_def: None,
incomplete_let: false,
completion_location: None,
prev_sibling: None,
previous_token: None,
// dummy value, will be overwritten
ident_ctx: IdentContext::UnexpandedAttrTT { fake_attribute_under_caret: None },
pattern_ctx: None,
existing_derives: Default::default(),
locals,
};
ctx.expand_and_fill(
original_file.syntax().clone(),
file_with_fake_ident.syntax().clone(),
offset,
fake_ident_token,
)?;
Some(ctx)
}
/// Expand attributes and macro calls at the current cursor position for both the original file
    /// and fake file repeatedly. As soon as one of the two expansions fails, we stop so the original
/// and speculative states stay in sync.
fn expand_and_fill(
&mut self,
mut original_file: SyntaxNode,
mut speculative_file: SyntaxNode,
mut offset: TextSize,
mut fake_ident_token: SyntaxToken,
) -> Option<()> {
let _p = profile::span("CompletionContext::expand_and_fill");
let mut derive_ctx = None;
'expansion: loop {
let parent_item =
|item: &ast::Item| item.syntax().ancestors().skip(1).find_map(ast::Item::cast);
let ancestor_items = iter::successors(
Option::zip(
find_node_at_offset::<ast::Item>(&original_file, offset),
find_node_at_offset::<ast::Item>(&speculative_file, offset),
),
|(a, b)| parent_item(a).zip(parent_item(b)),
);
// first try to expand attributes as these are always the outermost macro calls
'ancestors: for (actual_item, item_with_fake_ident) in ancestor_items {
match (
self.sema.expand_attr_macro(&actual_item),
self.sema.speculative_expand_attr_macro(
&actual_item,
&item_with_fake_ident,
fake_ident_token.clone(),
),
) {
// maybe parent items have attributes, so continue walking the ancestors
(None, None) => continue 'ancestors,
// successful expansions
(Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => {
let new_offset = fake_mapped_token.text_range().start();
if new_offset > actual_expansion.text_range().end() {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
break 'expansion;
}
original_file = actual_expansion;
speculative_file = fake_expansion;
fake_ident_token = fake_mapped_token;
offset = new_offset;
continue 'expansion;
}
// exactly one expansion failed, inconsistent state so stop expanding completely
_ => break 'expansion,
}
}
// No attributes have been expanded, so look for macro_call! token trees or derive token trees
let orig_tt = match find_node_at_offset::<ast::TokenTree>(&original_file, offset) {
Some(it) => it,
None => break 'expansion,
};
let spec_tt = match find_node_at_offset::<ast::TokenTree>(&speculative_file, offset) {
Some(it) => it,
None => break 'expansion,
};
// Expand pseudo-derive expansion
if let (Some(orig_attr), Some(spec_attr)) = (
orig_tt.syntax().parent().and_then(ast::Meta::cast).and_then(|it| it.parent_attr()),
spec_tt.syntax().parent().and_then(ast::Meta::cast).and_then(|it| it.parent_attr()),
) {
if let (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) = (
self.sema.expand_derive_as_pseudo_attr_macro(&orig_attr),
self.sema.speculative_expand_derive_as_pseudo_attr_macro(
&orig_attr,
&spec_attr,
fake_ident_token.clone(),
),
) {
derive_ctx = Some((
actual_expansion,
fake_expansion,
fake_mapped_token.text_range().start(),
orig_attr,
));
}
// at this point we won't have any more successful expansions, so stop
break 'expansion;
}
// Expand fn-like macro calls
if let (Some(actual_macro_call), Some(macro_call_with_fake_ident)) = (
orig_tt.syntax().ancestors().find_map(ast::MacroCall::cast),
spec_tt.syntax().ancestors().find_map(ast::MacroCall::cast),
) {
let mac_call_path0 = actual_macro_call.path().as_ref().map(|s| s.syntax().text());
let mac_call_path1 =
macro_call_with_fake_ident.path().as_ref().map(|s| s.syntax().text());
// inconsistent state, stop expanding
if mac_call_path0 != mac_call_path1 {
break 'expansion;
}
let speculative_args = match macro_call_with_fake_ident.token_tree() {
Some(tt) => tt,
None => break 'expansion,
};
match (
self.sema.expand(&actual_macro_call),
self.sema.speculative_expand(
&actual_macro_call,
&speculative_args,
fake_ident_token.clone(),
),
) {
// successful expansions
(Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => {
let new_offset = fake_mapped_token.text_range().start();
if new_offset > actual_expansion.text_range().end() {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
break 'expansion;
}
original_file = actual_expansion;
speculative_file = fake_expansion;
fake_ident_token = fake_mapped_token;
offset = new_offset;
continue 'expansion;
}
                    // at least one expansion failed, we won't have anything to expand from this point
// onwards so break out
_ => break 'expansion,
}
}
// none of our states have changed so stop the loop
break 'expansion;
}
self.fill(&original_file, speculative_file, offset, derive_ctx)
}
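    // Sketch of the cascade above on a hypothetical input: completing at `$0` in
    //
    //     #[some_attr]
    //     fn f() { outer!(inner!($0)) }
    //
    // first expands the attribute macro (original and speculative files in
    // lock-step), then repeatedly expands the fn-like calls `outer!` and `inner!`,
    // remapping the completion offset into each expansion, until one expansion
    // fails or nothing changes anymore.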
/// Calculate the expected type and name of the cursor position.
fn expected_type_and_name(&self) -> (Option<Type>, Option<NameOrNameRef>) {
let mut node = match self.token.parent() {
Some(it) => it,
None => return (None, None),
};
loop {
break match_ast! {
match node {
ast::LetStmt(it) => {
cov_mark::hit!(expected_type_let_with_leading_char);
cov_mark::hit!(expected_type_let_without_leading_char);
let ty = it.pat()
.and_then(|pat| self.sema.type_of_pat(&pat))
.or_else(|| it.initializer().and_then(|it| self.sema.type_of_expr(&it)))
.map(TypeInfo::original);
let name = match it.pat() {
Some(ast::Pat::IdentPat(ident)) => ident.name().map(NameOrNameRef::Name),
Some(_) | None => None,
};
(ty, name)
},
ast::LetExpr(it) => {
cov_mark::hit!(expected_type_if_let_without_leading_char);
let ty = it.pat()
.and_then(|pat| self.sema.type_of_pat(&pat))
.or_else(|| it.expr().and_then(|it| self.sema.type_of_expr(&it)))
.map(TypeInfo::original);
(ty, None)
},
ast::ArgList(_) => {
cov_mark::hit!(expected_type_fn_param);
ActiveParameter::at_token(
&self.sema,
self.token.clone(),
).map(|ap| {
let name = ap.ident().map(NameOrNameRef::Name);
let ty = if has_ref(&self.token) {
cov_mark::hit!(expected_type_fn_param_ref);
ap.ty.remove_ref()
} else {
Some(ap.ty)
};
(ty, name)
})
.unwrap_or((None, None))
},
ast::RecordExprFieldList(it) => {
// wouldn't try {} be nice...
(|| {
if self.token.kind() == T![..]
|| self.token.prev_token().map(|t| t.kind()) == Some(T![..])
{
cov_mark::hit!(expected_type_struct_func_update);
let record_expr = it.syntax().parent().and_then(ast::RecordExpr::cast)?;
let ty = self.sema.type_of_expr(&record_expr.into())?;
Some((
Some(ty.original),
None
))
} else {
cov_mark::hit!(expected_type_struct_field_without_leading_char);
let expr_field = self.token.prev_sibling_or_token()?
.into_node()
.and_then(ast::RecordExprField::cast)?;
let (_, _, ty) = self.sema.resolve_record_field(&expr_field)?;
Some((
Some(ty),
expr_field.field_name().map(NameOrNameRef::NameRef),
))
}
})().unwrap_or((None, None))
},
ast::RecordExprField(it) => {
if let Some(expr) = it.expr() {
cov_mark::hit!(expected_type_struct_field_with_leading_char);
(
self.sema.type_of_expr(&expr).map(TypeInfo::original),
it.field_name().map(NameOrNameRef::NameRef),
)
} else {
cov_mark::hit!(expected_type_struct_field_followed_by_comma);
let ty = self.sema.resolve_record_field(&it)
.map(|(_, _, ty)| ty);
(
ty,
it.field_name().map(NameOrNameRef::NameRef),
)
}
},
ast::MatchExpr(it) => {
cov_mark::hit!(expected_type_match_arm_without_leading_char);
let ty = it.expr().and_then(|e| self.sema.type_of_expr(&e)).map(TypeInfo::original);
(ty, None)
},
ast::IfExpr(it) => {
let ty = it.condition()
.and_then(|e| self.sema.type_of_expr(&e))
.map(TypeInfo::original);
(ty, None)
},
ast::IdentPat(it) => {
cov_mark::hit!(expected_type_if_let_with_leading_char);
cov_mark::hit!(expected_type_match_arm_with_leading_char);
let ty = self.sema.type_of_pat(&ast::Pat::from(it)).map(TypeInfo::original);
(ty, None)
},
ast::Fn(it) => {
cov_mark::hit!(expected_type_fn_ret_with_leading_char);
cov_mark::hit!(expected_type_fn_ret_without_leading_char);
let def = self.sema.to_def(&it);
(def.map(|def| def.ret_type(self.db)), None)
},
ast::ClosureExpr(it) => {
let ty = self.sema.type_of_expr(&it.into());
ty.and_then(|ty| ty.original.as_callable(self.db))
.map(|c| (Some(c.return_type()), None))
.unwrap_or((None, None))
},
ast::ParamList(_) => (None, None),
ast::Stmt(_) => (None, None),
ast::Item(_) => (None, None),
_ => {
match node.parent() {
Some(n) => {
node = n;
continue;
},
None => (None, None),
}
},
}
};
}
}
    /// Fill the completion context; this is what does the semantic reasoning about the surrounding context
/// of the completion location.
fn fill(
&mut self,
original_file: &SyntaxNode,
file_with_fake_ident: SyntaxNode,
offset: TextSize,
derive_ctx: Option<(SyntaxNode, SyntaxNode, TextSize, ast::Attr)>,
) -> Option<()> {
let fake_ident_token = file_with_fake_ident.token_at_offset(offset).right_biased().unwrap();
let syntax_element = NodeOrToken::Token(fake_ident_token);
if is_in_token_of_for_loop(syntax_element.clone()) {
// for pat $0
// there is nothing to complete here except `in` keyword
// don't bother populating the context
// FIXME: the completion calculations should end up good enough
// such that this special case becomes unnecessary
return None;
}
self.previous_token = previous_token(syntax_element.clone());
self.incomplete_let =
syntax_element.ancestors().take(6).find_map(ast::LetStmt::cast).map_or(false, |it| {
it.syntax().text_range().end() == syntax_element.text_range().end()
});
(self.expected_type, self.expected_name) = self.expected_type_and_name();
// Overwrite the path kind for derives
if let Some((original_file, file_with_fake_ident, offset, origin_attr)) = derive_ctx {
self.existing_derives = self
.sema
.resolve_derive_macro(&origin_attr)
.into_iter()
.flatten()
.flatten()
.collect();
if let Some(ast::NameLike::NameRef(name_ref)) =
find_node_at_offset(&file_with_fake_ident, offset)
{
let parent = name_ref.syntax().parent()?;
let (mut nameref_ctx, _) =
Self::classify_name_ref(&self.sema, &original_file, name_ref, parent);
if let Some(path_ctx) = &mut nameref_ctx.path_ctx {
path_ctx.kind = PathKind::Derive;
}
self.ident_ctx = IdentContext::NameRef(nameref_ctx);
return Some(());
}
return None;
}
let name_like = match find_node_at_offset(&file_with_fake_ident, offset) {
Some(it) => it,
None => {
if let Some(original) = ast::String::cast(self.original_token.clone()) {
self.ident_ctx = IdentContext::String {
original,
expanded: ast::String::cast(self.token.clone()),
};
} else {
// Fix up trailing whitespace problem
// #[attr(foo = $0
let token = if self.token.kind() == SyntaxKind::WHITESPACE {
self.previous_token.as_ref()?
} else {
&self.token
};
let p = token.parent()?;
if p.kind() == SyntaxKind::TOKEN_TREE
&& p.ancestors().any(|it| it.kind() == SyntaxKind::META)
{
self.ident_ctx = IdentContext::UnexpandedAttrTT {
fake_attribute_under_caret: syntax_element
.ancestors()
.find_map(ast::Attr::cast),
};
} else {
return None;
}
}
return Some(());
}
};
self.completion_location =
determine_location(&self.sema, original_file, offset, &name_like);
self.prev_sibling = determine_prev_sibling(&name_like);
self.impl_def = self
.sema
.token_ancestors_with_macros(self.token.clone())
.take_while(|it| it.kind() != SOURCE_FILE && it.kind() != MODULE)
.find_map(ast::Impl::cast);
self.function_def = self
.sema
.token_ancestors_with_macros(self.token.clone())
.take_while(|it| it.kind() != SOURCE_FILE && it.kind() != MODULE)
.find_map(ast::Fn::cast);
match name_like {
ast::NameLike::Lifetime(lifetime) => {
self.ident_ctx = IdentContext::Lifetime(Self::classify_lifetime(
&self.sema,
original_file,
lifetime,
)?);
}
ast::NameLike::NameRef(name_ref) => {
let parent = name_ref.syntax().parent()?;
let (nameref_ctx, pat_ctx) =
Self::classify_name_ref(&self.sema, &original_file, name_ref, parent);
self.ident_ctx = IdentContext::NameRef(nameref_ctx);
self.pattern_ctx = pat_ctx;
}
ast::NameLike::Name(name) => {
let (name_ctx, pat_ctx) = Self::classify_name(&self.sema, original_file, name)?;
self.pattern_ctx = pat_ctx;
self.ident_ctx = IdentContext::Name(name_ctx);
}
}
Some(())
}
fn classify_lifetime(
_sema: &Semantics<RootDatabase>,
original_file: &SyntaxNode,
lifetime: ast::Lifetime,
) -> Option<LifetimeContext> {
let parent = lifetime.syntax().parent()?;
if parent.kind() == ERROR {
return None;
}
let kind = match_ast! {
match parent {
ast::LifetimeParam(param) => LifetimeKind::LifetimeParam {
is_decl: param.lifetime().as_ref() == Some(&lifetime),
param
},
ast::BreakExpr(_) => LifetimeKind::LabelRef,
ast::ContinueExpr(_) => LifetimeKind::LabelRef,
ast::Label(_) => LifetimeKind::LabelDef,
_ => LifetimeKind::Lifetime,
}
};
let lifetime = find_node_at_offset(&original_file, lifetime.syntax().text_range().start());
Some(LifetimeContext { lifetime, kind })
}
fn classify_name(
_sema: &Semantics<RootDatabase>,
original_file: &SyntaxNode,
name: ast::Name,
) -> Option<(NameContext, Option<PatternContext>)> {
let parent = name.syntax().parent()?;
let mut pat_ctx = None;
let kind = match_ast! {
match parent {
ast::Const(_) => NameKind::Const,
ast::ConstParam(_) => NameKind::ConstParam,
ast::Enum(_) => NameKind::Enum,
ast::Fn(_) => NameKind::Function,
ast::IdentPat(bind_pat) => {
let is_name_in_field_pat = bind_pat
.syntax()
.parent()
.and_then(ast::RecordPatField::cast)
.map_or(false, |pat_field| pat_field.name_ref().is_none());
if !is_name_in_field_pat {
pat_ctx = Some(pattern_context_for(original_file, bind_pat.into()));
}
NameKind::IdentPat
},
ast::MacroDef(_) => NameKind::MacroDef,
ast::MacroRules(_) => NameKind::MacroRules,
ast::Module(module) => NameKind::Module(module),
ast::RecordField(_) => NameKind::RecordField,
ast::Rename(_) => NameKind::Rename,
ast::SelfParam(_) => NameKind::SelfParam,
ast::Static(_) => NameKind::Static,
ast::Struct(_) => NameKind::Struct,
ast::Trait(_) => NameKind::Trait,
ast::TypeAlias(_) => NameKind::TypeAlias,
ast::TypeParam(_) => NameKind::TypeParam,
ast::Union(_) => NameKind::Union,
ast::Variant(_) => NameKind::Variant,
_ => return None,
}
};
let name = find_node_at_offset(&original_file, name.syntax().text_range().start());
Some((NameContext { name, kind }, pat_ctx))
}
fn classify_name_ref(
sema: &Semantics<RootDatabase>,
original_file: &SyntaxNode,
name_ref: ast::NameRef,
parent: SyntaxNode,
) -> (NameRefContext, Option<PatternContext>) {
let nameref = find_node_at_offset(&original_file, name_ref.syntax().text_range().start());
let mut nameref_ctx = NameRefContext { dot_access: None, path_ctx: None, nameref };
let segment = match_ast! {
match parent {
ast::PathSegment(segment) => segment,
ast::FieldExpr(field) => {
let receiver = find_in_original_file(field.expr(), original_file);
let receiver_is_ambiguous_float_literal = match &receiver {
Some(ast::Expr::Literal(l)) => matches! {
l.kind(),
ast::LiteralKind::FloatNumber { .. } if l.syntax().last_token().map_or(false, |it| it.text().ends_with('.'))
},
_ => false,
};
nameref_ctx.dot_access = Some(DotAccess::Field { receiver, receiver_is_ambiguous_float_literal });
return (nameref_ctx, None);
},
ast::MethodCallExpr(method) => {
nameref_ctx.dot_access = Some(
DotAccess::Method {
receiver: find_in_original_file(method.receiver(), original_file),
has_parens: method.arg_list().map_or(false, |it| it.l_paren_token().is_some())
}
);
return (nameref_ctx, None);
},
_ => return (nameref_ctx, None),
}
};
let path = segment.parent_path();
let mut path_ctx = PathCompletionCtx {
has_call_parens: false,
has_macro_bang: false,
is_absolute_path: false,
qualifier: None,
parent: path.parent_path(),
kind: PathKind::Item { kind: ItemListKind::SourceFile },
has_type_args: false,
};
let mut pat_ctx = None;
let is_in_block = |it: &SyntaxNode| {
it.parent()
.map(|node| {
ast::ExprStmt::can_cast(node.kind()) || ast::StmtList::can_cast(node.kind())
})
.unwrap_or(false)
};
let is_in_func_update = |it: &SyntaxNode| {
it.parent().map_or(false, |it| ast::RecordExprFieldList::can_cast(it.kind()))
};
let kind = path.syntax().ancestors().find_map(|it| {
            // using Option<Option<PathKind>> as extra control flow
let kind = match_ast! {
match it {
ast::PathType(_) => Some(PathKind::Type),
ast::PathExpr(it) => {
path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));
let in_block_expr = is_in_block(it.syntax());
let in_loop_body = is_in_loop_body(it.syntax());
let in_functional_update = is_in_func_update(it.syntax());
Some(PathKind::Expr { in_block_expr, in_loop_body, in_functional_update })
},
ast::TupleStructPat(it) => {
path_ctx.has_call_parens = true;
pat_ctx = Some(pattern_context_for(original_file, it.into()));
Some(PathKind::Pat)
},
ast::RecordPat(it) => {
path_ctx.has_call_parens = true;
pat_ctx = Some(pattern_context_for(original_file, it.into()));
Some(PathKind::Pat)
},
ast::PathPat(it) => {
pat_ctx = Some(pattern_context_for(original_file, it.into()));
Some(PathKind::Pat)
},
ast::MacroCall(it) => {
path_ctx.has_macro_bang = it.excl_token().is_some();
let parent = it.syntax().parent();
match parent.as_ref().map(|it| it.kind()) {
Some(SyntaxKind::MACRO_PAT) => Some(PathKind::Pat),
Some(SyntaxKind::MACRO_TYPE) => Some(PathKind::Type),
Some(SyntaxKind::ITEM_LIST) => Some(PathKind::Item { kind: ItemListKind::Module }),
Some(SyntaxKind::ASSOC_ITEM_LIST) => Some(PathKind::Item { kind: match parent.and_then(|it| it.parent()).map(|it| it.kind()) {
Some(SyntaxKind::TRAIT) => ItemListKind::Trait,
Some(SyntaxKind::IMPL) => ItemListKind::Impl,
_ => return Some(None),
} }),
Some(SyntaxKind::EXTERN_ITEM_LIST) => Some(PathKind::Item { kind: ItemListKind::ExternBlock }),
Some(SyntaxKind::SOURCE_FILE) => Some(PathKind::Item { kind: ItemListKind::SourceFile }),
_ => {
return Some(parent.and_then(ast::MacroExpr::cast).map(|it| {
let in_loop_body = is_in_loop_body(it.syntax());
let in_block_expr = is_in_block(it.syntax());
let in_functional_update = is_in_func_update(it.syntax());
PathKind::Expr { in_block_expr, in_loop_body, in_functional_update }
}));
},
}
},
ast::Meta(meta) => (|| {
let attr = meta.parent_attr()?;
let kind = attr.kind();
let attached = attr.syntax().parent()?;
let is_trailing_outer_attr = kind != AttrKind::Inner
&& non_trivia_sibling(attr.syntax().clone().into(), syntax::Direction::Next).is_none();
let annotated_item_kind = if is_trailing_outer_attr {
None
} else {
Some(attached.kind())
};
Some(PathKind::Attr {
kind,
annotated_item_kind,
})
})(),
ast::Visibility(it) => Some(PathKind::Vis { has_in_token: it.in_token().is_some() }),
ast::UseTree(_) => Some(PathKind::Use),
ast::ItemList(_) => Some(PathKind::Item { kind: ItemListKind::Module }),
ast::AssocItemList(it) => Some(PathKind::Item { kind: {
match it.syntax().parent()?.kind() {
SyntaxKind::TRAIT => ItemListKind::Trait,
SyntaxKind::IMPL => ItemListKind::Impl,
_ => return None,
}
}}),
ast::ExternItemList(_) => Some(PathKind::Item { kind: ItemListKind::ExternBlock }),
ast::SourceFile(_) => Some(PathKind::Item { kind: ItemListKind::SourceFile }),
_ => return None,
}
};
Some(kind)
}).flatten();
match kind {
Some(kind) => path_ctx.kind = kind,
None => return (nameref_ctx, pat_ctx),
}
path_ctx.has_type_args = segment.generic_arg_list().is_some();
if let Some((path, use_tree_parent)) = path_or_use_tree_qualifier(&path) {
if !use_tree_parent {
path_ctx.is_absolute_path =
path.top_path().segment().map_or(false, |it| it.coloncolon_token().is_some());
}
let path = path
.segment()
.and_then(|it| find_node_in_file(original_file, &it))
.map(|it| it.parent_path());
path_ctx.qualifier = path.map(|path| {
let res = sema.resolve_path(&path);
let is_super_chain = iter::successors(Some(path.clone()), |p| p.qualifier())
.all(|p| p.segment().and_then(|s| s.super_token()).is_some());
// `<_>::$0`
let is_infer_qualifier = path.qualifier().is_none()
&& matches!(
path.segment().and_then(|it| it.kind()),
Some(ast::PathSegmentKind::Type {
type_ref: Some(ast::Type::InferType(_)),
trait_ref: None,
})
);
PathQualifierCtx {
path,
resolution: res,
is_super_chain,
use_tree_parent,
is_infer_qualifier,
}
});
} else if let Some(segment) = path.segment() {
if segment.coloncolon_token().is_some() {
path_ctx.is_absolute_path = true;
}
}
nameref_ctx.path_ctx = Some(path_ctx);
(nameref_ctx, pat_ctx)
}
}
fn pattern_context_for(original_file: &SyntaxNode, pat: ast::Pat) -> PatternContext {
let mut is_param = None;
let (refutability, has_type_ascription) =
pat
.syntax()
.ancestors()
.skip_while(|it| ast::Pat::can_cast(it.kind()))
.next()
.map_or((PatternRefutability::Irrefutable, false), |node| {
let refutability = match_ast! {
match node {
ast::LetStmt(let_) => return (PatternRefutability::Irrefutable, let_.ty().is_some()),
ast::Param(param) => {
let has_type_ascription = param.ty().is_some();
is_param = (|| {
let fake_param_list = param.syntax().parent().and_then(ast::ParamList::cast)?;
let param_list = find_node_in_file_compensated(original_file, &fake_param_list)?;
let param_list_owner = param_list.syntax().parent()?;
let kind = match_ast! {
match param_list_owner {
ast::ClosureExpr(closure) => ParamKind::Closure(closure),
ast::Fn(fn_) => ParamKind::Function(fn_),
_ => return None,
}
};
Some((param_list, param, kind))
})();
return (PatternRefutability::Irrefutable, has_type_ascription)
},
ast::MatchArm(_) => PatternRefutability::Refutable,
ast::LetExpr(_) => PatternRefutability::Refutable,
ast::ForExpr(_) => PatternRefutability::Irrefutable,
_ => PatternRefutability::Irrefutable,
}
};
(refutability, false)
});
let (ref_token, mut_token) = match &pat {
ast::Pat::IdentPat(it) => (it.ref_token(), it.mut_token()),
_ => (None, None),
};
PatternContext {
refutability,
param_ctx: is_param,
has_type_ascription,
parent_pat: pat.syntax().parent().and_then(ast::Pat::cast),
mut_token,
ref_token,
}
}
fn find_in_original_file<N: AstNode>(x: Option<N>, original_file: &SyntaxNode) -> Option<N> {
fn find_node_with_range<N: AstNode>(syntax: &SyntaxNode, range: TextRange) -> Option<N> {
let range = syntax.text_range().intersect(range)?;
syntax.covering_element(range).ancestors().find_map(N::cast)
}
x.map(|e| e.syntax().text_range()).and_then(|r| find_node_with_range(original_file, r))
}
/// Attempts to find `node` inside `syntax` via `node`'s text range.
fn find_node_in_file<N: AstNode>(syntax: &SyntaxNode, node: &N) -> Option<N> {
let syntax_range = syntax.text_range();
let range = node.syntax().text_range();
let intersection = range.intersect(syntax_range)?;
syntax.covering_element(intersection).ancestors().find_map(N::cast)
}
/// Attempts to find `node` inside `syntax` via `node`'s text range while compensating
/// for the offset introduced by the fake ident.
/// This is wrong if `node` comes before the insertion point! Use `find_node_in_file` instead.
fn find_node_in_file_compensated<N: AstNode>(syntax: &SyntaxNode, node: &N) -> Option<N> {
let syntax_range = syntax.text_range();
let range = node.syntax().text_range();
let end = range.end().checked_sub(TextSize::try_from(COMPLETION_MARKER.len()).ok()?)?;
if end < range.start() {
return None;
}
let range = TextRange::new(range.start(), end);
    // our inserted ident could cause `range` to go outside of the original syntax, so cap it
let intersection = range.intersect(syntax_range)?;
syntax.covering_element(intersection).ancestors().find_map(N::cast)
}
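// Worked example of the compensation (hypothetical ranges): COMPLETION_MARKER is
// "intellijRulezz", i.e. 14 bytes long. If `node` spans 10..30 in the file with the
// fake ident, the compensated range is 10..16 (30 - 14); that range is intersected
// with `syntax` and used to locate the corresponding node in the original file.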
fn path_or_use_tree_qualifier(path: &ast::Path) -> Option<(ast::Path, bool)> {
if let Some(qual) = path.qualifier() {
return Some((qual, false));
}
let use_tree_list = path.syntax().ancestors().find_map(ast::UseTreeList::cast)?;
let use_tree = use_tree_list.syntax().parent().and_then(ast::UseTree::cast)?;
Some((use_tree.path()?, true))
}
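// Example: for `use foo::{bar, baz$0}` the path being completed has no qualifier of
// its own, so the enclosing use-tree list is consulted and `foo` is returned with
// the `use_tree_parent` flag set to true; for a plain `foo::baz$0` the qualifier
// `foo` comes straight from the path and the flag is false.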
fn has_ref(token: &SyntaxToken) -> bool {
let mut token = token.clone();
for skip in [IDENT, WHITESPACE, T![mut]] {
if token.kind() == skip {
token = match token.prev_token() {
Some(it) => it,
None => return false,
}
}
}
token.kind() == T![&]
}
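// Example: completing `bar(&mut c$0)` starts from the `c` ident; the loop walks back
// over the ident, the whitespace, and the `mut` token, landing on `&`, so `has_ref`
// returns true and the caller strips one reference level from the expected type.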
const OP_TRAIT_LANG_NAMES: &[&str] = &[
"add_assign",
"add",
"bitand_assign",
"bitand",
"bitor_assign",
"bitor",
"bitxor_assign",
"bitxor",
"deref_mut",
"deref",
"div_assign",
"div",
"eq",
"fn_mut",
"fn_once",
"fn",
"index_mut",
"index",
"mul_assign",
"mul",
"neg",
"not",
"partial_ord",
"rem_assign",
"rem",
"shl_assign",
"shl",
"shr_assign",
"shr",
"sub",
];
#[cfg(test)]
mod tests {
use expect_test::{expect, Expect};
use hir::HirDisplay;
use crate::tests::{position, TEST_CONFIG};
use super::CompletionContext;
fn check_expected_type_and_name(ra_fixture: &str, expect: Expect) {
let (db, pos) = position(ra_fixture);
let config = TEST_CONFIG;
let completion_context = CompletionContext::new(&db, pos, &config).unwrap();
let ty = completion_context
.expected_type
.map(|t| t.display_test(&db).to_string())
.unwrap_or("?".to_owned());
let name = completion_context
.expected_name
.map_or_else(|| "?".to_owned(), |name| name.to_string());
expect.assert_eq(&format!("ty: {}, name: {}", ty, name));
}
#[test]
fn expected_type_let_without_leading_char() {
cov_mark::check!(expected_type_let_without_leading_char);
check_expected_type_and_name(
r#"
fn foo() {
let x: u32 = $0;
}
"#,
expect![[r#"ty: u32, name: x"#]],
);
}
#[test]
fn expected_type_let_with_leading_char() {
cov_mark::check!(expected_type_let_with_leading_char);
check_expected_type_and_name(
r#"
fn foo() {
let x: u32 = c$0;
}
"#,
expect![[r#"ty: u32, name: x"#]],
);
}
#[test]
fn expected_type_let_pat() {
check_expected_type_and_name(
r#"
fn foo() {
let x$0 = 0u32;
}
"#,
expect![[r#"ty: u32, name: ?"#]],
);
check_expected_type_and_name(
r#"
fn foo() {
let $0 = 0u32;
}
"#,
expect![[r#"ty: u32, name: ?"#]],
);
}
#[test]
fn expected_type_fn_param() {
cov_mark::check!(expected_type_fn_param);
check_expected_type_and_name(
r#"
fn foo() { bar($0); }
fn bar(x: u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(c$0); }
fn bar(x: u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
}
#[test]
fn expected_type_fn_param_ref() {
cov_mark::check!(expected_type_fn_param_ref);
check_expected_type_and_name(
r#"
fn foo() { bar(&$0); }
fn bar(x: &u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(&mut $0); }
fn bar(x: &mut u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(& c$0); }
fn bar(x: &u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(&mut c$0); }
fn bar(x: &mut u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(&c$0); }
fn bar(x: &u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
}
#[test]
fn expected_type_struct_field_without_leading_char() {
cov_mark::check!(expected_type_struct_field_without_leading_char);
check_expected_type_and_name(
r#"
struct Foo { a: u32 }
fn foo() {
Foo { a: $0 };
}
"#,
expect![[r#"ty: u32, name: a"#]],
)
}
#[test]
fn expected_type_struct_field_followed_by_comma() {
cov_mark::check!(expected_type_struct_field_followed_by_comma);
check_expected_type_and_name(
r#"
struct Foo { a: u32 }
fn foo() {
Foo { a: $0, };
}
"#,
expect![[r#"ty: u32, name: a"#]],
)
}
#[test]
fn expected_type_generic_struct_field() {
check_expected_type_and_name(
r#"
struct Foo<T> { a: T }
fn foo() -> Foo<u32> {
Foo { a: $0 }
}
"#,
expect![[r#"ty: u32, name: a"#]],
)
}
#[test]
fn expected_type_struct_field_with_leading_char() {
cov_mark::check!(expected_type_struct_field_with_leading_char);
check_expected_type_and_name(
r#"
struct Foo { a: u32 }
fn foo() {
Foo { a: c$0 };
}
"#,
expect![[r#"ty: u32, name: a"#]],
);
}
#[test]
fn expected_type_match_arm_without_leading_char() {
cov_mark::check!(expected_type_match_arm_without_leading_char);
check_expected_type_and_name(
r#"
enum E { X }
fn foo() {
match E::X { $0 }
}
"#,
expect![[r#"ty: E, name: ?"#]],
);
}
#[test]
fn expected_type_match_arm_with_leading_char() {
cov_mark::check!(expected_type_match_arm_with_leading_char);
check_expected_type_and_name(
r#"
enum E { X }
fn foo() {
match E::X { c$0 }
}
"#,
expect![[r#"ty: E, name: ?"#]],
);
}
#[test]
fn expected_type_if_let_without_leading_char() {
cov_mark::check!(expected_type_if_let_without_leading_char);
check_expected_type_and_name(
r#"
enum Foo { Bar, Baz, Quux }
fn foo() {
let f = Foo::Quux;
if let $0 = f { }
}
"#,
expect![[r#"ty: Foo, name: ?"#]],
)
}
#[test]
fn expected_type_if_let_with_leading_char() {
cov_mark::check!(expected_type_if_let_with_leading_char);
check_expected_type_and_name(
r#"
enum Foo { Bar, Baz, Quux }
fn foo() {
let f = Foo::Quux;
if let c$0 = f { }
}
"#,
expect![[r#"ty: Foo, name: ?"#]],
)
}
#[test]
fn expected_type_fn_ret_without_leading_char() {
cov_mark::check!(expected_type_fn_ret_without_leading_char);
check_expected_type_and_name(
r#"
fn foo() -> u32 {
$0
}
"#,
expect![[r#"ty: u32, name: ?"#]],
)
}
#[test]
fn expected_type_fn_ret_with_leading_char() {
cov_mark::check!(expected_type_fn_ret_with_leading_char);
check_expected_type_and_name(
r#"
fn foo() -> u32 {
c$0
}
"#,
expect![[r#"ty: u32, name: ?"#]],
)
}
#[test]
fn expected_type_fn_ret_fn_ref_fully_typed() {
check_expected_type_and_name(
r#"
fn foo() -> u32 {
foo$0
}
"#,
expect![[r#"ty: u32, name: ?"#]],
)
}
#[test]
fn expected_type_closure_param_return() {
// FIXME: make this work with `|| $0`
check_expected_type_and_name(
r#"
//- minicore: fn
fn foo() {
bar(|| a$0);
}
fn bar(f: impl FnOnce() -> u32) {}
"#,
expect![[r#"ty: u32, name: ?"#]],
);
}
#[test]
fn expected_type_generic_function() {
check_expected_type_and_name(
r#"
fn foo() {
bar::<u32>($0);
}
fn bar<T>(t: T) {}
"#,
expect![[r#"ty: u32, name: t"#]],
);
}
#[test]
fn expected_type_generic_method() {
check_expected_type_and_name(
r#"
fn foo() {
S(1u32).bar($0);
}
struct S<T>(T);
impl<T> S<T> {
fn bar(self, t: T) {}
}
"#,
expect![[r#"ty: u32, name: t"#]],
);
}
#[test]
fn expected_type_functional_update() {
cov_mark::check!(expected_type_struct_func_update);
check_expected_type_and_name(
r#"
struct Foo { field: u32 }
fn foo() {
Foo {
..$0
}
}
"#,
expect![[r#"ty: Foo, name: ?"#]],
);
}
#[test]
fn expected_type_param_pat() {
check_expected_type_and_name(
r#"
struct Foo { field: u32 }
fn foo(a$0: Foo) {}
"#,
expect![[r#"ty: Foo, name: ?"#]],
);
check_expected_type_and_name(
r#"
struct Foo { field: u32 }
fn foo($0: Foo) {}
"#,
// FIXME make this work, currently fails due to pattern recovery eating the `:`
expect![[r#"ty: ?, name: ?"#]],
);
}
}
| 35.880734 | 154 | 0.518985 |
3acec2a137d344f7fd3cd27dedf0b6e965f4d714
| 17,686 |
//! Board file for Imix development platform.
//!
//! - <https://github.com/tock/tock/tree/master/boards/imix>
//! - <https://github.com/tock/imix>
#![no_std]
#![no_main]
#![feature(in_band_lifetimes)]
#![feature(infer_outlives_requirements)]
#![feature(panic_implementation)]
#![deny(missing_docs)]
extern crate capsules;
#[allow(unused_imports)]
#[macro_use(debug, debug_gpio, static_init, create_capability)]
extern crate kernel;
extern crate cortexm4;
extern crate sam4l;
mod components;
use capsules::alarm::AlarmDriver;
use capsules::net::ieee802154::MacAddress;
use capsules::net::ipv6::ip_utils::IPAddr;
use capsules::virtual_alarm::{MuxAlarm, VirtualMuxAlarm};
use capsules::virtual_i2c::MuxI2C;
use capsules::virtual_spi::{MuxSpiMaster, VirtualSpiMasterDevice};
use capsules::virtual_uart::{UartDevice, UartMux};
use kernel::capabilities;
use kernel::component::Component;
use kernel::hil;
use kernel::hil::radio;
#[allow(unused_imports)]
use kernel::hil::radio::{RadioConfig, RadioData};
use kernel::hil::spi::SpiMaster;
use kernel::hil::Controller;
use components::adc::AdcComponent;
use components::alarm::AlarmDriverComponent;
use components::analog_comparator::AcComponent;
use components::button::ButtonComponent;
use components::console::ConsoleComponent;
use components::crc::CrcComponent;
use components::fxos8700::NineDofComponent;
use components::gpio::GpioComponent;
use components::isl29035::AmbientLightComponent;
use components::led::LedComponent;
use components::nonvolatile_storage::NonvolatileStorageComponent;
use components::nrf51822::Nrf51822Component;
use components::radio::RadioComponent;
use components::rf233::RF233Component;
use components::si7021::{HumidityComponent, SI7021Component, TemperatureComponent};
use components::spi::{SpiComponent, SpiSyscallComponent};
use components::udp_6lowpan::UDPComponent;
use components::usb::UsbComponent;
/// Support routines for debugging I/O.
///
/// Note: Use of this module will trample any other USART3 configuration.
#[macro_use]
pub mod io;
// Unit Tests for drivers.
#[allow(dead_code)]
mod i2c_dummy;
#[allow(dead_code)]
mod icmp_lowpan_test;
#[allow(dead_code)]
mod ipv6_lowpan_test;
#[allow(dead_code)]
mod spi_dummy;
#[allow(dead_code)]
mod udp_lowpan_test;
#[allow(dead_code)]
mod aes_test;
#[allow(dead_code)]
mod aes_ccm_test;
#[allow(dead_code)]
mod rng_test;
#[allow(dead_code)]
mod power;
#[allow(dead_code)]
mod virtual_uart_rx_test;
// State for loading apps.
const NUM_PROCS: usize = 2;
// Constants related to the configuration of the 15.4 network stack
const RADIO_CHANNEL: u8 = 26;
const SRC_MAC: u16 = 0xf00f;
const DST_MAC_ADDR: MacAddress = MacAddress::Short(0x802);
const SRC_MAC_ADDR: MacAddress = MacAddress::Short(SRC_MAC);
const DEFAULT_CTX_PREFIX_LEN: u8 = 8; //Length of context for 6LoWPAN compression
const DEFAULT_CTX_PREFIX: [u8; 16] = [0x0 as u8; 16]; //Context for 6LoWPAN Compression
const PAN_ID: u16 = 0xABCD;
static LOCAL_IP_IFACES: [IPAddr; 2] = [
IPAddr([
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f,
]),
IPAddr([
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e,
0x1f,
]),
];
// how should the kernel respond when a process faults
const FAULT_RESPONSE: kernel::procs::FaultResponse = kernel::procs::FaultResponse::Panic;
#[link_section = ".app_memory"]
static mut APP_MEMORY: [u8; 16384] = [0; 16384];
static mut PROCESSES: [Option<&'static kernel::procs::ProcessType>; NUM_PROCS] = [None, None];
/// Dummy buffer that causes the linker to reserve enough space for the stack.
#[no_mangle]
#[link_section = ".stack_buffer"]
pub static mut STACK_MEMORY: [u8; 0x2000] = [0; 0x2000];
struct Imix {
console: &'static capsules::console::Console<'static, UartDevice<'static>>,
gpio: &'static capsules::gpio::GPIO<'static, sam4l::gpio::GPIOPin>,
alarm: &'static AlarmDriver<'static, VirtualMuxAlarm<'static, sam4l::ast::Ast<'static>>>,
temp: &'static capsules::temperature::TemperatureSensor<'static>,
humidity: &'static capsules::humidity::HumiditySensor<'static>,
ambient_light: &'static capsules::ambient_light::AmbientLight<'static>,
adc: &'static capsules::adc::Adc<'static, sam4l::adc::Adc>,
led: &'static capsules::led::LED<'static, sam4l::gpio::GPIOPin>,
button: &'static capsules::button::Button<'static, sam4l::gpio::GPIOPin>,
analog_comparator: &'static capsules::analog_comparator::AnalogComparator<
'static,
sam4l::acifc::Acifc<'static>,
>,
spi: &'static capsules::spi::Spi<'static, VirtualSpiMasterDevice<'static, sam4l::spi::SpiHw>>,
ipc: kernel::ipc::IPC,
ninedof: &'static capsules::ninedof::NineDof<'static>,
radio_driver: &'static capsules::ieee802154::RadioDriver<'static>,
udp_driver: &'static capsules::net::udp::UDPDriver<'static>,
crc: &'static capsules::crc::Crc<'static, sam4l::crccu::Crccu<'static>>,
usb_driver: &'static capsules::usb_user::UsbSyscallDriver<
'static,
capsules::usbc_client::Client<'static, sam4l::usbc::Usbc<'static>>,
>,
nrf51822: &'static capsules::nrf51822_serialization::Nrf51822Serialization<
'static,
sam4l::usart::USART,
>,
nonvolatile_storage: &'static capsules::nonvolatile_storage_driver::NonvolatileStorage<'static>,
}
// The RF233 radio stack requires our buffers for its SPI operations:
//
// 1. buf: a packet-sized buffer for SPI operations, which is
// used as the read buffer when it writes a packet passed to it and the write
// buffer when it reads a packet into a buffer passed to it.
// 2. rx_buf: buffer to receive packets into
// 3 + 4: two small buffers for performing register
// operations (one read, one write).
static mut RF233_BUF: [u8; radio::MAX_BUF_SIZE] = [0x00; radio::MAX_BUF_SIZE];
static mut RF233_REG_WRITE: [u8; 2] = [0x00; 2];
static mut RF233_REG_READ: [u8; 2] = [0x00; 2];
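// The two 2-byte buffers fit the usual SPI register access pattern for a radio like
// the RF233: presumably one byte for the register address/command and one byte for
// the value being read or written.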
impl kernel::Platform for Imix {
fn with_driver<F, R>(&self, driver_num: usize, f: F) -> R
where
F: FnOnce(Option<&kernel::Driver>) -> R,
{
match driver_num {
capsules::console::DRIVER_NUM => f(Some(self.console)),
capsules::gpio::DRIVER_NUM => f(Some(self.gpio)),
capsules::alarm::DRIVER_NUM => f(Some(self.alarm)),
capsules::spi::DRIVER_NUM => f(Some(self.spi)),
capsules::adc::DRIVER_NUM => f(Some(self.adc)),
capsules::led::DRIVER_NUM => f(Some(self.led)),
capsules::button::DRIVER_NUM => f(Some(self.button)),
capsules::analog_comparator::DRIVER_NUM => f(Some(self.analog_comparator)),
capsules::ambient_light::DRIVER_NUM => f(Some(self.ambient_light)),
capsules::temperature::DRIVER_NUM => f(Some(self.temp)),
capsules::humidity::DRIVER_NUM => f(Some(self.humidity)),
capsules::ninedof::DRIVER_NUM => f(Some(self.ninedof)),
capsules::crc::DRIVER_NUM => f(Some(self.crc)),
capsules::usb_user::DRIVER_NUM => f(Some(self.usb_driver)),
capsules::ieee802154::DRIVER_NUM => f(Some(self.radio_driver)),
capsules::net::udp::DRIVER_NUM => f(Some(self.udp_driver)),
capsules::nrf51822_serialization::DRIVER_NUM => f(Some(self.nrf51822)),
capsules::nonvolatile_storage_driver::DRIVER_NUM => f(Some(self.nonvolatile_storage)),
kernel::ipc::DRIVER_NUM => f(Some(&self.ipc)),
_ => f(None),
}
}
}
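// Dispatch sketch (hypothetical call): when a process issues a system call such as
// `command(capsules::console::DRIVER_NUM, ...)`, the kernel invokes `with_driver`
// with that number, the match above yields `Some(self.console)`, and the call is
// forwarded to the console capsule; an unknown driver number falls through to the
// `_` arm and `f(None)` lets the kernel report that no such driver exists.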
unsafe fn set_pin_primary_functions() {
use sam4l::gpio::PeripheralFunction::{A, B, C, E};
use sam4l::gpio::{PA, PB, PC};
// Right column: Imix pin name
// Left column: SAM4L peripheral function
PA[04].configure(Some(A)); // AD0 -- ADCIFE AD0
PA[05].configure(Some(A)); // AD1 -- ADCIFE AD1
PA[06].configure(Some(C)); // EXTINT1 -- EIC EXTINT1
PA[07].configure(Some(A)); // AD1 -- ADCIFE AD2
PA[08].configure(None); //... RF233 IRQ -- GPIO pin
PA[09].configure(None); //... RF233 RST -- GPIO pin
PA[10].configure(None); //... RF233 SLP -- GPIO pin
PA[13].configure(None); //... TRNG EN -- GPIO pin
PA[14].configure(None); //... TRNG_OUT -- GPIO pin
PA[17].configure(None); //... NRF INT -- GPIO pin
PA[18].configure(Some(A)); // NRF CLK -- USART2_CLK
PA[20].configure(None); //... D8 -- GPIO pin
PA[21].configure(Some(E)); // TWI2 SDA -- TWIM2_SDA
PA[22].configure(Some(E)); // TWI2 SCL -- TWIM2 TWCK
PA[25].configure(Some(A)); // USB_N -- USB DM
PA[26].configure(Some(A)); // USB_P -- USB DP
PB[00].configure(Some(A)); // TWI1_SDA -- TWIMS1 TWD
PB[01].configure(Some(A)); // TWI1_SCL -- TWIMS1 TWCK
PB[02].configure(Some(A)); // AD3 -- ADCIFE AD3
PB[03].configure(Some(A)); // AD4 -- ADCIFE AD4
PB[04].configure(Some(A)); // AD5 -- ADCIFE AD5
PB[05].configure(Some(A)); // VHIGHSAMPLE -- ADCIFE AD6
PB[06].configure(Some(A)); // RTS3 -- USART3 RTS
PB[07].configure(None); //... NRF RESET -- GPIO
PB[09].configure(Some(A)); // RX3 -- USART3 RX
PB[10].configure(Some(A)); // TX3 -- USART3 TX
PB[11].configure(Some(A)); // CTS0 -- USART0 CTS
PB[12].configure(Some(A)); // RTS0 -- USART0 RTS
PB[13].configure(Some(A)); // CLK0 -- USART0 CLK
PB[14].configure(Some(A)); // RX0 -- USART0 RX
PB[15].configure(Some(A)); // TX0 -- USART0 TX
PC[00].configure(Some(A)); // CS2 -- SPI NPCS2
PC[01].configure(Some(A)); // CS3 (RF233) -- SPI NPCS3
PC[02].configure(Some(A)); // CS1 -- SPI NPCS1
PC[03].configure(Some(A)); // CS0 -- SPI NPCS0
PC[04].configure(Some(A)); // MISO -- SPI MISO
PC[05].configure(Some(A)); // MOSI -- SPI MOSI
PC[06].configure(Some(A)); // SCK -- SPI CLK
PC[07].configure(Some(B)); // RTS2 (BLE) -- USART2_RTS
PC[08].configure(Some(E)); // CTS2 (BLE) -- USART2_CTS
//PC[09].configure(None); //... NRF GPIO -- GPIO
//PC[10].configure(None); //... USER LED -- GPIO
PC[09].configure(Some(E)); // ACAN1 -- ACIFC comparator
PC[10].configure(Some(E)); // ACAP1 -- ACIFC comparator
PC[11].configure(Some(B)); // RX2 (BLE) -- USART2_RX
PC[12].configure(Some(B)); // TX2 (BLE) -- USART2_TX
//PC[13].configure(None); //... ACC_INT1 -- GPIO
//PC[14].configure(None); //... ACC_INT2 -- GPIO
PC[13].configure(Some(E)); //... ACBN1 -- ACIFC comparator
PC[14].configure(Some(E)); //... ACBP1 -- ACIFC comparator
PC[16].configure(None); //... SENSE_PWR -- GPIO pin
PC[17].configure(None); //... NRF_PWR -- GPIO pin
PC[18].configure(None); //... RF233_PWR -- GPIO pin
PC[19].configure(None); //... TRNG_PWR -- GPIO Pin
PC[22].configure(None); //... KERNEL LED -- GPIO Pin
PC[24].configure(None); //... USER_BTN -- GPIO Pin
PC[25].configure(Some(B)); // LI_INT -- EIC EXTINT2
PC[26].configure(None); //... D7 -- GPIO Pin
PC[27].configure(None); //... D6 -- GPIO Pin
PC[28].configure(None); //... D5 -- GPIO Pin
PC[29].configure(None); //... D4 -- GPIO Pin
PC[30].configure(None); //... D3 -- GPIO Pin
PC[31].configure(None); //... D2 -- GPIO Pin
}
/// Reset Handler.
///
/// This symbol is loaded into vector table by the SAM4L chip crate.
/// When the chip first powers on or later does a hard reset, after the core
/// initializes all the hardware, the address of this function is loaded and
/// execution begins here.
#[no_mangle]
pub unsafe fn reset_handler() {
sam4l::init();
sam4l::pm::PM.setup_system_clock(sam4l::pm::SystemClockSource::PllExternalOscillatorAt48MHz {
frequency: sam4l::pm::OscillatorFrequency::Frequency16MHz,
startup_mode: sam4l::pm::OscillatorStartup::FastStart,
});
    // Source 32kHz and 1kHz clocks from RC32K (SAM4L Datasheet 11.6.8)
sam4l::bpm::set_ck32source(sam4l::bpm::CK32Source::RC32K);
set_pin_primary_functions();
// Create capabilities that the board needs to call certain protected kernel
// functions.
let process_mgmt_cap = create_capability!(capabilities::ProcessManagementCapability);
let main_cap = create_capability!(capabilities::MainLoopCapability);
let grant_cap = create_capability!(capabilities::MemoryAllocationCapability);
power::configure_submodules(power::SubmoduleConfig {
rf233: true,
nrf51422: true,
sensors: true,
trng: true,
});
let board_kernel = static_init!(kernel::Kernel, kernel::Kernel::new(&PROCESSES));
// # CONSOLE
// Create a shared UART channel for the console and for kernel debug.
sam4l::usart::USART3.set_mode(sam4l::usart::UsartMode::Uart);
let uart_mux = static_init!(
UartMux<'static>,
UartMux::new(
&sam4l::usart::USART3,
&mut capsules::virtual_uart::RX_BUF,
115200
)
);
hil::uart::UART::set_client(&sam4l::usart::USART3, uart_mux);
let console = ConsoleComponent::new(board_kernel, uart_mux, 115200).finalize();
// Allow processes to communicate over BLE through the nRF51822
let nrf_serialization =
Nrf51822Component::new(&sam4l::usart::USART2, &sam4l::gpio::PB[07]).finalize();
// # TIMER
let ast = &sam4l::ast::AST;
let mux_alarm = static_init!(
MuxAlarm<'static, sam4l::ast::Ast>,
MuxAlarm::new(&sam4l::ast::AST)
);
ast.configure(mux_alarm);
let alarm = AlarmDriverComponent::new(board_kernel, mux_alarm).finalize();
// # I2C and I2C Sensors
let mux_i2c = static_init!(MuxI2C<'static>, MuxI2C::new(&sam4l::i2c::I2C2));
sam4l::i2c::I2C2.set_master_client(mux_i2c);
let ambient_light = AmbientLightComponent::new(board_kernel, mux_i2c, mux_alarm).finalize();
let si7021 = SI7021Component::new(mux_i2c, mux_alarm).finalize();
let temp = TemperatureComponent::new(board_kernel, si7021).finalize();
let humidity = HumidityComponent::new(board_kernel, si7021).finalize();
let ninedof = NineDofComponent::new(board_kernel, mux_i2c, &sam4l::gpio::PC[13]).finalize();
// SPI MUX, SPI syscall driver and RF233 radio
let mux_spi = static_init!(
MuxSpiMaster<'static, sam4l::spi::SpiHw>,
MuxSpiMaster::new(&sam4l::spi::SPI)
);
sam4l::spi::SPI.set_client(mux_spi);
sam4l::spi::SPI.init();
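    // Two virtual SPI devices hang off the mux below: one backs the userspace SPI syscall
    // driver and the other is dedicated to the RF233 radio.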
let spi_syscalls = SpiSyscallComponent::new(mux_spi).finalize();
let rf233_spi = SpiComponent::new(mux_spi).finalize();
let rf233 = RF233Component::new(
rf233_spi,
&sam4l::gpio::PA[09], // reset
&sam4l::gpio::PA[10], // sleep
&sam4l::gpio::PA[08], // irq
&sam4l::gpio::PA[08],
RADIO_CHANNEL,
).finalize();
// Clear sensors enable pin to enable sensor rail
// sam4l::gpio::PC[16].enable_output();
// sam4l::gpio::PC[16].clear();
let adc = AdcComponent::new().finalize();
let gpio = GpioComponent::new().finalize();
let led = LedComponent::new().finalize();
let button = ButtonComponent::new(board_kernel).finalize();
let crc = CrcComponent::new(board_kernel).finalize();
let analog_comparator = AcComponent::new().finalize();
// Can this initialize be pushed earlier, or into component? -pal
rf233.initialize(&mut RF233_BUF, &mut RF233_REG_WRITE, &mut RF233_REG_READ);
let (radio_driver, mux_mac) =
RadioComponent::new(board_kernel, rf233, PAN_ID, SRC_MAC).finalize();
let usb_driver = UsbComponent::new(board_kernel).finalize();
let nonvolatile_storage = NonvolatileStorageComponent::new(board_kernel).finalize();
let udp_driver = UDPComponent::new(
board_kernel,
mux_mac,
DEFAULT_CTX_PREFIX_LEN,
DEFAULT_CTX_PREFIX,
DST_MAC_ADDR,
SRC_MAC_ADDR,
&LOCAL_IP_IFACES,
mux_alarm,
).finalize();
let imix = Imix {
console,
alarm,
gpio,
temp,
humidity,
ambient_light,
adc,
led,
button,
analog_comparator,
crc,
spi: spi_syscalls,
ipc: kernel::ipc::IPC::new(board_kernel, &grant_cap),
ninedof,
radio_driver,
udp_driver,
usb_driver,
nrf51822: nrf_serialization,
nonvolatile_storage: nonvolatile_storage,
};
let chip = sam4l::chip::Sam4l::new();
    // Need to reset the nRF on boot; toggle its SWDIO
imix.nrf51822.reset();
imix.nrf51822.initialize();
// These two lines need to be below the creation of the chip for
// initialization to work.
rf233.reset();
rf233.start();
// debug!("Starting virtual read test.");
// virtual_uart_rx_test::run_virtual_uart_receive(uart_mux);
debug!("Initialization complete. Entering main loop");
// rng_test::run_entropy32();
extern "C" {
/// Beginning of the ROM region containing app images.
static _sapps: u8;
}
kernel::procs::load_processes(
board_kernel,
&cortexm4::syscall::SysCall::new(),
&_sapps as *const u8,
&mut APP_MEMORY,
&mut PROCESSES,
FAULT_RESPONSE,
&process_mgmt_cap,
);
board_kernel.kernel_loop(&imix, &chip, Some(&imix.ipc), &main_cap);
}
| 39.477679 | 100 | 0.640337 |
1c3ec6b6f133800aba6146bf7791429accc7bead
| 28,969 |
mod block_proposal_process;
mod block_transactions_process;
mod block_transactions_verifier;
mod block_uncles_verifier;
mod compact_block_process;
mod compact_block_verifier;
mod get_block_proposal_process;
mod get_block_transactions_process;
mod get_transactions_process;
#[cfg(test)]
mod tests;
mod transaction_hashes_process;
mod transactions_process;
use self::block_proposal_process::BlockProposalProcess;
use self::block_transactions_process::BlockTransactionsProcess;
use self::compact_block_process::CompactBlockProcess;
use self::get_block_proposal_process::GetBlockProposalProcess;
use self::get_block_transactions_process::GetBlockTransactionsProcess;
use self::get_transactions_process::GetTransactionsProcess;
use self::transaction_hashes_process::TransactionHashesProcess;
use self::transactions_process::TransactionsProcess;
use crate::block_status::BlockStatus;
use crate::types::{ActiveChain, SyncShared};
use crate::{Status, StatusCode, BAD_MESSAGE_BAN_TIME};
use ckb_chain::chain::ChainController;
use ckb_logger::{debug_target, error_target, info_target, trace_target, warn_target};
use ckb_metrics::metrics;
use ckb_network::{
bytes::Bytes, tokio, CKBProtocolContext, CKBProtocolHandler, PeerIndex, TargetSession,
};
use ckb_types::core::BlockView;
use ckb_types::{
core::{self, Cycle, FeeRate},
packed::{self, Byte32, ProposalShortId},
prelude::*,
};
use ckb_util::Mutex;
use faketime::unix_time_as_millis;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::time::{Duration, Instant};
pub const TX_PROPOSAL_TOKEN: u64 = 0;
pub const ASK_FOR_TXS_TOKEN: u64 = 1;
pub const TX_HASHES_TOKEN: u64 = 2;
pub const SEARCH_ORPHAN_POOL_TOKEN: u64 = 3;
pub const MAX_RELAY_PEERS: usize = 128;
pub const MAX_RELAY_TXS_NUM_PER_BATCH: usize = 32767;
pub const MAX_RELAY_TXS_BYTES_PER_BATCH: usize = 1024 * 1024;
type RateLimiter<T> = governor::RateLimiter<
T,
governor::state::keyed::DefaultKeyedStateStore<T>,
governor::clock::DefaultClock,
>;
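// Keyed rate limiter from the `governor` crate; the relayer keys it by (peer, message item id),
// so rate limiting is applied per peer and per message type.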
#[derive(Debug, Eq, PartialEq)]
pub enum ReconstructionResult {
Block(BlockView),
Missing(Vec<usize>, Vec<usize>),
Collided,
Error(Status),
}
/// Relayer protocol handle
#[derive(Clone)]
pub struct Relayer {
chain: ChainController,
pub(crate) shared: Arc<SyncShared>,
pub(crate) min_fee_rate: FeeRate,
pub(crate) max_tx_verify_cycles: Cycle,
rate_limiter: Arc<Mutex<RateLimiter<(PeerIndex, u32)>>>,
}
impl Relayer {
/// Init relay protocol handle
///
    /// This is the shared runtime state of the relay protocol; all relay messages are processed and forwarded through it
///
    /// min_fee_rate: default minimum transaction fee rate; can be modified via the configuration file
    /// max_tx_verify_cycles: maximum verification cycles a transaction may consume by default; can be modified via the configuration file
pub fn new(
chain: ChainController,
shared: Arc<SyncShared>,
min_fee_rate: FeeRate,
max_tx_verify_cycles: Cycle,
) -> Self {
        // Set up a rate limiter keyed by (peer, message type) that lets through 30 requests per second.
        // The current peak is about 10 rps (ASK_FOR_TXS_TOKEN / TX_PROPOSAL_TOKEN), so 30 is a hard cap with headroom.
let quota = governor::Quota::per_second(std::num::NonZeroU32::new(30).unwrap());
let rate_limiter = Arc::new(Mutex::new(RateLimiter::keyed(quota)));
Relayer {
chain,
shared,
min_fee_rate,
max_tx_verify_cycles,
rate_limiter,
}
}
/// Get shared state
pub fn shared(&self) -> &Arc<SyncShared> {
&self.shared
}
fn try_process<'r>(
&mut self,
nc: Arc<dyn CKBProtocolContext + Sync>,
peer: PeerIndex,
message: packed::RelayMessageUnionReader<'r>,
) -> Status {
        // CompactBlock is verified by PoW, so it's OK to skip the rate limit check.
let should_check_rate = match message {
packed::RelayMessageUnionReader::CompactBlock(_) => false,
_ => true,
};
if should_check_rate
&& self
.rate_limiter
.lock()
.check_key(&(peer, message.item_id()))
.is_err()
{
return StatusCode::TooManyRequests.with_context(message.item_name());
}
match message {
packed::RelayMessageUnionReader::CompactBlock(reader) => {
CompactBlockProcess::new(reader, self, nc, peer).execute()
}
packed::RelayMessageUnionReader::RelayTransactions(reader) => {
if reader.check_data() {
TransactionsProcess::new(reader, self, nc, peer).execute()
} else {
StatusCode::ProtocolMessageIsMalformed
.with_context("RelayTransactions is invalid")
}
}
packed::RelayMessageUnionReader::RelayTransactionHashes(reader) => {
TransactionHashesProcess::new(reader, self, peer).execute()
}
packed::RelayMessageUnionReader::GetRelayTransactions(reader) => {
GetTransactionsProcess::new(reader, self, nc, peer).execute()
}
packed::RelayMessageUnionReader::GetBlockTransactions(reader) => {
GetBlockTransactionsProcess::new(reader, self, nc, peer).execute()
}
packed::RelayMessageUnionReader::BlockTransactions(reader) => {
if reader.check_data() {
BlockTransactionsProcess::new(reader, self, nc, peer).execute()
} else {
StatusCode::ProtocolMessageIsMalformed
.with_context("BlockTransactions is invalid")
}
}
packed::RelayMessageUnionReader::GetBlockProposal(reader) => {
GetBlockProposalProcess::new(reader, self, nc, peer).execute()
}
packed::RelayMessageUnionReader::BlockProposal(reader) => {
BlockProposalProcess::new(reader, self).execute()
}
}
}
fn process<'r>(
&mut self,
nc: Arc<dyn CKBProtocolContext + Sync>,
peer: PeerIndex,
message: packed::RelayMessageUnionReader<'r>,
) {
let item_name = message.item_name();
let status = self.try_process(Arc::clone(&nc), peer, message);
metrics!(counter, "ckb-net.received", 1, "action" => "relay", "item" => item_name.to_owned());
if !status.is_ok() {
metrics!(counter, "ckb-net.status", 1, "action" => "relay", "status" => status.tag());
}
if let Some(ban_time) = status.should_ban() {
error_target!(
crate::LOG_TARGET_RELAY,
"receive {} from {}, ban {:?} for {}",
item_name,
peer,
ban_time,
status
);
nc.ban_peer(peer, ban_time, status.to_string());
} else if status.should_warn() {
warn_target!(
crate::LOG_TARGET_RELAY,
"receive {} from {}, {}",
item_name,
peer,
status
);
} else if !status.is_ok() {
debug_target!(
crate::LOG_TARGET_RELAY,
"receive {} from {}, {}",
item_name,
peer,
status
);
}
}
    /// Request the transactions corresponding to the given proposal ids from the specified peer
pub fn request_proposal_txs(
&self,
nc: &dyn CKBProtocolContext,
peer: PeerIndex,
block_hash: Byte32,
mut proposals: Vec<packed::ProposalShortId>,
) {
proposals.dedup();
let tx_pool = self.shared.shared().tx_pool_controller();
let fresh_proposals = match tx_pool.fresh_proposals_filter(proposals) {
Err(err) => {
debug_target!(
crate::LOG_TARGET_RELAY,
"tx_pool fresh_proposals_filter error: {:?}",
err,
);
return;
}
Ok(fresh_proposals) => fresh_proposals,
};
let to_ask_proposals: Vec<ProposalShortId> = self
.shared()
.state()
.insert_inflight_proposals(fresh_proposals.clone())
.into_iter()
.zip(fresh_proposals)
.filter_map(|(firstly_in, id)| if firstly_in { Some(id) } else { None })
.collect();
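        // Only the proposals that were newly marked as inflight are requested below; ids that
        // are already being fetched from some peer have been filtered out to avoid duplicates.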
if !to_ask_proposals.is_empty() {
let content = packed::GetBlockProposal::new_builder()
.block_hash(block_hash)
.proposals(to_ask_proposals.clone().pack())
.build();
let message = packed::RelayMessage::new_builder().set(content).build();
if let Err(err) = nc.send_message_to(peer, message.as_bytes()) {
debug_target!(
crate::LOG_TARGET_RELAY,
"relayer send GetBlockProposal error {:?}",
err,
);
self.shared()
.state()
.remove_inflight_proposals(&to_ask_proposals);
}
crate::relayer::metrics_counter_send(message.to_enum().item_name());
}
}
    /// Accept a new block from the network
pub fn accept_block(
&self,
nc: &dyn CKBProtocolContext,
peer: PeerIndex,
block: core::BlockView,
) {
if self
.shared()
.active_chain()
.contains_block_status(&block.hash(), BlockStatus::BLOCK_STORED)
{
return;
}
let boxed = Arc::new(block);
if self
.shared()
.insert_new_block(&self.chain, Arc::clone(&boxed))
.unwrap_or(false)
{
debug_target!(
crate::LOG_TARGET_RELAY,
"[block_relay] relayer accept_block {} {}",
boxed.header().hash(),
unix_time_as_millis()
);
let block_hash = boxed.hash();
self.shared().state().remove_header_view(&block_hash);
let cb = packed::CompactBlock::build_from_block(&boxed, &HashSet::new());
let message = packed::RelayMessage::new_builder().set(cb).build();
let selected_peers: Vec<PeerIndex> = nc
.connected_peers()
.into_iter()
.filter(|target_peer| peer != *target_peer)
.take(MAX_RELAY_PEERS)
.collect();
if let Err(err) =
nc.quick_filter_broadcast(TargetSession::Multi(selected_peers), message.as_bytes())
{
debug_target!(
crate::LOG_TARGET_RELAY,
"relayer send block when accept block error: {:?}",
err,
);
}
}
}
    /// Reconstruct the full block from the compact block, the received transactions and uncles.
    // Nodes should attempt to reconstruct the full block by taking the prefilledtxn transactions
    // from the original CompactBlock message and placing them in the marked positions, then, for
    // each short transaction ID from the original compact_block message, in order, finding the
    // corresponding transaction either from the BlockTransactions message or from other sources
    // and placing it in the first available position in the block. Once the block has been
    // reconstructed, it shall be processed as normal, keeping in mind that short_ids are expected
    // to occasionally collide, and that nodes must not be penalized for such collisions, wherever
    // they appear.
pub fn reconstruct_block(
&self,
active_chain: &ActiveChain,
compact_block: &packed::CompactBlock,
received_transactions: Vec<core::TransactionView>,
uncles_index: &[u32],
        received_uncles: &[core::UncleBlockView],
) -> ReconstructionResult {
let block_txs_len = received_transactions.len();
let compact_block_hash = compact_block.calc_header_hash();
debug_target!(
crate::LOG_TARGET_RELAY,
"start block reconstruction, block hash: {}, received transactions len: {}",
compact_block_hash,
block_txs_len,
);
let mut short_ids_set: HashSet<ProposalShortId> =
compact_block.short_ids().into_iter().collect();
let mut txs_map: HashMap<ProposalShortId, core::TransactionView> = received_transactions
.into_iter()
.filter_map(|tx| {
let short_id = tx.proposal_short_id();
if short_ids_set.remove(&short_id) {
Some((short_id, tx))
} else {
None
}
})
.collect();
if !short_ids_set.is_empty() {
let tx_pool = self.shared.shared().tx_pool_controller();
let fetch_txs = tx_pool.fetch_txs(short_ids_set.into_iter().collect());
if let Err(e) = fetch_txs {
return ReconstructionResult::Error(StatusCode::TxPool.with_context(e));
}
txs_map.extend(fetch_txs.unwrap().into_iter());
}
let txs_len = compact_block.txs_len();
let mut block_transactions: Vec<Option<core::TransactionView>> =
Vec::with_capacity(txs_len);
let short_ids_iter = &mut compact_block.short_ids().into_iter();
        // Fill the gaps between prefilled transactions: for each prefilled transaction, first
        // consume enough short ids to reach its index, then insert the prefilled transaction.
compact_block
.prefilled_transactions()
.into_iter()
.for_each(|pt| {
let index: usize = pt.index().unpack();
let gap = index - block_transactions.len();
if gap > 0 {
short_ids_iter
.take(gap)
.for_each(|short_id| block_transactions.push(txs_map.remove(&short_id)));
}
block_transactions.push(Some(pt.transaction().into_view()));
});
        // Append the remaining transactions matched by short id.
short_ids_iter.for_each(|short_id| block_transactions.push(txs_map.remove(&short_id)));
let missing = block_transactions.iter().any(Option::is_none);
let mut missing_uncles = Vec::with_capacity(compact_block.uncles().len());
let mut uncles = Vec::with_capacity(compact_block.uncles().len());
let mut position = 0;
for (i, uncle_hash) in compact_block.uncles().into_iter().enumerate() {
if uncles_index.contains(&(i as u32)) {
uncles.push(
                    received_uncles
.get(position)
.expect("have checked the indexes")
.clone()
.data(),
);
position += 1;
continue;
};
let status = active_chain.get_block_status(&uncle_hash);
match status {
BlockStatus::UNKNOWN | BlockStatus::HEADER_VALID => missing_uncles.push(i),
BlockStatus::BLOCK_STORED | BlockStatus::BLOCK_VALID => {
if let Some(uncle) = active_chain.get_block(&uncle_hash) {
uncles.push(uncle.as_uncle().data());
} else {
debug_target!(
crate::LOG_TARGET_RELAY,
"reconstruct_block could not find {:#?} uncle block: {:#?}",
status,
uncle_hash,
);
missing_uncles.push(i);
}
}
BlockStatus::BLOCK_RECEIVED => {
if let Some(uncle) = self.shared.state().get_orphan_block(&uncle_hash) {
uncles.push(uncle.as_uncle().data());
} else {
debug_target!(
crate::LOG_TARGET_RELAY,
"reconstruct_block could not find {:#?} uncle block: {:#?}",
status,
uncle_hash,
);
missing_uncles.push(i);
}
}
BlockStatus::BLOCK_INVALID => {
return ReconstructionResult::Error(
StatusCode::CompactBlockHasInvalidUncle.with_context(uncle_hash),
)
}
_ => missing_uncles.push(i),
}
}
if !missing && missing_uncles.is_empty() {
let txs = block_transactions
.into_iter()
.collect::<Option<Vec<_>>>()
.expect("missing checked, should not fail");
let block = packed::Block::new_builder()
.header(compact_block.header())
.uncles(uncles.pack())
.transactions(txs.into_iter().map(|tx| tx.data()).pack())
.proposals(compact_block.proposals())
.build()
.into_view();
debug_target!(
crate::LOG_TARGET_RELAY,
"finish block reconstruction, block hash: {}",
compact_block.calc_header_hash(),
);
let compact_block_tx_root = compact_block.header().raw().transactions_root();
let reconstruct_block_tx_root = block.transactions_root();
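            // If the roots differ, distinguish a bad block from a short-id collision: when the
            // compact block carried no short ids, or every short id was resolved from the
            // received transactions, a collision is impossible and the mismatch is an error;
            // otherwise it is treated as a possible short-id collision.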
if compact_block_tx_root != reconstruct_block_tx_root {
if compact_block.short_ids().is_empty()
|| compact_block.short_ids().len() == block_txs_len
{
return ReconstructionResult::Error(
StatusCode::CompactBlockHasUnmatchedTransactionRootWithReconstructedBlock
.with_context(format!(
"Compact_block_tx_root({}) != reconstruct_block_tx_root({})",
compact_block.header().raw().transactions_root(),
block.transactions_root(),
)),
);
} else {
return ReconstructionResult::Collided;
}
}
ReconstructionResult::Block(block)
} else {
let missing_indexes: Vec<usize> = block_transactions
.iter()
.enumerate()
.filter_map(|(i, t)| if t.is_none() { Some(i) } else { None })
.collect();
debug_target!(
crate::LOG_TARGET_RELAY,
"block reconstruction failed, block hash: {}, missing: {}, total: {}",
compact_block.calc_header_hash(),
missing_indexes.len(),
compact_block.short_ids().len(),
);
ReconstructionResult::Missing(missing_indexes, missing_uncles)
}
}
fn prune_tx_proposal_request(&self, nc: &dyn CKBProtocolContext) {
let get_block_proposals = self.shared().state().clear_get_block_proposals();
let tx_pool = self.shared.shared().tx_pool_controller();
let fetch_txs = tx_pool.fetch_txs(get_block_proposals.keys().cloned().collect());
if let Err(err) = fetch_txs {
debug_target!(
crate::LOG_TARGET_RELAY,
"relayer prune_tx_proposal_request internal error: {:?}",
err,
);
return;
}
let txs = fetch_txs.unwrap();
let mut peer_txs = HashMap::new();
for (id, peer_indices) in get_block_proposals.into_iter() {
if let Some(tx) = txs.get(&id) {
for peer_index in peer_indices {
let tx_set = peer_txs.entry(peer_index).or_insert_with(Vec::new);
tx_set.push(tx.clone());
}
}
}
for (peer_index, txs) in peer_txs {
let content = packed::BlockProposal::new_builder()
.transactions(txs.into_iter().map(|tx| tx.data()).pack())
.build();
let message = packed::RelayMessage::new_builder().set(content).build();
if let Err(err) = nc.send_message_to(peer_index, message.as_bytes()) {
debug_target!(
crate::LOG_TARGET_RELAY,
"relayer send BlockProposal error: {:?}",
err,
);
}
crate::relayer::metrics_counter_send(message.to_enum().item_name());
}
}
/// Ask for relay transaction by hash from all peers
pub fn ask_for_txs(&self, nc: &dyn CKBProtocolContext) {
let state = self.shared().state();
for (peer, peer_state) in state.peers().state.write().iter_mut() {
let tx_hashes = peer_state
.pop_ask_for_txs()
.into_iter()
.filter(|tx_hash| {
let already_known = state.already_known_tx(&tx_hash);
if already_known {
// Remove tx_hash from `tx_ask_for_set`
peer_state.remove_ask_for_tx(&tx_hash);
}
!already_known
})
.take(MAX_RELAY_TXS_NUM_PER_BATCH)
.collect::<Vec<_>>();
if !tx_hashes.is_empty() {
debug_target!(
crate::LOG_TARGET_RELAY,
"Send get transaction ({} hashes) to {}",
tx_hashes.len(),
peer,
);
let content = packed::GetRelayTransactions::new_builder()
.tx_hashes(tx_hashes.pack())
.build();
let message = packed::RelayMessage::new_builder().set(content).build();
if let Err(err) = nc.send_message_to(*peer, message.as_bytes()) {
debug_target!(
crate::LOG_TARGET_RELAY,
"relayer send Transaction error: {:?}",
err,
);
}
crate::relayer::metrics_counter_send(message.to_enum().item_name());
}
}
}
/// Send bulk of tx hashes to selected peers
pub fn send_bulk_of_tx_hashes(&self, nc: &dyn CKBProtocolContext) {
let connected_peers = nc.connected_peers();
if connected_peers.is_empty() {
return;
}
let mut selected: HashMap<PeerIndex, Vec<Byte32>> = HashMap::default();
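        // For every pending hash, pick up to MAX_RELAY_PEERS peers that have not seen it yet
        // (excluding the peer it came from), capping each peer at MAX_RELAY_TXS_NUM_PER_BATCH
        // hashes per round.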
{
let peer_tx_hashes = self.shared.state().take_tx_hashes();
let mut known_txs = self.shared.state().known_txs();
for (peer_index, tx_hashes) in peer_tx_hashes.into_iter() {
for tx_hash in tx_hashes {
for &peer in connected_peers
.iter()
.filter(|&target_peer| {
known_txs.insert(*target_peer, tx_hash.clone())
&& (peer_index != *target_peer)
})
.take(MAX_RELAY_PEERS)
{
let hashes = selected
.entry(peer)
.or_insert_with(|| Vec::with_capacity(MAX_RELAY_TXS_NUM_PER_BATCH));
if hashes.len() < MAX_RELAY_TXS_NUM_PER_BATCH {
hashes.push(tx_hash.clone());
}
}
}
}
};
for (peer, hashes) in selected {
let content = packed::RelayTransactionHashes::new_builder()
.tx_hashes(hashes.pack())
.build();
let message = packed::RelayMessage::new_builder().set(content).build();
if let Err(err) = nc.filter_broadcast(TargetSession::Single(peer), message.as_bytes()) {
debug_target!(
crate::LOG_TARGET_RELAY,
"relayer send TransactionHashes error: {:?}",
err,
);
}
}
}
}
impl CKBProtocolHandler for Relayer {
fn init(&mut self, nc: Arc<dyn CKBProtocolContext + Sync>) {
nc.set_notify(Duration::from_millis(100), TX_PROPOSAL_TOKEN)
.expect("set_notify at init is ok");
nc.set_notify(Duration::from_millis(100), ASK_FOR_TXS_TOKEN)
.expect("set_notify at init is ok");
nc.set_notify(Duration::from_millis(300), TX_HASHES_TOKEN)
.expect("set_notify at init is ok");
// todo: remove when the asynchronous verification is completed
nc.set_notify(Duration::from_secs(5), SEARCH_ORPHAN_POOL_TOKEN)
.expect("set_notify at init is ok");
}
fn received(
&mut self,
nc: Arc<dyn CKBProtocolContext + Sync>,
peer_index: PeerIndex,
data: Bytes,
) {
// If self is in the IBD state, don't process any relayer message.
if self.shared.active_chain().is_initial_block_download() {
return;
}
let msg = match packed::RelayMessage::from_slice(&data) {
Ok(msg) => msg.to_enum(),
_ => {
info_target!(
crate::LOG_TARGET_RELAY,
"Peer {} sends us a malformed message",
peer_index
);
nc.ban_peer(
peer_index,
BAD_MESSAGE_BAN_TIME,
String::from("send us a malformed message"),
);
return;
}
};
debug_target!(
crate::LOG_TARGET_RELAY,
"received msg {} from {}",
msg.item_name(),
peer_index
);
#[cfg(feature = "with_sentry")]
{
let sentry_hub = sentry::Hub::current();
let _scope_guard = sentry_hub.push_scope();
sentry_hub.configure_scope(|scope| {
scope.set_tag("p2p.protocol", "relayer");
scope.set_tag("p2p.message", msg.item_name());
});
}
let start_time = Instant::now();
self.process(nc, peer_index, msg.as_reader());
debug_target!(
crate::LOG_TARGET_RELAY,
"process message={}, peer={}, cost={:?}",
msg.item_name(),
peer_index,
start_time.elapsed(),
);
}
fn connected(
&mut self,
_nc: Arc<dyn CKBProtocolContext + Sync>,
peer_index: PeerIndex,
version: &str,
) {
self.shared().state().peers().relay_connected(peer_index);
info_target!(
crate::LOG_TARGET_RELAY,
"RelayProtocol({}).connected peer={}",
version,
peer_index
);
}
fn disconnected(&mut self, _nc: Arc<dyn CKBProtocolContext + Sync>, peer_index: PeerIndex) {
info_target!(
crate::LOG_TARGET_RELAY,
"RelayProtocol.disconnected peer={}",
peer_index
);
// Retains all keys in the rate limiter that were used recently enough.
self.rate_limiter.lock().retain_recent();
}
fn notify(&mut self, nc: Arc<dyn CKBProtocolContext + Sync>, token: u64) {
// If self is in the IBD state, don't trigger any relayer notify.
if self.shared.active_chain().is_initial_block_download() {
return;
}
let start_time = Instant::now();
trace_target!(crate::LOG_TARGET_RELAY, "start notify token={}", token);
match token {
TX_PROPOSAL_TOKEN => {
tokio::task::block_in_place(|| self.prune_tx_proposal_request(nc.as_ref()))
}
ASK_FOR_TXS_TOKEN => self.ask_for_txs(nc.as_ref()),
TX_HASHES_TOKEN => self.send_bulk_of_tx_hashes(nc.as_ref()),
SEARCH_ORPHAN_POOL_TOKEN => tokio::task::block_in_place(|| {
self.shared.try_search_orphan_pool(
&self.chain,
&self.shared.active_chain().tip_header().hash(),
)
}),
_ => unreachable!(),
}
trace_target!(
crate::LOG_TARGET_RELAY,
"finished notify token={} cost={:?}",
token,
start_time.elapsed()
);
}
}
pub(self) fn metrics_counter_send(item_name: &str) {
metrics!(counter, "ckb-net.sent", 1, "action" => "relay", "item" => item_name.to_owned());
}
| 38.067017 | 119 | 0.540958 |
1e826a4f93cd8288a236f03c95c3b624f4a2cb7f
| 3,091 |
use internal::Timer;
use model::id::GuildId;
use std::{
sync::mpsc::{Receiver as MpscReceiver, TryRecvError},
thread::Builder as ThreadBuilder
};
use super::{
connection::Connection,
Status
};
pub(crate) fn start(guild_id: GuildId, rx: MpscReceiver<Status>) {
let name = format!("Serenity Voice (G{})", guild_id);
ThreadBuilder::new()
.name(name)
.spawn(move || runner(&rx))
.expect(&format!("[Voice] Error starting guild: {:?}", guild_id));
}
fn runner(rx: &MpscReceiver<Status>) {
let mut senders = Vec::new();
let mut receiver = None;
let mut connection = None;
let mut timer = Timer::new(20);
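    // State owned by this runner thread: the audio senders, an optional receiver, the current
    // voice connection, and a timer that paces each update cycle.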
'runner: loop {
loop {
match rx.try_recv() {
Ok(Status::Connect(info)) => {
connection = match Connection::new(info) {
Ok(connection) => Some(connection),
Err(why) => {
warn!("[Voice] Error connecting: {:?}", why);
None
},
};
},
Ok(Status::Disconnect) => {
connection = None;
},
Ok(Status::SetReceiver(r)) => {
receiver = r;
},
Ok(Status::SetSender(s)) => {
senders.clear();
if let Some(aud) = s {
senders.push(aud);
}
},
Ok(Status::AddSender(s)) => {
senders.push(s);
},
Err(TryRecvError::Empty) => {
                    // If we received nothing, then we can perform an update.
break;
},
Err(TryRecvError::Disconnected) => {
break 'runner;
},
}
}
// Overall here, check if there's an error.
//
            // If there is a connection, try to send an update. This should not
            // error, but if it does for some spurious reason, set `error` to
            // `true`.
            //
            // Otherwise, wait out the timer, do _not_ error, and wait to receive
            // another event.
let error = match connection.as_mut() {
Some(connection) => {
let cycle = connection.cycle(&mut senders, &mut receiver, &mut timer);
match cycle {
Ok(()) => false,
Err(why) => {
error!(
"(╯°□°)╯︵ ┻━┻ Error updating connection: {:?}",
why
);
true
},
}
},
None => {
timer.await();
false
},
};
// If there was an error, then just reset the connection and try to get
// another.
if error {
connection = None;
}
}
}
| 29.721154 | 86 | 0.413135 |
e9a2ff7c795f533278b3fe1d0d24852c14c23d2e
| 1,275 |
//! A category that contains data about its collection.
//! These are the main stores of the data used by Way Cooler and its clients.
use std::ops::{Deref, DerefMut};
use std::collections::hash_map::HashMap;
use rustc_serialize::json::{Json};
/// The main data mapping between a key and some Json.
pub type DataMap = HashMap<String, Json>;
/// A category that has a canonical name, and some data.
///
/// The `Category` can be used exactly like a hash map.
#[derive(Clone, Debug)]
pub struct Category {
name: String,
data: HashMap<String, Json>
}
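// Categories compare equal when their names match; the stored data is not considered.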
impl PartialEq for Category {
fn eq(&self, other: &Category) -> bool {
self.name == other.name
}
}
impl Eq for Category {}
impl Category {
/// Makes a new category that has some name.
/// Data mapping is initially empty.
pub fn new(name: String) -> Self {
Category {
            name,
data: HashMap::new()
}
}
/// Gets the name of the Category.
pub fn name(&self) -> &str {
self.name.as_str()
}
}
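// Deref/DerefMut to the inner map so a `Category` can be read and mutated exactly like a
// `HashMap<String, Json>`.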
impl Deref for Category {
type Target = DataMap;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl DerefMut for Category {
fn deref_mut(&mut self) -> &mut DataMap {
&mut self.data
}
}
| 21.610169 | 77 | 0.614902 |
b9574dcc9c9fbbfcb3e68d006e159950d17588b5
| 10,548 |
// You have to run from the ezgui crate (abstreet/ezgui), due to relative paths to fonts and
// images.
//
// To run:
// > cargo run --example demo
//
// Try the web version, but there's no text rendering yet:
// > cargo web start --target wasm32-unknown-unknown --no-default-features \
// --features wasm-backend --example demo
use ezgui::{
hotkey, lctrl, Btn, Checkbox, Color, Composite, Drawable, EventCtx, GeomBatch, GfxCtx,
HorizontalAlignment, Key, Line, LinePlot, Outcome, PlotOptions, Series, Text, TextExt,
UpdateType, VerticalAlignment, Widget, GUI,
};
use geom::{Angle, Duration, Polygon, Pt2D, Time};
use rand::SeedableRng;
use rand_xorshift::XorShiftRng;
use std::collections::HashSet;
fn main() {
    // Control flow is surrendered here. App implements GUI, which has an event handler and a
    // draw callback.
ezgui::run(
ezgui::Settings::new("ezgui demo", "../data/system/fonts"),
|ctx| App::new(ctx),
);
}
struct App {
controls: Composite,
timeseries_panel: Option<(Duration, Composite)>,
scrollable_canvas: Drawable,
elapsed: Duration,
}
impl App {
fn new(ctx: &mut EventCtx) -> App {
App {
controls: make_controls(ctx),
timeseries_panel: None,
scrollable_canvas: setup_scrollable_canvas(ctx),
elapsed: Duration::ZERO,
}
}
fn make_timeseries_panel(&self, ctx: &mut EventCtx) -> Composite {
// Make a table with 3 columns.
let mut col1 = vec![Line("Time").draw(ctx)];
        let mut col2 = vec![Line("Linear").draw(ctx)];
let mut col3 = vec![Line("Quadratic").draw(ctx)];
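        // One row is added per elapsed whole second, so the table grows while the stopwatch runs.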
for s in 0..(self.elapsed.inner_seconds() as usize) {
col1.push(
Line(Duration::seconds(s as f64).to_string())
.secondary()
.draw(ctx),
);
            col2.push(Line(s.to_string()).secondary().draw(ctx));
col3.push(Line(s.pow(2).to_string()).secondary().draw(ctx));
}
let mut c = Composite::new(Widget::col(vec![
Text::from_multiline(vec![
Line("Here's a bunch of text to force some scrolling.").small_heading(),
Line(
"Bug: scrolling by clicking and dragging doesn't work while the stopwatch is \
running.",
)
.fg(Color::RED),
])
.draw(ctx),
Widget::row(vec![
// Examples of styling widgets
Widget::col(col1).outline(3.0, Color::BLACK).padding(5),
                Widget::col(col2).outline(3.0, Color::BLACK).padding(5),
Widget::col(col3).outline(3.0, Color::BLACK).padding(5),
]),
LinePlot::new(
ctx,
vec![
Series {
label: "Linear".to_string(),
color: Color::GREEN,
// These points are (x axis = Time, y axis = usize)
pts: (0..(self.elapsed.inner_seconds() as usize))
.map(|s| (Time::START_OF_DAY + Duration::seconds(s as f64), s))
.collect(),
},
Series {
label: "Quadratic".to_string(),
color: Color::BLUE,
pts: (0..(self.elapsed.inner_seconds() as usize))
.map(|s| (Time::START_OF_DAY + Duration::seconds(s as f64), s.pow(2)))
.collect(),
},
],
PlotOptions {
filterable: false,
// Without this, the plot doesn't stretch to cover times in between whole
// seconds.
max_x: Some(Time::START_OF_DAY + self.elapsed),
max_y: None,
disabled: HashSet::new(),
},
),
]))
// Don't let the panel exceed this percentage of the window. Scrollbars appear
// automatically if needed.
.max_size_percent(30, 40)
// We take up 30% width, and we want to leave 10% window width as buffer.
.aligned(HorizontalAlignment::Percent(0.6), VerticalAlignment::Center)
.build(ctx);
// Since we're creating an entirely new panel when the time changes, we need to preserve
// some internal state, like scroll and whether plot checkboxes were enabled.
if let Some((_, ref old)) = self.timeseries_panel {
c.restore(ctx, old);
}
c
}
}
impl GUI for App {
fn event(&mut self, ctx: &mut EventCtx) {
// Allow panning and zooming to work.
ctx.canvas_movement();
// This dispatches event handling to all of the widgets inside.
match self.controls.event(ctx) {
Some(Outcome::Clicked(x)) => match x.as_ref() {
// These outcomes should probably be a custom enum per Composite, to be more
// typesafe.
"reset the stopwatch" => {
self.elapsed = Duration::ZERO;
// We can replace any named widget with another one. Layout gets recalculated.
self.controls.replace(
ctx,
"stopwatch",
format!("Stopwatch: {}", self.elapsed)
.draw_text(ctx)
.named("stopwatch"),
);
}
"generate new faces" => {
self.scrollable_canvas = setup_scrollable_canvas(ctx);
}
_ => unreachable!(),
},
None => {}
}
// An update event means that no keyboard/mouse input happened, but time has passed.
// (Ignore the "nonblocking"; this API is funky right now. Only one caller "consumes" an
// event, so that multiple things don't all respond to one keypress, but that's set up
// oddly for update events.)
if let Some(dt) = ctx.input.nonblocking_is_update_event() {
ctx.input.use_update_event();
self.elapsed += dt;
self.controls.replace(
ctx,
"stopwatch",
format!("Stopwatch: {}", self.elapsed)
.draw_text(ctx)
.named("stopwatch"),
);
}
if self.controls.is_checked("Show timeseries") {
// Update the panel when time changes.
if self
.timeseries_panel
.as_ref()
.map(|(dt, _)| *dt != self.elapsed)
.unwrap_or(true)
{
self.timeseries_panel = Some((self.elapsed, self.make_timeseries_panel(ctx)));
}
} else {
self.timeseries_panel = None;
}
if let Some((_, ref mut p)) = self.timeseries_panel {
match p.event(ctx) {
// No buttons in there
Some(Outcome::Clicked(_)) => unreachable!(),
None => {}
}
}
// If we're paused, only call event() again when there's some kind of input. If not, also
// sprinkle in periodic update events as time passes.
if !self.controls.is_checked("paused") {
ctx.request_update(UpdateType::Game);
}
}
fn draw(&self, g: &mut GfxCtx) {
g.clear(Color::BLACK);
if self.controls.is_checked("Draw scrollable canvas") {
g.redraw(&self.scrollable_canvas);
}
self.controls.draw(g);
if let Some((_, ref p)) = self.timeseries_panel {
p.draw(g);
}
}
}
// This prepares a bunch of geometry (colored polygons) and uploads it to the GPU once. Then it can
// be redrawn cheaply later.
fn setup_scrollable_canvas(ctx: &mut EventCtx) -> Drawable {
let mut batch = GeomBatch::new();
batch.push(
Color::hex("#4E30A6"),
Polygon::rounded_rectangle(5000.0, 5000.0, Some(25.0)),
);
// SVG support using lyon and usvg. Map-space means don't scale for high DPI monitors.
batch.append(
GeomBatch::mapspace_svg(&ctx.prerender, "../data/system/assets/pregame/logo.svg")
.translate(300.0, 300.0),
);
// Text rendering also goes through lyon and usvg.
batch.append(
Text::from(Line("Awesome vector text thanks to usvg and lyon").fg(Color::hex("#DF8C3D")))
.render_to_batch(&ctx.prerender)
.scale(2.0)
.centered_on(Pt2D::new(600.0, 500.0))
.rotate(Angle::new_degs(-30.0)),
);
let mut rng = XorShiftRng::from_entropy();
for i in 0..10 {
let mut svg_data = Vec::new();
svg_face::generate_face(&mut svg_data, &mut rng).unwrap();
let face = GeomBatch::from_svg_contents(svg_data).autocrop();
let dims = face.get_dims();
batch.append(
face.scale((200.0 / dims.width).min(200.0 / dims.height))
.translate(250.0 * (i as f64), 0.0),
);
}
// This is a bit of a hack; it's needed so that zooming in/out has reasonable limits.
ctx.canvas.map_dims = (5000.0, 5000.0);
batch.upload(ctx)
}
fn make_controls(ctx: &mut EventCtx) -> Composite {
Composite::new(Widget::col(vec![
Text::from_multiline(vec![
Line("ezgui demo").small_heading(),
Line("Click and drag to pan, use touchpad or scroll wheel to zoom"),
])
.draw(ctx),
Widget::row(vec![
// This just cycles between two arbitrary buttons
Checkbox::new(
false,
Btn::text_bg1("Pause").build(ctx, "pause the stopwatch", hotkey(Key::Space)),
Btn::text_bg1("Resume").build(ctx, "resume the stopwatch", hotkey(Key::Space)),
)
.named("paused"),
Btn::text_fg("Reset timer").build(ctx, "reset the stopwatch", None),
Btn::text_fg("New faces").build(ctx, "generate new faces", hotkey(Key::F)),
Checkbox::text(ctx, "Draw scrollable canvas", None, true),
Checkbox::text(ctx, "Show timeseries", lctrl(Key::T), false),
])
.evenly_spaced(),
"Stopwatch: ...".draw_text(ctx).named("stopwatch"),
]))
.aligned(HorizontalAlignment::Center, VerticalAlignment::Top)
.build(ctx)
}
| 38.079422 | 99 | 0.530622 |
eb82d56494f96e0c12493cb9b19ffd1e5b2169d6
| 18,578 |
use crate::schema;
use graph::prelude::s::{EnumType, InputValue, ScalarType, Type, TypeDefinition};
use graph::prelude::{q, r, QueryExecutionError};
use std::collections::{BTreeMap, HashMap};
/// A GraphQL value that can be coerced according to a type.
pub trait MaybeCoercible<T> {
/// On error, `self` is returned as `Err(self)`.
fn coerce(self, using_type: &T) -> Result<r::Value, q::Value>;
}
impl MaybeCoercible<EnumType> for q::Value {
fn coerce(self, using_type: &EnumType) -> Result<r::Value, q::Value> {
match self {
q::Value::Null => Ok(r::Value::Null),
q::Value::String(name) | q::Value::Enum(name)
if using_type.values.iter().any(|value| value.name == name) =>
{
Ok(r::Value::Enum(name))
}
_ => Err(self),
}
}
}
impl MaybeCoercible<ScalarType> for q::Value {
fn coerce(self, using_type: &ScalarType) -> Result<r::Value, q::Value> {
match (using_type.name.as_str(), self) {
(_, q::Value::Null) => Ok(r::Value::Null),
("Boolean", q::Value::Boolean(b)) => Ok(r::Value::Boolean(b)),
("BigDecimal", q::Value::Float(f)) => Ok(r::Value::String(f.to_string())),
("BigDecimal", q::Value::Int(i)) => Ok(r::Value::String(
i.as_i64().ok_or(q::Value::Int(i))?.to_string(),
)),
("BigDecimal", q::Value::String(s)) => Ok(r::Value::String(s)),
("Int", q::Value::Int(num)) => {
let n = num.as_i64().ok_or_else(|| q::Value::Int(num.clone()))?;
if i32::min_value() as i64 <= n && n <= i32::max_value() as i64 {
Ok(r::Value::Int((n as i32).into()))
} else {
Err(q::Value::Int(num))
}
}
("String", q::Value::String(s)) => Ok(r::Value::String(s)),
("ID", q::Value::String(s)) => Ok(r::Value::String(s)),
("ID", q::Value::Int(n)) => Ok(r::Value::String(
n.as_i64().ok_or(q::Value::Int(n))?.to_string(),
)),
("Bytes", q::Value::String(s)) => Ok(r::Value::String(s)),
("BigInt", q::Value::String(s)) => Ok(r::Value::String(s)),
("BigInt", q::Value::Int(n)) => Ok(r::Value::String(
n.as_i64().ok_or(q::Value::Int(n))?.to_string(),
)),
(_, v) => Err(v),
}
}
}
/// On error, the `value` is returned as `Err(value)`.
fn coerce_to_definition<'a>(
value: q::Value,
definition: &str,
resolver: &impl Fn(&str) -> Option<&'a TypeDefinition>,
variables: &HashMap<String, r::Value>,
) -> Result<r::Value, q::Value> {
match resolver(definition).ok_or_else(|| value.clone())? {
// Accept enum values if they match a value in the enum type
TypeDefinition::Enum(t) => value.coerce(t),
// Try to coerce Scalar values
TypeDefinition::Scalar(t) => value.coerce(t),
// Try to coerce InputObject values
TypeDefinition::InputObject(t) => match value {
q::Value::Object(object) => {
let object_for_error = q::Value::Object(object.clone());
let mut coerced_object = BTreeMap::new();
for (name, value) in object {
let def = t
.fields
.iter()
.find(|f| f.name == name)
.ok_or_else(|| object_for_error.clone())?;
coerced_object.insert(
name.clone(),
match coerce_input_value(Some(value), def, resolver, variables) {
Err(_) | Ok(None) => return Err(object_for_error),
Ok(Some(v)) => v,
},
);
}
Ok(r::Value::Object(coerced_object))
}
_ => Err(value),
},
// Everything else remains unimplemented
_ => Err(value),
}
}
/// Coerces an argument into a GraphQL value.
///
/// `Ok(None)` happens when no value is found for a nullable type.
pub(crate) fn coerce_input_value<'a>(
mut value: Option<q::Value>,
def: &InputValue,
resolver: &impl Fn(&str) -> Option<&'a TypeDefinition>,
variable_values: &HashMap<String, r::Value>,
) -> Result<Option<r::Value>, QueryExecutionError> {
if let Some(q::Value::Variable(name)) = value {
value = variable_values.get(&name).cloned().map(Into::into);
};
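    // A missing variable falls through as `None` and is handled like a missing argument below.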
// Use the default value if necessary and present.
value = value.or(def.default_value.clone());
// Extract value, checking for null or missing.
let value = match value {
None => {
return if schema::ast::is_non_null_type(&def.value_type) {
Err(QueryExecutionError::MissingArgumentError(
def.position,
def.name.to_owned(),
))
} else {
Ok(None)
};
}
Some(value) => value,
};
Ok(Some(
coerce_value(value, &def.value_type, resolver, variable_values).map_err(|val| {
QueryExecutionError::InvalidArgumentError(def.position, def.name.to_owned(), val)
})?,
))
}
/// On error, the `value` is returned as `Err(value)`.
pub(crate) fn coerce_value<'a>(
value: q::Value,
ty: &Type,
resolver: &impl Fn(&str) -> Option<&'a TypeDefinition>,
variable_values: &HashMap<String, r::Value>,
) -> Result<r::Value, q::Value> {
match (ty, value) {
// Null values cannot be coerced into non-null types.
(Type::NonNullType(_), q::Value::Null) => Err(q::Value::Null),
// Non-null values may be coercible into non-null types
(Type::NonNullType(_), val) => {
// We cannot bind `t` in the pattern above because "binding by-move and by-ref in the
// same pattern is unstable". Refactor this and the others when Rust fixes this.
let t = match ty {
Type::NonNullType(ty) => ty,
_ => unreachable!(),
};
coerce_value(val, t, resolver, variable_values)
}
// Nullable types can be null.
(_, q::Value::Null) => Ok(r::Value::Null),
// Resolve named types, then try to coerce the value into the resolved type
(Type::NamedType(_), val) => {
let name = match ty {
Type::NamedType(name) => name,
_ => unreachable!(),
};
coerce_to_definition(val, name, resolver, variable_values)
}
// List values are coercible if their values are coercible into the
// inner type.
(Type::ListType(_), q::Value::List(values)) => {
let t = match ty {
Type::ListType(ty) => ty,
_ => unreachable!(),
};
let mut coerced_values = vec![];
// Coerce the list values individually
for value in values {
coerced_values.push(coerce_value(value, t, resolver, variable_values)?);
}
Ok(r::Value::List(coerced_values))
}
// Otherwise the list type is not coercible.
(Type::ListType(_), value) => Err(value),
}
}
#[cfg(test)]
mod tests {
use graph::prelude::{q, r::Value};
use graphql_parser::schema::{EnumType, EnumValue, ScalarType, TypeDefinition};
use graphql_parser::Pos;
use std::collections::HashMap;
use super::coerce_to_definition;
#[test]
fn coercion_using_enum_type_definitions_is_correct() {
let enum_type = TypeDefinition::Enum(EnumType {
name: "Enum".to_string(),
description: None,
directives: vec![],
position: Pos::default(),
values: vec![EnumValue {
name: "ValidVariant".to_string(),
position: Pos::default(),
description: None,
directives: vec![],
}],
});
let resolver = |_: &str| Some(&enum_type);
// We can coerce from Value::Enum -> TypeDefinition::Enum if the variant is valid
assert_eq!(
coerce_to_definition(
q::Value::Enum("ValidVariant".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::Enum("ValidVariant".to_string()))
);
// We cannot coerce from Value::Enum -> TypeDefinition::Enum if the variant is invalid
assert!(coerce_to_definition(
q::Value::Enum("InvalidVariant".to_string()),
"",
&resolver,
&HashMap::new()
)
.is_err());
        // We also support going from Value::String -> TypeDefinition::Enum
assert_eq!(
coerce_to_definition(
q::Value::String("ValidVariant".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::Enum("ValidVariant".to_string())),
);
// But we don't support invalid variants
assert!(coerce_to_definition(
q::Value::String("InvalidVariant".to_string()),
"",
&resolver,
&HashMap::new()
)
.is_err());
}
#[test]
fn coercion_using_boolean_type_definitions_is_correct() {
let bool_type = TypeDefinition::Scalar(ScalarType {
name: "Boolean".to_string(),
description: None,
directives: vec![],
position: Pos::default(),
});
let resolver = |_: &str| Some(&bool_type);
// We can coerce from Value::Boolean -> TypeDefinition::Scalar(Boolean)
assert_eq!(
coerce_to_definition(q::Value::Boolean(true), "", &resolver, &HashMap::new()),
Ok(Value::Boolean(true))
);
assert_eq!(
coerce_to_definition(q::Value::Boolean(false), "", &resolver, &HashMap::new()),
Ok(Value::Boolean(false))
);
// We don't support going from Value::String -> TypeDefinition::Scalar(Boolean)
assert!(coerce_to_definition(
q::Value::String("true".to_string()),
"",
&resolver,
&HashMap::new()
)
.is_err());
assert!(coerce_to_definition(
q::Value::String("false".to_string()),
"",
&resolver,
&HashMap::new()
)
.is_err());
// We don't support going from Value::Float -> TypeDefinition::Scalar(Boolean)
assert!(
coerce_to_definition(q::Value::Float(1.0), "", &resolver, &HashMap::new()).is_err()
);
assert!(
coerce_to_definition(q::Value::Float(0.0), "", &resolver, &HashMap::new()).is_err()
);
}
#[test]
fn coercion_using_big_decimal_type_definitions_is_correct() {
let big_decimal_type = TypeDefinition::Scalar(ScalarType::new("BigDecimal".to_string()));
let resolver = |_: &str| Some(&big_decimal_type);
// We can coerce from Value::Float -> TypeDefinition::Scalar(BigDecimal)
assert_eq!(
coerce_to_definition(q::Value::Float(23.7), "", &resolver, &HashMap::new()),
Ok(Value::String("23.7".to_string()))
);
assert_eq!(
coerce_to_definition(q::Value::Float(-5.879), "", &resolver, &HashMap::new()),
Ok(Value::String("-5.879".to_string()))
);
// We can coerce from Value::String -> TypeDefinition::Scalar(BigDecimal)
assert_eq!(
coerce_to_definition(
q::Value::String("23.7".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("23.7".to_string()))
);
assert_eq!(
coerce_to_definition(
q::Value::String("-5.879".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("-5.879".to_string())),
);
// We can coerce from Value::Int -> TypeDefinition::Scalar(BigDecimal)
assert_eq!(
coerce_to_definition(q::Value::Int(23.into()), "", &resolver, &HashMap::new()),
Ok(Value::String("23".to_string()))
);
assert_eq!(
coerce_to_definition(
q::Value::Int((-5 as i32).into()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("-5".to_string())),
);
        // We don't support going from Value::Boolean -> TypeDefinition::Scalar(BigDecimal)
assert!(
coerce_to_definition(q::Value::Boolean(true), "", &resolver, &HashMap::new()).is_err()
);
assert!(
coerce_to_definition(q::Value::Boolean(false), "", &resolver, &HashMap::new()).is_err()
);
}
#[test]
fn coercion_using_string_type_definitions_is_correct() {
let string_type = TypeDefinition::Scalar(ScalarType::new("String".to_string()));
let resolver = |_: &str| Some(&string_type);
// We can coerce from Value::String -> TypeDefinition::Scalar(String)
assert_eq!(
coerce_to_definition(
q::Value::String("foo".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("foo".to_string()))
);
assert_eq!(
coerce_to_definition(
q::Value::String("bar".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("bar".to_string()))
);
// We don't support going from Value::Boolean -> TypeDefinition::Scalar(String)
assert!(
coerce_to_definition(q::Value::Boolean(true), "", &resolver, &HashMap::new()).is_err()
);
assert!(
coerce_to_definition(q::Value::Boolean(false), "", &resolver, &HashMap::new()).is_err()
);
// We don't support going from Value::Float -> TypeDefinition::Scalar(String)
assert!(
coerce_to_definition(q::Value::Float(23.7), "", &resolver, &HashMap::new()).is_err()
);
assert!(
coerce_to_definition(q::Value::Float(-5.879), "", &resolver, &HashMap::new()).is_err()
);
}
#[test]
fn coercion_using_id_type_definitions_is_correct() {
let string_type = TypeDefinition::Scalar(ScalarType::new("ID".to_owned()));
let resolver = |_: &str| Some(&string_type);
// We can coerce from Value::String -> TypeDefinition::Scalar(ID)
assert_eq!(
coerce_to_definition(
q::Value::String("foo".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("foo".to_string()))
);
assert_eq!(
coerce_to_definition(
q::Value::String("bar".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("bar".to_string()))
);
// And also from Value::Int
assert_eq!(
coerce_to_definition(q::Value::Int(1234.into()), "", &resolver, &HashMap::new()),
Ok(Value::String("1234".to_string()))
);
// We don't support going from Value::Boolean -> TypeDefinition::Scalar(ID)
assert!(
coerce_to_definition(q::Value::Boolean(true), "", &resolver, &HashMap::new()).is_err()
);
assert!(
coerce_to_definition(q::Value::Boolean(false), "", &resolver, &HashMap::new()).is_err()
);
// We don't support going from Value::Float -> TypeDefinition::Scalar(ID)
assert!(
coerce_to_definition(q::Value::Float(23.7), "", &resolver, &HashMap::new()).is_err()
);
assert!(
coerce_to_definition(q::Value::Float(-5.879), "", &resolver, &HashMap::new()).is_err()
);
}
#[test]
fn coerce_big_int_scalar() {
let big_int_type = TypeDefinition::Scalar(ScalarType::new("BigInt".to_string()));
let resolver = |_: &str| Some(&big_int_type);
// We can coerce from Value::String -> TypeDefinition::Scalar(BigInt)
assert_eq!(
coerce_to_definition(
q::Value::String("1234".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("1234".to_string()))
);
// And also from Value::Int
assert_eq!(
coerce_to_definition(q::Value::Int(1234.into()), "", &resolver, &HashMap::new()),
Ok(Value::String("1234".to_string()))
);
assert_eq!(
coerce_to_definition(
q::Value::Int((-1234 as i32).into()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("-1234".to_string()))
);
}
#[test]
fn coerce_bytes_scalar() {
let bytes_type = TypeDefinition::Scalar(ScalarType::new("Bytes".to_string()));
let resolver = |_: &str| Some(&bytes_type);
// We can coerce from Value::String -> TypeDefinition::Scalar(Bytes)
assert_eq!(
coerce_to_definition(
q::Value::String("0x21f".to_string()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::String("0x21f".to_string()))
);
}
#[test]
fn coerce_int_scalar() {
let int_type = TypeDefinition::Scalar(ScalarType::new("Int".to_string()));
let resolver = |_: &str| Some(&int_type);
assert_eq!(
coerce_to_definition(
q::Value::Int(13289123.into()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::Int(13289123.into()))
);
assert_eq!(
coerce_to_definition(
q::Value::Int((-13289123 as i32).into()),
"",
&resolver,
&HashMap::new()
),
Ok(Value::Int((-13289123 as i32).into()))
);
}
}
| 34.986817 | 99 | 0.506405 |
0aa4027b045227a47ebe50e04e33a8145a2edaf6
| 6,922 |
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {argh::FromArgs, ffx_core::ffx_command};
/// entry point for ffx
#[ffx_command()]
#[derive(FromArgs, Debug, PartialEq)]
#[argh(subcommand, name = "vdl", description = "Start and manage Fuchsia emulators")]
pub struct EmulatorCommand {
#[argh(subcommand)]
pub command: VDLCommand,
/// running in fuchsia sdk (not inside the fuchsia code repository)
#[argh(switch)]
pub sdk: bool,
}
/// entry point for fvdl
#[derive(FromArgs, Debug, PartialEq)]
/// Commands to start/stop the emulator via fuchsia virtual device launcher (VDL)
pub struct Args {
#[argh(subcommand)]
pub command: VDLCommand,
/// running in fuchsia sdk (not inside the fuchsia code repository)
#[argh(switch)]
pub sdk: bool,
}
#[derive(FromArgs, Debug, PartialEq)]
#[argh(subcommand)]
pub enum VDLCommand {
Start(StartCommand),
Kill(KillCommand),
}
#[derive(FromArgs, Debug, PartialEq)]
#[argh(subcommand, name = "start")]
/// Starting Fuchsia Emulator
pub struct StartCommand {
/// bool, run emulator in headless mode.
#[argh(switch, short = 'H')]
pub headless: bool,
/// bool, run emulator with emulated nic via tun/tap.
#[argh(switch, short = 'N')]
pub tuntap: bool,
    /// bool, run emulator with host GPU acceleration; this doesn't work over remote desktop with --headless.
#[argh(switch)]
pub host_gpu: bool,
/// bool, run emulator without host GPU acceleration, default.
#[argh(switch)]
pub software_gpu: bool,
/// bool, enable pixel scaling on HiDPI devices.
#[argh(switch)]
pub hidpi_scaling: bool,
/// path to tun/tap upscript, this script will be executed before booting up FEMU.
#[argh(option, short = 'u')]
pub upscript: Option<String>,
/// comma separated string of fuchsia package urls, extra packages to serve after starting FEMU.
#[argh(option)]
pub packages_to_serve: Option<String>,
/// set pointing device used on emulator: mouse or touch screen. Allowed values are "touch", "mouse". Default is "touch".
#[argh(option, short = 'p')]
pub pointing_device: Option<String>,
/// emulator window width. Default to 1280.
#[argh(option, default = "default_window_width()", short = 'w')]
pub window_width: usize,
/// emulator window height. Default to 800.
#[argh(option, default = "default_window_height()", short = 'h')]
pub window_height: usize,
/// extends storage size to <size> bytes. Default is "2G".
#[argh(option, short = 's')]
pub image_size: Option<String>,
/// path to fuchsia virtual device configuration, if not specified a generic one will be generated.
#[argh(option, short = 'f')]
pub device_proto: Option<String>,
/// path to aemu location.
/// When running in fuchsia repo, defaults to looking in prebuilt/third_party/aemu/PLATFORM.
/// When running in fuchsia sdk, defaults to looking in $HOME/.fuchsia/femu.
#[argh(option, short = 'e')]
pub aemu_path: Option<String>,
/// label used to download AEMU from CIPD. Default is "integration".
/// Download only happens if aemu binary cannot be found from known paths.
#[argh(option)]
pub aemu_version: Option<String>,
/// device_launcher binary location.
/// When running in fuchsia repo, defaults to looking in prebuilt/vdl/device_launcher.
/// When running in fuchsia sdk, defaults to looking in directory containing `fvdl`.
#[argh(option, short = 'd')]
pub vdl_path: Option<String>,
/// label used to download vdl from CIPD. Default is "latest".
/// Download only happens if vdl (device_launcher) binary cannot be found from known paths.
#[argh(option)]
pub vdl_version: Option<String>,
/// enable WebRTC HTTP service on port, if set to 0 a random port will be picked
#[argh(option, short = 'x')]
pub grpcwebproxy: Option<usize>,
/// location of grpcwebproxy,
/// When running in fuchsia repo, defaults to looking in prebuilt/third_party/grpcwebproxy
/// When running in fuchsia sdk, defaults to looking in $HOME/.fuchsia/femu.
#[argh(option, short = 'X')]
pub grpcwebproxy_path: Option<String>,
/// label used to download grpcwebproxy from CIPD. Default is "latest".
/// Download only happens if --grpcwebproxy is set and grpcwebproxy binary cannot be found from known paths or path specified by --grpcwebproxy_path.
#[argh(option)]
pub grpcwebproxy_version: Option<String>,
    /// fuchsia sdk ID used to fetch from gcs; if specified, the emulator will launch with fuchsia sdk files fetched from gcs.
/// To find the latest version run `gsutil cat gs://fuchsia/development/LATEST_LINUX`.
#[argh(option, short = 'v')]
pub sdk_version: Option<String>,
/// gcs bucket name. Default is "fuchsia".
#[argh(option)]
pub gcs_bucket: Option<String>,
/// image file name used to fetch from gcs. Default is "qemu-x64".
    /// To view available image names run `gsutil ls -l gs://fuchsia/development/$(gsutil cat gs://fuchsia/development/LATEST_LINUX)/images`.
#[argh(option)]
pub image_name: Option<String>,
/// file path to store emulator log. Default is a temp file that is deleted after `fvdl` exits.
#[argh(option, short = 'l')]
pub emulator_log: Option<String>,
/// host port mapping for user-networking mode. This flag will be ignored if --tuntap is used.
/// If not specified, an ssh port on host will be randomly picked and forwarded.
/// ex: hostfwd=tcp::<host_port>-:<guest_port>,hostfwd=tcp::<host_port>-:<guest_port>
#[argh(option)]
pub port_map: Option<String>,
/// file destination to write `device_launcher` output.
/// Required for --nointeractive mode. Default is a temp file that is deleted after `fvdl` exits.
/// Specify this flag if you plan to use the `kill` subcommand.
#[argh(option)]
pub vdl_output: Option<String>,
/// bool, turn off interactive mode.
    /// if turned off, fvdl will not land the user in an ssh console. An ssh port will still be forwarded.
    /// The user needs to specify the --vdl-output flag with this mode, and manually call
    /// the `kill` subcommand to perform a clean shutdown.
#[argh(switch)]
pub nointeractive: bool,
}
fn default_window_height() -> usize {
800
}
fn default_window_width() -> usize {
1280
}
#[derive(FromArgs, Debug, PartialEq)]
#[argh(subcommand, name = "kill")]
/// Killing Fuchsia Emulator
pub struct KillCommand {
/// device_launcher binary location. Defaults to looking in prebuilt/vdl/device_launcher
#[argh(option, short = 'd')]
pub vdl_path: Option<String>,
/// required, file containing device_launcher process artifact location.
#[argh(option)]
pub launched_proto: Option<String>,
}
| 37.825137 | 153 | 0.683906 |
69d37d6194532b4238a848a89b35aff731d023c1
| 9,481 |
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The NibblePath library simplifies operations with nibbles in a compact format for the
//! modified sparse Merkle tree by providing powerful iterators advancing by either bit or nibble.
#[cfg(test)]
mod nibble_path_test;
use crate::ROOT_NIBBLE_HEIGHT;
use proptest::{collection::vec, prelude::*};
use serde::{Deserialize, Serialize};
use std::{fmt, iter::FromIterator};
/// NibblePath defines a path in the Merkle tree in units of nibbles (4 bits).
#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)]
pub struct NibblePath {
    /// The total number of nibbles stored in `bytes`: either `bytes.len() * 2 - 1` or
    /// `bytes.len() * 2`.
// Guarantees intended ordering based on the top-to-bottom declaration order of the struct's
// members.
num_nibbles: usize,
/// The underlying bytes that stores the path, 2 nibbles per byte. If the number of nibbles is
/// odd, the second half of the last byte must be 0.
bytes: Vec<u8>,
}
/// Supports debug format by concatenating nibbles literally. For example, [0x12, 0xa0] with 3
/// nibbles will be printed as "12a".
impl fmt::Debug for NibblePath {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.nibbles().map(|x| write!(f, "{:x}", x)).collect()
}
}
/// Convert a vector of bytes into `NibblePath` using the lower 4 bits of each byte as nibble.
impl FromIterator<u8> for NibblePath {
fn from_iter<I: IntoIterator<Item = u8>>(iter: I) -> Self {
let mut nibble_path = NibblePath::new(vec![]);
for nibble in iter {
nibble_path.push(nibble);
}
nibble_path
}
}
impl Arbitrary for NibblePath {
type Parameters = ();
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
arb_nibble_path().boxed()
}
}
prop_compose! {
fn arb_nibble_path()(
mut bytes in vec(any::<u8>(), 0..ROOT_NIBBLE_HEIGHT/2), is_odd in any::<bool>()
) -> NibblePath {
if let Some(last_byte) = bytes.last_mut() {
if is_odd {
*last_byte &= 0xf0;
return NibblePath::new_odd(bytes);
}
}
NibblePath::new(bytes)
}
}
impl NibblePath {
/// Creates a new `NibblePath` from a vector of bytes assuming each byte has 2 nibbles.
pub fn new(bytes: Vec<u8>) -> Self {
let num_nibbles = bytes.len() * 2;
assert!(num_nibbles <= ROOT_NIBBLE_HEIGHT);
NibblePath { bytes, num_nibbles }
}
/// Similar to `new()` but assumes that the bytes have one less nibble.
pub fn new_odd(bytes: Vec<u8>) -> Self {
assert_eq!(
bytes.last().expect("Should have odd number of nibbles.") & 0x0f,
0,
"Last nibble must be 0."
);
let num_nibbles = bytes.len() * 2 - 1;
assert!(num_nibbles <= ROOT_NIBBLE_HEIGHT);
NibblePath { bytes, num_nibbles }
}
/// Adds a nibble to the end of the nibble path.
pub fn push(&mut self, nibble: u8) {
assert!(nibble < 16);
assert!(ROOT_NIBBLE_HEIGHT > self.num_nibbles);
if self.num_nibbles % 2 == 0 {
self.bytes.push(nibble << 4);
} else {
self.bytes[self.num_nibbles / 2] |= nibble;
}
self.num_nibbles += 1;
}
/// Pops a nibble from the end of the nibble path.
pub fn pop(&mut self) -> Option<u8> {
        let popped_nibble = if self.num_nibbles % 2 == 0 {
            self.bytes.last_mut().map(|last_byte| {
                let nibble = *last_byte & 0x0f;
                *last_byte &= 0xf0;
                nibble
            })
        } else {
            self.bytes.pop().map(|byte| byte >> 4)
        };
        if popped_nibble.is_some() {
            self.num_nibbles -= 1;
        }
        popped_nibble
}
/// Get the i-th bit.
fn get_bit(&self, i: usize) -> bool {
assert!(i / 4 < self.num_nibbles);
let pos = i / 8;
let bit = 7 - i % 8;
((self.bytes[pos] >> bit) & 1) != 0
}
/// Get the i-th nibble, stored at lower 4 bits
fn get_nibble(&self, i: usize) -> u8 {
assert!(i < self.num_nibbles);
(self.bytes[i / 2] >> (if i % 2 == 1 { 0 } else { 4 })) & 0xf
}
    /// Get a bit iterator that iterates over the whole nibble path.
pub fn bits(&self) -> BitIterator {
BitIterator {
nibble_path: self,
pos: (0..self.num_nibbles * 4),
}
}
    /// Get a nibble iterator that iterates over the whole nibble path.
pub fn nibbles(&self) -> NibbleIterator {
NibbleIterator::new(self, 0, self.num_nibbles)
}
/// Get the total number of nibbles stored.
pub fn num_nibbles(&self) -> usize {
self.num_nibbles
}
/// Get the underlying bytes storing nibbles.
pub fn bytes(&self) -> &[u8] {
&self.bytes
}
}
pub trait Peekable: Iterator {
/// Returns the `next()` value without advancing the iterator.
fn peek(&self) -> Option<Self::Item>;
}
/// BitIterator iterates a nibble path by bit.
pub struct BitIterator<'a> {
nibble_path: &'a NibblePath,
pos: std::ops::Range<usize>,
}
impl<'a> Peekable for BitIterator<'a> {
/// Returns the `next()` value without advancing the iterator.
fn peek(&self) -> Option<Self::Item> {
if self.pos.start < self.pos.end {
Some(self.nibble_path.get_bit(self.pos.start))
} else {
None
}
}
}
/// BitIterator spits out a boolean each time. True/false denotes 1/0.
impl<'a> Iterator for BitIterator<'a> {
type Item = bool;
fn next(&mut self) -> Option<Self::Item> {
        self.pos.next().map(|i| self.nibble_path.get_bit(i))
}
}
/// Support iterating bits in reversed order.
impl<'a> DoubleEndedIterator for BitIterator<'a> {
fn next_back(&mut self) -> Option<Self::Item> {
        self.pos.next_back().map(|i| self.nibble_path.get_bit(i))
}
}
/// NibbleIterator iterates a nibble path by nibble.
pub struct NibbleIterator<'a> {
/// The underlying nibble path that stores the nibbles
nibble_path: &'a NibblePath,
/// The current index, `pos.start`, will bump by 1 after calling `next()` until `pos.start ==
/// pos.end`.
pos: std::ops::Range<usize>,
/// The start index of the iterator. At the beginning, `pos.start == start`. [start, pos.end)
/// defines the range of `nibble_path` this iterator iterates over. `nibble_path` refers to
/// the entire underlying buffer but the range may only be partial.
start: usize,
}
/// NibbleIterator spits out a byte each time. Each byte must be in range [0, 16).
impl<'a> Iterator for NibbleIterator<'a> {
type Item = u8;
fn next(&mut self) -> Option<Self::Item> {
        self.pos.next().map(|i| self.nibble_path.get_nibble(i))
}
}
impl<'a> Peekable for NibbleIterator<'a> {
/// Returns the `next()` value without advancing the iterator.
fn peek(&self) -> Option<Self::Item> {
if self.pos.start < self.pos.end {
Some(self.nibble_path.get_nibble(self.pos.start))
} else {
None
}
}
}
impl<'a> NibbleIterator<'a> {
fn new(nibble_path: &'a NibblePath, start: usize, end: usize) -> Self {
Self {
nibble_path,
pos: (start..end),
start,
}
}
/// Returns a nibble iterator that iterates all visited nibbles.
pub fn visited_nibbles(&self) -> NibbleIterator<'a> {
Self::new(self.nibble_path, self.start, self.pos.start)
}
/// Returns a nibble iterator that iterates all remaining nibbles.
pub fn remaining_nibbles(&self) -> NibbleIterator<'a> {
Self::new(self.nibble_path, self.pos.start, self.pos.end)
}
/// Turn it into a `BitIterator`.
pub fn bits(&self) -> BitIterator<'a> {
BitIterator {
nibble_path: self.nibble_path,
pos: (self.pos.start * 4..self.pos.end * 4),
}
}
/// Cut and return the range of the underlying `nibble_path` that this iterator is iterating
/// over as a new `NibblePath`
pub fn get_nibble_path(&self) -> NibblePath {
self.visited_nibbles()
.chain(self.remaining_nibbles())
.collect()
}
/// Get the number of nibbles that this iterator covers.
pub fn num_nibbles(&self) -> usize {
self.pos.end - self.start
}
/// Return `true` if the iteration is over.
pub fn is_finished(&self) -> bool {
self.peek().is_none()
}
}
/// Advances both iterators while their next nibbles are the same, stopping when either reaches the
/// end or a mismatch is found. Returns the number of matched nibbles.
pub fn skip_common_prefix<'a, 'b, I1: 'a, I2: 'b>(x: &'a mut I1, y: &mut I2) -> usize
where
I1: Iterator + Peekable,
I2: Iterator + Peekable,
<I1 as Iterator>::Item: std::cmp::PartialEq<<I2 as Iterator>::Item>,
{
let mut count = 0;
loop {
let x_peek = x.peek();
let y_peek = y.peek();
if x_peek.is_none()
|| y_peek.is_none()
|| x_peek.expect("cannot be none") != y_peek.expect("cannot be none")
{
break;
}
count += 1;
x.next();
y.next();
}
count
}
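// Illustrative usage sketch, not part of the original crate: exercises `push`/`pop` and
// `skip_common_prefix` on small nibble paths to show how the types above behave.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn push_pop_and_common_prefix() {
        // Build the path "12a" one nibble at a time, then pop the last nibble back off.
        let mut path = NibblePath::new(vec![]);
        path.push(0x1);
        path.push(0x2);
        path.push(0xa);
        assert_eq!(path.num_nibbles(), 3);
        assert_eq!(path.pop(), Some(0xa));
        assert_eq!(path.num_nibbles(), 2);

        // "12" and "13" share exactly one leading nibble; after skipping it, each iterator
        // is positioned on the first mismatching nibble.
        let left = NibblePath::new(vec![0x12]);
        let right = NibblePath::new(vec![0x13]);
        let mut left_nibbles = left.nibbles();
        let mut right_nibbles = right.nibbles();
        assert_eq!(skip_common_prefix(&mut left_nibbles, &mut right_nibbles), 1);
        assert_eq!(left_nibbles.peek(), Some(0x2));
        assert_eq!(right_nibbles.peek(), Some(0x3));
    }
}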
| 31.39404 | 98 | 0.589178 |
bb474705566d4accf7771d912869f58b94821631
| 4,514 |
use std::io::{stdout, Write};
use std::path::{Path, PathBuf};
use public_api::diff::PublicItemsDiff;
use public_api::{public_api_from_rustdoc_json_str, Options, MINIMUM_RUSTDOC_JSON_VERSION};
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
#[derive(Default)]
struct Args {
help: bool,
with_blanket_implementations: bool,
files: Vec<PathBuf>,
}
fn main() -> Result<()> {
let args = args();
let mut options = Options::default();
options.with_blanket_implementations = args.with_blanket_implementations;
options.sorted = true;
let files = args.files;
if args.help || files.is_empty() || files.len() > 2 {
print_usage()?;
} else if files.len() == 1 {
let path = &files[0];
print_public_api(path, options)?;
} else if files.len() == 2 {
let old = &files[0];
let new = &files[1];
print_public_api_diff(old, new, options)?;
}
Ok(())
}
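/// Prints the public API items from the rustdoc JSON file at `path`, one per line.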
fn print_public_api(path: &Path, options: Options) -> Result<()> {
let json = &std::fs::read_to_string(path)?;
for public_item in public_api_from_rustdoc_json_str(json, options)? {
writeln!(std::io::stdout(), "{}", public_item)?;
}
Ok(())
}
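/// Prints a diff of the public API between the rustdoc JSON files at `old` and `new`.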
fn print_public_api_diff(old: &Path, new: &Path, options: Options) -> Result<()> {
let old_json = std::fs::read_to_string(old)?;
let old_items = public_api_from_rustdoc_json_str(&old_json, options)?;
let new_json = std::fs::read_to_string(new)?;
let new_items = public_api_from_rustdoc_json_str(&new_json, options)?;
let diff = PublicItemsDiff::between(old_items, new_items);
print_diff_with_headers(&diff, &mut stdout(), "Removed:", "Changed:", "Added:")?;
Ok(())
}
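/// Writes the removed, changed, and added items of a diff, each list under its own header.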
fn print_diff_with_headers(
diff: &PublicItemsDiff,
w: &mut impl std::io::Write,
header_removed: &str,
header_changed: &str,
header_added: &str,
) -> std::io::Result<()> {
print_items_with_header(w, header_removed, &diff.removed, |w, item| {
writeln!(w, "-{}", item)
})?;
print_items_with_header(w, header_changed, &diff.changed, |w, item| {
writeln!(w, "-{}", item.old)?;
writeln!(w, "+{}", item.new)
})?;
print_items_with_header(w, header_added, &diff.added, |w, item| {
writeln!(w, "+{}", item)
})?;
Ok(())
}
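/// Writes a header followed by one line per item, or "(nothing)" if the list is empty.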
fn print_items_with_header<W: std::io::Write, T>(
w: &mut W,
header: &str,
items: &[T],
print_fn: impl Fn(&mut W, &T) -> std::io::Result<()>,
) -> std::io::Result<()> {
writeln!(w, "{}", header)?;
if items.is_empty() {
writeln!(w, "(nothing)")?;
} else {
for item in items {
print_fn(w, item)?;
}
}
writeln!(w)
}
fn print_usage() -> std::io::Result<()> {
writeln!(
stdout(),
"public-api v{}
Requires at least {}.
NOTE: See https://github.com/Enselic/cargo-public-api for a convenient cargo
wrapper around this program (or to be precise; library) that does everything
automatically.
If you insist on using this low-level utility and thin wrapper, you run it like this:
public-api <RUSTDOC_JSON_FILE>
where RUSTDOC_JSON_FILE is the path to the output of
RUSTDOCFLAGS='-Z unstable-options --output-format json' cargo +nightly doc --lib --no-deps
which you can find in
./target/doc/${{CRATE}}.json
To diff the public API between two commits, you generate one rustdoc JSON file for each
commit and then pass the path of both files to this utility:
public-api <RUSTDOC_JSON_FILE_OLD> <RUSTDOC_JSON_FILE_NEW>
To include blanket implementations, pass --with-blanket-implementations.
",
env!("CARGO_PKG_VERSION"),
MINIMUM_RUSTDOC_JSON_VERSION,
)
}
/// Helper to parse args.
///
/// Note: I want this Rust package to be simple and without unnecessary
/// dependencies and without the need to select features. For that reason I
/// currently consider it undesirable to, for example, make this utility depend on
/// `clap` or `anyhow`.
///
/// The convenient wrapper <https://github.com/Enselic/cargo-public-api>
/// depends on both `clap` and `anyhow`, though, which is perfectly fine.
fn args() -> Args {
let mut args = Args::default();
for arg in std::env::args_os().skip(1) {
if arg == "--with-blanket-implementations" {
args.with_blanket_implementations = true;
} else if arg == "--help" || arg == "-h" {
args.help = true;
} else {
args.files.push(PathBuf::from(arg));
}
}
args
}
| 28.56962 | 94 | 0.630926 |
67be9b26735564e6f896d9cc6203e24283fa85ed
| 6,780 |
use std::fs;
use std::io::{Read, Write};
use std::path::PathBuf;
use directories::ProjectDirs;
const ORGANIZATION: &str = "jhspetersson";
const APPLICATION: &str = "fselect";
const CONFIG_FILE: &str = "config.toml";
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Config {
pub no_color : Option<bool>,
pub gitignore: Option<bool>,
pub hgignore: Option<bool>,
pub dockerignore: Option<bool>,
pub is_zip_archive : Vec<String>,
pub is_archive : Vec<String>,
pub is_audio : Vec<String>,
pub is_book : Vec<String>,
pub is_doc : Vec<String>,
pub is_image : Vec<String>,
pub is_source : Vec<String>,
pub is_video : Vec<String>,
pub default_file_size_format : Option<String>,
#[serde(skip_serializing, default = "get_false")]
pub debug : bool,
#[serde(skip)]
save : bool,
}
fn get_false() -> bool {
false
}
impl Config {
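    /// Loads the configuration from a file next to the current executable or from the
    /// per-user config directory, falling back to the defaults if neither exists.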
pub fn new() -> Result<Config, String> {
let mut config_file;
if let Some(cf) = Self::get_current_dir_config() {
config_file = cf;
} else {
let config_dir = Self::get_project_dir();
if config_dir.is_none() {
return Ok(Config::default());
}
config_file = config_dir.unwrap();
config_file.push(CONFIG_FILE);
if !config_file.exists() {
return Ok(Config::default());
}
}
Config::from(config_file)
}
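    /// Reads and parses the TOML configuration from the given file.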
pub fn from(config_file: PathBuf) -> Result<Config, String> {
if let Ok(mut file) = fs::File::open(&config_file) {
let mut contents = String::new();
if let Ok(_) = file.read_to_string(&mut contents) {
match toml::from_str(&contents) {
Ok(config) => Ok(config),
Err(err) => Err(err.to_string())
}
} else {
Err("Could not read config file. Using default settings.".to_string())
}
} else {
Err("Could not open config file. Using default settings.".to_string())
}
}
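    /// Looks for a config file in the same directory as the current executable.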
fn get_current_dir_config() -> Option<PathBuf> {
if let Ok(mut pb) = std::env::current_exe() {
pb.pop();
pb.push(CONFIG_FILE);
if pb.exists() {
return Some(pb);
}
}
None
}
#[cfg(not(windows))]
fn get_project_dir() -> Option<PathBuf> {
match ProjectDirs::from("", ORGANIZATION, APPLICATION) {
Some(pd) => Some(pd.config_dir().to_path_buf()),
_ => None
}
}
#[cfg(windows)]
fn get_project_dir() -> Option<PathBuf> {
match ProjectDirs::from("", ORGANIZATION, APPLICATION) {
Some(pd) => Some(pd.config_dir().parent().unwrap().to_path_buf()),
_ => None
}
}
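    /// Writes this configuration to the per-user config directory on first run;
    /// does nothing if saving is disabled or a config file already exists.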
pub fn save(&self) {
if !self.save {
return;
}
let config_dir = Self::get_project_dir();
if config_dir.is_none() {
return;
}
let mut config_file = config_dir.unwrap();
let _ = fs::create_dir_all(&config_file);
config_file.push(CONFIG_FILE);
if config_file.exists() {
return;
}
let toml = toml::to_vec(&self).unwrap();
if let Ok(mut file) = fs::File::create(&config_file) {
let _ = file.write_all(&toml);
}
}
pub fn default() -> Config {
Config {
no_color : Some(false),
gitignore : Some(false),
hgignore : Some(false),
dockerignore : Some(false),
is_zip_archive : vec![String::from(".zip"), String::from(".jar"), String::from(".war"), String::from(".ear")],
            is_archive : vec![String::from(".7z"), String::from(".bz2"), String::from(".bzip2"), String::from(".gz"), String::from(".gzip"), String::from(".lz"), String::from(".rar"), String::from(".tar"), String::from(".xz"), String::from(".zip")],
is_audio : vec![String::from(".aac"), String::from(".aiff"), String::from(".amr"), String::from(".flac"), String::from(".gsm"), String::from(".m4a"), String::from(".m4b"), String::from(".m4p"), String::from(".mp3"), String::from(".ogg"), String::from(".wav"), String::from(".wma")],
is_book : vec![String::from(".azw3"), String::from(".chm"), String::from(".djvu"), String::from(".epub"), String::from(".fb2"), String::from(".mobi"), String::from(".pdf")],
is_doc : vec![String::from(".accdb"), String::from(".doc"), String::from(".docm"), String::from(".docx"), String::from(".dot"), String::from(".dotm"), String::from(".dotx"), String::from(".mdb"), String::from(".ods"), String::from(".odt"), String::from(".pdf"), String::from(".potm"), String::from(".potx"), String::from(".ppt"), String::from(".pptm"), String::from(".pptx"), String::from(".rtf"), String::from(".xlm"), String::from(".xls"), String::from(".xlsm"), String::from(".xlsx"), String::from(".xlt"), String::from(".xltm"), String::from(".xltx"), String::from(".xps")],
is_image : vec![String::from(".bmp"), String::from(".gif"), String::from(".heic"), String::from(".jpeg"), String::from(".jpg"), String::from(".jxl"), String::from(".png"), String::from(".psb"), String::from(".psd"), String::from(".tiff"), String::from(".webp")],
is_source : vec![String::from(".asm"), String::from(".bas"), String::from(".c"), String::from(".cc"), String::from(".ceylon"), String::from(".clj"), String::from(".coffee"), String::from(".cpp"), String::from(".cs"), String::from(".d"), String::from(".dart"), String::from(".elm"), String::from(".erl"), String::from(".go"), String::from(".groovy"), String::from(".h"), String::from(".hh"), String::from(".hpp"), String::from(".java"), String::from(".jl"), String::from(".js"), String::from(".jsp"), String::from(".kt"), String::from(".kts"), String::from(".lua"), String::from(".nim"), String::from(".pas"), String::from(".php"), String::from(".pl"), String::from(".pm"), String::from(".py"), String::from(".rb"), String::from(".rs"), String::from(".scala"), String::from(".swift"), String::from(".tcl"), String::from(".vala"), String::from(".vb")],
is_video : vec![String::from(".3gp"), String::from(".avi"), String::from(".flv"), String::from(".m4p"), String::from(".m4v"), String::from(".mkv"), String::from(".mov"), String::from(".mp4"), String::from(".mpeg"), String::from(".mpg"), String::from(".webm"), String::from(".wmv")],
default_file_size_format : Some(String::new()),
debug : false,
save : true,
}
}
}
| 45.503356 | 862 | 0.55413 |
d52d7a64004ad50622864a56390f782cde3042c3
| 226 |
// @flag --unroll=10
// @expect verified
fn fac(n: u64, acc: u64) -> u64 {
match n {
0 => acc,
_ => fac(n - 1, acc * n),
}
}
pub fn main() {
let x = fac(5, 1);
verifier::assert_eq!(x, 120);
}
| 15.066667 | 33 | 0.455752 |
56cb182dbf0f96bfa1d287f013e6058518d6d27c
| 7,257 |
use std::marker::PhantomData;
use rustc_data_structures::snapshot_vec as sv;
use rustc_data_structures::undo_log::{Rollback, UndoLogs};
use rustc_data_structures::unify as ut;
use rustc_middle::ty;
use crate::{
infer::{region_constraints, type_variable, InferCtxtInner},
traits,
};
pub struct Snapshot<'tcx> {
pub(crate) undo_len: usize,
_marker: PhantomData<&'tcx ()>,
}
/// Records the 'undo' data for a single operation that affects some form of inference variable.
pub(crate) enum UndoLog<'tcx> {
TypeVariables(type_variable::UndoLog<'tcx>),
ConstUnificationTable(sv::UndoLog<ut::Delegate<ty::ConstVid<'tcx>>>),
IntUnificationTable(sv::UndoLog<ut::Delegate<ty::IntVid>>),
FloatUnificationTable(sv::UndoLog<ut::Delegate<ty::FloatVid>>),
RegionConstraintCollector(region_constraints::UndoLog<'tcx>),
RegionUnificationTable(sv::UndoLog<ut::Delegate<ty::RegionVid>>),
ProjectionCache(traits::UndoLog<'tcx>),
PushRegionObligation,
}
macro_rules! impl_from {
($($ctor: ident ($ty: ty),)*) => {
$(
impl<'tcx> From<$ty> for UndoLog<'tcx> {
fn from(x: $ty) -> Self {
UndoLog::$ctor(x.into())
}
}
)*
}
}
// Upcast from a single kind of "undoable action" to the general enum
impl_from! {
RegionConstraintCollector(region_constraints::UndoLog<'tcx>),
TypeVariables(type_variable::UndoLog<'tcx>),
TypeVariables(sv::UndoLog<ut::Delegate<type_variable::TyVidEqKey<'tcx>>>),
TypeVariables(sv::UndoLog<ut::Delegate<ty::TyVid>>),
TypeVariables(sv::UndoLog<type_variable::Delegate>),
TypeVariables(type_variable::Instantiate),
IntUnificationTable(sv::UndoLog<ut::Delegate<ty::IntVid>>),
FloatUnificationTable(sv::UndoLog<ut::Delegate<ty::FloatVid>>),
ConstUnificationTable(sv::UndoLog<ut::Delegate<ty::ConstVid<'tcx>>>),
RegionUnificationTable(sv::UndoLog<ut::Delegate<ty::RegionVid>>),
ProjectionCache(traits::UndoLog<'tcx>),
}
/// The Rollback trait defines how to rollback a particular action.
impl<'tcx> Rollback<UndoLog<'tcx>> for InferCtxtInner<'tcx> {
fn reverse(&mut self, undo: UndoLog<'tcx>) {
match undo {
UndoLog::TypeVariables(undo) => self.type_variable_storage.reverse(undo),
UndoLog::ConstUnificationTable(undo) => self.const_unification_storage.reverse(undo),
UndoLog::IntUnificationTable(undo) => self.int_unification_storage.reverse(undo),
UndoLog::FloatUnificationTable(undo) => self.float_unification_storage.reverse(undo),
UndoLog::RegionConstraintCollector(undo) => {
self.region_constraint_storage.as_mut().unwrap().reverse(undo)
}
UndoLog::RegionUnificationTable(undo) => {
self.region_constraint_storage.as_mut().unwrap().unification_table.reverse(undo)
}
UndoLog::ProjectionCache(undo) => self.projection_cache.reverse(undo),
UndoLog::PushRegionObligation => {
self.region_obligations.pop();
}
}
}
}
/// The combined undo log for all the various unification tables. For each change to the storage
/// for any kind of inference variable, we record an UndoLog entry in the vector here.
pub(crate) struct InferCtxtUndoLogs<'tcx> {
logs: Vec<UndoLog<'tcx>>,
num_open_snapshots: usize,
}
impl Default for InferCtxtUndoLogs<'_> {
fn default() -> Self {
Self { logs: Default::default(), num_open_snapshots: Default::default() }
}
}
/// The UndoLogs trait defines how we undo a particular kind of action (of type T). We can undo any
/// action that is convertible into an UndoLog (per the From impls above).
impl<'tcx, T> UndoLogs<T> for InferCtxtUndoLogs<'tcx>
where
UndoLog<'tcx>: From<T>,
{
fn num_open_snapshots(&self) -> usize {
self.num_open_snapshots
}
fn push(&mut self, undo: T) {
if self.in_snapshot() {
self.logs.push(undo.into())
}
}
fn clear(&mut self) {
self.logs.clear();
self.num_open_snapshots = 0;
}
fn extend<J>(&mut self, undos: J)
where
Self: Sized,
J: IntoIterator<Item = T>,
{
if self.in_snapshot() {
self.logs.extend(undos.into_iter().map(UndoLog::from))
}
}
}
impl<'tcx> InferCtxtInner<'tcx> {
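    /// Rolls back all changes recorded since `snapshot` was started and closes it.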
pub fn rollback_to(&mut self, snapshot: Snapshot<'tcx>) {
debug!("rollback_to({})", snapshot.undo_len);
self.undo_log.assert_open_snapshot(&snapshot);
while self.undo_log.logs.len() > snapshot.undo_len {
let undo = self.undo_log.logs.pop().unwrap();
self.reverse(undo);
}
if self.undo_log.num_open_snapshots == 1 {
// The root snapshot. It's safe to clear the undo log because
// there's no snapshot further out that we might need to roll back
// to.
assert!(snapshot.undo_len == 0);
self.undo_log.logs.clear();
}
self.undo_log.num_open_snapshots -= 1;
}
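    /// Keeps all changes recorded since `snapshot` was started and closes it.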
pub fn commit(&mut self, snapshot: Snapshot<'tcx>) {
debug!("commit({})", snapshot.undo_len);
if self.undo_log.num_open_snapshots == 1 {
// The root snapshot. It's safe to clear the undo log because
// there's no snapshot further out that we might need to roll back
// to.
assert!(snapshot.undo_len == 0);
self.undo_log.logs.clear();
}
self.undo_log.num_open_snapshots -= 1;
}
}
impl<'tcx> InferCtxtUndoLogs<'tcx> {
pub fn actions_since_snapshot(&self, snapshot: &Snapshot<'tcx>) -> &[UndoLog<'tcx>] {
&self.logs[snapshot.undo_len..]
}
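    /// Opens a new snapshot that can later be committed or rolled back.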
pub fn start_snapshot(&mut self) -> Snapshot<'tcx> {
self.num_open_snapshots += 1;
Snapshot { undo_len: self.logs.len(), _marker: PhantomData }
}
pub(crate) fn region_constraints_in_snapshot(
&self,
s: &Snapshot<'tcx>,
) -> impl Iterator<Item = &'_ region_constraints::UndoLog<'tcx>> + Clone {
self.logs[s.undo_len..].iter().filter_map(|log| match log {
UndoLog::RegionConstraintCollector(log) => Some(log),
_ => None,
})
}
pub(crate) fn region_constraints(
&self,
) -> impl Iterator<Item = &'_ region_constraints::UndoLog<'tcx>> + Clone {
self.logs.iter().filter_map(|log| match log {
UndoLog::RegionConstraintCollector(log) => Some(log),
_ => None,
})
}
fn assert_open_snapshot(&self, snapshot: &Snapshot<'tcx>) {
// Failures here may indicate a failure to follow a stack discipline.
assert!(self.logs.len() >= snapshot.undo_len);
assert!(self.num_open_snapshots > 0);
}
pub(crate) fn iter(&self) -> std::slice::Iter<'_, UndoLog<'tcx>> {
self.logs.iter()
}
}
impl<'tcx> std::ops::Index<usize> for InferCtxtUndoLogs<'tcx> {
type Output = UndoLog<'tcx>;
fn index(&self, key: usize) -> &Self::Output {
&self.logs[key]
}
}
impl<'tcx> std::ops::IndexMut<usize> for InferCtxtUndoLogs<'tcx> {
fn index_mut(&mut self, key: usize) -> &mut Self::Output {
&mut self.logs[key]
}
}
| 33.288991 | 99 | 0.627394 |
f5ce7e1ee8bcaab26799501c42d21c587e1fc4e5
| 784 |
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Audio processing modules, packaged as a library.
#[macro_use]
extern crate lazy_static;
pub mod queue;
pub mod graph;
pub mod module;
pub mod modules;
pub mod worker;
| 30.153846 | 75 | 0.746173 |
e26b91e1708d68267151f738835418b25e7aafaf
| 7,722 |
use crate::ir::get_statement_operands_mut;
use crate::ir::get_statement_value_operands;
use crate::ir::get_value_operands_mut;
use crate::ir::value_storage::ValueId;
use crate::ir::value_storage::ValueStorage;
use crate::ir::BasicBlock;
use crate::ir::ControlFlowGraph;
use crate::ir::FunctionMap;
use crate::ir::Statement;
use crate::ir::StatementLocation;
use crate::ir::Value;
use itertools::Itertools;
use petgraph::graph::NodeIndex;
use petgraph::visit::EdgeRef;
use petgraph::Direction;
use std::collections::HashMap;
use std::iter::once;
use std::iter::repeat;
use std::iter::FromIterator;
use std::mem::replace;
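/// Entry point of the pass: drops unused definitions, removes empty blocks, merges consecutive
/// basic blocks, and finally compacts the value storage so that only live values remain.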
pub(crate) fn remove_dead_code(
entry_block: NodeIndex,
program_result: &mut ValueId,
values: &mut ValueStorage,
cfg: &mut ControlFlowGraph,
definitions: &mut HashMap<ValueId, StatementLocation>,
functions: &mut FunctionMap,
) {
let mut value_usage = compute_value_usage(*program_result, values, cfg, definitions);
remove_unused_definitions(&mut value_usage, values, cfg, definitions);
remove_empty_blocks(cfg);
merge_consecutive_basic_blocks(entry_block, values, cfg, &mut value_usage, definitions);
remove_unused_definitions(&mut value_usage, values, cfg, definitions);
remove_unused_values(definitions, values, functions, cfg, program_result);
}
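/// Counts how many times each defined value is used as an operand; the program result counts as a use.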
fn compute_value_usage(
program_result: ValueId,
values: &mut ValueStorage,
cfg: &mut ControlFlowGraph,
definitions: &mut HashMap<ValueId, StatementLocation>,
) -> HashMap<ValueId, usize> {
let mut value_usage = HashMap::from_iter(definitions.keys().cloned().zip(repeat(0)));
let used_values = cfg
.node_indices()
.flat_map(|node| cfg[node].iter())
.flat_map(|statement| get_statement_value_operands(values, statement))
.chain(once(program_result));
for value_id in used_values {
*value_usage.get_mut(&value_id).unwrap() += 1;
}
value_usage
}
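/// Removes definitions whose usage count is zero, transitively releasing the values they use,
/// and deletes the corresponding statements from the CFG.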
fn remove_unused_definitions(
value_usage: &mut HashMap<ValueId, usize>,
values: &mut ValueStorage,
cfg: &mut ControlFlowGraph,
definitions: &mut HashMap<ValueId, StatementLocation>,
) {
let unused_values = value_usage
.iter()
.filter_map(|(value_id, usages)| match usages {
0 => Some(*value_id),
_ => None,
})
.collect_vec();
for value_id in unused_values {
disuse(value_id, cfg, values, value_usage, definitions);
}
let (unused_values, dead_code): (Vec<_>, Vec<_>) = value_usage
.iter()
.filter_map(|(value_id, usage)| match usage {
0 => Some((*value_id, definitions[value_id])),
_ => None,
})
.unzip();
for value_id in unused_values {
value_usage.remove(&value_id);
match &values[value_id] {
Value::Arg(_) => None,
_ => definitions.remove(&value_id),
};
}
remove_statements(dead_code, cfg);
}
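/// Decrements the usage count of `value_id`; once it reaches zero, recursively disuses the
/// operands of its defining statement.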
fn disuse(
value_id: ValueId,
cfg: &ControlFlowGraph,
values: &ValueStorage,
value_usage: &mut HashMap<ValueId, usize>,
definitions: &mut HashMap<ValueId, StatementLocation>,
) {
let usage_count = value_usage.get_mut(&value_id).unwrap();
if *usage_count > 0 {
*usage_count -= 1;
}
if *usage_count == 0 {
let location = &definitions[&value_id];
let statement = &cfg[location.block][location.index];
for operand in get_statement_value_operands(values, statement) {
disuse(operand, cfg, values, value_usage, definitions);
}
}
}
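/// Removes blocks that contain only comments and have exactly one predecessor and one successor,
/// rewiring the edge around them.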
fn remove_empty_blocks(cfg: &mut ControlFlowGraph) {
let blocks = cfg.node_indices().collect_vec();
for block in blocks {
let is_empty = !cfg[block]
.iter()
.any(|statement| !matches!(statement, Statement::Comment(_)));
if is_empty {
let sources = cfg
.edges_directed(block, Direction::Incoming)
.map(|edge| edge.source())
.collect_vec();
let targets = cfg
.edges_directed(block, Direction::Outgoing)
.map(|edge| edge.target())
.collect_vec();
if sources.len() == 1 && targets.len() == 1 {
let source = sources[0];
let target = targets[0];
if !cfg.has_edge(source, target) {
cfg.add_edge(source, target);
}
cfg.remove_node(block);
}
}
}
}
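/// Merges a block with its sole successor when that successor has no other predecessors,
/// recursing into successors first so that chains collapse from the bottom up.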
fn merge_consecutive_basic_blocks(
block: NodeIndex,
values: &ValueStorage,
cfg: &mut ControlFlowGraph,
value_usage: &mut HashMap<ValueId, usize>,
definitions: &mut HashMap<ValueId, StatementLocation>,
) {
let successors = cfg
.edges_directed(block, Direction::Outgoing)
.map(|edge| edge.target())
.collect_vec();
for successor in &successors {
merge_consecutive_basic_blocks(*successor, values, cfg, value_usage, definitions);
}
if successors.len() == 1 {
let successor = successors[0];
if cfg.edges_directed(successor, Direction::Incoming).count() == 1 {
if let Some(Statement::CondJump(condition, _, _)) = cfg[block].find_last() {
disuse(*condition, cfg, values, value_usage, definitions);
cfg[block].pop();
}
let successor_basic_block = replace(&mut cfg[successor], BasicBlock::new());
let basic_block = &mut cfg[block];
for statement in successor_basic_block.into_iter() {
if let Statement::Definition(value_id) = statement {
let definition = definitions.get_mut(&value_id).unwrap();
definition.block = block;
definition.index = basic_block.push(statement);
} else {
basic_block.push(statement);
}
}
let targets = cfg
.edges_directed(successor, Direction::Outgoing)
.map(|edge| edge.target())
.collect_vec();
for target in targets {
cfg.add_edge(block, target);
}
cfg.remove_node(successor);
}
}
}
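/// Drops values that no longer have definitions (removing any functions they refer to) and
/// remaps the surviving value ids in operands, definitions, and the program result.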
fn remove_unused_values(
definitions: &mut HashMap<ValueId, StatementLocation>,
values: &mut ValueStorage,
functions: &mut FunctionMap,
cfg: &mut ControlFlowGraph,
program_result: &mut ValueId,
) {
for (_, value_id) in values.iter() {
if !definitions.contains_key(&value_id) {
if let Value::Function(fn_id, _) = &values[value_id] {
functions.remove(fn_id);
}
}
}
let mut remap = HashMap::new();
let mut new_definitions = HashMap::new();
values.retain(
|value_id| definitions.contains_key(&value_id),
|from, to| {
remap.insert(from, to);
new_definitions.insert(to, definitions[&from]);
},
);
for (value, _) in values.iter_mut() {
for operand in get_value_operands_mut(value) {
*operand = remap[operand];
}
}
let blocks = cfg.node_indices().collect_vec();
for block in blocks {
let basic_block = &mut cfg[block];
for statement in basic_block.iter_mut() {
for operand in get_statement_operands_mut(statement) {
*operand = remap[operand];
}
}
}
*definitions = new_definitions;
*program_result = remap[program_result];
}
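/// Removes the statements at the given locations from their basic blocks.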
fn remove_statements(locations: Vec<StatementLocation>, cfg: &mut ControlFlowGraph) {
for location in locations {
cfg[location.block].remove(location.index);
}
}
| 33.428571 | 92 | 0.609816 |
4b99489d05bb5e008fcd219456f2ea7281b54281
| 58 |
extern crate lettre;
pub mod subcommand;
pub mod config;
| 11.6 | 20 | 0.775862 |