hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
01626ad84a4efb50afec21a8bfb053673f4b31e4
| 350 |
//! # Ua-service
//!
//! Business logic implementation.
//! Directly provides functionality for accessing a customer-specified database.
pub mod dao;
pub mod errors;
pub mod interface;
pub mod provider;
pub mod repository;
pub mod util;
// Convenience re-exports so callers can name these types directly from the crate root.
pub use dao::{Dao, DaoMY, DaoOptions, DaoPG};
pub use errors::DaoError;
pub use util::{JsonType, QueryResult};
| 21.875 | 80 | 0.742857 |
fed8cd9eec04d68d3bab1bfaa4986b2f151e6e6e
| 704 |
// Copyright Jeron A. Lau 2017-2018.
// Dual-licensed under either the MIT License or the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
extern crate awi;
extern crate aci_png;
use awi::Window;
/// Opens a window with the bundled PNG icon and pumps input events,
/// printing each one, until a `Quit` or `Back` message arrives.
///
/// Change: dropped the redundant `-> ()` return annotation (clippy
/// `unused_unit`); behavior is unchanged.
pub fn main() {
	let mut window = Window::new(
		"awi example",
		&aci_png::decode(include_bytes!("../res/icon.png")).unwrap(),
		None
	);
	'mainloop: loop {
		// Drain every pending input event before presenting the next frame.
		while let Some(input) = window.input() {
			use awi::Input::*;
			use awi::Msg::*;
			match input {
				Msg(Quit) | Msg(Back) => break 'mainloop,
				// Input::Redraw => redraw(&mut context),
				a => println!("{}", a),
			}
		}
		window.update();
	}
}
| 21.333333 | 76 | 0.627841 |
1c1308bc87c5c31b02a35c64cccf57f3adb88d55
| 16,291 |
use bitvec::prelude as bv;
use bitvec::prelude::{BitField, AsBits};
use bytes::Bytes;
use enumflags2::BitFlags;
/// Service-class flags of a Bluetooth Class of Device value; each variant
/// occupies one of bits 16..24 of the raw 24-bit field.
#[derive(BitFlags, Copy, Clone, Debug, PartialEq)]
#[repr(u32)]
pub enum ServiceClass {
    Positioning = 1 << 16,
    Networking = 1 << 17,
    Rendering = 1 << 18,
    Capturing = 1 << 19,
    ObjectTransfer = 1 << 20,
    Audio = 1 << 21,
    Telephony = 1 << 22,
    Information = 1 << 23,
}
/// Set of `ServiceClass` flags decoded from a Class of Device value.
pub type ServiceClasses = BitFlags<ServiceClass>;
/// Major device class plus its decoded minor class, taken from bits 2..13 of
/// the Class of Device value (see `from_u32` for the exact bit layout).
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum DeviceClass {
    Computer(ComputerDeviceClass),
    Phone(PhoneDeviceClass),
    /// The parameter is the amount of utilisation the access point currently has, expressed as a
    /// fraction.
    AccessPoint(f64),
    AudioVideo(AudioVideoDeviceClass),
    Peripheral {
        keyboard: bool,
        pointer: bool,
        class: PeripheralDeviceClass,
    },
    Imaging {
        display: bool,
        camera: bool,
        scanner: bool,
        printer: bool,
    },
    Wearable(WearableDeviceClass),
    Toy(ToyDeviceClass),
    Health(HealthDeviceClass),
    /// Explicitly marked uncategorized (major class code 0b11111).
    Uncategorized,
    /// Major class code not recognised by this module.
    Unknown,
}
/// Minor device classes for the Computer major class.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum ComputerDeviceClass {
    Uncategorized,
    Desktop,
    Server,
    Laptop,
    HandheldPDA,
    PalmPDA,
    Wearable,
    Tablet,
    /// Minor class code not recognised by this module.
    Unknown,
}
/// Minor device classes for the Phone major class.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum PhoneDeviceClass {
    Uncategorized,
    Cellular,
    Cordless,
    Smartphone,
    Modem,
    ISDN,
    Unknown,
}
/// Minor device classes for the Audio/Video major class.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum AudioVideoDeviceClass {
    Headset,
    HandsFree,
    Microphone,
    Loudspeaker,
    Headphones,
    Portable,
    Car,
    SetTop,
    HiFi,
    VCR,
    VideoCamera,
    Camcorder,
    VideoMonitor,
    VideoDisplayLoudspeaker,
    VideoConferencing,
    Gaming,
    Unknown,
}
/// Minor device classes for the Peripheral major class (excluding the
/// keyboard/pointer capability bits, which are carried separately).
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum PeripheralDeviceClass {
    Uncategorized,
    Joystick,
    Gamepad,
    Remote,
    Sensor,
    Digitizer,
    CardReader,
    Pen,
    Scanner,
    Wand,
    Unknown,
}
/// Minor device classes for the Wearable major class.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum WearableDeviceClass {
    Wristwatch,
    Pager,
    Jacket,
    Helmet,
    Glasses,
    Unknown,
}
/// Minor device classes for the Toy major class.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum ToyDeviceClass {
    Robot,
    Vehicle,
    Doll,
    Controller,
    Game,
    Unknown,
}
/// Minor device classes for the Health major class.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum HealthDeviceClass {
    BloodPressureMeter,
    Thermometer,
    WeightScale,
    GlucoseMeter,
    PulseOximeter,
    HeartRateMonitor,
    HealthDataDisplay,
    StepCounter,
    BodyCompositionAnalyzer,
    PeakFlowMonitor,
    MedicationMonitor,
    KneeProsthesis,
    AnkleProsthesis,
    GenericHealthManager,
    PersonalMobilityDevice,
    Unknown,
}
/// Decodes a Class of Device from the first three bytes of `class`,
/// interpreted little-endian (byte 0 is least significant).
pub fn from_bytes(class: Bytes) -> (DeviceClass, ServiceClasses) {
    let value = u32::from(class[0])
        | (u32::from(class[1]) << 8)
        | (u32::from(class[2]) << 16);
    from_u32(value)
}
/// Decodes a Class of Device from a 3-byte array, interpreted little-endian
/// (element 0 is least significant).
pub fn from_array(class: [u8; 3]) -> (DeviceClass, ServiceClasses) {
    let value = u32::from(class[0])
        | (u32::from(class[1]) << 8)
        | (u32::from(class[2]) << 16);
    from_u32(value)
}
/// Splits a raw Class of Device value into its device-class and
/// service-class parts.
///
/// Bit layout decoded here: bits 8..13 carry the major device class,
/// bits 2..8 the minor class (whose meaning depends on the major class),
/// and the service-class flags occupy the upper bits (see `ServiceClass`).
pub fn from_u32(class: u32) -> (DeviceClass, ServiceClasses) {
    // Unrecognised service bits are silently dropped rather than rejected.
    let service_classes = ServiceClasses::from_bits_truncate(class);
    let class_bits = class.bits::<bv::Lsb0>();
    let device_class: DeviceClass;
    // major device class encoded in bits 8-12
    device_class = match class_bits[8..13].load::<u8>() {
        // minor device class in bits 2-7
        0b00001 => DeviceClass::Computer(match class_bits[2..8].load::<u8>() {
            0b000000 => ComputerDeviceClass::Uncategorized,
            0b000001 => ComputerDeviceClass::Desktop,
            0b000010 => ComputerDeviceClass::Server,
            0b000011 => ComputerDeviceClass::Laptop,
            0b000100 => ComputerDeviceClass::HandheldPDA,
            0b000101 => ComputerDeviceClass::PalmPDA,
            0b000110 => ComputerDeviceClass::Wearable,
            0b000111 => ComputerDeviceClass::Tablet,
            _ => ComputerDeviceClass::Unknown,
        }),
        0b00010 => DeviceClass::Phone(match class_bits[2..8].load::<u8>() {
            0b000000 => PhoneDeviceClass::Uncategorized,
            0b000001 => PhoneDeviceClass::Cellular,
            0b000010 => PhoneDeviceClass::Cordless,
            0b000011 => PhoneDeviceClass::Smartphone,
            0b000100 => PhoneDeviceClass::Modem,
            0b000101 => PhoneDeviceClass::ISDN,
            _ => PhoneDeviceClass::Unknown,
        }),
        // NOTE(review): the access point's utilisation field is not decoded
        // here; it is always reported as 0.0 — confirm whether that is
        // intentional or a TODO.
        0b00011 => DeviceClass::AccessPoint(0.),
        0b00100 => DeviceClass::AudioVideo(match class_bits[2..8].load::<u8>() {
            0b000001 => AudioVideoDeviceClass::Headset,
            0b000010 => AudioVideoDeviceClass::HandsFree,
            // 0b000011 maps to Unknown (reserved code, per the encoder below).
            0b000011 => AudioVideoDeviceClass::Unknown,
            0b000100 => AudioVideoDeviceClass::Microphone,
            0b000101 => AudioVideoDeviceClass::Loudspeaker,
            0b000110 => AudioVideoDeviceClass::Headphones,
            0b000111 => AudioVideoDeviceClass::Portable,
            0b001000 => AudioVideoDeviceClass::Car,
            0b001001 => AudioVideoDeviceClass::SetTop,
            0b001010 => AudioVideoDeviceClass::HiFi,
            0b001011 => AudioVideoDeviceClass::VCR,
            0b001100 => AudioVideoDeviceClass::VideoCamera,
            0b001101 => AudioVideoDeviceClass::Camcorder,
            0b001110 => AudioVideoDeviceClass::VideoMonitor,
            0b001111 => AudioVideoDeviceClass::VideoDisplayLoudspeaker,
            0b010000 => AudioVideoDeviceClass::VideoConferencing,
            0b010001 => AudioVideoDeviceClass::Unknown,
            0b010010 => AudioVideoDeviceClass::Gaming,
            _ => AudioVideoDeviceClass::Unknown,
        }),
        0b00101 => DeviceClass::Peripheral {
            // Capability flags live in bits 6 and 7; the remaining minor
            // class is packed into bits 2-5.
            keyboard: class_bits[6],
            pointer: class_bits[7],
            class: match class_bits[2..6].load::<u8>() {
                0b0000 => PeripheralDeviceClass::Uncategorized,
                0b0001 => PeripheralDeviceClass::Joystick,
                0b0010 => PeripheralDeviceClass::Gamepad,
                0b0011 => PeripheralDeviceClass::Remote,
                0b0100 => PeripheralDeviceClass::Sensor,
                0b0101 => PeripheralDeviceClass::Digitizer,
                0b0110 => PeripheralDeviceClass::CardReader,
                0b0111 => PeripheralDeviceClass::Pen,
                0b1000 => PeripheralDeviceClass::Scanner,
                0b1001 => PeripheralDeviceClass::Wand,
                _ => PeripheralDeviceClass::Unknown,
            },
        },
        // Imaging carries four independent capability bits, no minor enum.
        0b00110 => DeviceClass::Imaging {
            display: class_bits[4],
            camera: class_bits[5],
            scanner: class_bits[6],
            printer: class_bits[7],
        },
        0b00111 => DeviceClass::Wearable(match class_bits[2..8].load::<u8>() {
            0b0001 => WearableDeviceClass::Wristwatch,
            0b0010 => WearableDeviceClass::Pager,
            0b0011 => WearableDeviceClass::Jacket,
            0b0100 => WearableDeviceClass::Helmet,
            0b0101 => WearableDeviceClass::Glasses,
            _ => WearableDeviceClass::Unknown,
        }),
        0b01000 => DeviceClass::Toy(match class_bits[2..8].load::<u8>() {
            0b0001 => ToyDeviceClass::Robot,
            0b0010 => ToyDeviceClass::Vehicle,
            0b0011 => ToyDeviceClass::Doll,
            0b0100 => ToyDeviceClass::Controller,
            0b0101 => ToyDeviceClass::Game,
            _ => ToyDeviceClass::Unknown,
        }),
        0b01001 => DeviceClass::Health(match class_bits[2..8].load::<u8>() {
            0b000001 => HealthDeviceClass::BloodPressureMeter,
            0b000010 => HealthDeviceClass::Thermometer,
            0b000011 => HealthDeviceClass::WeightScale,
            0b000100 => HealthDeviceClass::GlucoseMeter,
            0b000101 => HealthDeviceClass::PulseOximeter,
            0b000110 => HealthDeviceClass::HeartRateMonitor,
            0b000111 => HealthDeviceClass::HealthDataDisplay,
            0b001000 => HealthDeviceClass::StepCounter,
            0b001001 => HealthDeviceClass::BodyCompositionAnalyzer,
            0b001010 => HealthDeviceClass::PeakFlowMonitor,
            0b001011 => HealthDeviceClass::MedicationMonitor,
            0b001100 => HealthDeviceClass::KneeProsthesis,
            0b001101 => HealthDeviceClass::AnkleProsthesis,
            0b001110 => HealthDeviceClass::GenericHealthManager,
            0b001111 => HealthDeviceClass::PersonalMobilityDevice,
            _ => HealthDeviceClass::Unknown,
        }),
        0b11111 => DeviceClass::Uncategorized,
        _ => DeviceClass::Unknown,
    };
    (device_class, service_classes)
}
/// Encodes a `DeviceClass` into the low 13 bits of a Class of Device value.
///
/// Implemented as `From` rather than `Into` (clippy `from_over_into`): the
/// standard blanket impl still provides `Into<u16>`, so existing
/// `let b: u16 = c.into();` callers keep working unchanged.
impl From<DeviceClass> for u16 {
    /// # Panics
    /// Panics with `unimplemented!` for `DeviceClass::AccessPoint`, whose
    /// utilisation fraction has no defined encoding here.
    fn from(class: DeviceClass) -> u16 {
        let mut bits = 0u16;
        match class {
            DeviceClass::Computer(minor) => {
                bits |= 0b00001 << 8;
                match minor {
                    ComputerDeviceClass::Desktop => bits |= 0b000001 << 2,
                    ComputerDeviceClass::Server => bits |= 0b000010 << 2,
                    ComputerDeviceClass::Laptop => bits |= 0b000011 << 2,
                    ComputerDeviceClass::HandheldPDA => bits |= 0b000100 << 2,
                    ComputerDeviceClass::PalmPDA => bits |= 0b000101 << 2,
                    ComputerDeviceClass::Wearable => bits |= 0b000110 << 2,
                    ComputerDeviceClass::Tablet => bits |= 0b000111 << 2,
                    _ => (),
                }
            }
            DeviceClass::Phone(minor) => {
                bits |= 0b00010 << 8;
                match minor {
                    PhoneDeviceClass::Cellular => bits |= 0b000001 << 2,
                    PhoneDeviceClass::Cordless => bits |= 0b000010 << 2,
                    PhoneDeviceClass::Smartphone => bits |= 0b000011 << 2,
                    PhoneDeviceClass::Modem => bits |= 0b000100 << 2,
                    PhoneDeviceClass::ISDN => bits |= 0b000101 << 2,
                    _ => (),
                }
            }
            DeviceClass::AccessPoint(..) => {
                // bits |= 0b00011 << 8;
                unimplemented!()
            }
            DeviceClass::AudioVideo(minor) => {
                bits |= 0b00100 << 8;
                match minor {
                    AudioVideoDeviceClass::Headset => bits |= 0b000001 << 2,
                    AudioVideoDeviceClass::HandsFree => bits |= 0b000010 << 2,
                    // 000011 is reserved
                    AudioVideoDeviceClass::Microphone => bits |= 0b000100 << 2,
                    AudioVideoDeviceClass::Loudspeaker => bits |= 0b000101 << 2,
                    AudioVideoDeviceClass::Headphones => bits |= 0b000110 << 2,
                    AudioVideoDeviceClass::Portable => bits |= 0b000111 << 2,
                    AudioVideoDeviceClass::Car => bits |= 0b001000 << 2,
                    AudioVideoDeviceClass::SetTop => bits |= 0b001001 << 2,
                    AudioVideoDeviceClass::HiFi => bits |= 0b001010 << 2,
                    AudioVideoDeviceClass::VCR => bits |= 0b001011 << 2,
                    AudioVideoDeviceClass::VideoCamera => bits |= 0b001100 << 2,
                    AudioVideoDeviceClass::Camcorder => bits |= 0b001101 << 2,
                    AudioVideoDeviceClass::VideoMonitor => bits |= 0b001110 << 2,
                    AudioVideoDeviceClass::VideoDisplayLoudspeaker => bits |= 0b001111 << 2,
                    AudioVideoDeviceClass::VideoConferencing => bits |= 0b010000 << 2,
                    // 010001 is reserved
                    AudioVideoDeviceClass::Gaming => bits |= 0b010010 << 2,
                    _ => (),
                }
            }
            DeviceClass::Peripheral {
                keyboard,
                pointer,
                class,
            } => {
                bits |= 0b00101 << 8;
                // NOTE(review): keyboard/pointer bit assignment mirrors
                // `from_u32` (bits 6 and 7); verify against the Bluetooth
                // Assigned Numbers document.
                if keyboard {
                    bits |= 1 << 6
                }
                if pointer {
                    bits |= 1 << 7
                }
                match class {
                    PeripheralDeviceClass::Joystick => bits |= 0b0001 << 2,
                    PeripheralDeviceClass::Gamepad => bits |= 0b0010 << 2,
                    PeripheralDeviceClass::Remote => bits |= 0b0011 << 2,
                    PeripheralDeviceClass::Sensor => bits |= 0b0100 << 2,
                    PeripheralDeviceClass::Digitizer => bits |= 0b0101 << 2,
                    PeripheralDeviceClass::CardReader => bits |= 0b0110 << 2,
                    PeripheralDeviceClass::Pen => bits |= 0b0111 << 2,
                    PeripheralDeviceClass::Scanner => bits |= 0b1000 << 2,
                    PeripheralDeviceClass::Wand => bits |= 0b1001 << 2,
                    _ => (),
                }
            }
            DeviceClass::Imaging {
                display,
                camera,
                scanner,
                printer,
            } => {
                bits |= 0b00110 << 8;
                if display {
                    bits |= 1 << 4
                }
                if camera {
                    bits |= 1 << 5
                }
                if scanner {
                    bits |= 1 << 6
                }
                if printer {
                    bits |= 1 << 7
                }
            }
            DeviceClass::Wearable(minor) => {
                bits |= 0b00111 << 8;
                match minor {
                    WearableDeviceClass::Wristwatch => bits |= 0b000001 << 2,
                    WearableDeviceClass::Pager => bits |= 0b000010 << 2,
                    WearableDeviceClass::Jacket => bits |= 0b000011 << 2,
                    WearableDeviceClass::Helmet => bits |= 0b000100 << 2,
                    WearableDeviceClass::Glasses => bits |= 0b000101 << 2,
                    _ => (),
                }
            }
            DeviceClass::Toy(minor) => {
                bits |= 0b01000 << 8;
                match minor {
                    ToyDeviceClass::Robot => bits |= 0b000001 << 2,
                    ToyDeviceClass::Vehicle => bits |= 0b000010 << 2,
                    ToyDeviceClass::Doll => bits |= 0b000011 << 2,
                    ToyDeviceClass::Controller => bits |= 0b000100 << 2,
                    ToyDeviceClass::Game => bits |= 0b000101 << 2,
                    _ => (),
                }
            }
            DeviceClass::Health(minor) => {
                bits |= 0b01001 << 8;
                match minor {
                    HealthDeviceClass::BloodPressureMeter => bits |= 0b000001 << 2,
                    HealthDeviceClass::Thermometer => bits |= 0b000010 << 2,
                    HealthDeviceClass::WeightScale => bits |= 0b000011 << 2,
                    HealthDeviceClass::GlucoseMeter => bits |= 0b000100 << 2,
                    HealthDeviceClass::PulseOximeter => bits |= 0b000101 << 2,
                    HealthDeviceClass::HeartRateMonitor => bits |= 0b000110 << 2,
                    HealthDeviceClass::HealthDataDisplay => bits |= 0b000111 << 2,
                    HealthDeviceClass::StepCounter => bits |= 0b001000 << 2,
                    HealthDeviceClass::BodyCompositionAnalyzer => bits |= 0b001001 << 2,
                    HealthDeviceClass::PeakFlowMonitor => bits |= 0b001010 << 2,
                    HealthDeviceClass::MedicationMonitor => bits |= 0b001011 << 2,
                    HealthDeviceClass::KneeProsthesis => bits |= 0b001100 << 2,
                    HealthDeviceClass::AnkleProsthesis => bits |= 0b001101 << 2,
                    HealthDeviceClass::GenericHealthManager => bits |= 0b001110 << 2,
                    HealthDeviceClass::PersonalMobilityDevice => bits |= 0b001111 << 2,
                    _ => (),
                }
            }
            DeviceClass::Uncategorized => {
                bits |= 0b11111 << 8;
            }
            DeviceClass::Unknown => (),
        }
        bits
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Round-trips one device class through the u16 encoding and back.
    #[test]
    pub fn class() {
        let c = DeviceClass::Computer(ComputerDeviceClass::Laptop);
        let b: u16 = c.into();
        // `{:013b}` zero-pads to the full 13 class bits. The previous spec
        // `{:000000000000b}` parsed as the `0` flag with width 0, padding
        // nothing at all.
        println!("{:013b} (0x{:x})", b, b);
        let (c1, _) = from_u32(b as u32);
        assert_eq!(c, c1);
    }
}
| 36.526906 | 97 | 0.538887 |
21e0e9316fa4be3a4c3d757e6cf91a9b864f9707
| 3,798 |
// Copyright 2021. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::time::Duration;
use tari_comms::peer_manager::NodeId;
use tari_core::transactions::tari_amount::MicroTari;
use tokio::sync::{broadcast, watch};
use crate::util::watch::Watch;
/// Events published by the UTXO scanner over its broadcast channel.
#[derive(Debug, Clone)]
pub enum UtxoScannerEvent {
    /// Attempting a connection to the given base node.
    ConnectingToBaseNode(NodeId),
    /// Connected to the base node. The `Duration` is presumably how long the
    /// connection attempt took — confirm against the emitting code.
    ConnectedToBaseNode(NodeId, Duration),
    /// A connection attempt failed; carries retry bookkeeping and the error text.
    ConnectionFailedToBaseNode {
        peer: NodeId,
        num_retries: usize,
        retry_limit: usize,
        error: String,
    },
    /// A scanning round failed; carries retry bookkeeping and the error text.
    ScanningRoundFailed {
        num_retries: usize,
        retry_limit: usize,
        error: String,
    },
    /// Progress of the recovery process (current_block, current_chain_height)
    Progress {
        current_height: u64,
        tip_height: u64,
    },
    /// Completed Recovery (Number scanned, Num of Recovered outputs, Value of recovered outputs, Time taken)
    Completed {
        final_height: u64,
        num_recovered: u64,
        value_recovered: MicroTari,
        time_taken: Duration,
    },
    /// Scanning process has failed and scanning process has exited
    ScanningFailed,
}
/// Cloneable handle for observing events from, and pushing settings to, a
/// running UTXO scanner.
#[derive(Clone)]
pub struct UtxoScannerHandle {
    // Broadcast sender used to hand out event subscriptions.
    event_sender: broadcast::Sender<UtxoScannerEvent>,
    // Watch holding the note set via `set_one_sided_payment_message`.
    one_sided_message_watch: Watch<String>,
    // Watch holding the note set via `set_recovery_message`.
    recovery_message_watch: Watch<String>,
}
impl UtxoScannerHandle {
    /// Bundles the scanner's event channel and message watches into a handle.
    pub fn new(
        event_sender: broadcast::Sender<UtxoScannerEvent>,
        one_sided_message_watch: Watch<String>,
        recovery_message_watch: Watch<String>,
    ) -> Self {
        UtxoScannerHandle {
            event_sender,
            one_sided_message_watch,
            recovery_message_watch,
        }
    }
    /// Subscribes to scanner events; each call creates an independent receiver.
    pub fn get_event_receiver(&mut self) -> broadcast::Receiver<UtxoScannerEvent> {
        self.event_sender.subscribe()
    }
    /// Updates the note broadcast via the one-sided-payment watch.
    pub fn set_one_sided_payment_message(&mut self, note: String) {
        self.one_sided_message_watch.send(note);
    }
    /// Updates the note broadcast via the recovery watch.
    pub fn set_recovery_message(&mut self, note: String) {
        self.recovery_message_watch.send(note);
    }
    /// Receiver side of the one-sided-payment message watch (crate internal).
    pub(crate) fn get_one_sided_payment_message_watcher(&self) -> watch::Receiver<String> {
        self.one_sided_message_watch.get_receiver()
    }
    /// Receiver side of the recovery message watch (crate internal).
    pub(crate) fn get_recovery_message_watcher(&self) -> watch::Receiver<String> {
        self.recovery_message_watch.get_receiver()
    }
}
| 37.235294 | 118 | 0.718273 |
8a94a1fd3e9a985db8287e6c42a971dd985443af
| 2,281 |
use std::f32::consts;
use std::fmt::Debug;
use super::BxDFType;
use crate::geometry::{abs_cos_theta, same_hemisphere};
use crate::sampling::cosine_sample_hemisphere;
use crate::spectrum::Spectrum;
use crate::{Point2f, Vector3f};
/// A bidirectional reflectance/transmittance distribution function.
pub trait BxDF: Debug {
    /// Evaluate the BxDF for the given incoming and outgoing directions.
    fn f(&self, wo: &Vector3f, wi: &Vector3f) -> Spectrum;
    /// Sample the BxDF for the given outgoing direction, using the given pair of uniform samples.
    ///
    /// The default implementation uses importance sampling by using a cosine-weighted
    /// distribution. It returns `(value, sampled wi, pdf, BxDFType::empty())`;
    /// the sampled direction is flipped into `wo`'s hemisphere.
    fn sample_f(&self, wo: &Vector3f, u: Point2f) -> (Spectrum, Vector3f, f32, BxDFType) {
        let mut wi = cosine_sample_hemisphere(u);
        if wo.z < 0.0 {
            wi.z *= -1.0;
        }
        let pdf = self.pdf(wo, &wi);
        (self.f(wo, &wi), wi, pdf, BxDFType::empty())
    }
    // TODO implement rho functions
    // fn rho(&self, wo: &Vector3f, n_samples: u32) -> (Point2f, Spectrum);
    // fn rho_hh(&self, n_samples: u32) -> (Point2f, Point2f, Spectrum);
    /// True when this BxDF's type flags are all contained in `flags`.
    fn matches(&self, flags: BxDFType) -> bool {
        self.get_type() & flags == self.get_type()
    }
    /// The type flags classifying this BxDF (reflection/transmission, etc.).
    fn get_type(&self) -> BxDFType;
    /// Evaluate the PDF for the given outgoing and incoming directions.
    ///
    /// Note: this method needs to be consistent with ```BxDF::sample_f()```.
    /// The default is the cosine-weighted hemisphere PDF, zero when `wi` lies
    /// in the opposite hemisphere from `wo`.
    fn pdf(&self, wo: &Vector3f, wi: &Vector3f) -> f32 {
        if same_hemisphere(wo, wi) {
            abs_cos_theta(wi) * consts::FRAC_1_PI
        } else {
            0.0
        }
    }
}
/// Adapter that scales every spectrum returned by a wrapped [`BxDF`].
#[derive(Debug, Clone, Copy)]
pub struct ScaledBxDF<'a> {
    // The wrapped reflection model.
    bxdf: &'a dyn BxDF,
    // Per-wavelength scale applied to the wrapped BxDF's results.
    scale: Spectrum,
}
impl<'a> ScaledBxDF<'a> {
    /// Wraps `bxdf` so all of its responses are multiplied by `scale`.
    pub fn new(bxdf: &'a dyn BxDF, scale: Spectrum) -> ScaledBxDF<'a> {
        ScaledBxDF { bxdf, scale }
    }
}
impl<'a> BxDF for ScaledBxDF<'a> {
    fn f(&self, wo: &Vector3f, wi: &Vector3f) -> Spectrum {
        self.bxdf.f(wo, wi) * self.scale
    }
    // Delegates sampling to the inner BxDF, scaling only the spectrum; the
    // sampled direction, pdf and type pass through unchanged.
    fn sample_f(&self, wo: &Vector3f, sample: Point2f) -> (Spectrum, Vector3f, f32, BxDFType) {
        let (spectrum, wi, pdf, bxdftype) = self.bxdf.sample_f(wo, sample);
        (spectrum * self.scale, wi, pdf, bxdftype)
    }
    fn get_type(&self) -> BxDFType {
        self.bxdf.get_type()
    }
}
| 31.680556 | 98 | 0.604559 |
72064abb33f9eb6412ab97907d09c62c7a7630d1
| 11,487 |
use alloc::string::String;
use alloc::vec::Vec;
const MAX_FILE_NAME_LENGTH: usize = 255;
/// States of the character-driven path parser.
// NOTE(review): `STATE` would conventionally be `State` (UpperCamelCase), but
// renaming this public enum would break external users.
pub enum STATE {
    /// No character consumed yet.
    Start,
    /// Accumulating a file-name component.
    FName,
    /// Saw a single leading '.' in the current component.
    DirCur,
    /// Saw ".." at the start of the current component.
    DirParent,
}
/// Errors produced while parsing or normalising a path string.
// `Debug` added so callers can use `unwrap`/`expect` and log these errors.
#[derive(Clone, Copy, Debug)]
pub enum PathFormatError {
    NotAbs,
    NotRel,
    EmptyFileName,
    FileNameTooLong,
    InvalidCharInFileName,
    InvalidCharInFileExt,
    EmptyPath,
    ReferingRootParent,
    Unknown,
}
/// Maps a `PathFormatError` to a human-readable static description.
pub fn to_string(error: PathFormatError) -> &'static str {
    match error {
        PathFormatError::NotAbs => "Path should start with '/'",
        PathFormatError::NotRel => "Processing non-relative path with relative parser",
        PathFormatError::EmptyFileName => "File name is empty",
        PathFormatError::FileNameTooLong => "File name longer than 255 bytes is not allowed",
        PathFormatError::InvalidCharInFileName => "Invalid char is found in file name",
        PathFormatError::InvalidCharInFileExt => "Invalid char is found in file extension",
        PathFormatError::EmptyPath => "Path is empty",
        PathFormatError::ReferingRootParent => "Path invalid because is refering parent of root",
        PathFormatError::Unknown => "unknown error",
    }
}
/// A parsed path: an ordered list of components plus flags recording how the
/// original text was written.
#[derive(Clone)]
pub struct Path {
    /// Components in order; ".." entries remain until `purge` resolves them.
    pub path: Vec<String>,
    /// True when the textual form demands a directory (trailing '/' or '.').
    pub must_dir: bool,
    /// True when the path is absolute (started with '/').
    pub is_abs: bool,
}
impl Path {
    /// Creates an empty absolute path (the root, "/").
    fn new() -> Path {
        Path {
            path: Vec::new(),
            must_dir: false,
            is_abs: true,
        }
    }
    /// Resolves ".." components in place.
    ///
    /// For absolute paths, a ".." that would climb above the root is an
    /// error. For relative paths, leading ".." components are preserved:
    /// they cannot be resolved without knowing the base directory. (The
    /// previous implementation computed `idx - 1` with `idx == 0` for such
    /// paths and panicked on the index underflow.)
    pub fn purge(&mut self) -> Result<(), PathFormatError> {
        let mut idx = 0;
        while idx < self.path.len() {
            if self.path[idx] == ".." {
                if idx == 0 {
                    if self.is_abs {
                        return Err(PathFormatError::ReferingRootParent);
                    }
                    // Unresolvable leading ".." of a relative path: keep it.
                    idx += 1;
                } else if self.path[idx - 1] == ".." {
                    // The previous component is itself an unresolved "..";
                    // keep both (relative paths may begin with several "..").
                    idx += 1;
                } else {
                    // Cancel a "<name>/.." pair.
                    self.path.remove(idx);
                    self.path.remove(idx - 1);
                    idx -= 1;
                }
            } else {
                idx += 1;
            }
        }
        Ok(())
    }
    /// Renders the path back to text: "/a/b" when absolute, "./a/b" otherwise.
    pub fn to_string(&self) -> String {
        let mut res = String::new();
        if !self.is_abs {
            res.push('.');
        }
        for part in self.path.iter() {
            res.push('/');
            res.push_str(part.as_str());
        }
        res
    }
    /// Splits off the last component as a new relative `Path`, or returns
    /// `None` when empty. After a successful pop, `self` necessarily denotes
    /// a directory, so its `must_dir` flag is set.
    pub fn pop(&mut self) -> Option<Path> {
        if self.path.len() != 0 {
            let vt = vec![self.path.pop().unwrap()];
            let p = Path {
                path: vt,
                must_dir: self.must_dir,
                is_abs: false,
            };
            self.must_dir = true;
            return Some(p);
        } else {
            return None;
        }
    }
}
/// Incremental, character-at-a-time path parser (see `parse_path`).
struct PathParser {
    // Current state of the state machine.
    state: STATE,
    // Bytes of the component currently being accumulated.
    buf: String,
    // Components completed so far.
    path: Path,
    // Sticky terminal result; once set, `read` keeps returning it.
    result: Option<Result<Path, PathFormatError>>,
}
/// Reports whether `c` may appear inside a file name. Every character is
/// currently accepted; this is the single hook where a stricter policy
/// would be enforced.
fn valid_fname_char(_c: char) -> bool {
    true
}
impl PathParser {
    /// Creates a parser in the initial state; the path is assumed absolute
    /// until a non-'/' first character proves otherwise.
    fn new() -> PathParser {
        return PathParser {
            state: STATE::Start,
            buf: String::with_capacity(MAX_FILE_NAME_LENGTH),
            path: Path::new(),
            result: None,
        };
    }
    /// Feeds one character into the state machine.
    ///
    /// Returns `Some(Err(..))` as soon as the path is known malformed (the
    /// error is sticky and repeated on later calls), `None` while parsing
    /// can continue. The length checks compare against
    /// `MAX_FILE_NAME_LENGTH` in bytes (previously a magic `255` literal,
    /// now unified with the declared constant).
    fn read(&mut self, c: char) -> Option<Result<Path, PathFormatError>> {
        if let Some(result) = self.result.as_ref() {
            return Some(result.clone());
        }
        match self.state {
            STATE::Start => {
                if c == '/' {
                    self.state = STATE::FName;
                    return None;
                } else {
                    self.path.is_abs = false;
                    if c == '.' {
                        // NOTE(review): Start pushes the '.' into `buf`, but
                        // the FName -> DirCur transition below does not, so a
                        // mid-path hidden name like "a/.b" appears to lose
                        // its leading dot — confirm which behavior is meant.
                        self.buf.push('.');
                        self.state = STATE::DirCur;
                        return None;
                    } else {
                        if valid_fname_char(c) {
                            if self.buf.len() < MAX_FILE_NAME_LENGTH {
                                self.buf.push(c);
                                self.state = STATE::FName;
                                return None;
                            } else {
                                self.result = Some(Err(PathFormatError::FileNameTooLong));
                                return Some(Err(PathFormatError::FileNameTooLong));
                            }
                        } else {
                            self.result = Some(Err(PathFormatError::InvalidCharInFileName));
                            return Some(Err(PathFormatError::InvalidCharInFileName));
                        }
                    }
                }
            },
            STATE::FName => {
                if c == '/' {
                    if self.buf.len() > 0 {
                        self.path.path.push(self.buf.clone());
                        self.buf = String::with_capacity(MAX_FILE_NAME_LENGTH);
                        return None;
                    } else {
                        self.result = Some(Err(PathFormatError::EmptyFileName));
                        return Some(Err(PathFormatError::EmptyFileName));
                    }
                } else if c == '.' && self.buf.len() == 0 {
                    // Component starts with '.': may be ".", ".." or a name.
                    self.state = STATE::DirCur;
                    return None;
                } else {
                    if valid_fname_char(c) {
                        if self.buf.len() < MAX_FILE_NAME_LENGTH {
                            self.buf.push(c);
                            return None;
                        } else {
                            self.result = Some(Err(PathFormatError::FileNameTooLong));
                            return Some(Err(PathFormatError::FileNameTooLong));
                        }
                    } else {
                        self.result = Some(Err(PathFormatError::InvalidCharInFileName));
                        return Some(Err(PathFormatError::InvalidCharInFileName));
                    }
                }
            },
            STATE::DirCur => {
                if c == '/' {
                    // "./" — current directory, contributes no component.
                    self.state = STATE::FName;
                    return None;
                } else if c == '.' {
                    self.state = STATE::DirParent;
                    return None;
                } else if valid_fname_char(c) {
                    self.buf.push(c);
                    self.state = STATE::FName;
                    return None;
                } else {
                    self.result = Some(Err(PathFormatError::InvalidCharInFileName));
                    return Some(Err(PathFormatError::InvalidCharInFileName));
                }
            },
            STATE::DirParent => {
                if c == '/' {
                    // "../" — emit a ".." component.
                    self.state = STATE::FName;
                    self.path.path.push(String::from(".."));
                    // NOTE(review): this pops the '.' that STATE::Start
                    // pushed into `buf`; verify the buffer bookkeeping when
                    // ".." occurs mid-path (where `buf` is already empty).
                    self.buf.pop();
                    return None;
                } else if valid_fname_char(c) {
                    self.buf.push(c);
                    self.state = STATE::FName;
                    return None;
                } else {
                    self.result = Some(Err(PathFormatError::InvalidCharInFileName));
                    return Some(Err(PathFormatError::InvalidCharInFileName));
                }
            }
        }
    }
    /// Finalises parsing: flushes the pending component, marks trailing '/',
    /// "." or ".." as directory-only, and returns the accumulated path (or
    /// the sticky error).
    fn finish(mut self) -> Result<Path, PathFormatError> {
        if let Some(error) = self.result {
            return error;
        }
        match self.state {
            STATE::Start => {
                return Err(PathFormatError::EmptyPath);
            },
            STATE::FName => {
                if self.buf.len() == 0 {
                    self.path.must_dir = true;
                    return Ok(self.path);
                } else {
                    self.path.path.push(self.buf);
                    return Ok(self.path);
                }
            },
            STATE::DirCur => {
                self.path.must_dir = true;
                return Ok(self.path);
            },
            STATE::DirParent => {
                self.path.path.push(String::from(".."));
                self.path.must_dir = true;
                return Ok(self.path);
            }
        }
    }
}
/// Parses `path` into a [`Path`].
///
/// Characters are fed one at a time into a [`PathParser`]; an embedded NUL
/// is treated as a C-style terminator and ends the input early. Returns the
/// first error the parser reports, otherwise the finished path.
pub fn parse_path(path: &str) -> Result<Path, PathFormatError> {
    let mut parser = PathParser::new();
    for c in path.chars().take_while(|&c| c != '\0') {
        if let Some(done) = parser.read(c) {
            return done;
        }
    }
    parser.finish()
}
| 43.34717 | 122 | 0.323235 |
01a9dd42552b2bee4b7dc06aaccfaf49c122705b
| 4,505 |
// Copyright 2018-2022 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Builder for constructing new service orchestrators.
use crate::error::InvalidStateError;
use crate::transport::Connection;
use super::runnable::RunnableServiceOrchestrator;
use super::OrchestratableServiceFactory;
const DEFAULT_INCOMING_CAPACITY: usize = 512;
const DEFAULT_OUTGOING_CAPACITY: usize = 512;
const DEFAULT_CHANNEL_CAPACITY: usize = 512;
/// Builds new [RunnableServiceOrchestrator] instances.
#[derive(Default)]
pub struct ServiceOrchestratorBuilder {
    // Required: connection over which the orchestrator receives messages.
    connection: Option<Box<dyn Connection>>,
    // Optional capacities; defaults applied in `build`.
    incoming_capacity: Option<usize>,
    outgoing_capacity: Option<usize>,
    channel_capacity: Option<usize>,
    // Factories used to create service instances; may be empty.
    service_factories: Vec<Box<dyn OrchestratableServiceFactory>>,
}
impl ServiceOrchestratorBuilder {
    /// Constructs a new builder.
    pub fn new() -> Self {
        Self::default()
    }
    /// Sets the connection for receiving service messages by the resulting ServiceOrchestrator.
    ///
    /// This field is required to construct the final ServiceOrchestrator.
    pub fn with_connection(mut self, connection: Box<dyn Connection>) -> Self {
        self.connection = Some(connection);
        self
    }
    /// Sets the incoming message capacity.
    ///
    /// This limits the amount of messages that may be buffered by the service orchestrator before
    /// blocking new messages.
    pub fn with_incoming_capacity(mut self, incoming_capacity: usize) -> Self {
        self.incoming_capacity = Some(incoming_capacity);
        self
    }
    /// Sets the outgoing message capacity.
    ///
    /// This limits the amount of messages that may be buffered by the service orchestrator when being
    /// sent to external connections.
    pub fn with_outgoing_capacity(mut self, outgoing_capacity: usize) -> Self {
        self.outgoing_capacity = Some(outgoing_capacity);
        self
    }
    /// Sets the internal channel capacity.
    ///
    /// This limits the number of messages that may be buffered when passed between the internal
    /// threads.
    pub fn with_channel_capacity(mut self, channel_capacity: usize) -> Self {
        self.channel_capacity = Some(channel_capacity);
        self
    }
    /// Adds a service factory which will be used to create service instances.
    ///
    /// This function may be called more than once to add additional service factories.
    pub fn with_service_factory(
        mut self,
        service_factory: Box<dyn OrchestratableServiceFactory>,
    ) -> Self {
        self.service_factories.push(service_factory);
        self
    }
    /// Construct the RunnableServiceOrchestrator.
    ///
    /// # Errors
    ///
    /// Returns an InvalidStateError, if any required fields are missing.
    pub fn build(self) -> Result<RunnableServiceOrchestrator, InvalidStateError> {
        let connection = self.connection.ok_or_else(|| {
            InvalidStateError::with_message("A service orchestrator requires a connection".into())
        })?;
        let incoming_capacity = self.incoming_capacity.unwrap_or(DEFAULT_INCOMING_CAPACITY);
        let outgoing_capacity = self.outgoing_capacity.unwrap_or(DEFAULT_OUTGOING_CAPACITY);
        let channel_capacity = self.channel_capacity.unwrap_or(DEFAULT_CHANNEL_CAPACITY);
        // Flatten the types advertised by every factory into a single list,
        // preserving factory order (replaces the previous Vec<Vec<String>>
        // plus manual append loop).
        let supported_service_types: Vec<String> = self
            .service_factories
            .iter()
            .flat_map(|factory| factory.available_service_types().to_vec())
            .collect();
        Ok(RunnableServiceOrchestrator {
            connection,
            service_factories: self.service_factories,
            supported_service_types,
            incoming_capacity,
            outgoing_capacity,
            channel_capacity,
        })
    }
}
| 36.04 | 102 | 0.695228 |
e6c913fa3f242e7858089432b257f7211f60c343
| 13,772 |
//! Tests for client.
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
extern crate bytes;
extern crate futures;
extern crate httpbis;
extern crate log;
extern crate regex;
extern crate httpbis_test;
use httpbis_test::*;
use bytes::Bytes;
use futures::channel::oneshot;
use futures::stream::StreamExt;
use futures::future;
use futures::future::TryFutureExt;
use httpbis::for_test::solicit::DEFAULT_SETTINGS;
use httpbis::for_test::*;
use httpbis::ErrorCode;
use httpbis::*;
use std::task::Poll;
use tokio::runtime::Runtime;
// Checks that a completed POST round-trip leaves no live streams on the
// client connection.
#[test]
fn stream_count() {
    init_logger();
    let (mut server_tester, client) = HttpConnTester::new_server_with_client_xchg();
    assert_eq!(0, client.conn_state().streams.len());
    let req = client
        .start_post("/foobar", "localhost", Bytes::from(&b"xxyy"[..]))
        .collect();
    let headers = server_tester.recv_frame_headers_check(1, false);
    assert_eq!("POST", headers.get(":method"));
    assert_eq!("/foobar", headers.get(":path"));
    let data = server_tester.recv_frame_data_check(1, true);
    assert_eq!(b"xxyy", &data[..]);
    let mut resp_headers = Headers::new();
    resp_headers.add(":status", "200");
    server_tester.send_headers(1, resp_headers, false);
    server_tester.send_data(1, b"aabb", true);
    let mut rt = Runtime::new().unwrap();
    let message = rt.block_on(req).expect("r");
    assert_eq!((b"aabb"[..]).to_owned(), message.body.get_bytes());
    let state: ConnStateSnapshot = client.conn_state();
    assert_eq!(0, state.streams.len(), "{:?}", state);
}
// A server RST_STREAM after response headers must surface as
// Error::RstStreamReceived with the sent error code.
#[test]
fn rst_is_error() {
    init_logger();
    let (mut server_tester, client) = HttpConnTester::new_server_with_client_xchg();
    let req = client.start_get("/fgfg", "localhost").collect();
    let get = server_tester.recv_message(1);
    assert_eq!("GET", get.headers.method());
    server_tester.send_headers(1, Headers::ok_200(), false);
    server_tester.send_rst(1, ErrorCode::InadequateSecurity);
    let mut rt = Runtime::new().unwrap();
    match rt.block_on(req) {
        Ok(..) => panic!("expected error"),
        Err(Error::RstStreamReceived(ErrorCode::InadequateSecurity)) => {}
        Err(e) => panic!("wrong error: {:?}", e),
    }
    let state: ConnStateSnapshot = client.conn_state();
    assert_eq!(0, state.streams.len(), "{:?}", state);
}
// Informational (1xx) header blocks before the final response must be
// tolerated by the client.
#[test]
fn handle_1xx_headers() {
    init_logger();
    let (mut server_tester, client) = HttpConnTester::new_server_with_client_xchg();
    let req = client.start_get("/fgfg", "localhost").collect();
    let get = server_tester.recv_message(1);
    assert_eq!("GET", get.headers.method());
    server_tester.send_headers(1, Headers::new_status(100), false);
    server_tester.send_headers(1, Headers::new_status(100), false);
    server_tester.send_headers(1, Headers::ok_200(), false);
    server_tester.send_data(1, b"hello", true);
    let mut rt = Runtime::new().unwrap();
    rt.block_on(req).expect("Should be OK");
    let state: ConnStateSnapshot = client.conn_state();
    assert_eq!(0, state.streams.len(), "{:?}", state);
}
// Dropping an in-flight request must not poison the connection: a second
// request (stream 3) should still succeed and clean up.
#[test]
fn client_call_dropped() {
    init_logger();
    let (mut server_tester, client) = HttpConnTester::new_server_with_client_xchg();
    {
        let req = client.start_get("/fgfg", "localhost");
        server_tester.recv_message(1);
        drop(req);
        server_tester.send_headers(1, Headers::ok_200(), true);
    }
    let mut rt = Runtime::new().unwrap();
    {
        let req = client.start_get("/fgfg", "localhost").collect();
        server_tester.recv_message(3);
        server_tester.send_headers(3, Headers::ok_200(), true);
        let resp = rt.block_on(req).expect("OK");
        assert_eq!(200, resp.headers.status());
    }
    let state: ConnStateSnapshot = client.conn_state();
    assert_eq!(0, state.streams.len(), "{:?}", state);
}
/// After the server drops the TCP connection the client must transparently
/// reconnect and serve the next request on a fresh connection.
#[test]
fn reconnect_on_disconnect() {
    init_logger();
    let (server, client) = HttpServerTester::new_with_client();
    let mut peer = server.accept_xchg();
    let mut runtime = Runtime::new().unwrap();
    {
        // First request succeeds on the initial connection.
        let response_fut = client.start_get("/111", "localhost").collect();
        peer.recv_message(1);
        peer.send_headers(1, Headers::ok_200(), true);
        let response = runtime.block_on(response_fut).expect("OK");
        assert_eq!(200, response.headers.status());
    }
    // Kill the server side of the connection...
    drop(peer);
    // ...and poll until the client notices the connection is gone.
    while runtime.block_on(client.dump_state()).is_ok() {
        thread::sleep(Duration::from_millis(1));
    }
    {
        // Second request triggers a reconnect; serve it on a new socket.
        let response_fut = client.start_get("/222", "localhost").collect();
        let mut peer = server.accept();
        peer.recv_preface();
        peer.settings_xchg_but_ack();
        peer.recv_message(1);
        peer.send_headers(1, Headers::ok_200(), true);
        let response = runtime.block_on(response_fut).expect("OK");
        assert_eq!(200, response.headers.status());
    }
}
/// A GOAWAY frame ends the current connection; the client must establish
/// a new one and complete subsequent requests on it.
#[test]
fn reconnect_on_goaway() {
    init_logger();
    let (server, client) = HttpServerTester::new_with_client();
    let mut runtime = Runtime::new().unwrap();
    {
        let mut peer = server.accept_xchg();
        let response_fut = client.start_get("/111", "localhost").collect();
        peer.recv_message(1);
        peer.send_headers(1, Headers::ok_200(), true);
        let response = runtime.block_on(response_fut).expect("OK");
        assert_eq!(200, response.headers.status());
        // Tell the client to go away; it should close the socket.
        peer.send_goaway(1);
        peer.recv_eof();
    }
    {
        // Wait for the reconnect, then serve one more request.
        let connect = client.wait_for_connect();
        let mut peer = server.accept_xchg();
        runtime.block_on(connect).expect("connect");
        let response_fut = client.start_get("/111", "localhost").collect();
        peer.recv_message(1);
        peer.send_headers(1, Headers::ok_200(), true);
        let response = runtime.block_on(response_fut).expect("OK");
        assert_eq!(200, response.headers.status());
    }
}
/// Regression test for issue #89: opening a new stream after body data was
/// received on an earlier stream must give the new stream a full initial
/// incoming flow-control window.
#[test]
pub fn issue_89() {
    init_logger();
    let mut rt = Runtime::new().unwrap();
    let (mut server_tester, client) = HttpConnTester::new_server_with_client_xchg();
    let r1 = client.start_get("/r1", "localhost");
    server_tester.recv_frame_headers_check(1, true);
    server_tester.send_headers(1, Headers::ok_200(), false);
    let (_, resp1) = rt.block_on(r1.0).unwrap();
    let mut resp1 = resp1.filter_data();
    // The server's view of its out window matches the client's in window.
    assert_eq!(
        server_tester.out_window_size.size(),
        client.conn_state().in_window_size
    );
    let w = DEFAULT_SETTINGS.initial_window_size;
    assert_eq!(w as i32, client.conn_state().in_window_size);
    // Deliver two bytes of body on stream 1.
    server_tester.send_data(1, &[17, 19], false);
    assert_eq!(2, rt.block_on(resp1.next()).unwrap().unwrap().len());
    // client does not send WINDOW_UPDATE on such small changes
    assert_eq!((w - 2) as i32, client.conn_state().in_window_size);
    let _r3 = client.start_get("/r3", "localhost");
    // This is the cause of issue #89
    // The new stream (id 3) must start with the full initial window, not
    // the connection's reduced one.
    assert_eq!(w as i32, client.stream_state(3).in_window_size);
    // Cannot reliably check that stream actually resets
}
/// Clients attached to a caller-provided (external) event loop handle must
/// work: two such clients issue requests against the echo server.
#[test]
fn external_event_loop() {
    init_logger();
    let mut runtime = Runtime::new().unwrap();
    let server = ServerTest::new();
    let port = server.port;
    let (clients_tx, clients_rx) = mpsc::channel();
    let (shutdown_tx, shutdown_rx) = oneshot::channel();
    let loop_thread = thread::spawn(move || {
        let mut core = Runtime::new().expect("Core::new");
        let mut clients = Vec::new();
        for _ in 0..2 {
            let mut builder = ClientBuilder::new_plain();
            builder.set_addr((BIND_HOST, port)).expect("set_addr");
            // Attach the client to this thread's event loop.
            builder.event_loop = Some(core.handle().clone());
            clients.push(builder.build().expect("client"));
        }
        clients_tx.send(clients).expect("send clients");
        // Keep the loop alive until the test signals shutdown.
        core.block_on(shutdown_rx.map_err(|_| panic!("aaa")))
            .expect("run");
    });
    for client in clients_rx.recv().expect("rx") {
        let get = client.start_get("/echo", "localhost");
        assert_eq!(
            200,
            runtime.block_on(get.collect()).expect("get").headers.status()
        );
    }
    shutdown_tx.send(()).expect("send");
    loop_thread.join().expect("join");
}
/// Exercises sink flow control: `poll` must report readiness according to
/// the connection- and stream-level flow-control windows, and WINDOW_UPDATE
/// frames from the peer must unblock a pending sink.
#[test]
pub fn sink_poll() {
    init_logger();
    let mut rt = Runtime::new().unwrap();
    let (mut server_tester, client) = HttpConnTester::new_server_with_client_xchg();
    let (mut sender, _response) = rt
        .block_on(client.start_post_sink("/foo", "sink"))
        .expect("start_post_sink");
    server_tester.recv_frame_headers_check(1, false);
    // Fresh stream: every window starts at the default 65535.
    assert_eq!(65535, client.conn_state().in_window_size);
    assert_eq!(65535, client.conn_state().out_window_size);
    assert_eq!(65535, client.conn_state().pump_out_window_size);
    assert_eq!(65535, client.stream_state(1).in_window_size);
    assert_eq!(65535, client.stream_state(1).pump_out_window_size);
    // With window available the sink is immediately ready.
    assert_eq!(
        Poll::Ready(Ok(())),
        sender.poll(&mut NopRuntime::new().context())
    );
    // Send exactly one full window of data; it arrives split into frames.
    let b = Bytes::from(vec![1; 65_535]);
    sender.send_data(b.clone()).expect("send_data");
    assert_eq!(
        b,
        Bytes::from(server_tester.recv_frames_data_check(1, 16_384, 65_535, false))
    );
    // Both the connection and stream out windows are now exhausted.
    assert_eq!(65535, client.conn_state().in_window_size);
    assert_eq!(0, client.conn_state().out_window_size);
    assert_eq!(0, client.conn_state().pump_out_window_size);
    assert_eq!(65535, client.stream_state(1).in_window_size);
    assert_eq!(0, client.stream_state(1).out_window_size);
    assert_eq!(0, client.stream_state(1).pump_out_window_size);
    let mut rt = Runtime::new().unwrap();
    // With zero window the sink must report Pending.
    let sender = rt.block_on(future::lazy(move |cx| {
        assert_eq!(Poll::Pending, sender.poll(cx));
        future::ok::<_, ()>(sender)
    }));
    let mut sender = rt.block_on(sender).unwrap();
    // Grant 3 bytes on the connection and 5 on the stream; the sink
    // becomes ready again.
    server_tester.send_window_update_conn(3);
    server_tester.send_window_update_stream(1, 5);
    rt.block_on(future::poll_fn(|cx| sender.poll(cx))).unwrap();
    assert_eq!(65535, client.conn_state().in_window_size);
    assert_eq!(3, client.conn_state().out_window_size);
    assert_eq!(3, client.conn_state().pump_out_window_size);
    assert_eq!(65535, client.stream_state(1).in_window_size);
    assert_eq!(5, client.stream_state(1).out_window_size);
    assert_eq!(5, client.stream_state(1).pump_out_window_size);
    // Two bytes fit within both windows and are delivered in one frame.
    let b = Bytes::from(vec![11, 22]);
    sender.send_data(b.clone()).expect("send_data");
    assert_eq!(
        b,
        Bytes::from(server_tester.recv_frame_data_check(1, false))
    );
    assert_eq!(65535, client.conn_state().in_window_size);
    assert_eq!(1, client.conn_state().out_window_size);
    assert_eq!(1, client.conn_state().pump_out_window_size);
    assert_eq!(65535, client.stream_state(1).in_window_size);
    assert_eq!(3, client.stream_state(1).out_window_size);
    assert_eq!(3, client.stream_state(1).pump_out_window_size);
    rt.block_on(future::poll_fn(|cx| sender.poll(cx))).unwrap();
    // Two more bytes: only one fits the remaining connection window, so a
    // single byte is flushed and the conn pump window goes negative.
    let b = Bytes::from(vec![33, 44]);
    sender.send_data(b.clone()).expect("send_data");
    assert_eq!(
        b.slice(0..1),
        Bytes::from(server_tester.recv_frame_data_check(1, false))
    );
    assert_eq!(65535, client.conn_state().in_window_size);
    assert_eq!(0, client.conn_state().out_window_size);
    assert_eq!(-1, client.conn_state().pump_out_window_size);
    assert_eq!(65535, client.stream_state(1).in_window_size);
    assert_eq!(2, client.stream_state(1).out_window_size);
    assert_eq!(1, client.stream_state(1).pump_out_window_size);
}
/// When the peer resets a stream while the sink still holds queued
/// (pumped) data, the connection's `pump_out_window_size` must snap back
/// to the real out window, and writing more data into the dead sink must
/// not drain it again.
#[test]
fn sink_reset_by_peer() {
    init_logger();
    let mut rt = Runtime::new().unwrap();
    let (mut server_tester, client) = HttpConnTester::new_server_with_client_xchg();
    let (mut sender, _response) = rt
        .block_on(client.start_post_sink("/foo", "sink"))
        .expect("start_post_sink");
    server_tester.recv_frame_headers_check(1, false);
    // Fresh stream: all windows start at the default 65535.
    assert_eq!(65535, client.conn_state().in_window_size);
    assert_eq!(65535, client.conn_state().out_window_size);
    assert_eq!(65535, client.conn_state().pump_out_window_size);
    assert_eq!(65535, client.stream_state(1).in_window_size);
    assert_eq!(65535, client.stream_state(1).out_window_size);
    assert_eq!(65535, client.stream_state(1).pump_out_window_size);
    assert_eq!(
        Poll::Ready(Ok(())),
        sender.poll(&mut NopRuntime::new().context())
    );
    // Queue two windows' worth of data; only the first window can be sent.
    let b = Bytes::from(vec![1; 65_535 * 2]);
    sender.send_data(b.clone()).expect("send_data");
    assert_eq!(
        b.slice(0..65_535),
        Bytes::from(server_tester.recv_frames_data_check(1, 16_384, 65_535, false))
    );
    // The second window's worth is still queued: pump windows go negative.
    assert_eq!(65535, client.conn_state().in_window_size);
    assert_eq!(0, client.conn_state().out_window_size);
    assert_eq!(-65535, client.conn_state().pump_out_window_size);
    assert_eq!(65535, client.stream_state(1).in_window_size);
    assert_eq!(0, client.stream_state(1).out_window_size);
    assert_eq!(-65535, client.stream_state(1).pump_out_window_size);
    server_tester.send_rst(1, ErrorCode::Cancel);
    // Wait for the client to process the RST_STREAM and drop the stream;
    // sleep instead of busy-spinning (same as `reconnect_on_disconnect`).
    while !client.conn_state().streams.is_empty() {
        thread::sleep(Duration::from_millis(1));
    }
    // pump out window must be reset to out window
    assert_eq!(65535, client.conn_state().in_window_size);
    assert_eq!(0, client.conn_state().out_window_size);
    assert_eq!(0, client.conn_state().pump_out_window_size);
    // check that if more data is sent, pump_out_window_size is not exhausted
    let b = Bytes::from(vec![1; 100_000]);
    sender.send_data(b.clone()).expect("send_data");
    assert_eq!(65535, client.conn_state().in_window_size);
    assert_eq!(0, client.conn_state().out_window_size);
    assert_eq!(0, client.conn_state().pump_out_window_size);
}
| 30.004357 | 84 | 0.655315 |
7293cae75ff7b66021a9bf9c3a13877d7eba3731
| 33,213 |
// Copyright (C) 2017 Christopher R. Field.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate assert_fs;
#[macro_use]
extern crate lazy_static;
extern crate predicates;
extern crate toml;
extern crate wix;
mod common;
use assert_fs::prelude::*;
use predicates::prelude::*;
use crate::common::init_logging;
use crate::common::{MISC_NAME, NO_CAPTURE_VAR_NAME, PACKAGE_NAME, TARGET_NAME};
use std::env;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use toml::Value;
use wix::create::Builder;
use wix::initialize;
use wix::{Result, CARGO_MANIFEST_FILE, WIX};
lazy_static! {
    /// Relative path to the default installer output directory: `target/wix`.
    static ref TARGET_WIX_DIR: PathBuf = {
        let mut p = PathBuf::from(TARGET_NAME);
        p.push(WIX);
        p
    };
}
/// Run the _create_ subcommand, capturing child-process output unless the
/// `CARGO_WIX_TEST_NO_CAPTURE` environment variable is set.
fn run(b: &mut Builder) -> Result<()> {
    let capture = env::var(NO_CAPTURE_VAR_NAME).is_err();
    b.capture_output(capture).build().run()
}
/// Running `create` with all defaults on a freshly initialized package
/// must produce the MSI in `target/wix`.
#[test]
fn default_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// A debug-profile build (`debug_build`) must still produce an MSI with
/// the default (non-debug) file name.
#[test]
fn debug_build_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(Builder::default().debug_build(true));
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// The `debug_name` option must append a `-debug` suffix to the MSI
/// file name.
#[test]
fn debug_name_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64-debug.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(Builder::default().debug_name(true));
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// A package whose `Cargo.toml` carries extra metadata must build, with
/// the MSI named from that metadata (`Metadata-2.1.0-x86_64.msi`).
#[test]
fn metadata_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package_metadata();
    let msi_path = TARGET_WIX_DIR.join("Metadata-2.1.0-x86_64.msi");
    env::set_current_dir(project.path()).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// An output path ending in a forward slash is treated as a directory and
/// the MSI is placed inside it.
#[test]
fn output_trailing_forwardslash_works() {
    init_logging();
    let out_dir = PathBuf::from(TARGET_NAME).join("output_dir");
    let out_dir_arg = format!("{}/", out_dir.to_str().unwrap());
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = out_dir.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(Builder::default().output(Some(out_dir_arg.as_str())));
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// An output path ending in a backslash (Windows-style) is treated as a
/// directory and the MSI is placed inside it.
#[test]
fn output_trailing_backslash_works() {
    init_logging();
    let out_dir = PathBuf::from(TARGET_NAME).join("output_dir");
    let out_dir_arg = format!("{}\\", out_dir.to_str().unwrap());
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = out_dir.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(Builder::default().output(Some(out_dir_arg.as_str())));
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// An output directory that already exists (no trailing separator) is
/// used as-is.
#[test]
fn output_existing_dir_works() {
    init_logging();
    let out_dir = PathBuf::from("output_dir");
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = out_dir.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    // Pre-create the directory so the subcommand sees it existing.
    fs::create_dir(&out_dir).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(Builder::default().output(out_dir.to_str()));
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// An output path naming a file without an extension must gain the
/// `.msi` extension automatically.
#[test]
fn output_file_without_extension_works() {
    init_logging();
    let out_dir = PathBuf::from(TARGET_NAME).join("output_dir");
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let out_file = out_dir.join(PACKAGE_NAME);
    let msi_path = out_dir.join(format!("{}.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(Builder::default().output(out_file.to_str()));
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// An output path naming a `.msi` file explicitly must be used verbatim.
#[test]
fn output_file_with_extension_works() {
    init_logging();
    let out_dir = PathBuf::from(TARGET_NAME).join("output_dir");
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = out_dir.join(format!("{}.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(Builder::default().output(msi_path.to_str()));
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Optional `[package]` fields (description, documentation, homepage,
/// license, repository) added to the manifest must round-trip through
/// `init` and still yield a buildable MSI.
#[test]
fn init_with_package_section_fields_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    let manifest = project.child("Cargo.toml");
    // Load the generated manifest so the `[package]` table can be extended.
    let mut toml: Value = {
        let mut handle = File::open(manifest.path()).unwrap();
        let mut content = String::new();
        handle.read_to_string(&mut content).unwrap();
        toml::from_str(&content).unwrap()
    };
    {
        // Simplified from an `and_then(.. Some(p))` chain: grab the table
        // directly and fail with the same messages as before.
        let table = match toml
            .get_mut("package")
            .expect("A package section for the Cargo.toml")
        {
            Value::Table(ref mut t) => t,
            _ => panic!("The 'package' section is not a table"),
        };
        table.insert(
            String::from("description"),
            Value::from("This is a description"),
        );
        table.insert(
            String::from("documentation"),
            Value::from("https://www.example.com/docs"),
        );
        table.insert(
            String::from("homepage"),
            Value::from("https://www.example.com"),
        );
        table.insert(String::from("license"), Value::from("MIT"));
        table.insert(
            String::from("repository"),
            Value::from("https://www.example.com/repo"),
        );
    }
    // Persist the augmented manifest before initializing.
    let mut handle = File::create(manifest.path()).unwrap();
    handle.write_all(toml.to_string().as_bytes()).unwrap();
    initialize::Execution::default().run().unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Exercise every `init` option at once (banner, binaries, description,
/// dialog, EULA, help URL, license, manufacturer, product icon and name)
/// and confirm the resulting project still builds an MSI.
#[test]
fn init_with_all_options_works() {
    init_logging();
    const LICENSE_FILE: &str = "License_Example.txt";
    const EULA_FILE: &str = "Eula_Example.rtf";
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    // Create every auxiliary asset the options refer to; empty files are
    // enough for the initializer to record their paths.
    let binary = project.path().join("bin").join("Example.exe");
    fs::create_dir(binary.parent().unwrap()).unwrap();
    File::create(&binary).unwrap();
    let banner = project.path().join("img").join("Banner.bmp");
    fs::create_dir(banner.parent().unwrap()).unwrap();
    File::create(&banner).unwrap();
    let dialog = project.path().join("img").join("Dialog.bmp");
    File::create(&dialog).unwrap();
    let license = project.child(LICENSE_FILE);
    File::create(license.path()).unwrap();
    let eula = project.child(EULA_FILE);
    File::create(eula.path()).unwrap();
    let product_icon = project.path().join("img").join("Product.ico");
    File::create(&product_icon).unwrap();
    initialize::Builder::new()
        .banner(banner.to_str())
        .binaries(binary.to_str().map(|b| vec![b]))
        .description(Some("This is a description"))
        .dialog(dialog.to_str())
        .eula(eula.path().to_str())
        .help_url(Some("http://www.example.com"))
        .license(license.path().to_str())
        .manufacturer(Some("Example Manufacturer"))
        .product_icon(product_icon.to_str())
        .product_name(Some("Example Product Name"))
        .build()
        .run()
        .unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Initialization with only the `banner` option must still produce a
/// buildable project.
#[test]
fn init_with_banner_option_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    let banner = project.path().join("img").join("Banner.bmp");
    fs::create_dir(banner.parent().unwrap()).unwrap();
    File::create(&banner).unwrap();
    initialize::Builder::new()
        .banner(banner.to_str())
        .build()
        .run()
        .unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Initialization with an explicit `binaries` list must still produce a
/// buildable project.
#[test]
fn init_with_binaries_option_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    let binary = project.path().join("bin").join("Example.exe");
    fs::create_dir(binary.parent().unwrap()).unwrap();
    File::create(&binary).unwrap();
    initialize::Builder::new()
        .binaries(binary.to_str().map(|b| vec![b]))
        .build()
        .run()
        .unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// A package that defines several binaries must initialize and build
/// without any extra options.
#[test]
fn init_with_multiple_binaries_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package_multiple_binaries();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Builder::new().build().run().unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Initialization with only the `description` option must still produce
/// a buildable project.
#[test]
fn init_with_description_option_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Builder::new()
        .description(Some("This is a description"))
        .build()
        .run()
        .unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Initialization with only the `dialog` image option must still produce
/// a buildable project.
#[test]
fn init_with_dialog_option_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    let dialog = project.path().join("img").join("Dialog.bmp");
    fs::create_dir(dialog.parent().unwrap()).unwrap();
    File::create(&dialog).unwrap();
    initialize::Builder::new()
        .dialog(dialog.to_str())
        .build()
        .run()
        .unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// A EULA file located in the project root must be accepted by the
/// `eula` init option.
#[test]
fn init_with_eula_in_cwd_works() {
    init_logging();
    const EULA_FILE: &str = "Eula_Example.rtf";
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    let eula = project.child(EULA_FILE);
    File::create(eula.path()).unwrap();
    initialize::Builder::new()
        .eula(eula.path().to_str())
        .build()
        .run()
        .expect("Initialization");
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// A EULA file located in a `docs/` subdirectory must be accepted by the
/// `eula` init option.
#[test]
fn init_with_eula_in_docs_works() {
    init_logging();
    const EULA_FILE: &str = "Eula_Example.rtf";
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    let docs = project.child("docs");
    fs::create_dir(docs.path()).unwrap();
    let eula = docs.path().join(EULA_FILE);
    File::create(&eula).unwrap();
    initialize::Builder::new()
        .eula(eula.to_str())
        .build()
        .run()
        .expect("Initialization");
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Initialization with only the `help_url` option must still produce a
/// buildable project.
#[test]
fn init_with_help_url_option_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Builder::new()
        .help_url(Some("http://www.example.com"))
        .build()
        .run()
        .unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// A license file located in the project root must be accepted by the
/// `license` init option.
#[test]
fn init_with_license_in_cwd_works() {
    init_logging();
    const LICENSE_FILE: &str = "License_Example.txt";
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    let license = project.child(LICENSE_FILE);
    File::create(license.path()).unwrap();
    initialize::Builder::new()
        .license(license.path().to_str())
        .build()
        .run()
        .unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// A license file located in a `docs/` subdirectory (not the project
/// root) must be accepted by the `license` init option.
#[test]
fn init_with_license_in_docs_works() {
    init_logging();
    // Renamed from the copy-pasted `EULA_FILE`: this constant names the
    // license file, not a EULA.
    const LICENSE_FILE: &str = "License_Example.txt";
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    let docs = project.child("docs");
    fs::create_dir(docs.path()).unwrap();
    let license = docs.path().join(LICENSE_FILE);
    File::create(&license).unwrap();
    initialize::Builder::new()
        .license(license.to_str())
        .build()
        .run()
        .expect("Initialization");
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Initialization with only the `manufacturer` option must still produce
/// a buildable project.
#[test]
fn init_with_manufacturer_option_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Builder::new()
        .manufacturer(Some("Example Manufacturer"))
        .build()
        .run()
        .unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Initialization with only the `product_icon` option must still produce
/// a buildable project.
#[test]
fn init_with_product_icon_option_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    let product_icon = project.path().join("img").join("Product.ico");
    fs::create_dir(product_icon.parent().unwrap()).unwrap();
    File::create(&product_icon).unwrap();
    initialize::Builder::new()
        .product_icon(product_icon.to_str())
        .build()
        .run()
        .unwrap();
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Initialization with only the `product_name` option must still produce
/// a buildable project; the generated WXS is printed for diagnostics.
#[test]
fn init_with_product_name_option_works() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Builder::new()
        .product_name(Some("Example Product Name"))
        .build()
        .run()
        .unwrap();
    // Dump the generated WXS so a build failure is easier to diagnose.
    let mut wxs_content = String::new();
    File::open(project.child(PathBuf::from(WIX).join("main.wxs")).path())
        .unwrap()
        .read_to_string(&mut wxs_content)
        .unwrap();
    println!("{}", wxs_content);
    let outcome = run(&mut Builder::default());
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Passing the manifest path via `--input` while the working directory is
/// already the project root must work.
#[test]
fn input_works_inside_cwd() {
    init_logging();
    let saved_cwd = env::current_dir().unwrap();
    let project = common::create_test_package();
    let manifest = project.child(CARGO_MANIFEST_FILE);
    let msi_path = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(project.path()).unwrap();
    initialize::Builder::default().build().run().unwrap();
    let outcome = run(Builder::default().input(manifest.path().to_str()));
    env::set_current_dir(saved_cwd).unwrap();
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project.child(msi_path).assert(predicate::path::exists());
}
/// Passing the manifest path via `--input` must work without changing the
/// process working directory at all.
#[test]
fn input_works_outside_cwd() {
    init_logging();
    let project = common::create_test_package();
    let manifest = project.child(CARGO_MANIFEST_FILE);
    let msi_path =
        project.child(TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME)));
    initialize::Builder::default()
        .input(manifest.path().to_str())
        .build()
        .run()
        .unwrap();
    let outcome = run(Builder::default().input(manifest.path().to_str()));
    outcome.expect("OK result");
    project
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    project
        .child(msi_path.path())
        .assert(predicate::path::exists());
}
#[test]
fn includes_works_with_wix_dir() {
    init_logging();
    let original_working_directory = env::current_dir().unwrap();
    let package = common::create_test_package_multiple_wxs_sources();
    let expected_msi_file = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    // Extra WiX sources living outside the default `wix` directory.
    let two_wxs = package.path().join(MISC_NAME).join("two.wxs");
    let three_wxs = package.path().join(MISC_NAME).join("three.wxs");
    env::set_current_dir(package.path()).unwrap();
    // Create the default `wix` dir, then add the extra sources via --include.
    initialize::Builder::default().build().run().unwrap();
    let result = run(Builder::default().includes(Some(vec![
        two_wxs.to_str().unwrap(),
        three_wxs.to_str().unwrap(),
    ])));
    env::set_current_dir(original_working_directory).unwrap();
    result.expect("OK result");
    package
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    package
        .child(expected_msi_file)
        .assert(predicate::path::exists());
}
#[test]
fn includes_works_without_wix_dir() {
    init_logging();
    let original_working_directory = env::current_dir().unwrap();
    let package = common::create_test_package_multiple_wxs_sources();
    let expected_msi_file = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    let one_wxs = package.path().join(MISC_NAME).join("one.wxs");
    let two_wxs = package.path().join(MISC_NAME).join("two.wxs");
    env::set_current_dir(package.path()).unwrap();
    // Deliberately no `initialize` call: the build must succeed with only the
    // explicitly included sources and no default `wix` directory.
    let result = run(Builder::default().includes(Some(vec![
        one_wxs.to_str().unwrap(),
        two_wxs.to_str().unwrap(),
    ])));
    env::set_current_dir(original_working_directory).unwrap();
    result.expect("OK result");
    package
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    package
        .child(expected_msi_file)
        .assert(predicate::path::exists());
}
#[test]
fn includes_works_with_input_outside_cwd() {
    init_logging();
    // Fix: this test changed the process CWD but, unlike every sibling test,
    // never restored it, leaking the change into later tests. Save and
    // restore it around the run.
    let original_working_directory = env::current_dir().unwrap();
    let package = common::create_test_package_multiple_wxs_sources();
    let package_manifest = package.child(CARGO_MANIFEST_FILE);
    let expected_msi_file =
        package.child(TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME)));
    let two_wxs = package.path().join(MISC_NAME).join("two.wxs");
    let three_wxs = package.path().join(MISC_NAME).join("three.wxs");
    env::set_current_dir(package.path()).unwrap();
    initialize::Builder::default()
        .input(package_manifest.path().to_str())
        .build()
        .run()
        .unwrap();
    // Combine an explicit manifest path with extra included WiX sources.
    let result = run(Builder::default()
        .input(package_manifest.path().to_str())
        .includes(Some(vec![
            two_wxs.to_str().unwrap(),
            three_wxs.to_str().unwrap(),
        ])));
    env::set_current_dir(original_working_directory).unwrap();
    result.expect("OK result");
    package
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    package
        .child(expected_msi_file.path())
        .assert(predicate::path::exists());
}
#[test]
fn compiler_args_flags_only_works() {
    init_logging();
    let original_working_directory = env::current_dir().unwrap();
    let package = common::create_test_package();
    let expected_msi_file = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(package.path()).unwrap();
    initialize::Builder::default().build().run().unwrap();
    // Flag-only arguments (no values) forwarded to the WiX compiler (candle).
    let result = run(Builder::default().compiler_args(Some(vec!["-nologo", "-wx"])));
    env::set_current_dir(original_working_directory).unwrap();
    result.expect("OK result");
    package
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    package
        .child(expected_msi_file)
        .assert(predicate::path::exists());
}
#[test]
fn compiler_args_options_works() {
    init_logging();
    let original_working_directory = env::current_dir().unwrap();
    let package = common::create_test_package();
    let expected_msi_file = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(package.path()).unwrap();
    initialize::Builder::default().build().run().unwrap();
    // Option-plus-value arguments forwarded to the WiX compiler.
    let result = run(Builder::default().compiler_args(Some(vec!["-arch", "x64"])));
    env::set_current_dir(original_working_directory).unwrap();
    result.expect("OK result");
    package
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    package
        .child(expected_msi_file)
        .assert(predicate::path::exists());
}
#[test]
fn linker_args_flags_only_works() {
    init_logging();
    let original_working_directory = env::current_dir().unwrap();
    let package = common::create_test_package();
    let expected_msi_file = TARGET_WIX_DIR.join(format!("{}-0.1.0-x86_64.msi", PACKAGE_NAME));
    env::set_current_dir(package.path()).unwrap();
    initialize::Builder::default().build().run().unwrap();
    // Flag-only arguments forwarded to the WiX linker (light).
    let result = run(Builder::default().linker_args(Some(vec!["-nologo", "-wx"])));
    env::set_current_dir(original_working_directory).unwrap();
    result.expect("OK result");
    package
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    package
        .child(expected_msi_file)
        .assert(predicate::path::exists());
}
#[test]
fn compiler_and_linker_args_works_with_metadata() {
    init_logging();
    let original_working_directory = env::current_dir().unwrap();
    // This package carries its configuration (including compiler/linker args)
    // in Cargo.toml metadata, so the run uses a plain default Builder.
    let package = common::create_test_package_metadata();
    let expected_msi_file = TARGET_WIX_DIR.join("Metadata-2.1.0-x86_64.msi");
    env::set_current_dir(package.path()).unwrap();
    initialize::Builder::default().build().run().unwrap();
    let result = run(&mut Builder::default());
    env::set_current_dir(original_working_directory).unwrap();
    result.expect("OK result");
    package
        .child(TARGET_WIX_DIR.as_path())
        .assert(predicate::path::exists());
    package
        .child(expected_msi_file)
        .assert(predicate::path::exists());
}
| 37.06808 | 100 | 0.65095 |
488f3fb7b06c62ca666eb76ceae72ccaca93f7fe
| 3,417 |
use macroquad::prelude::*;
use macroquad_tiled as tiled;
use physics_platformer::*;
/// The controllable character: a physics `Actor` plus its current velocity.
struct Player {
    /// Handle into the physics `World` (8x8 actor).
    collider: Actor,
    /// Velocity in pixels per second; y grows downward (gravity adds to it).
    speed: Vec2,
}
/// A moving solid platform that actors can stand on.
struct Platform {
    /// Handle into the physics `World` (32x8 solid).
    collider: Solid,
    /// Horizontal speed in pixels per second; sign flips at the patrol edges.
    speed: f32,
}
#[macroquad::main("Platformer")]
async fn main() {
    // Load assets and build the tiled map (tileset referenced by name).
    let tileset = load_texture("examples/tileset.png").await;
    set_texture_filter(tileset, FilterMode::Nearest);
    let tiled_map_json = load_string("examples/map.json").await.unwrap();
    let tiled_map = tiled::load_map(&tiled_map_json, &[("tileset.png", tileset)], &[]).unwrap();
    // One bool per tile: true where the map has a tile, i.e. a solid cell.
    let mut static_colliders = vec![];
    for (_x, _y, tile) in tiled_map.tiles("main layer", None) {
        static_colliders.push(tile.is_some());
    }
    let mut world = World::new();
    // 8x8 tiles, 40 tiles wide, collision tag 1.
    world.add_static_tiled_layer(static_colliders, 8., 8., 40, 1);
    let mut player = Player {
        collider: world.add_actor(vec2(50.0, 80.0), 8, 8),
        speed: vec2(0., 0.),
    };
    let mut platform = Platform {
        collider: world.add_solid(vec2(170.0, 130.0), 32, 8),
        speed: 50.,
    };
    // Fixed 320x152 virtual resolution.
    let camera = Camera2D::from_display_rect(Rect::new(0.0, 0.0, 320.0, 152.0));
    loop {
        clear_background(BLACK);
        set_camera(camera);
        tiled_map.draw_tiles("main layer", Rect::new(0.0, 0.0, 320.0, 152.0), None);
        // draw platform
        {
            let pos = world.solid_pos(platform.collider);
            tiled_map.spr_ex(
                "tileset",
                Rect::new(6.0 * 8.0, 0.0, 32.0, 8.0),
                Rect::new(pos.x, pos.y, 32.0, 8.0),
            )
        }
        // draw player
        {
            // sprite id from tiled
            const PLAYER_SPRITE: u32 = 120;
            let pos = world.actor_pos(player.collider);
            // Flip the sprite horizontally (negative width) when moving left.
            if player.speed.x >= 0.0 {
                tiled_map.spr("tileset", PLAYER_SPRITE, Rect::new(pos.x, pos.y, 8.0, 8.0));
            } else {
                tiled_map.spr(
                    "tileset",
                    PLAYER_SPRITE,
                    Rect::new(pos.x + 8.0, pos.y, -8.0, 8.0),
                );
            }
        }
        // player movement control
        {
            let pos = world.actor_pos(player.collider);
            // Grounded check: probe one pixel below the player.
            let on_ground = world.collide_check(player.collider, pos + vec2(0., 1.));
            if on_ground == false {
                // Gravity, scaled by the frame time.
                player.speed.y += 500. * get_frame_time();
            }
            if is_key_down(KeyCode::Right) {
                player.speed.x = 100.0;
            } else if is_key_down(KeyCode::Left) {
                player.speed.x = -100.0;
            } else {
                player.speed.x = 0.;
            }
            // Jump only when grounded.
            if is_key_pressed(KeyCode::Space) {
                if on_ground {
                    player.speed.y = -120.;
                }
            }
            world.move_h(player.collider, player.speed.x * get_frame_time());
            world.move_v(player.collider, player.speed.y * get_frame_time());
        }
        // platform movement
        {
            world.solid_move(platform.collider, platform.speed * get_frame_time(), 0.0);
            let pos = world.solid_pos(platform.collider);
            // Patrol between x = 150 and x = 220, reversing at each edge.
            if platform.speed > 1. && pos.x >= 220. {
                platform.speed *= -1.;
            }
            if platform.speed < -1. && pos.x <= 150. {
                platform.speed *= -1.;
            }
        }
        next_frame().await
    }
}
| 28.239669 | 96 | 0.508341 |
cc9fb769f62dbcd9925bb2792d3982262280f01c
| 522 |
// Attempts to transmute a value's lifetime parameter from 'b to 'static.
// NOTE(review): `R<'b>` applies a lifetime argument to a bare type parameter,
// which Rust does not accept — this function does not compile as written.
// Even if expressed with a concrete type constructor, erasing a lifetime to
// 'static like this is unsound unless the caller guarantees the referent
// outlives all uses.
pub fn global<'b, R>(r: R<'b>) -> R<'static> {
unsafe{core::mem::transmute::<R<'b>, R<'static>>(r)}
}
// Attempts the inverse of `global`: shortens a 'static lifetime parameter to
// a caller-chosen 'c. NOTE(review): as with `global`, `R<'static>`/`R<'c>`
// apply lifetime arguments to a bare type parameter, which is not valid Rust;
// this does not compile as written.
pub fn local<'b, 'c, R>(r: &'b mut R<'static>)
-> &'b mut R<'c> {
unsafe{core::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r)}
}
pub unsafe fn mutt<T>(e: &T) -> &mut T{
(core::mem::transmute<*const T,*mut T>(&e as *const T)) as &mut T
}
/// Returns the same shared reference twice.
///
/// Fix: the original round-tripped the pointer through `u64` and then used
/// `n as *const T as &T`, which is not a valid cast chain in Rust (raw
/// pointers cannot be `as`-cast to references). Duplicating a shared (`&T`)
/// reference is always sound and needs no `unsafe` at all.
pub fn duplicate<T>(a: &T) -> [&T; 2] {
    [a, a]
}
| 30.705882 | 69 | 0.503831 |
09182f93ba6c02ff6399350ffec1fc41e2acff50
| 7,238 |
use std::cmp;
use termion::color;
use unicode_segmentation::UnicodeSegmentation;
use crate::editor::SearchDirection;
use crate::highlighting;
/// A single line of the edited document, stored with cached metadata.
#[derive(Default)]
pub struct Row {
    /// Raw UTF-8 contents of the line.
    string: String,
    /// Per-position highlight types, rebuilt by `highlight`.
    highlights: Vec<highlighting::Type>,
    /// Cached grapheme count of `string` (not the byte length).
    len: usize,
}
impl From<&str> for Row {
    /// Builds a `Row` from a string slice, caching its grapheme count up
    /// front. Highlights start empty and are computed later by `highlight`.
    fn from(slice: &str) -> Self {
        let len = slice.graphemes(true).count();
        Self {
            string: slice.to_owned(),
            highlights: Vec::new(),
            len,
        }
    }
}
impl Row {
    /// Renders graphemes `[start, end)` of the row as a displayable string,
    /// emitting termion color escapes whenever the highlight type changes and
    /// replacing tabs with a single space. A color reset is always appended.
    ///
    /// NOTE(review): `end` is clamped against `self.string.len()` (a byte
    /// count) but is then used as a grapheme index; for multi-byte text the
    /// two differ — confirm whether clamping against `self.len` was intended.
    pub fn render(&self, start: usize, end: usize) -> String {
        let end = cmp::min(end, self.string.len());
        let start = cmp::min(start, end);
        let mut result = String::new();
        let mut current_highlighting = &highlighting::Type::None;
        #[allow(clippy::integer_arithmetic)]
        for (index, grapheme) in self.string[..]
            .graphemes(true)
            .enumerate()
            .skip(start)
            .take(end - start) {
            if let Some(c) = grapheme.chars().next() {
                // Emit a color escape only when the highlight type changes,
                // keeping the rendered string short.
                let highlighting_type = self.highlights.get(index)
                    .unwrap_or(&highlighting::Type::None);
                if highlighting_type != current_highlighting {
                    current_highlighting = highlighting_type;
                    let start_highlight = format!("{}", termion::color::Fg(highlighting_type.to_color()));
                    result.push_str(&start_highlight[..]);
                }
                if c == '\t' {
                    result.push_str(" ");
                } else {
                    result.push_str(grapheme);
                }
            }
        }
        // Reset the foreground color so later terminal output is unaffected.
        let end_highlight = format!("{}", termion::color::Fg(color::Reset));
        result.push_str(&end_highlight[..]);
        result
    }
    /// Returns `true` when the row contains no graphemes.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// Returns the cached grapheme count (not the byte length).
    pub fn len(&self) -> usize{
        self.len
    }
    /// Inserts `c` before grapheme index `at`; appends when `at` is at or
    /// past the end. Rebuilds the backing string and recounts in one pass.
    pub fn insert(&mut self, at: usize, c: char){
        if at >= self.len() {
            // Fast path: appending needs no rebuild.
            self.string.push(c);
            self.len += 1;
            return;
        }
        let mut result: String = String::new();
        let mut length = 0;
        for (index, grapheme) in self.string[..].graphemes(true).enumerate() {
            length += 1;
            if index == at {
                length += 1;
                // New char goes *before* the grapheme currently at `at`.
                result.push(c);
            }
            result.push_str(grapheme);
        }
        self.len = length;
        self.string = result;
    }
    /// Deletes the grapheme at index `at`; out-of-range indices are a no-op.
    #[allow(clippy::integer_arithmetic)]
    pub fn delete(&mut self, at: usize){
        if at >= self.len() {
            return;
        }
        let mut result: String = String::new();
        let mut length = 0;
        for (index, grapheme) in self.string[..].graphemes(true).enumerate() {
            if index != at {
                length += 1;
                result.push_str(grapheme);
            }
        }
        self.len = length;
        self.string = result;
    }
    /// Appends another row's contents onto this one (used when joining lines).
    pub fn append(&mut self, new: &Self) {
        self.string = format!("{}{}", self.string, new.string);
        self.len += new.len;
    }
    /// Splits the row at grapheme index `at`, keeping `[0, at)` in `self` and
    /// returning the remainder as a new row. The returned row starts with
    /// empty highlights; callers must re-run `highlight`.
    pub fn split(&mut self, at: usize) -> Self {
        let mut row: String = String::new();
        let mut length = 0;
        let mut splitted_row: String = String::new();
        let mut splitted_length = 0;
        for (index, grapheme) in self.string[..].graphemes(true).enumerate() {
            if index < at {
                length += 1;
                row.push_str(grapheme);
            } else {
                splitted_length += 1;
                splitted_row.push_str(grapheme);
            }
        }
        self.string = row;
        self.len = length;
        Self {
            string: splitted_row,
            highlights: Vec::new(),
            len: splitted_length,
        }
    }
    /// Exposes the raw UTF-8 bytes (used when saving the document).
    pub fn as_bytes(&self) -> &[u8] {
        self.string.as_bytes()
    }
    /// Finds `query` in this row, searching forward in `[at, len)` or
    /// backward in `[0, at)`, and returns the grapheme index of the match.
    pub fn find(&self, query: &str, at: usize, direction: SearchDirection) -> Option<usize> {
        if at > self.len || query.is_empty() {
            return None;
        }
        let start = if direction == SearchDirection::Forward {
            at
        } else {
            0
        };
        let end = if direction == SearchDirection::Forward {
            self.len
        } else {
            at
        };
        #[allow(clippy::integer_arithmetic)]
        let substring: String = self.string[..]
            .graphemes(true)
            .skip(start)
            .take(end - start)
            .collect();
        let matching_byte_index = if direction == SearchDirection::Forward {
            substring.find(query)
        } else {
            substring.rfind(query)
        };
        if let Some(matching_byte_index) = matching_byte_index {
            // Translate the byte offset reported by find/rfind back into a
            // grapheme index relative to the original row.
            for (grapheme_index, (byte_index, _)) in
                substring[..].grapheme_indices(true).enumerate() {
                if matching_byte_index == byte_index {
                    #[allow(clippy::integer_arithmetic)]
                    return Some(start + grapheme_index);
                }
            }
        }
        None
    }
    /// Recomputes the highlight vector: ASCII digits become `Number` and,
    /// when `word` is given, each occurrence of it becomes `Match`.
    ///
    /// NOTE(review): match positions come from `find` (grapheme indices) but
    /// the scan below walks `chars()`; the two can diverge on multi-char
    /// graphemes — confirm acceptable for the supported input.
    pub fn highlight(&mut self, word: Option<&str>) {
        let mut highlights = Vec::new();
        let chars: Vec<char> = self.string.chars().collect();
        // First pass: collect the starting indices of every match of `word`.
        let mut matches = Vec::new();
        let mut search_index = 0;
        if let Some(word) = word {
            while let Some(search_match) = self.find(word, search_index, SearchDirection::Forward) {
                matches.push(search_match);
                // Resume searching just past this match; checked_add guards
                // against overflow on pathological input.
                if let Some(next_index) = search_match.checked_add(word[..].graphemes(true).count()) {
                    search_index = next_index;
                } else {
                    break;
                }
            }
        }
        let mut index = 0;
        while let Some(c) = chars.get(index) {
            if let Some(word) = word {
                if matches.contains(&index){
                    // Mark the whole matched word and skip past it.
                    for _ in word[..].graphemes(true) {
                        index += 1;
                        highlights.push(highlighting::Type::Match);
                    }
                    continue;
                }
            }
            if c.is_ascii_digit() {
                highlights.push(highlighting::Type::Number);
            } else {
                highlights.push(highlighting::Type::None);
            }
            index += 1;
        }
        self.highlights = highlights;
    }
}
| 33.981221 | 110 | 0.43092 |
618d9246e567b1da62f79eaa1c41fe0ab598cd8f
| 20,309 |
use std::io::{self, Write};
use std::time;
use log::{debug, info};
use url::Url;
#[cfg(feature = "cookies")]
use cookie::Cookie;
use crate::body::{self, BodySize, Payload, SizedReader};
use crate::error::{Error, ErrorKind};
use crate::header;
use crate::header::{get_header, Header};
use crate::resolve::ArcResolver;
use crate::response::Response;
use crate::stream::{self, connect_test, Stream};
use crate::Agent;
/// A Unit is fully-built Request, ready to execute.
///
/// *Internal API*
#[derive(Clone)]
pub(crate) struct Unit {
    /// The agent that created this unit; carries config, pool and resolver.
    pub agent: Agent,
    /// HTTP method as written on the wire (e.g. "GET").
    pub method: String,
    /// Fully resolved request URL.
    pub url: Url,
    /// Whether the body will be sent with chunked Transfer-Encoding.
    is_chunked: bool,
    /// User headers plus the derived extras (Content-Length, auth, cookies).
    headers: Vec<Header>,
    /// Absolute point in time after which the request should time out.
    pub deadline: Option<time::Instant>,
}
impl Unit {
    /// Builds a Unit from the user's request parts, deriving the extra
    /// headers ureq adds itself: Content-Length or chunked Transfer-Encoding,
    /// Basic authorization from URL credentials, and stored cookies.
    pub(crate) fn new(
        agent: &Agent,
        method: &str,
        url: &Url,
        headers: &[Header],
        body: &SizedReader,
        deadline: Option<time::Instant>,
    ) -> Self {
        // Determine whether the user already controls Transfer-Encoding.
        let (is_transfer_encoding_set, mut is_chunked) = get_header(headers, "transfer-encoding")
            // if the user has set an encoding header, obey that.
            .map(|enc| {
                let is_transfer_encoding_set = !enc.is_empty();
                // Per RFC 7230, "chunked" must be the last applied coding.
                let last_encoding = enc.split(',').last();
                let is_chunked = last_encoding
                    .map(|last_enc| last_enc.trim() == "chunked")
                    .unwrap_or(false);
                (is_transfer_encoding_set, is_chunked)
            })
            // otherwise, no chunking.
            .unwrap_or((false, false));
        let extra_headers = {
            let mut extra = vec![];
            // chunking and Content-Length headers are mutually exclusive
            // also don't write this if the user has set it themselves
            if !is_chunked && get_header(headers, "content-length").is_none() {
                // if the payload is of known size (everything beside an unsized reader), set
                // Content-Length,
                // otherwise, use the chunked Transfer-Encoding (only if no other Transfer-Encoding
                // has been set
                match body.size {
                    BodySize::Known(size) => {
                        extra.push(Header::new("Content-Length", &format!("{}", size)))
                    }
                    BodySize::Unknown => {
                        if !is_transfer_encoding_set {
                            extra.push(Header::new("Transfer-Encoding", "chunked"));
                            is_chunked = true;
                        }
                    }
                    BodySize::Empty => {}
                }
            }
            // URL credentials become a Basic Authorization header unless the
            // user supplied their own.
            let username = url.username();
            let password = url.password().unwrap_or("");
            if (!username.is_empty() || !password.is_empty())
                && get_header(headers, "authorization").is_none()
            {
                let encoded = base64::encode(&format!("{}:{}", username, password));
                extra.push(Header::new("Authorization", &format!("Basic {}", encoded)));
            }
            #[cfg(feature = "cookies")]
            extra.extend(extract_cookies(agent, &url).into_iter());
            extra
        };
        // User headers come first so derived extras never shadow them.
        let headers: Vec<_> = headers
            .iter()
            .chain(extra_headers.iter())
            .cloned()
            .collect();
        Unit {
            agent: agent.clone(),
            method: method.to_string(),
            url: url.clone(),
            is_chunked,
            headers,
            deadline,
        }
    }
    /// True when this is a HEAD request (case-insensitive).
    pub fn is_head(&self) -> bool {
        self.method.eq_ignore_ascii_case("head")
    }
    /// The DNS resolver configured on the owning agent.
    pub fn resolver(&self) -> ArcResolver {
        self.agent.state.resolver.clone()
    }
    #[cfg(test)]
    pub fn header(&self, name: &str) -> Option<&str> {
        header::get_header(&self.headers, name)
    }
    #[cfg(test)]
    pub fn has(&self, name: &str) -> bool {
        header::has_header(&self.headers, name)
    }
    #[cfg(test)]
    pub fn all(&self, name: &str) -> Vec<&str> {
        header::get_all_headers(&self.headers, name)
    }
    // Returns true if this request, with the provided body, is retryable.
    pub(crate) fn is_retryable(&self, body: &SizedReader) -> bool {
        // Per https://tools.ietf.org/html/rfc7231#section-8.1.3
        // these methods are idempotent.
        let idempotent = match self.method.as_str() {
            "DELETE" | "GET" | "HEAD" | "OPTIONS" | "PUT" | "TRACE" => true,
            _ => false,
        };
        // Unsized bodies aren't retryable because we can't rewind the reader.
        // Sized bodies are retryable only if they are zero-length because of
        // coincidences of the current implementation - the function responsible
        // for retries doesn't have a way to replay a Payload.
        let retryable_body = match body.size {
            BodySize::Unknown => false,
            BodySize::Known(0) => true,
            BodySize::Known(_) => false,
            BodySize::Empty => true,
        };
        idempotent && retryable_body
    }
}
/// Perform a connection. Follows redirects.
///
/// Loops calling `connect_inner` and chasing 3xx Location headers until a
/// non-redirect response arrives, the configured redirect limit is reached,
/// or a redirect cannot be followed. Visited URLs are recorded on the
/// returned response's `history`.
pub(crate) fn connect(
    mut unit: Unit,
    use_pooled: bool,
    mut body: SizedReader,
) -> Result<Response, Error> {
    let mut history = vec![];
    let mut resp = loop {
        let resp = connect_inner(&unit, use_pooled, body, &history)?;
        // handle redirects
        if !(300..399).contains(&resp.status()) || unit.agent.config.redirects == 0 {
            break resp;
        }
        if history.len() + 1 >= unit.agent.config.redirects as usize {
            return Err(ErrorKind::TooManyRedirects.new());
        }
        // the location header
        let location = match resp.header("location") {
            Some(l) => l,
            None => break resp,
        };
        let url = &unit.url;
        let method = &unit.method;
        // join location header to current url in case it is relative
        let new_url = url.join(location).map_err(|e| {
            ErrorKind::InvalidUrl
                .msg(&format!("Bad redirection: {}", location))
                .src(e)
        })?;
        // perform the redirect differently depending on 3xx code.
        let new_method = match resp.status() {
            // this is to follow how curl does it. POST, PUT etc change
            // to GET on a redirect.
            301 | 302 | 303 => match &method[..] {
                "GET" | "HEAD" => unit.method,
                _ => "GET".into(),
            },
            // never change the method for 307/308
            // only resend the request if it cannot have a body
            // NOTE: DELETE is intentionally excluded: https://stackoverflow.com/questions/299628
            307 | 308 if ["GET", "HEAD", "OPTIONS", "TRACE"].contains(&method.as_str()) => {
                unit.method
            }
            _ => break resp,
        };
        debug!("redirect {} {} -> {}", resp.status(), url, new_url);
        history.push(unit.url.to_string());
        // The body has been consumed; redirected requests are sent empty, so
        // drop any stale Content-Length before rebuilding the unit.
        body = Payload::Empty.into_read();
        unit.headers.retain(|h| h.name() != "Content-Length");
        // recreate the unit to get a new hostname and cookies for the new host.
        unit = Unit::new(
            &unit.agent,
            &new_method,
            &new_url,
            &unit.headers,
            &body,
            unit.deadline,
        );
    };
    resp.history = history;
    Ok(resp)
}
/// Perform a connection. Does not follow redirects.
///
/// `previous` is the redirect history so far; a non-empty slice signals a
/// redirected request, which suppresses forwarding the Authorization header
/// in `send_prelude`. Pooled connections that fail early are retried once on
/// a fresh connection.
fn connect_inner(
    unit: &Unit,
    use_pooled: bool,
    body: SizedReader,
    previous: &[String],
) -> Result<Response, Error> {
    let host = unit
        .url
        .host_str()
        // This unwrap is ok because Request::parse_url() ensure there is always a host present.
        .unwrap();
    let url = &unit.url;
    let method = &unit.method;
    // open socket
    let (mut stream, is_recycled) = connect_socket(unit, host, use_pooled)?;
    if is_recycled {
        info!("sending request (reused connection) {} {}", method, url);
    } else {
        info!("sending request {} {}", method, url);
    }
    let send_result = send_prelude(unit, &mut stream, !previous.is_empty());
    if let Err(err) = send_result {
        if is_recycled {
            debug!("retrying request early {} {}: {}", method, url, err);
            // we try open a new connection, this time there will be
            // no connection in the pool. don't use it.
            // NOTE: this recurses at most once because `use_pooled` is `false`.
            return connect_inner(unit, false, body, previous);
        } else {
            // not a pooled connection, propagate the error.
            return Err(err.into());
        }
    }
    // Compute retryability *before* the body reader is consumed below.
    let retryable = unit.is_retryable(&body);
    // send the body (which can be empty now depending on redirects)
    body::send_body(body, unit.is_chunked, &mut stream)?;
    // start reading the response to process cookies and redirects.
    let result = Response::do_from_request(unit.clone(), stream);
    // https://tools.ietf.org/html/rfc7230#section-6.3.1
    // When an inbound connection is closed prematurely, a client MAY
    // open a new connection and automatically retransmit an aborted
    // sequence of requests if all of those requests have idempotent
    // methods.
    //
    // We choose to retry only requests that used a recycled connection
    // from the ConnectionPool, since those are most likely to have
    // reached a server-side timeout. Note that this means we may do
    // up to N+1 total tries, where N is max_idle_connections_per_host.
    let resp = match result {
        Err(err) if err.connection_closed() && retryable && is_recycled => {
            debug!("retrying request {} {}: {}", method, url, err);
            let empty = Payload::Empty.into_read();
            // NOTE: this recurses at most once because `use_pooled` is `false`.
            return connect_inner(unit, false, empty, previous);
        }
        Err(e) => return Err(e),
        Ok(resp) => resp,
    };
    // squirrel away cookies
    #[cfg(feature = "cookies")]
    save_cookies(&unit, &resp);
    debug!("response {} to {} {}", resp.status(), method, url);
    // release the response
    Ok(resp)
}
#[cfg(feature = "cookies")]
/// Builds the outgoing `Cookie` header for `url` from the agent's cookie
/// store. RFC-non-compliant cookies are skipped (and logged), even if the
/// user "prepped" the local store with them. Returns `None` when nothing
/// matches.
fn extract_cookies(agent: &Agent, url: &Url) -> Option<Header> {
    let mut pieces: Vec<String> = Vec::new();
    for cookie in agent.state.cookie_tin.get_request_cookies(url).iter() {
        // Guard against sending rfc non-compliant cookies.
        if is_cookie_rfc_compliant(cookie) {
            pieces.push(cookie.to_string());
        } else {
            debug!("do not send non compliant cookie: {:?}", cookie);
        }
    }
    if pieces.is_empty() {
        None
    } else {
        Some(Header::new("Cookie", &pieces.join(";")))
    }
}
/// Connect the socket, either by using the pool or grab a new one.
///
/// Returns the stream and a flag that is `true` when the stream came from the
/// connection pool (callers use this to decide whether a failure is worth a
/// retry on a fresh connection).
fn connect_socket(unit: &Unit, hostname: &str, use_pooled: bool) -> Result<(Stream, bool), Error> {
    // Validate the scheme up front so pool lookups never see unknown schemes.
    match unit.url.scheme() {
        "http" | "https" | "test" => (),
        scheme => return Err(ErrorKind::UnknownScheme.msg(&format!("unknown scheme '{}'", scheme))),
    };
    if use_pooled {
        let pool = &unit.agent.state.pool;
        let proxy = &unit.agent.config.proxy;
        // The connection may have been closed by the server
        // due to idle timeout while it was sitting in the pool.
        // Loop until we find one that is still good or run out of connections.
        while let Some(stream) = pool.try_get_connection(&unit.url, proxy.clone()) {
            let server_closed = stream.server_closed()?;
            if !server_closed {
                return Ok((stream, true));
            }
            debug!("dropping stream from pool; closed by server: {:?}", stream);
        }
    }
    let stream = match unit.url.scheme() {
        "http" => stream::connect_http(unit, hostname),
        "https" => stream::connect_https(unit, hostname),
        "test" => connect_test(unit),
        scheme => Err(ErrorKind::UnknownScheme.msg(&format!("unknown scheme {}", scheme))),
    };
    Ok((stream?, false))
}
/// Send request line + headers (all up until the body).
///
/// `redir` is `true` for redirected requests; the Authorization header is
/// withheld in that case so credentials do not leak to a new host.
#[allow(clippy::write_with_newline)]
fn send_prelude(unit: &Unit, stream: &mut Stream, redir: bool) -> io::Result<()> {
    // build into a buffer and send in one go.
    let mut prelude: Vec<u8> = vec![];
    // request line
    write!(
        prelude,
        "{} {}{}{} HTTP/1.1\r\n",
        unit.method,
        unit.url.path(),
        if unit.url.query().is_some() { "?" } else { "" },
        unit.url.query().unwrap_or_default(),
    )?;
    // host header if not set by user.
    if !header::has_header(&unit.headers, "host") {
        let host = unit.url.host().unwrap();
        match unit.url.port() {
            Some(port) => {
                // Omit the port when it is the scheme's default (80/443).
                let scheme_default: u16 = match unit.url.scheme() {
                    "http" => 80,
                    "https" => 443,
                    _ => 0,
                };
                if scheme_default != 0 && scheme_default == port {
                    write!(prelude, "Host: {}\r\n", host)?;
                } else {
                    write!(prelude, "Host: {}:{}\r\n", host, port)?;
                }
            }
            None => {
                write!(prelude, "Host: {}\r\n", host)?;
            }
        }
    }
    if !header::has_header(&unit.headers, "user-agent") {
        write!(prelude, "User-Agent: {}\r\n", &unit.agent.config.user_agent)?;
    }
    if !header::has_header(&unit.headers, "accept") {
        write!(prelude, "Accept: */*\r\n")?;
    }
    // other headers
    for header in &unit.headers {
        // Strip Authorization on redirects to avoid leaking credentials.
        if !redir || !header.is_name("Authorization") {
            if let Some(v) = header.value() {
                write!(prelude, "{}: {}\r\n", header.name(), v)?;
            }
        }
    }
    // finish
    write!(prelude, "\r\n")?;
    debug!("writing prelude: {}", String::from_utf8_lossy(&prelude));
    // write all to the wire
    stream.write_all(&prelude[..])?;
    Ok(())
}
/// Investigate a response for "Set-Cookie" headers.
///
/// Parses every Set-Cookie header on `resp`, drops unparseable or
/// RFC-non-compliant cookies (with a debug log), and stores the survivors in
/// the agent's cookie store keyed by the request URL.
#[cfg(feature = "cookies")]
fn save_cookies(unit: &Unit, resp: &Response) {
    let headers = resp.all("set-cookie");
    // Skip locking the cookie store entirely when no cookies arrived.
    if headers.is_empty() {
        return;
    }
    let cookies = headers.into_iter().filter_map(|header_value| {
        debug!(
            "received 'set-cookie: {}' from {} {}",
            header_value, unit.method, unit.url
        );
        let cookie = Cookie::parse(header_value.to_string()).ok()?;
        // This guards against accepting rfc non-compliant cookies from a host.
        if is_cookie_rfc_compliant(&cookie) {
            Some(cookie)
        } else {
            debug!("ignore incoming non compliant cookie: {:?}", cookie);
            None
        }
    });
    unit.agent
        .state
        .cookie_tin
        .store_response_cookies(cookies, &unit.url.clone());
}
#[cfg(feature = "cookies")]
/// Checks a cookie's name and value against the grammar of
/// RFC 6265 (https://tools.ietf.org/html/rfc6265#page-9):
///
/// - cookie-name must be an RFC 2616 `token` (no CTLs or separators),
///   delegated to `header::is_tchar`.
/// - cookie-value bytes must be printable ASCII excluding whitespace,
///   DQUOTE, comma, semicolon and backslash (`cookie-octet`).
///
/// Rejections are logged at trace level.
fn is_cookie_rfc_compliant(cookie: &Cookie) -> bool {
    let name_ok = cookie.name().bytes().all(|b| header::is_tchar(&b));
    if !name_ok {
        log::trace!("cookie name is not valid: {:?}", cookie.name());
        return false;
    }
    let value_ok = cookie.value().bytes().all(|b| {
        b.is_ascii()
            && !b.is_ascii_control()
            && !b.is_ascii_whitespace()
            && b != b'"'
            && b != b','
            && b != b';'
            && b != b'\\'
    });
    if !value_ok {
        log::trace!("cookie value is not valid: {:?}", cookie.value());
        return false;
    }
    true
}
#[cfg(test)]
#[cfg(feature = "cookies")]
mod tests {
    use cookie::Cookie;
    use cookie_store::CookieStore;
    use super::*;
    use crate::Agent;
    ///////////////////// COOKIE TESTS //////////////////////////////
    /// Two stored cookies for the same host must be folded into a single
    /// `Cookie` request header.
    #[test]
    fn match_cookies_returns_one_header() {
        let agent = Agent::new();
        let url: Url = "https://crates.io/".parse().unwrap();
        let cookie1: Cookie = "cookie1=value1; Domain=crates.io; Path=/".parse().unwrap();
        let cookie2: Cookie = "cookie2=value2; Domain=crates.io; Path=/".parse().unwrap();
        agent
            .state
            .cookie_tin
            .store_response_cookies(vec![cookie1, cookie2].into_iter(), &url);
        // There's no guarantee to the order in which cookies are defined.
        // Ensure that they're either in one order or the other.
        let result = extract_cookies(&agent, &url);
        let order1 = "cookie1=value1;cookie2=value2";
        let order2 = "cookie2=value2;cookie1=value1";
        assert!(
            result == Some(Header::new("Cookie", order1))
                || result == Some(Header::new("Cookie", order2))
        );
    }
    /// Cookies already in the store that violate RFC 6265 must be filtered
    /// out of outgoing requests.
    #[test]
    fn not_send_illegal_cookies() {
        // This prepares a cookie store with a cookie that isn't legal
        // according to the relevant rfcs. ureq should not send this.
        let empty = b"";
        let mut store = CookieStore::load_json(&empty[..]).unwrap();
        let url = Url::parse("https://mydomain.com").unwrap();
        let cookie = Cookie::new("borked///", "illegal<>//");
        store.insert_raw(&cookie, &url).unwrap();
        let agent = crate::builder().cookie_store(store).build();
        let cookies = extract_cookies(&agent, &url);
        assert_eq!(cookies, None);
    }
    /// Documents that the cookie crate itself is permissive, which is why
    /// ureq performs its own compliance filtering.
    #[test]
    fn check_cookie_crate_allows_illegal() {
        // This test is there to see whether the cookie crate enforces
        // https://tools.ietf.org/html/rfc6265#page-9
        // https://tools.ietf.org/html/rfc2616#page-17
        // for cookie name or cookie value.
        // As long as it doesn't, we do additional filtering in ureq
        // to not let non-compliant cookies through.
        let cookie = Cookie::parse("borked///=illegal\\,").unwrap();
        // these should not be allowed according to the RFCs.
        assert_eq!(cookie.name(), "borked///");
        assert_eq!(cookie.value(), "illegal\\,");
    }
    #[test]
    fn illegal_cookie_name() {
        let cookie = Cookie::parse("borked/=value").unwrap();
        assert!(!is_cookie_rfc_compliant(&cookie));
    }
    #[test]
    fn illegal_cookie_value() {
        let cookie = Cookie::parse("name=borked,").unwrap();
        assert!(!is_cookie_rfc_compliant(&cookie));
    }
    #[test]
    fn legal_cookie_name_value() {
        let cookie = Cookie::parse("name=value").unwrap();
        assert!(is_cookie_rfc_compliant(&cookie));
    }
}
| 34.247892 | 100 | 0.536659 |
292bb8f4166e3d6d4e5bcd30e045c973ae2643be
| 18,880 |
#![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit = "256"]
// Make the WASM binary available.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
use pallet_grandpa::{
fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList,
};
use sp_api::impl_runtime_apis;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
use sp_runtime::{
create_runtime_str, generic, impl_opaque_keys,
traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify},
transaction_validity::{TransactionSource, TransactionValidity},
ApplyExtrinsicResult, MultiSignature,
};
use sp_std::prelude::*;
#[cfg(feature = "std")]
use sp_version::NativeVersion;
use sp_version::RuntimeVersion;
// A few exports that help ease life for downstream crates.
pub use frame_support::{
construct_runtime, parameter_types,
traits::{KeyOwnerProofSystem, Randomness, StorageInfo},
weights::{
constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
IdentityFee, Weight,
},
StorageValue,
};
pub use pallet_balances::Call as BalancesCall;
pub use pallet_timestamp::Call as TimestampCall;
use pallet_transaction_payment::CurrencyAdapter;
#[cfg(any(feature = "std", test))]
pub use sp_runtime::BuildStorage;
pub use sp_runtime::{Perbill, Permill};
/// Import the template pallet.
pub use pallet_template;
/// An index to a block.
pub type BlockNumber = u32;
/// Alias to 512-bit hash when used in the context of a transaction signature on the chain.
pub type Signature = MultiSignature;
/// Some way of identifying an account on the chain. We intentionally make it equivalent
/// to the public key of our transaction signing scheme.
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
/// Balance of an account.
pub type Balance = u128;
/// Index of a transaction in the chain (the account nonce type).
pub type Index = u32;
/// A hash of some data used by the chain.
pub type Hash = sp_core::H256;
/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know
/// the specifics of the runtime. They can then be made to be agnostic over specific formats
/// of data like extrinsics, allowing for them to continue syncing the network through upgrades
/// to even the core data structures.
pub mod opaque {
    use super::*;
    pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic;
    /// Opaque block header type.
    pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
    /// Opaque block type.
    pub type Block = generic::Block<Header, UncheckedExtrinsic>;
    /// Opaque block identifier type.
    pub type BlockId = generic::BlockId<Block>;
    // Bundles the per-authority session keys (Aura for block authorship,
    // GRANDPA for finality) into a single `SessionKeys` struct.
    impl_opaque_keys! {
        pub struct SessionKeys {
            pub aura: Aura,
            pub grandpa: Grandpa,
        }
    }
}
// To learn more about runtime versioning and what each of the following value means:
// https://substrate.dev/docs/en/knowledgebase/runtime/upgrades#runtime-versioning
#[sp_version::runtime_version]
pub const VERSION: RuntimeVersion = RuntimeVersion {
    spec_name: create_runtime_str!("node-template"),
    impl_name: create_runtime_str!("node-template"),
    authoring_version: 1,
    // The version of the runtime specification. A full node will not attempt to use its native
    // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`,
    // `spec_version`, and `authoring_version` are the same between Wasm and native.
    // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use
    // the compatible custom types.
    spec_version: 100,
    impl_version: 1,
    apis: RUNTIME_API_VERSIONS,
    // NOTE(review): presumably must be bumped whenever the extrinsic format or the
    // `SignedExtra` tuple changes — confirm against the project's upgrade policy.
    transaction_version: 1,
};
/// This determines the average expected block time that we are targeting.
/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`.
/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked
/// up by `pallet_aura` to implement `fn slot_duration()`.
///
/// Change this to adjust the block time.
pub const MILLISECS_PER_BLOCK: u64 = 6000;

// NOTE: Currently it is not possible to change the slot duration after the chain has started.
// Attempting to do so will brick block production.
pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;

// Time is measured by number of blocks.
// With the 6-second block time above, MINUTES == 10 blocks.
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
pub const HOURS: BlockNumber = MINUTES * 60;
pub const DAYS: BlockNumber = HOURS * 24;
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
    // No additional native authoring capabilities beyond the defaults.
    let can_author_with = Default::default();
    NativeVersion { runtime_version: VERSION, can_author_with }
}
// Ratio applied to both the block weight and block length limits below:
// 75% of each block is reserved for `Normal`-class extrinsics.
const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);

parameter_types! {
    pub const Version: RuntimeVersion = VERSION;
    // How many recent block hashes `frame_system` keeps (2400 blocks = 4 hours
    // at the 6-second block time configured above).
    pub const BlockHashCount: BlockNumber = 2400;
    /// We allow for 2 seconds of compute with a 6 second average block time.
    pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights
        ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO);
    // Blocks are capped at 5 MiB, with the normal-dispatch ratio applied.
    pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength
        ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO);
    // 42 is the generic Substrate SS58 address prefix.
    pub const SS58Prefix: u8 = 42;
}
// Configure FRAME pallets to include in runtime.
impl frame_system::Config for Runtime {
    /// The basic call filter to use in dispatchable. `Everything` filters nothing out.
    type BaseCallFilter = frame_support::traits::Everything;
    /// Block & extrinsics weights: base values and limits.
    type BlockWeights = BlockWeights;
    /// The maximum length of a block (in bytes).
    type BlockLength = BlockLength;
    /// The identifier used to distinguish between accounts.
    type AccountId = AccountId;
    /// The aggregated dispatch type that is available for extrinsics.
    type Call = Call;
    /// The lookup mechanism to get account ID from whatever is passed in dispatchers.
    type Lookup = AccountIdLookup<AccountId, ()>;
    /// The index type for storing how many extrinsics an account has signed.
    type Index = Index;
    /// The index type for blocks.
    type BlockNumber = BlockNumber;
    /// The type for hashing blocks and tries.
    type Hash = Hash;
    /// The hashing algorithm used.
    type Hashing = BlakeTwo256;
    /// The header type.
    type Header = generic::Header<BlockNumber, BlakeTwo256>;
    /// The ubiquitous event type.
    type Event = Event;
    /// The ubiquitous origin type.
    type Origin = Origin;
    /// Maximum number of block number to block hash mappings to keep (oldest pruned first).
    type BlockHashCount = BlockHashCount;
    /// The weight of database operations that the runtime can invoke.
    type DbWeight = RocksDbWeight;
    /// Version of the runtime.
    type Version = Version;
    /// Converts a module to the index of the module in `construct_runtime!`.
    ///
    /// This type is being generated by `construct_runtime!`.
    type PalletInfo = PalletInfo;
    /// What to do if a new account is created. `()` means: no extra action.
    type OnNewAccount = ();
    /// What to do if an account is fully reaped from the system. `()` means: no extra action.
    type OnKilledAccount = ();
    /// The data to be stored in an account.
    type AccountData = pallet_balances::AccountData<Balance>;
    /// Weight information for the extrinsics of this pallet.
    type SystemWeightInfo = ();
    /// This is used as an identifier of the chain. 42 is the generic substrate prefix.
    type SS58Prefix = SS58Prefix;
    /// The set code logic, just the default since we're not a parachain.
    type OnSetCode = ();
}
// The randomness-collective-flip pallet has no configurable associated items.
impl pallet_randomness_collective_flip::Config for Runtime {}

parameter_types! {
    // Upper bound on the number of authorities; referenced by both the Aura and
    // GRANDPA configurations.
    pub const MaxAuthorities: u32 = 32;
}

impl pallet_aura::Config for Runtime {
    type AuthorityId = AuraId;
    // `()`: no provider of disabled-validator information is wired in.
    type DisabledValidators = ();
    type MaxAuthorities = MaxAuthorities;
}
impl pallet_grandpa::Config for Runtime {
    type Event = Event;
    type Call = Call;

    // `()` as the key-owner proof system: no real ownership proofs exist, so
    // the runtime API below rejects equivocation reports (see `GrandpaApi`).
    type KeyOwnerProofSystem = ();
    type KeyOwnerProof =
        <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
    type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
        KeyTypeId,
        GrandpaId,
    )>>::IdentificationTuple;
    // No equivocation/offence handling is configured.
    type HandleEquivocation = ();

    type WeightInfo = ();
    type MaxAuthorities = MaxAuthorities;
}
parameter_types! {
    // Minimum interval between the timestamps of consecutive blocks:
    // half the slot duration.
    pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
}

impl pallet_timestamp::Config for Runtime {
    /// A timestamp: milliseconds since the unix epoch.
    type Moment = u64;
    // Aura consumes each newly set timestamp (`OnTimestampSet` hook).
    type OnTimestampSet = Aura;
    type MinimumPeriod = MinimumPeriod;
    type WeightInfo = ();
}
parameter_types! {
    // Minimum balance (in the smallest unit) an account must hold to exist.
    pub const ExistentialDeposit: u128 = 500;
    // Maximum number of balance locks a single account can carry.
    pub const MaxLocks: u32 = 50;
}

impl pallet_balances::Config for Runtime {
    type MaxLocks = MaxLocks;
    type MaxReserves = ();
    type ReserveIdentifier = [u8; 8];
    /// The type for recording an account's balance.
    type Balance = Balance;
    /// The ubiquitous event type.
    type Event = Event;
    // `()`: dust from reaped accounts is simply dropped.
    type DustRemoval = ();
    type ExistentialDeposit = ExistentialDeposit;
    type AccountStore = System;
    // Fixed: the path was corrupted to `pallet_bal-ances::...` (stray hyphen),
    // which is not valid Rust and cannot compile.
    type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>;
}
parameter_types! {
    // Fee charged per byte of encoded transaction length.
    pub const TransactionByteFee: Balance = 1;
}

impl pallet_transaction_payment::Config for Runtime {
    // Fees are settled through the Balances pallet's currency.
    type OnChargeTransaction = CurrencyAdapter<Balances, ()>;
    type TransactionByteFee = TransactionByteFee;
    // `IdentityFee`: one unit of weight maps to one unit of fee.
    type WeightToFee = IdentityFee<Balance>;
    // `()`: the fee multiplier is never adjusted (no congestion-based update).
    type FeeMultiplierUpdate = ();
}
// Sudo: lets the configured sudo key dispatch any `Call` with Root privileges.
impl pallet_sudo::Config for Runtime {
    type Event = Event;
    type Call = Call;
}

/// Configure the pallet-template in pallets/template.
impl pallet_template::Config for Runtime {
    type Event = Event;
}
// Custom constants set by developers, consumed by the Nicks config below.
parameter_types! {
    // Choose a fee that incentivizes desirable behavior.
    pub const NickReservationFee: u128 = 100;
    // Minimum accepted nickname length, in bytes.
    pub const MinNickLength: u32 = 8;
    // Maximum bounds on storage are important to secure your chain.
    pub const MaxNickLength: u32 = 32;
}

/// Configure pallet-nicks. (NOTE(review): an earlier comment claimed this
/// pallet lives "in pallets/template"; it is a separate FRAME pallet.)
impl pallet_nicks::Config for Runtime {
    // The Balances pallet implements the ReservableCurrency trait.
    // `Balances` is defined in `construct_runtime!` macro. See below.
    // https://docs.substrate.io/rustdocs/latest/pallet_balances/index.html#implementations-2
    type Currency = Balances;
    // Use the NickReservationFee from the parameter_types block.
    type ReservationFee = NickReservationFee;
    // No action is taken when deposits are forfeited.
    type Slashed = ();
    // Configure the FRAME System Root origin as the Nick pallet admin.
    // https://docs.substrate.io/rustdocs/latest/frame_system/enum.RawOrigin.html#variant.Root
    type ForceOrigin = frame_system::EnsureRoot<AccountId>;
    // Use the MinNickLength from the parameter_types block.
    type MinLength = MinNickLength;
    // Use the MaxNickLength from the parameter_types block.
    type MaxLength = MaxNickLength;
    // The ubiquitous event type.
    type Event = Event;
}
// Create the runtime by composing the FRAME pallets that were previously configured.
// NOTE(review): the declaration order below is significant — it presumably fixes each
// pallet's index in the encoded runtime; confirm before reordering existing entries.
construct_runtime!(
    pub enum Runtime where
        Block = Block,
        NodeBlock = opaque::Block,
        UncheckedExtrinsic = UncheckedExtrinsic
    {
        System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
        RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage},
        Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
        Aura: pallet_aura::{Pallet, Config<T>},
        Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event},
        Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
        TransactionPayment: pallet_transaction_payment::{Pallet, Storage},
        Sudo: pallet_sudo::{Pallet, Call, Config<T>, Storage, Event<T>},
        // Include the custom logic from the pallet-template in the runtime.
        TemplateModule: pallet_template::{Pallet, Call, Storage, Event<T>},
        /*** Add This Line ***/
        Nicks: pallet_nicks::{Pallet, Call, Storage, Event<T>},
    }
);
/// The address format for describing accounts.
pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
/// Block header type as expected by this runtime.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type as expected by this runtime.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// The SignedExtension to the basic transaction logic.
/// (Tuple order matters — see the `SignedExtension` docs for evaluation order.)
pub type SignedExtra = (
    frame_system::CheckSpecVersion<Runtime>,
    frame_system::CheckTxVersion<Runtime>,
    frame_system::CheckGenesis<Runtime>,
    frame_system::CheckEra<Runtime>,
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPallets,
>;
// The runtime's public API surface: everything the outer node (consensus engine,
// block authoring, transaction pool, RPC) may call into the runtime.
impl_runtime_apis! {
    // Core: versioning and the block execution entry points.
    impl sp_api::Core<Block> for Runtime {
        fn version() -> RuntimeVersion {
            VERSION
        }

        fn execute_block(block: Block) {
            Executive::execute_block(block);
        }

        fn initialize_block(header: &<Block as BlockT>::Header) {
            Executive::initialize_block(header)
        }
    }

    impl sp_api::Metadata<Block> for Runtime {
        fn metadata() -> OpaqueMetadata {
            OpaqueMetadata::new(Runtime::metadata().into())
        }
    }

    // Block authoring: apply/finalize plus inherent creation & validation.
    impl sp_block_builder::BlockBuilder<Block> for Runtime {
        fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
            Executive::apply_extrinsic(extrinsic)
        }

        fn finalize_block() -> <Block as BlockT>::Header {
            Executive::finalize_block()
        }

        fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
            data.create_extrinsics()
        }

        fn check_inherents(
            block: Block,
            data: sp_inherents::InherentData,
        ) -> sp_inherents::CheckInherentsResult {
            data.check_extrinsics(&block)
        }
    }

    impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
        fn validate_transaction(
            source: TransactionSource,
            tx: <Block as BlockT>::Extrinsic,
            block_hash: <Block as BlockT>::Hash,
        ) -> TransactionValidity {
            Executive::validate_transaction(source, tx, block_hash)
        }
    }

    impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
        fn offchain_worker(header: &<Block as BlockT>::Header) {
            Executive::offchain_worker(header)
        }
    }

    impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
        fn slot_duration() -> sp_consensus_aura::SlotDuration {
            sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
        }

        fn authorities() -> Vec<AuraId> {
            Aura::authorities().into_inner()
        }
    }

    impl sp_session::SessionKeys<Block> for Runtime {
        fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
            opaque::SessionKeys::generate(seed)
        }

        fn decode_session_keys(
            encoded: Vec<u8>,
        ) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
            opaque::SessionKeys::decode_into_raw_public_keys(&encoded)
        }
    }

    impl fg_primitives::GrandpaApi<Block> for Runtime {
        fn grandpa_authorities() -> GrandpaAuthorityList {
            Grandpa::grandpa_authorities()
        }

        fn current_set_id() -> fg_primitives::SetId {
            Grandpa::current_set_id()
        }

        // Equivocation reporting is effectively disabled: the GRANDPA config uses
        // `()` for the key-owner proof system, so reports are always rejected.
        fn submit_report_equivocation_unsigned_extrinsic(
            _equivocation_proof: fg_primitives::EquivocationProof<
                <Block as BlockT>::Hash,
                NumberFor<Block>,
            >,
            _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof,
        ) -> Option<()> {
            None
        }

        fn generate_key_ownership_proof(
            _set_id: fg_primitives::SetId,
            _authority_id: GrandpaId,
        ) -> Option<fg_primitives::OpaqueKeyOwnershipProof> {
            // NOTE: this is the only implementation possible since we've
            // defined our key owner proof type as a bottom type (i.e. a type
            // with no values).
            None
        }
    }

    // RPC helper: account nonce lookup used when constructing transactions.
    impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime {
        fn account_nonce(account: AccountId) -> Index {
            System::account_nonce(account)
        }
    }

    // RPC helper: fee/weight estimation for a prospective extrinsic.
    impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
        fn query_info(
            uxt: <Block as BlockT>::Extrinsic,
            len: u32,
        ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
            TransactionPayment::query_info(uxt, len)
        }
        fn query_fee_details(
            uxt: <Block as BlockT>::Extrinsic,
            len: u32,
        ) -> pallet_transaction_payment::FeeDetails<Balance> {
            TransactionPayment::query_fee_details(uxt, len)
        }
    }

    #[cfg(feature = "runtime-benchmarks")]
    impl frame_benchmarking::Benchmark<Block> for Runtime {
        fn benchmark_metadata(extra: bool) -> (
            Vec<frame_benchmarking::BenchmarkList>,
            Vec<frame_support::traits::StorageInfo>,
        ) {
            use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList};
            use frame_support::traits::StorageInfoTrait;
            use frame_system_benchmarking::Pallet as SystemBench;

            let mut list = Vec::<BenchmarkList>::new();

            list_benchmark!(list, extra, frame_system, SystemBench::<Runtime>);
            list_benchmark!(list, extra, pallet_balances, Balances);
            list_benchmark!(list, extra, pallet_timestamp, Timestamp);
            list_benchmark!(list, extra, pallet_template, TemplateModule);

            let storage_info = AllPalletsWithSystem::storage_info();

            return (list, storage_info)
        }

        fn dispatch_benchmark(
            config: frame_benchmarking::BenchmarkConfig
        ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
            use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};

            use frame_system_benchmarking::Pallet as SystemBench;
            impl frame_system_benchmarking::Config for Runtime {}

            // Storage keys excluded from DB access tracking while benchmarking
            // (see `TrackedStorageKey` in frame_benchmarking).
            let whitelist: Vec<TrackedStorageKey> = vec![
                // Block Number
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
                // Total Issuance
                hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
                // Execution Phase
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
                // Event Count
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
                // System Events
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
            ];

            let mut batches = Vec::<BenchmarkBatch>::new();
            let params = (&config, &whitelist);

            add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
            add_benchmark!(params, batches, pallet_balances, Balances);
            add_benchmark!(params, batches, pallet_timestamp, Timestamp);
            add_benchmark!(params, batches, pallet_template, TemplateModule);

            if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
            Ok(batches)
        }
    }
}
| 34.141049 | 106 | 0.74714 |
0a30f772082f2e5056fb8532c13528c9b4e3e2e5
| 404 |
// https://leetcode-cn.com/problems/is-subsequence/
/// Returns `true` when `s` is a subsequence of `t`, i.e. every character of `s`
/// appears in `t` in the same relative order (not necessarily contiguously).
///
/// Greedy two-pointer scan: O(|s| + |t|) time, O(1) extra space.
/// Replaces the previous `todo!()` stub (its test was `#[ignore]`d for that reason).
pub fn is_subsequence(s: String, t: String) -> bool {
    // Shared cursor over `t`: `any` advances it past consumed characters, so
    // successive characters of `s` must be matched at strictly later positions.
    let mut t_iter = t.chars();
    s.chars().all(|needle| t_iter.any(|c| c == needle))
}
// Tags: binary_search dynamic_programming greedy
#[test]
// NOTE(review): ignored — presumably pending a real `is_subsequence`
// implementation (it currently panics via `todo!()`); re-enable once it runs.
#[ignore]
fn test1_392() {
    assert_eq!(
        is_subsequence("abc".to_string(), "ahbgdc".to_string()),
        true
    );
    assert_eq!(
        is_subsequence("axc".to_string(), "ahbgdc".to_string()),
        false
    );
}
| 22.444444 | 64 | 0.613861 |
906d2ff2cfc9b6903db17a3614ee78e19eb8337e
| 873 |
use super::XMLBuilder;
use super::XmlEvent;
impl XMLBuilder {
    // Opens the OOXML core-properties root element, declaring its namespaces:
    //   <cp:coreProperties xmlns:cp="..." xmlns:dc="..." xmlns:dcterms="..."
    //                      xmlns:dcmitype="..." xmlns:xsi="...">
    // (NOTE(review): a previous comment showed `<cp:properties xmlns:vt=...>`,
    // which does not match the arguments below.)
    // The `open!` macro (defined alongside XMLBuilder) presumably generates a
    // method taking one value per listed attribute name — confirm in its definition.
    open!(
        open_core_properties,
        "cp:coreProperties",
        "xmlns:cp",
        "xmlns:dc",
        "xmlns:dcterms",
        "xmlns:dcmitype",
        "xmlns:xsi"
    );

    // Closed elements with text content, e.g. <dc:creator>…</dc:creator>.
    // `dcterms:created` / `dcterms:modified` additionally take an `xsi:type` attribute.
    closed_with_child!(dcterms_created, "dcterms:created", "xsi:type");
    closed_with_child!(dc_creator, "dc:creator");
    closed_with_child!(dc_description, "dc:description");
    closed_with_child!(dc_language, "dc:language");
    closed_with_child!(cp_last_modified_by, "cp:lastModifiedBy");
    closed_with_child!(dcterms_modified, "dcterms:modified", "xsi:type");
    closed_with_child!(cp_revision, "cp:revision");
    closed_with_child!(dc_subject, "dc:subject");
    closed_with_child!(dc_title, "dc:title");
}
| 34.92 | 99 | 0.678121 |
1c22ad1198a4f48a0ea3cf0ae9de84b60420a0c0
| 607 |
/// A selection request: a `SelectionKind` applied `count` times.
#[derive(Clone, Debug, PartialEq)]
pub(super) struct Selection {
    /// How many times the selection kind is repeated.
    pub(super) count: usize,
    /// Which kind of selection to perform.
    pub(super) kind: SelectionKind,
}

/// The available selection kinds.
/// NOTE(review): the names mirror vi-style motions (word, line, remainder of
/// line, directional moves) — confirm exact semantics at the usage sites.
#[derive(Clone, Debug, PartialEq)]
pub(super) enum SelectionKind {
    Left,
    Down,
    Up,
    Right,
    WordEnd,
    ForwardWord,
    BackWord,
    Word,
    Line,
    LineRemain,
}
impl SelectionKind {
    /// A `Selection` that applies this kind exactly once.
    pub(super) fn once(self) -> Selection {
        self.nth(1)
    }

    /// A `Selection` that applies this kind `n` times.
    pub(super) fn nth(self, n: usize) -> Selection {
        Selection { count: n, kind: self }
    }
}
| 16.861111 | 52 | 0.530478 |
b95e15d20f4ff939b707581662b0d755f0288f81
| 212,399 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Shared state behind every `Client` clone: the HTTP client plus the service
/// configuration. Held behind an `Arc` by `Client`.
#[derive(std::fmt::Debug)]
pub(crate) struct Handle<C = aws_hyper::DynConnector> {
    client: aws_hyper::Client<C>,
    conf: crate::Config,
}
/// Service client, generic over the HTTP connector `C`.
/// Cloning is cheap: clones share the same `Handle` through the `Arc`.
#[derive(Clone, std::fmt::Debug)]
pub struct Client<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<Handle<C>>,
}
impl<C> Client<C> {
    /// Builds a client from an explicit configuration and connector.
    pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
        let client = aws_hyper::Client::new(conn);
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }

    /// Returns the configuration this client was built with.
    pub fn conf(&self) -> &crate::Config {
        &self.handle.conf
    }
}
impl Client {
    /// Builds a client with configuration from the default builder.
    /// Available only when a TLS feature supplies the default HTTPS connector.
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn from_env() -> Self {
        Self::from_conf(crate::Config::builder().build())
    }

    /// Builds a client from the given configuration using the default HTTPS connector.
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn from_conf(conf: crate::Config) -> Self {
        let client = aws_hyper::Client::https();
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }
}
// One zero-argument accessor per service operation. Each returns a fluent
// builder (see `fluent_builders`) that is configured via its setters and
// executed with `.send().await`; every builder shares this client's `Handle`.
impl<C> Client<C>
where
    C: aws_hyper::SmithyConnector,
{
    pub fn activate_key_signing_key(&self) -> fluent_builders::ActivateKeySigningKey<C> {
        fluent_builders::ActivateKeySigningKey::new(self.handle.clone())
    }
    pub fn associate_vpc_with_hosted_zone(&self) -> fluent_builders::AssociateVPCWithHostedZone<C> {
        fluent_builders::AssociateVPCWithHostedZone::new(self.handle.clone())
    }
    pub fn change_resource_record_sets(&self) -> fluent_builders::ChangeResourceRecordSets<C> {
        fluent_builders::ChangeResourceRecordSets::new(self.handle.clone())
    }
    pub fn change_tags_for_resource(&self) -> fluent_builders::ChangeTagsForResource<C> {
        fluent_builders::ChangeTagsForResource::new(self.handle.clone())
    }
    pub fn create_health_check(&self) -> fluent_builders::CreateHealthCheck<C> {
        fluent_builders::CreateHealthCheck::new(self.handle.clone())
    }
    pub fn create_hosted_zone(&self) -> fluent_builders::CreateHostedZone<C> {
        fluent_builders::CreateHostedZone::new(self.handle.clone())
    }
    pub fn create_key_signing_key(&self) -> fluent_builders::CreateKeySigningKey<C> {
        fluent_builders::CreateKeySigningKey::new(self.handle.clone())
    }
    pub fn create_query_logging_config(&self) -> fluent_builders::CreateQueryLoggingConfig<C> {
        fluent_builders::CreateQueryLoggingConfig::new(self.handle.clone())
    }
    pub fn create_reusable_delegation_set(
        &self,
    ) -> fluent_builders::CreateReusableDelegationSet<C> {
        fluent_builders::CreateReusableDelegationSet::new(self.handle.clone())
    }
    pub fn create_traffic_policy(&self) -> fluent_builders::CreateTrafficPolicy<C> {
        fluent_builders::CreateTrafficPolicy::new(self.handle.clone())
    }
    pub fn create_traffic_policy_instance(
        &self,
    ) -> fluent_builders::CreateTrafficPolicyInstance<C> {
        fluent_builders::CreateTrafficPolicyInstance::new(self.handle.clone())
    }
    pub fn create_traffic_policy_version(&self) -> fluent_builders::CreateTrafficPolicyVersion<C> {
        fluent_builders::CreateTrafficPolicyVersion::new(self.handle.clone())
    }
    pub fn create_vpc_association_authorization(
        &self,
    ) -> fluent_builders::CreateVPCAssociationAuthorization<C> {
        fluent_builders::CreateVPCAssociationAuthorization::new(self.handle.clone())
    }
    pub fn deactivate_key_signing_key(&self) -> fluent_builders::DeactivateKeySigningKey<C> {
        fluent_builders::DeactivateKeySigningKey::new(self.handle.clone())
    }
    pub fn delete_health_check(&self) -> fluent_builders::DeleteHealthCheck<C> {
        fluent_builders::DeleteHealthCheck::new(self.handle.clone())
    }
    pub fn delete_hosted_zone(&self) -> fluent_builders::DeleteHostedZone<C> {
        fluent_builders::DeleteHostedZone::new(self.handle.clone())
    }
    pub fn delete_key_signing_key(&self) -> fluent_builders::DeleteKeySigningKey<C> {
        fluent_builders::DeleteKeySigningKey::new(self.handle.clone())
    }
    pub fn delete_query_logging_config(&self) -> fluent_builders::DeleteQueryLoggingConfig<C> {
        fluent_builders::DeleteQueryLoggingConfig::new(self.handle.clone())
    }
    pub fn delete_reusable_delegation_set(
        &self,
    ) -> fluent_builders::DeleteReusableDelegationSet<C> {
        fluent_builders::DeleteReusableDelegationSet::new(self.handle.clone())
    }
    pub fn delete_traffic_policy(&self) -> fluent_builders::DeleteTrafficPolicy<C> {
        fluent_builders::DeleteTrafficPolicy::new(self.handle.clone())
    }
    pub fn delete_traffic_policy_instance(
        &self,
    ) -> fluent_builders::DeleteTrafficPolicyInstance<C> {
        fluent_builders::DeleteTrafficPolicyInstance::new(self.handle.clone())
    }
    pub fn delete_vpc_association_authorization(
        &self,
    ) -> fluent_builders::DeleteVPCAssociationAuthorization<C> {
        fluent_builders::DeleteVPCAssociationAuthorization::new(self.handle.clone())
    }
    pub fn disable_hosted_zone_dnssec(&self) -> fluent_builders::DisableHostedZoneDNSSEC<C> {
        fluent_builders::DisableHostedZoneDNSSEC::new(self.handle.clone())
    }
    pub fn disassociate_vpc_from_hosted_zone(
        &self,
    ) -> fluent_builders::DisassociateVPCFromHostedZone<C> {
        fluent_builders::DisassociateVPCFromHostedZone::new(self.handle.clone())
    }
    pub fn enable_hosted_zone_dnssec(&self) -> fluent_builders::EnableHostedZoneDNSSEC<C> {
        fluent_builders::EnableHostedZoneDNSSEC::new(self.handle.clone())
    }
    pub fn get_account_limit(&self) -> fluent_builders::GetAccountLimit<C> {
        fluent_builders::GetAccountLimit::new(self.handle.clone())
    }
    pub fn get_change(&self) -> fluent_builders::GetChange<C> {
        fluent_builders::GetChange::new(self.handle.clone())
    }
    pub fn get_checker_ip_ranges(&self) -> fluent_builders::GetCheckerIpRanges<C> {
        fluent_builders::GetCheckerIpRanges::new(self.handle.clone())
    }
    pub fn get_dnssec(&self) -> fluent_builders::GetDNSSEC<C> {
        fluent_builders::GetDNSSEC::new(self.handle.clone())
    }
    pub fn get_geo_location(&self) -> fluent_builders::GetGeoLocation<C> {
        fluent_builders::GetGeoLocation::new(self.handle.clone())
    }
    pub fn get_health_check(&self) -> fluent_builders::GetHealthCheck<C> {
        fluent_builders::GetHealthCheck::new(self.handle.clone())
    }
    pub fn get_health_check_count(&self) -> fluent_builders::GetHealthCheckCount<C> {
        fluent_builders::GetHealthCheckCount::new(self.handle.clone())
    }
    pub fn get_health_check_last_failure_reason(
        &self,
    ) -> fluent_builders::GetHealthCheckLastFailureReason<C> {
        fluent_builders::GetHealthCheckLastFailureReason::new(self.handle.clone())
    }
    pub fn get_health_check_status(&self) -> fluent_builders::GetHealthCheckStatus<C> {
        fluent_builders::GetHealthCheckStatus::new(self.handle.clone())
    }
    pub fn get_hosted_zone(&self) -> fluent_builders::GetHostedZone<C> {
        fluent_builders::GetHostedZone::new(self.handle.clone())
    }
    pub fn get_hosted_zone_count(&self) -> fluent_builders::GetHostedZoneCount<C> {
        fluent_builders::GetHostedZoneCount::new(self.handle.clone())
    }
    pub fn get_hosted_zone_limit(&self) -> fluent_builders::GetHostedZoneLimit<C> {
        fluent_builders::GetHostedZoneLimit::new(self.handle.clone())
    }
    pub fn get_query_logging_config(&self) -> fluent_builders::GetQueryLoggingConfig<C> {
        fluent_builders::GetQueryLoggingConfig::new(self.handle.clone())
    }
    pub fn get_reusable_delegation_set(&self) -> fluent_builders::GetReusableDelegationSet<C> {
        fluent_builders::GetReusableDelegationSet::new(self.handle.clone())
    }
    pub fn get_reusable_delegation_set_limit(
        &self,
    ) -> fluent_builders::GetReusableDelegationSetLimit<C> {
        fluent_builders::GetReusableDelegationSetLimit::new(self.handle.clone())
    }
    pub fn get_traffic_policy(&self) -> fluent_builders::GetTrafficPolicy<C> {
        fluent_builders::GetTrafficPolicy::new(self.handle.clone())
    }
    pub fn get_traffic_policy_instance(&self) -> fluent_builders::GetTrafficPolicyInstance<C> {
        fluent_builders::GetTrafficPolicyInstance::new(self.handle.clone())
    }
    pub fn get_traffic_policy_instance_count(
        &self,
    ) -> fluent_builders::GetTrafficPolicyInstanceCount<C> {
        fluent_builders::GetTrafficPolicyInstanceCount::new(self.handle.clone())
    }
    pub fn list_geo_locations(&self) -> fluent_builders::ListGeoLocations<C> {
        fluent_builders::ListGeoLocations::new(self.handle.clone())
    }
    pub fn list_health_checks(&self) -> fluent_builders::ListHealthChecks<C> {
        fluent_builders::ListHealthChecks::new(self.handle.clone())
    }
    pub fn list_hosted_zones(&self) -> fluent_builders::ListHostedZones<C> {
        fluent_builders::ListHostedZones::new(self.handle.clone())
    }
    pub fn list_hosted_zones_by_name(&self) -> fluent_builders::ListHostedZonesByName<C> {
        fluent_builders::ListHostedZonesByName::new(self.handle.clone())
    }
    pub fn list_hosted_zones_by_vpc(&self) -> fluent_builders::ListHostedZonesByVPC<C> {
        fluent_builders::ListHostedZonesByVPC::new(self.handle.clone())
    }
    pub fn list_query_logging_configs(&self) -> fluent_builders::ListQueryLoggingConfigs<C> {
        fluent_builders::ListQueryLoggingConfigs::new(self.handle.clone())
    }
    pub fn list_resource_record_sets(&self) -> fluent_builders::ListResourceRecordSets<C> {
        fluent_builders::ListResourceRecordSets::new(self.handle.clone())
    }
    pub fn list_reusable_delegation_sets(&self) -> fluent_builders::ListReusableDelegationSets<C> {
        fluent_builders::ListReusableDelegationSets::new(self.handle.clone())
    }
    pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource<C> {
        fluent_builders::ListTagsForResource::new(self.handle.clone())
    }
    pub fn list_tags_for_resources(&self) -> fluent_builders::ListTagsForResources<C> {
        fluent_builders::ListTagsForResources::new(self.handle.clone())
    }
    pub fn list_traffic_policies(&self) -> fluent_builders::ListTrafficPolicies<C> {
        fluent_builders::ListTrafficPolicies::new(self.handle.clone())
    }
    pub fn list_traffic_policy_instances(&self) -> fluent_builders::ListTrafficPolicyInstances<C> {
        fluent_builders::ListTrafficPolicyInstances::new(self.handle.clone())
    }
    pub fn list_traffic_policy_instances_by_hosted_zone(
        &self,
    ) -> fluent_builders::ListTrafficPolicyInstancesByHostedZone<C> {
        fluent_builders::ListTrafficPolicyInstancesByHostedZone::new(self.handle.clone())
    }
    pub fn list_traffic_policy_instances_by_policy(
        &self,
    ) -> fluent_builders::ListTrafficPolicyInstancesByPolicy<C> {
        fluent_builders::ListTrafficPolicyInstancesByPolicy::new(self.handle.clone())
    }
    pub fn list_traffic_policy_versions(&self) -> fluent_builders::ListTrafficPolicyVersions<C> {
        fluent_builders::ListTrafficPolicyVersions::new(self.handle.clone())
    }
    pub fn list_vpc_association_authorizations(
        &self,
    ) -> fluent_builders::ListVPCAssociationAuthorizations<C> {
        fluent_builders::ListVPCAssociationAuthorizations::new(self.handle.clone())
    }
    pub fn test_dns_answer(&self) -> fluent_builders::TestDNSAnswer<C> {
        fluent_builders::TestDNSAnswer::new(self.handle.clone())
    }
    pub fn update_health_check(&self) -> fluent_builders::UpdateHealthCheck<C> {
        fluent_builders::UpdateHealthCheck::new(self.handle.clone())
    }
    pub fn update_hosted_zone_comment(&self) -> fluent_builders::UpdateHostedZoneComment<C> {
        fluent_builders::UpdateHostedZoneComment::new(self.handle.clone())
    }
    pub fn update_traffic_policy_comment(&self) -> fluent_builders::UpdateTrafficPolicyComment<C> {
        fluent_builders::UpdateTrafficPolicyComment::new(self.handle.clone())
    }
    pub fn update_traffic_policy_instance(
        &self,
    ) -> fluent_builders::UpdateTrafficPolicyInstance<C> {
        fluent_builders::UpdateTrafficPolicyInstance::new(self.handle.clone())
    }
}
pub mod fluent_builders {
    /// Fluent builder for the `ActivateKeySigningKey` operation:
    /// accumulates inputs via the setters below, then dispatches with `send`.
    #[derive(std::fmt::Debug)]
    pub struct ActivateKeySigningKey<C = aws_hyper::DynConnector> {
        handle: std::sync::Arc<super::Handle<C>>,
        inner: crate::input::activate_key_signing_key_input::Builder,
    }
    impl<C> ActivateKeySigningKey<C> {
        /// Creates an empty builder sharing the client's handle
        /// (called by `Client::activate_key_signing_key`).
        pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }
        /// Builds the request from the accumulated inputs, constructs the
        /// operation with the client's config, and dispatches it over HTTP.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::ActivateKeySigningKeyOutput,
            smithy_http::result::SdkError<crate::error::ActivateKeySigningKeyError>,
        >
        where
            C: aws_hyper::SmithyConnector,
        {
            let input = self
                .inner
                .build()
                .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
            let op = input
                .make_operation(&self.handle.conf)
                .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
            self.handle.client.call(op).await
        }
        /// <p>A unique string used to identify a hosted zone.</p>
        pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.hosted_zone_id(input);
            self
        }
        /// Explicit `Option` form of `hosted_zone_id` (pass `None` to clear it).
        pub fn set_hosted_zone_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_hosted_zone_id(input);
            self
        }
        /// <p>A string used to identify a key-signing key (KSK). <code>Name</code> can include numbers, letters, and underscores (_). <code>Name</code> must be unique for each key-signing key in the same
        /// hosted zone.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.name(input);
            self
        }
        /// Explicit `Option` form of `name` (pass `None` to clear it).
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_name(input);
            self
        }
    }
    /// Fluent builder for the `AssociateVPCWithHostedZone` operation:
    /// accumulates inputs via the setters below, then dispatches with `send`.
    #[derive(std::fmt::Debug)]
    pub struct AssociateVPCWithHostedZone<C = aws_hyper::DynConnector> {
        handle: std::sync::Arc<super::Handle<C>>,
        inner: crate::input::associate_vpc_with_hosted_zone_input::Builder,
    }
    impl<C> AssociateVPCWithHostedZone<C> {
        /// Creates an empty builder sharing the client's handle.
        pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
            Self {
                handle,
                inner: Default::default(),
            }
        }
        /// Builds the request from the accumulated inputs, constructs the
        /// operation with the client's config, and dispatches it over HTTP.
        pub async fn send(
            self,
        ) -> std::result::Result<
            crate::output::AssociateVpcWithHostedZoneOutput,
            smithy_http::result::SdkError<crate::error::AssociateVPCWithHostedZoneError>,
        >
        where
            C: aws_hyper::SmithyConnector,
        {
            let input = self
                .inner
                .build()
                .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
            let op = input
                .make_operation(&self.handle.conf)
                .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
            self.handle.client.call(op).await
        }
        /// <p>The ID of the private hosted zone that you want to associate an Amazon VPC with.</p>
        /// <p>Note that you can't associate a VPC with a hosted zone that doesn't have an existing VPC association.</p>
        pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.hosted_zone_id(input);
            self
        }
        /// Explicit `Option` form of `hosted_zone_id` (pass `None` to clear it).
        pub fn set_hosted_zone_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.inner = self.inner.set_hosted_zone_id(input);
            self
        }
        /// <p>A complex type that contains information about the VPC that you want to associate with a private hosted zone.</p>
        pub fn vpc(mut self, input: crate::model::Vpc) -> Self {
            self.inner = self.inner.vpc(input);
            self
        }
        /// Explicit `Option` form of `vpc` (pass `None` to clear it).
        pub fn set_vpc(mut self, input: std::option::Option<crate::model::Vpc>) -> Self {
            self.inner = self.inner.set_vpc(input);
            self
        }
        /// <p>
        /// <i>Optional:</i> A comment about the association request.</p>
        pub fn comment(mut self, input: impl Into<std::string::String>) -> Self {
            self.inner = self.inner.comment(input);
            self
        }
        /// Explicit `Option` form of `comment` (pass `None` to clear it).
        pub fn set_comment(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.inner = self.inner.set_comment(input);
            self
        }
    }
/// Fluent builder for the Route 53 `ChangeResourceRecordSets` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct ChangeResourceRecordSets<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::change_resource_record_sets_input::Builder,
}
impl<C> ChangeResourceRecordSets<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ChangeResourceRecordSetsOutput,
        smithy_http::result::SdkError<crate::error::ChangeResourceRecordSetsError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the hosted zone that contains the resource record sets that you want to change.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::hosted_zone_id`]; forwarded to the input builder as-is.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
    /// <p>A complex type that contains an optional comment and the <code>Changes</code> element.</p>
    pub fn change_batch(mut self, input: crate::model::ChangeBatch) -> Self {
        self.inner = self.inner.change_batch(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::change_batch`]; forwarded to the input builder as-is.
    pub fn set_change_batch(
        mut self,
        input: std::option::Option<crate::model::ChangeBatch>,
    ) -> Self {
        self.inner = self.inner.set_change_batch(input);
        self
    }
}
/// Fluent builder for the Route 53 `ChangeTagsForResource` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct ChangeTagsForResource<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::change_tags_for_resource_input::Builder,
}
impl<C> ChangeTagsForResource<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ChangeTagsForResourceOutput,
        smithy_http::result::SdkError<crate::error::ChangeTagsForResourceError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The type of the resource.</p>
    /// <ul>
    /// <li>
    /// <p>The resource type for health checks is <code>healthcheck</code>.</p>
    /// </li>
    /// <li>
    /// <p>The resource type for hosted zones is <code>hostedzone</code>.</p>
    /// </li>
    /// </ul>
    pub fn resource_type(mut self, input: crate::model::TagResourceType) -> Self {
        self.inner = self.inner.resource_type(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::resource_type`]; forwarded to the input builder as-is.
    pub fn set_resource_type(
        mut self,
        input: std::option::Option<crate::model::TagResourceType>,
    ) -> Self {
        self.inner = self.inner.set_resource_type(input);
        self
    }
    /// <p>The ID of the resource for which you want to add, change, or delete tags.</p>
    pub fn resource_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.resource_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::resource_id`]; forwarded to the input builder as-is.
    pub fn set_resource_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_resource_id(input);
        self
    }
    /// <p>A complex type that contains a list of the tags that you want to add to the specified health check or hosted zone and/or the tags
    /// that you want to edit <code>Value</code> for.</p>
    /// <p>You can add a maximum of 10 tags to a health check or a hosted zone.</p>
    // NOTE(review): appends one tag per call; presumably accumulates into a list
    // on the inner builder — confirm against the generated input builder.
    pub fn add_tags(mut self, inp: impl Into<crate::model::Tag>) -> Self {
        self.inner = self.inner.add_tags(inp);
        self
    }
    /// Replaces the whole `AddTags` list at once; `Option`-taking counterpart of [`Self::add_tags`].
    pub fn set_add_tags(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    ) -> Self {
        self.inner = self.inner.set_add_tags(input);
        self
    }
    /// <p>A complex type that contains a list of the tags that you want to delete from the specified health check or hosted zone.
    /// You can specify up to 10 keys.</p>
    // NOTE(review): appends one key per call; presumably accumulates into a list
    // on the inner builder — confirm against the generated input builder.
    pub fn remove_tag_keys(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.remove_tag_keys(inp);
        self
    }
    /// Replaces the whole `RemoveTagKeys` list at once; `Option`-taking counterpart of [`Self::remove_tag_keys`].
    pub fn set_remove_tag_keys(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_remove_tag_keys(input);
        self
    }
}
/// Fluent builder for the Route 53 `CreateHealthCheck` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct CreateHealthCheck<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::create_health_check_input::Builder,
}
impl<C> CreateHealthCheck<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateHealthCheckOutput,
        smithy_http::result::SdkError<crate::error::CreateHealthCheckError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>A unique string that identifies the request and that allows you to retry a failed <code>CreateHealthCheck</code> request
    /// without the risk of creating two identical health checks:</p>
    /// <ul>
    /// <li>
    /// <p>If you send a <code>CreateHealthCheck</code> request with the same <code>CallerReference</code> and settings
    /// as a previous request, and if the health check doesn't exist, Amazon Route 53 creates the health check. If the health check does exist,
    /// Route 53 returns the settings for the existing health check.</p>
    /// </li>
    /// <li>
    /// <p>If you send a <code>CreateHealthCheck</code> request with the same <code>CallerReference</code> as a deleted health check,
    /// regardless of the settings, Route 53 returns a <code>HealthCheckAlreadyExists</code> error.</p>
    /// </li>
    /// <li>
    /// <p>If you send a <code>CreateHealthCheck</code> request with the same <code>CallerReference</code> as an existing health check
    /// but with different settings, Route 53 returns a <code>HealthCheckAlreadyExists</code> error.</p>
    /// </li>
    /// <li>
    /// <p>If you send a <code>CreateHealthCheck</code> request with a unique <code>CallerReference</code> but settings identical to
    /// an existing health check, Route 53 creates the health check.</p>
    /// </li>
    /// </ul>
    pub fn caller_reference(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.caller_reference(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::caller_reference`]; forwarded to the input builder as-is.
    pub fn set_caller_reference(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_caller_reference(input);
        self
    }
    /// <p>A complex type that contains settings for a new health check.</p>
    pub fn health_check_config(mut self, input: crate::model::HealthCheckConfig) -> Self {
        self.inner = self.inner.health_check_config(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::health_check_config`]; forwarded to the input builder as-is.
    pub fn set_health_check_config(
        mut self,
        input: std::option::Option<crate::model::HealthCheckConfig>,
    ) -> Self {
        self.inner = self.inner.set_health_check_config(input);
        self
    }
}
/// Fluent builder for the Route 53 `CreateHostedZone` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct CreateHostedZone<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::create_hosted_zone_input::Builder,
}
impl<C> CreateHostedZone<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateHostedZoneOutput,
        smithy_http::result::SdkError<crate::error::CreateHostedZoneError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the domain. Specify a fully qualified domain name, for example, <i>www.example.com</i>.
    /// The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Route 53 treats
    /// <i>www.example.com</i> (without a trailing dot) and <i>www.example.com.</i> (with a trailing dot) as identical.</p>
    /// <p>If you're creating a public hosted zone, this is the name you have registered with your DNS registrar. If your domain name
    /// is registered with a registrar other than Route 53, change the name servers for your domain to the set of <code>NameServers</code> that
    /// <code>CreateHostedZone</code> returns in <code>DelegationSet</code>.</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::name`]; forwarded to the input builder as-is.
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>(Private hosted zones only) A complex type that contains information about the Amazon VPC that you're associating with this hosted zone.</p>
    /// <p>You can specify only one Amazon VPC when you create a private hosted zone. To associate additional Amazon VPCs with the hosted zone,
    /// use <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_AssociateVPCWithHostedZone.html">AssociateVPCWithHostedZone</a>
    /// after you create a hosted zone.</p>
    pub fn vpc(mut self, input: crate::model::Vpc) -> Self {
        self.inner = self.inner.vpc(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::vpc`]; forwarded to the input builder as-is.
    pub fn set_vpc(mut self, input: std::option::Option<crate::model::Vpc>) -> Self {
        self.inner = self.inner.set_vpc(input);
        self
    }
    /// <p>A unique string that identifies the request and that allows failed <code>CreateHostedZone</code> requests to be retried without
    /// the risk of executing the operation twice. You must use a unique <code>CallerReference</code> string every time you submit a
    /// <code>CreateHostedZone</code> request. <code>CallerReference</code> can be any unique string, for example, a date/time stamp.</p>
    pub fn caller_reference(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.caller_reference(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::caller_reference`]; forwarded to the input builder as-is.
    pub fn set_caller_reference(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_caller_reference(input);
        self
    }
    /// <p>(Optional) A complex type that contains the following optional values:</p>
    /// <ul>
    /// <li>
    /// <p>For public and private hosted zones, an optional comment</p>
    /// </li>
    /// <li>
    /// <p>For private hosted zones, an optional <code>PrivateZone</code> element</p>
    /// </li>
    /// </ul>
    /// <p>If you don't specify a comment or the <code>PrivateZone</code> element, omit <code>HostedZoneConfig</code> and
    /// the other elements.</p>
    pub fn hosted_zone_config(mut self, input: crate::model::HostedZoneConfig) -> Self {
        self.inner = self.inner.hosted_zone_config(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::hosted_zone_config`]; forwarded to the input builder as-is.
    pub fn set_hosted_zone_config(
        mut self,
        input: std::option::Option<crate::model::HostedZoneConfig>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_config(input);
        self
    }
    /// <p>If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set
    /// when you created it. For more information about reusable delegation sets, see
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html">CreateReusableDelegationSet</a>.</p>
    pub fn delegation_set_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.delegation_set_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::delegation_set_id`]; forwarded to the input builder as-is.
    pub fn set_delegation_set_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_delegation_set_id(input);
        self
    }
}
/// Fluent builder for the Route 53 `CreateKeySigningKey` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct CreateKeySigningKey<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::create_key_signing_key_input::Builder,
}
impl<C> CreateKeySigningKey<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateKeySigningKeyOutput,
        smithy_http::result::SdkError<crate::error::CreateKeySigningKeyError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>A unique string that identifies the request.</p>
    pub fn caller_reference(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.caller_reference(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::caller_reference`]; forwarded to the input builder as-is.
    pub fn set_caller_reference(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_caller_reference(input);
        self
    }
    /// <p>The unique string (ID) used to identify a hosted zone.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::hosted_zone_id`]; forwarded to the input builder as-is.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
    /// <p>The Amazon resource name (ARN) for a customer managed customer master key (CMK) in Key Management Service (KMS).
    /// The <code>KeyManagementServiceArn</code> must be unique for each key-signing key (KSK) in a single hosted zone.
    /// To see an example of <code>KeyManagementServiceArn</code> that grants the correct permissions for DNSSEC,
    /// scroll down to <b>Example</b>. </p>
    /// <p>You must configure the customer managed CMK as follows:</p>
    /// <dl>
    /// <dt>Status</dt>
    /// <dd>
    /// <p>Enabled</p>
    /// </dd>
    /// <dt>Key spec</dt>
    /// <dd>
    /// <p>ECC_NIST_P256</p>
    /// </dd>
    /// <dt>Key usage</dt>
    /// <dd>
    /// <p>Sign and verify</p>
    /// </dd>
    /// <dt>Key policy</dt>
    /// <dd>
    /// <p>The key policy must give permission for the following actions:</p>
    /// <ul>
    /// <li>
    /// <p>DescribeKey</p>
    /// </li>
    /// <li>
    /// <p>GetPublicKey</p>
    /// </li>
    /// <li>
    /// <p>Sign</p>
    /// </li>
    /// </ul>
    /// <p>The key policy must also include the Amazon Route 53 service in the principal for your account.
    /// Specify the following:</p>
    /// <ul>
    /// <li>
    /// <p>
    /// <code>"Service": "dnssec-route53.amazonaws.com"</code>
    /// </p>
    /// </li>
    /// </ul>
    /// </dd>
    /// </dl>
    /// <p>For more information about working with a customer managed CMK in KMS, see
    /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html">Key Management Service concepts</a>.</p>
    pub fn key_management_service_arn(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.key_management_service_arn(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::key_management_service_arn`]; forwarded to the input builder as-is.
    pub fn set_key_management_service_arn(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_key_management_service_arn(input);
        self
    }
    /// <p>A string used to identify a key-signing key (KSK). <code>Name</code> can include numbers, letters, and underscores (_). <code>Name</code> must be unique for each key-signing key in the same
    /// hosted zone.</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::name`]; forwarded to the input builder as-is.
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>A string specifying the initial status of the key-signing key (KSK). You can set the value to <code>ACTIVE</code> or <code>INACTIVE</code>.</p>
    pub fn status(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.status(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::status`]; forwarded to the input builder as-is.
    pub fn set_status(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_status(input);
        self
    }
}
/// Fluent builder for the Route 53 `CreateQueryLoggingConfig` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct CreateQueryLoggingConfig<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::create_query_logging_config_input::Builder,
}
impl<C> CreateQueryLoggingConfig<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateQueryLoggingConfigOutput,
        smithy_http::result::SdkError<crate::error::CreateQueryLoggingConfigError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the hosted zone that you want to log queries for. You can log queries only for public hosted zones.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::hosted_zone_id`]; forwarded to the input builder as-is.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
    /// <p>The Amazon Resource Name (ARN) for the log group that you want to Amazon Route 53 to send query logs to. This is the format
    /// of the ARN:</p>
    /// <p>arn:aws:logs:<i>region</i>:<i>account-id</i>:log-group:<i>log_group_name</i>
    /// </p>
    /// <p>To get the ARN for a log group, you can use the CloudWatch console, the
    /// <a href="https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogGroups.html">DescribeLogGroups</a> API action,
    /// the <a href="https://docs.aws.amazon.com/cli/latest/reference/logs/describe-log-groups.html">describe-log-groups</a> command,
    /// or the applicable command in one of the Amazon Web Services SDKs.</p>
    pub fn cloud_watch_logs_log_group_arn(
        mut self,
        input: impl Into<std::string::String>,
    ) -> Self {
        self.inner = self.inner.cloud_watch_logs_log_group_arn(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::cloud_watch_logs_log_group_arn`]; forwarded to the input builder as-is.
    pub fn set_cloud_watch_logs_log_group_arn(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_cloud_watch_logs_log_group_arn(input);
        self
    }
}
/// Fluent builder for the Route 53 `CreateReusableDelegationSet` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct CreateReusableDelegationSet<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::create_reusable_delegation_set_input::Builder,
}
impl<C> CreateReusableDelegationSet<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateReusableDelegationSetOutput,
        smithy_http::result::SdkError<crate::error::CreateReusableDelegationSetError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>A unique string that identifies the request, and that allows you to retry failed
    /// <code>CreateReusableDelegationSet</code> requests without the risk of executing the
    /// operation twice. You must use a unique <code>CallerReference</code> string every time you
    /// submit a <code>CreateReusableDelegationSet</code> request. <code>CallerReference</code> can be
    /// any unique string, for example a date/time stamp.</p>
    pub fn caller_reference(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.caller_reference(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::caller_reference`]; forwarded to the input builder as-is.
    pub fn set_caller_reference(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_caller_reference(input);
        self
    }
    /// <p>If you want to mark the delegation set for an existing hosted zone as reusable, the ID
    /// for that hosted zone.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::hosted_zone_id`]; forwarded to the input builder as-is.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
}
/// Fluent builder for the Route 53 `CreateTrafficPolicy` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct CreateTrafficPolicy<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::create_traffic_policy_input::Builder,
}
impl<C> CreateTrafficPolicy<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateTrafficPolicyOutput,
        smithy_http::result::SdkError<crate::error::CreateTrafficPolicyError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the traffic policy.</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::name`]; forwarded to the input builder as-is.
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>The definition of this traffic policy in JSON format. For more information, see
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/api-policies-traffic-policy-document-format.html">Traffic Policy Document Format</a>.</p>
    pub fn document(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.document(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::document`]; forwarded to the input builder as-is.
    pub fn set_document(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_document(input);
        self
    }
    /// <p>(Optional) Any comments that you want to include about the traffic policy.</p>
    pub fn comment(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.comment(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::comment`]; forwarded to the input builder as-is.
    pub fn set_comment(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_comment(input);
        self
    }
}
/// Fluent builder for the Route 53 `CreateTrafficPolicyInstance` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct CreateTrafficPolicyInstance<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::create_traffic_policy_instance_input::Builder,
}
impl<C> CreateTrafficPolicyInstance<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateTrafficPolicyInstanceOutput,
        smithy_http::result::SdkError<crate::error::CreateTrafficPolicyInstanceError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the hosted zone that you want Amazon Route 53 to create resource record sets in by using the configuration in a traffic policy.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::hosted_zone_id`]; forwarded to the input builder as-is.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
    /// <p>The domain name (such as example.com) or subdomain name (such as www.example.com) for which Amazon Route 53 responds to DNS queries by using
    /// the resource record sets that Route 53 creates for this traffic policy instance.</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::name`]; forwarded to the input builder as-is.
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p>(Optional) The TTL that you want Amazon Route 53 to assign to all of the resource record sets that it creates in the specified hosted zone.</p>
    pub fn ttl(mut self, input: i64) -> Self {
        self.inner = self.inner.ttl(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::ttl`]; forwarded to the input builder as-is.
    pub fn set_ttl(mut self, input: std::option::Option<i64>) -> Self {
        self.inner = self.inner.set_ttl(input);
        self
    }
    /// <p>The ID of the traffic policy that you want to use to create resource record sets in the specified hosted zone.</p>
    pub fn traffic_policy_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.traffic_policy_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::traffic_policy_id`]; forwarded to the input builder as-is.
    pub fn set_traffic_policy_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_traffic_policy_id(input);
        self
    }
    /// <p>The version of the traffic policy that you want to use to create resource record sets in the specified hosted zone.</p>
    pub fn traffic_policy_version(mut self, input: i32) -> Self {
        self.inner = self.inner.traffic_policy_version(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::traffic_policy_version`]; forwarded to the input builder as-is.
    pub fn set_traffic_policy_version(mut self, input: std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_traffic_policy_version(input);
        self
    }
}
/// Fluent builder for the Route 53 `CreateTrafficPolicyVersion` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct CreateTrafficPolicyVersion<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::create_traffic_policy_version_input::Builder,
}
impl<C> CreateTrafficPolicyVersion<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateTrafficPolicyVersionOutput,
        smithy_http::result::SdkError<crate::error::CreateTrafficPolicyVersionError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the traffic policy for which you want to create a new version.</p>
    pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::id`]; forwarded to the input builder as-is.
    pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_id(input);
        self
    }
    /// <p>The definition of this version of the traffic policy, in JSON format. You specified the JSON in the <code>CreateTrafficPolicyVersion</code>
    /// request. For more information about the JSON format, see
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateTrafficPolicy.html">CreateTrafficPolicy</a>.</p>
    pub fn document(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.document(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::document`]; forwarded to the input builder as-is.
    pub fn set_document(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_document(input);
        self
    }
    /// <p>The comment that you specified in the <code>CreateTrafficPolicyVersion</code> request, if any.</p>
    pub fn comment(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.comment(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::comment`]; forwarded to the input builder as-is.
    pub fn set_comment(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_comment(input);
        self
    }
}
/// Fluent builder for the Route 53 `CreateVPCAssociationAuthorization` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct CreateVPCAssociationAuthorization<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::create_vpc_association_authorization_input::Builder,
}
impl<C> CreateVPCAssociationAuthorization<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::CreateVpcAssociationAuthorizationOutput,
        smithy_http::result::SdkError<crate::error::CreateVPCAssociationAuthorizationError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the private hosted zone that you want to authorize associating a VPC with.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::hosted_zone_id`]; forwarded to the input builder as-is.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
    /// <p>A complex type that contains the VPC ID and region for the VPC that you want to authorize associating
    /// with your hosted zone.</p>
    pub fn vpc(mut self, input: crate::model::Vpc) -> Self {
        self.inner = self.inner.vpc(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::vpc`]; forwarded to the input builder as-is.
    pub fn set_vpc(mut self, input: std::option::Option<crate::model::Vpc>) -> Self {
        self.inner = self.inner.set_vpc(input);
        self
    }
}
/// Fluent builder for the Route 53 `DeactivateKeySigningKey` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct DeactivateKeySigningKey<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::deactivate_key_signing_key_input::Builder,
}
impl<C> DeactivateKeySigningKey<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeactivateKeySigningKeyOutput,
        smithy_http::result::SdkError<crate::error::DeactivateKeySigningKeyError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>A unique string used to identify a hosted zone.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::hosted_zone_id`]; forwarded to the input builder as-is.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
    /// <p>A string used to identify a key-signing key (KSK).</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::name`]; forwarded to the input builder as-is.
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
}
/// Fluent builder for the Route 53 `DeleteHealthCheck` operation.
///
/// Accumulates the operation input through the setter methods, then
/// [`send`](Self::send) builds the request and dispatches it over the
/// configured connector.
#[derive(std::fmt::Debug)]
pub struct DeleteHealthCheck<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::delete_health_check_input::Builder,
}
impl<C> DeleteHealthCheck<C> {
    /// Creates the builder from the shared client handle (service config + HTTP client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the input, constructs the operation, and sends the request.
    ///
    /// Failures while building the input or the operation are surfaced as
    /// `SdkError::ConstructionFailure`; transport and service errors come
    /// back from the underlying client call.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteHealthCheckOutput,
        smithy_http::result::SdkError<crate::error::DeleteHealthCheckError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the health check that you want to delete.</p>
    pub fn health_check_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.health_check_id(input);
        self
    }
    /// `Option`-taking counterpart of [`Self::health_check_id`]; forwarded to the input builder as-is.
    pub fn set_health_check_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_health_check_id(input);
        self
    }
}
/// Fluent builder for the `DeleteHostedZone` operation: configure the
/// request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct DeleteHostedZone<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::delete_hosted_zone_input::Builder,
}
impl<C> DeleteHostedZone<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteHostedZoneOutput,
        smithy_http::result::SdkError<crate::error::DeleteHostedZoneError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the hosted zone you want to delete.</p>
    pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.id(input);
        self
    }
    /// `Option`-typed variant of [`Self::id`]; delegates to the inner input
    /// builder.
    pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_id(input);
        self
    }
}
/// Fluent builder for the `DeleteKeySigningKey` operation: configure the
/// request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct DeleteKeySigningKey<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::delete_key_signing_key_input::Builder,
}
impl<C> DeleteKeySigningKey<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteKeySigningKeyOutput,
        smithy_http::result::SdkError<crate::error::DeleteKeySigningKeyError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>A unique string used to identify a hosted zone.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::hosted_zone_id`]; delegates to the
    /// inner input builder.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
    /// <p>A string used to identify a key-signing key (KSK).</p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input);
        self
    }
    /// `Option`-typed variant of [`Self::name`]; delegates to the inner input
    /// builder.
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
}
/// Fluent builder for the `DeleteQueryLoggingConfig` operation: configure
/// the request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct DeleteQueryLoggingConfig<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::delete_query_logging_config_input::Builder,
}
impl<C> DeleteQueryLoggingConfig<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteQueryLoggingConfigOutput,
        smithy_http::result::SdkError<crate::error::DeleteQueryLoggingConfigError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the configuration that you want to delete. </p>
    pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.id(input);
        self
    }
    /// `Option`-typed variant of [`Self::id`]; delegates to the inner input
    /// builder.
    pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_id(input);
        self
    }
}
/// Fluent builder for the `DeleteReusableDelegationSet` operation: configure
/// the request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct DeleteReusableDelegationSet<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::delete_reusable_delegation_set_input::Builder,
}
impl<C> DeleteReusableDelegationSet<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteReusableDelegationSetOutput,
        smithy_http::result::SdkError<crate::error::DeleteReusableDelegationSetError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the reusable delegation set that you want to delete.</p>
    pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.id(input);
        self
    }
    /// `Option`-typed variant of [`Self::id`]; delegates to the inner input
    /// builder.
    pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_id(input);
        self
    }
}
/// Fluent builder for the `DeleteTrafficPolicy` operation: configure the
/// request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct DeleteTrafficPolicy<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::delete_traffic_policy_input::Builder,
}
impl<C> DeleteTrafficPolicy<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteTrafficPolicyOutput,
        smithy_http::result::SdkError<crate::error::DeleteTrafficPolicyError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the traffic policy that you want to delete.</p>
    pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.id(input);
        self
    }
    /// `Option`-typed variant of [`Self::id`]; delegates to the inner input
    /// builder.
    pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_id(input);
        self
    }
    /// <p>The version number of the traffic policy that you want to delete.</p>
    pub fn version(mut self, input: i32) -> Self {
        self.inner = self.inner.version(input);
        self
    }
    /// `Option`-typed variant of [`Self::version`]; delegates to the inner
    /// input builder.
    pub fn set_version(mut self, input: std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_version(input);
        self
    }
}
/// Fluent builder for the `DeleteTrafficPolicyInstance` operation: configure
/// the request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct DeleteTrafficPolicyInstance<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::delete_traffic_policy_instance_input::Builder,
}
impl<C> DeleteTrafficPolicyInstance<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteTrafficPolicyInstanceOutput,
        smithy_http::result::SdkError<crate::error::DeleteTrafficPolicyInstanceError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the traffic policy instance that you want to delete. </p>
    /// <important>
    /// <p>When you delete a traffic policy instance, Amazon Route 53 also deletes all of the resource record sets that were created when you created
    /// the traffic policy instance.</p>
    /// </important>
    pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.id(input);
        self
    }
    /// `Option`-typed variant of [`Self::id`]; delegates to the inner input
    /// builder.
    pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_id(input);
        self
    }
}
/// Fluent builder for the `DeleteVPCAssociationAuthorization` operation:
/// configure the request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct DeleteVPCAssociationAuthorization<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::delete_vpc_association_authorization_input::Builder,
}
impl<C> DeleteVPCAssociationAuthorization<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DeleteVpcAssociationAuthorizationOutput,
        smithy_http::result::SdkError<crate::error::DeleteVPCAssociationAuthorizationError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>When removing authorization to associate a VPC that was created by one Amazon Web Services account with a hosted zone
    /// that was created with a different Amazon Web Services account, the ID of the hosted zone.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::hosted_zone_id`]; delegates to the
    /// inner input builder.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
    /// <p>When removing authorization to associate a VPC that was created by one Amazon Web Services account with a hosted zone
    /// that was created with a different Amazon Web Services account, a complex type that includes the ID and region of the VPC.</p>
    pub fn vpc(mut self, input: crate::model::Vpc) -> Self {
        self.inner = self.inner.vpc(input);
        self
    }
    /// `Option`-typed variant of [`Self::vpc`]; delegates to the inner input
    /// builder.
    pub fn set_vpc(mut self, input: std::option::Option<crate::model::Vpc>) -> Self {
        self.inner = self.inner.set_vpc(input);
        self
    }
}
/// Fluent builder for the `DisableHostedZoneDNSSEC` operation: configure the
/// request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct DisableHostedZoneDNSSEC<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::disable_hosted_zone_dnssec_input::Builder,
}
impl<C> DisableHostedZoneDNSSEC<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DisableHostedZoneDnssecOutput,
        smithy_http::result::SdkError<crate::error::DisableHostedZoneDNSSECError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>A unique string used to identify a hosted zone.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::hosted_zone_id`]; delegates to the
    /// inner input builder.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
}
/// Fluent builder for the `DisassociateVPCFromHostedZone` operation:
/// configure the request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct DisassociateVPCFromHostedZone<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::disassociate_vpc_from_hosted_zone_input::Builder,
}
impl<C> DisassociateVPCFromHostedZone<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DisassociateVpcFromHostedZoneOutput,
        smithy_http::result::SdkError<crate::error::DisassociateVPCFromHostedZoneError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the private hosted zone that you want to disassociate a VPC from.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::hosted_zone_id`]; delegates to the
    /// inner input builder.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
    /// <p>A complex type that contains information about the VPC that you're disassociating
    /// from the specified hosted zone.</p>
    pub fn vpc(mut self, input: crate::model::Vpc) -> Self {
        self.inner = self.inner.vpc(input);
        self
    }
    /// `Option`-typed variant of [`Self::vpc`]; delegates to the inner input
    /// builder.
    pub fn set_vpc(mut self, input: std::option::Option<crate::model::Vpc>) -> Self {
        self.inner = self.inner.set_vpc(input);
        self
    }
    /// <p>
    /// <i>Optional:</i> A comment about the disassociation request.</p>
    pub fn comment(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.comment(input);
        self
    }
    /// `Option`-typed variant of [`Self::comment`]; delegates to the inner
    /// input builder.
    pub fn set_comment(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_comment(input);
        self
    }
}
/// Fluent builder for the `EnableHostedZoneDNSSEC` operation: configure the
/// request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct EnableHostedZoneDNSSEC<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::enable_hosted_zone_dnssec_input::Builder,
}
impl<C> EnableHostedZoneDNSSEC<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::EnableHostedZoneDnssecOutput,
        smithy_http::result::SdkError<crate::error::EnableHostedZoneDNSSECError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>A unique string used to identify a hosted zone.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::hosted_zone_id`]; delegates to the
    /// inner input builder.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
}
/// Fluent builder for the `GetAccountLimit` operation: configure the request
/// with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetAccountLimit<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_account_limit_input::Builder,
}
impl<C> GetAccountLimit<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetAccountLimitOutput,
        smithy_http::result::SdkError<crate::error::GetAccountLimitError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The limit that you want to get. Valid values include the following:</p>
    /// <ul>
    /// <li>
    /// <p>
    /// <b>MAX_HEALTH_CHECKS_BY_OWNER</b>: The maximum number of health checks that you can create
    /// using the current account.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>MAX_HOSTED_ZONES_BY_OWNER</b>: The maximum number of hosted zones that you can create
    /// using the current account.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>MAX_REUSABLE_DELEGATION_SETS_BY_OWNER</b>: The maximum number of reusable delegation sets
    /// that you can create using the current account.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>MAX_TRAFFIC_POLICIES_BY_OWNER</b>: The maximum number of traffic policies
    /// that you can create using the current account.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>MAX_TRAFFIC_POLICY_INSTANCES_BY_OWNER</b>: The maximum number of traffic policy instances
    /// that you can create using the current account. (Traffic policy instances are referred to as traffic flow policy records in the
    /// Amazon Route 53 console.)</p>
    /// </li>
    /// </ul>
    pub fn r#type(mut self, input: crate::model::AccountLimitType) -> Self {
        self.inner = self.inner.r#type(input);
        self
    }
    /// `Option`-typed variant of [`Self::type`]; delegates to the inner input
    /// builder.
    pub fn set_type(
        mut self,
        input: std::option::Option<crate::model::AccountLimitType>,
    ) -> Self {
        self.inner = self.inner.set_type(input);
        self
    }
}
/// Fluent builder for the `GetChange` operation: configure the request with
/// the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetChange<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_change_input::Builder,
}
impl<C> GetChange<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetChangeOutput,
        smithy_http::result::SdkError<crate::error::GetChangeError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the change batch request. The value that you specify here is the value that <code>ChangeResourceRecordSets</code>
    /// returned in the <code>Id</code> element when you submitted the request.</p>
    pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.id(input);
        self
    }
    /// `Option`-typed variant of [`Self::id`]; delegates to the inner input
    /// builder.
    pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_id(input);
        self
    }
}
/// Fluent builder for the `GetCheckerIpRanges` operation. The request takes
/// no parameters; dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetCheckerIpRanges<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_checker_ip_ranges_input::Builder,
}
impl<C> GetCheckerIpRanges<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetCheckerIpRangesOutput,
        smithy_http::result::SdkError<crate::error::GetCheckerIpRangesError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
}
/// Fluent builder for the `GetDNSSEC` operation: configure the request with
/// the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetDNSSEC<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_dnssec_input::Builder,
}
impl<C> GetDNSSEC<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetDnssecOutput,
        smithy_http::result::SdkError<crate::error::GetDNSSECError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>A unique string used to identify a hosted zone.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::hosted_zone_id`]; delegates to the
    /// inner input builder.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
}
/// Fluent builder for the `GetGeoLocation` operation: configure the request
/// with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetGeoLocation<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_geo_location_input::Builder,
}
impl<C> GetGeoLocation<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetGeoLocationOutput,
        smithy_http::result::SdkError<crate::error::GetGeoLocationError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>For geolocation resource record sets, a two-letter abbreviation that identifies a continent. Amazon Route 53 supports the following continent codes:</p>
    /// <ul>
    /// <li>
    /// <p>
    /// <b>AF</b>: Africa</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>AN</b>: Antarctica</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>AS</b>: Asia</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>EU</b>: Europe</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>OC</b>: Oceania</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>NA</b>: North America</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>SA</b>: South America</p>
    /// </li>
    /// </ul>
    pub fn continent_code(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.continent_code(input);
        self
    }
    /// `Option`-typed variant of [`Self::continent_code`]; delegates to the
    /// inner input builder.
    pub fn set_continent_code(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_continent_code(input);
        self
    }
    /// <p>Amazon Route 53 uses the two-letter country codes that are specified in
    /// <a href="https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2">ISO standard 3166-1 alpha-2</a>.</p>
    pub fn country_code(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.country_code(input);
        self
    }
    /// `Option`-typed variant of [`Self::country_code`]; delegates to the
    /// inner input builder.
    pub fn set_country_code(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_country_code(input);
        self
    }
    /// <p>The code for the subdivision, such as a particular state within the United States. For a list of US state abbreviations, see <a href="https://pe.usps.com/text/pub28/28apb.htm">Appendix B: Two–Letter State and Possession Abbreviations</a> on the United States Postal Service website. For a list of all supported subdivision codes, use the <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_ListGeoLocations.html">ListGeoLocations</a> API.</p>
    pub fn subdivision_code(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.subdivision_code(input);
        self
    }
    /// `Option`-typed variant of [`Self::subdivision_code`]; delegates to the
    /// inner input builder.
    pub fn set_subdivision_code(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_subdivision_code(input);
        self
    }
}
/// Fluent builder for the `GetHealthCheck` operation: configure the request
/// with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetHealthCheck<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_health_check_input::Builder,
}
impl<C> GetHealthCheck<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetHealthCheckOutput,
        smithy_http::result::SdkError<crate::error::GetHealthCheckError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The identifier that Amazon Route 53 assigned to the health check when you created it. When you add or update a resource record set,
    /// you use this value to specify which health check to use. The value can be up to 64 characters long.</p>
    pub fn health_check_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.health_check_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::health_check_id`]; delegates to the
    /// inner input builder.
    pub fn set_health_check_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_health_check_id(input);
        self
    }
}
/// Fluent builder for the `GetHealthCheckCount` operation. The request takes
/// no parameters; dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetHealthCheckCount<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_health_check_count_input::Builder,
}
impl<C> GetHealthCheckCount<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetHealthCheckCountOutput,
        smithy_http::result::SdkError<crate::error::GetHealthCheckCountError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
}
/// Fluent builder for the `GetHealthCheckLastFailureReason` operation:
/// configure the request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetHealthCheckLastFailureReason<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_health_check_last_failure_reason_input::Builder,
}
impl<C> GetHealthCheckLastFailureReason<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetHealthCheckLastFailureReasonOutput,
        smithy_http::result::SdkError<crate::error::GetHealthCheckLastFailureReasonError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID for the health check for which you want the last failure reason. When you created the health check,
    /// <code>CreateHealthCheck</code> returned the ID in the response, in the <code>HealthCheckId</code> element.</p>
    /// <note>
    /// <p>If you want to get the last failure reason for a calculated health check, you must use the Amazon Route 53 console or the
    /// CloudWatch console. You can't use <code>GetHealthCheckLastFailureReason</code> for a calculated health check.</p>
    /// </note>
    pub fn health_check_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.health_check_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::health_check_id`]; delegates to the
    /// inner input builder.
    pub fn set_health_check_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_health_check_id(input);
        self
    }
}
/// Fluent builder for the `GetHealthCheckStatus` operation: configure the
/// request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetHealthCheckStatus<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_health_check_status_input::Builder,
}
impl<C> GetHealthCheckStatus<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetHealthCheckStatusOutput,
        smithy_http::result::SdkError<crate::error::GetHealthCheckStatusError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID for the health check that you want the current status for. When you created the health check,
    /// <code>CreateHealthCheck</code> returned the ID in the response, in the <code>HealthCheckId</code> element.</p>
    /// <note>
    /// <p>If you want to check the status of a calculated health check, you must use the Amazon Route 53 console or the CloudWatch console.
    /// You can't use <code>GetHealthCheckStatus</code> to get the status of a calculated health check.</p>
    /// </note>
    pub fn health_check_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.health_check_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::health_check_id`]; delegates to the
    /// inner input builder.
    pub fn set_health_check_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_health_check_id(input);
        self
    }
}
/// Fluent builder for the `GetHostedZone` operation: configure the request
/// with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetHostedZone<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_hosted_zone_input::Builder,
}
impl<C> GetHostedZone<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetHostedZoneOutput,
        smithy_http::result::SdkError<crate::error::GetHostedZoneError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID of the hosted zone that you want to get information about.</p>
    pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.id(input);
        self
    }
    /// `Option`-typed variant of [`Self::id`]; delegates to the inner input
    /// builder.
    pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_id(input);
        self
    }
}
/// Fluent builder for the `GetHostedZoneCount` operation. The request takes
/// no parameters; dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetHostedZoneCount<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_hosted_zone_count_input::Builder,
}
impl<C> GetHostedZoneCount<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetHostedZoneCountOutput,
        smithy_http::result::SdkError<crate::error::GetHostedZoneCountError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
}
/// Fluent builder for the `GetHostedZoneLimit` operation: configure the
/// request with the setters below, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct GetHostedZoneLimit<C = aws_hyper::DynConnector> {
    handle: std::sync::Arc<super::Handle<C>>,
    inner: crate::input::get_hosted_zone_limit_input::Builder,
}
impl<C> GetHostedZoneLimit<C> {
    /// Creates the builder from a shared client handle with an empty input
    /// builder (invoked by the generated service client).
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the request input, converts it into an operation, and
    /// dispatches it via the shared client. Construction errors are surfaced
    /// as `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetHostedZoneLimitOutput,
        smithy_http::result::SdkError<crate::error::GetHostedZoneLimitError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The limit that you want to get. Valid values include the following:</p>
    /// <ul>
    /// <li>
    /// <p>
    /// <b>MAX_RRSETS_BY_ZONE</b>: The maximum number of records that you can create
    /// in the specified hosted zone.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>MAX_VPCS_ASSOCIATED_BY_ZONE</b>: The maximum number of Amazon VPCs that you can
    /// associate with the specified private hosted zone.</p>
    /// </li>
    /// </ul>
    pub fn r#type(mut self, input: crate::model::HostedZoneLimitType) -> Self {
        self.inner = self.inner.r#type(input);
        self
    }
    /// `Option`-typed variant of [`Self::type`]; delegates to the inner input
    /// builder.
    pub fn set_type(
        mut self,
        input: std::option::Option<crate::model::HostedZoneLimitType>,
    ) -> Self {
        self.inner = self.inner.set_type(input);
        self
    }
    /// <p>The ID of the hosted zone that you want to get a limit for.</p>
    pub fn hosted_zone_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.hosted_zone_id(input);
        self
    }
    /// `Option`-typed variant of [`Self::hosted_zone_id`]; delegates to the
    /// inner input builder.
    pub fn set_hosted_zone_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_hosted_zone_id(input);
        self
    }
}
/// Fluent request builder for the `GetQueryLoggingConfig` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct GetQueryLoggingConfig<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::get_query_logging_config_input::Builder,
}
impl<C> GetQueryLoggingConfig<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetQueryLoggingConfigOutput,
        smithy_http::result::SdkError<crate::error::GetQueryLoggingConfigError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The ID of the configuration for DNS query logging that you want to get information about.</p>
    pub fn id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.id(input),
            handle: self.handle,
        }
    }
    pub fn set_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_id(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `GetReusableDelegationSet` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct GetReusableDelegationSet<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::get_reusable_delegation_set_input::Builder,
}
impl<C> GetReusableDelegationSet<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetReusableDelegationSetOutput,
        smithy_http::result::SdkError<crate::error::GetReusableDelegationSetError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The ID of the reusable delegation set that you want to get a list of name servers for.</p>
    pub fn id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.id(input),
            handle: self.handle,
        }
    }
    pub fn set_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_id(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `GetReusableDelegationSetLimit` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct GetReusableDelegationSetLimit<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::get_reusable_delegation_set_limit_input::Builder,
}
impl<C> GetReusableDelegationSetLimit<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetReusableDelegationSetLimitOutput,
        smithy_http::result::SdkError<crate::error::GetReusableDelegationSetLimitError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>Specify <code>MAX_ZONES_BY_REUSABLE_DELEGATION_SET</code> to get the maximum number of hosted zones that you can associate
    /// with the specified reusable delegation set.</p>
    pub fn r#type(self, input: crate::model::ReusableDelegationSetLimitType) -> Self {
        Self {
            inner: self.inner.r#type(input),
            handle: self.handle,
        }
    }
    pub fn set_type(
        self,
        input: std::option::Option<crate::model::ReusableDelegationSetLimitType>,
    ) -> Self {
        Self {
            inner: self.inner.set_type(input),
            handle: self.handle,
        }
    }
    /// <p>The ID of the delegation set that you want to get the limit for.</p>
    pub fn delegation_set_id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.delegation_set_id(input),
            handle: self.handle,
        }
    }
    pub fn set_delegation_set_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_delegation_set_id(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `GetTrafficPolicy` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct GetTrafficPolicy<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::get_traffic_policy_input::Builder,
}
impl<C> GetTrafficPolicy<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetTrafficPolicyOutput,
        smithy_http::result::SdkError<crate::error::GetTrafficPolicyError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The ID of the traffic policy that you want to get information about.</p>
    pub fn id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.id(input),
            handle: self.handle,
        }
    }
    pub fn set_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_id(input),
            handle: self.handle,
        }
    }
    /// <p>The version number of the traffic policy that you want to get information about.</p>
    pub fn version(self, input: i32) -> Self {
        Self {
            inner: self.inner.version(input),
            handle: self.handle,
        }
    }
    pub fn set_version(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_version(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `GetTrafficPolicyInstance` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct GetTrafficPolicyInstance<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::get_traffic_policy_instance_input::Builder,
}
impl<C> GetTrafficPolicyInstance<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetTrafficPolicyInstanceOutput,
        smithy_http::result::SdkError<crate::error::GetTrafficPolicyInstanceError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The ID of the traffic policy instance that you want to get information about.</p>
    pub fn id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.id(input),
            handle: self.handle,
        }
    }
    pub fn set_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_id(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `GetTrafficPolicyInstanceCount` operation.
///
/// This operation takes no parameters; construct it and call `send()`.
#[derive(std::fmt::Debug)]
pub struct GetTrafficPolicyInstanceCount<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields (empty for this operation).
    inner: crate::input::get_traffic_policy_instance_count_input::Builder,
}
impl<C> GetTrafficPolicyInstanceCount<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetTrafficPolicyInstanceCountOutput,
        smithy_http::result::SdkError<crate::error::GetTrafficPolicyInstanceCountError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
}
/// Fluent request builder for the `ListGeoLocations` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct ListGeoLocations<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::list_geo_locations_input::Builder,
}
impl<C> ListGeoLocations<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListGeoLocationsOutput,
        smithy_http::result::SdkError<crate::error::ListGeoLocationsError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The code for the continent with which you want to start listing locations that Amazon Route 53 supports for geolocation. If Route 53 has already
    /// returned a page or more of results, if <code>IsTruncated</code> is true, and if <code>NextContinentCode</code> from the previous
    /// response has a value, enter that value in <code>startcontinentcode</code> to return the next page of results.</p>
    /// <p>Include <code>startcontinentcode</code> only if you want to list continents. Don't include <code>startcontinentcode</code>
    /// when you're listing countries or countries with their subdivisions.</p>
    pub fn start_continent_code(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.start_continent_code(input),
            handle: self.handle,
        }
    }
    pub fn set_start_continent_code(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_start_continent_code(input),
            handle: self.handle,
        }
    }
    /// <p>The code for the country with which you want to start listing locations that Amazon Route 53 supports for geolocation. If Route 53 has already
    /// returned a page or more of results, if <code>IsTruncated</code> is <code>true</code>, and if <code>NextCountryCode</code> from the
    /// previous response has a value, enter that value in <code>startcountrycode</code> to return the next page of results.</p>
    pub fn start_country_code(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.start_country_code(input),
            handle: self.handle,
        }
    }
    pub fn set_start_country_code(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_start_country_code(input),
            handle: self.handle,
        }
    }
    /// <p>The code for the state of the United States with which you want to start listing locations that Amazon Route 53 supports
    /// for geolocation. If Route 53 has already returned a page or more of results, if <code>IsTruncated</code> is <code>true</code>, and if
    /// <code>NextSubdivisionCode</code> from the previous response has a value, enter that value in <code>startsubdivisioncode</code>
    /// to return the next page of results.</p>
    /// <p>To list subdivisions (U.S. states), you must include both <code>startcountrycode</code> and <code>startsubdivisioncode</code>.</p>
    pub fn start_subdivision_code(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.start_subdivision_code(input),
            handle: self.handle,
        }
    }
    pub fn set_start_subdivision_code(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_start_subdivision_code(input),
            handle: self.handle,
        }
    }
    /// <p>(Optional) The maximum number of geolocations to be included in the response body for this request. If more than <code>maxitems</code>
    /// geolocations remain to be listed, then the value of the <code>IsTruncated</code> element in the response is <code>true</code>.</p>
    pub fn max_items(self, input: i32) -> Self {
        Self {
            inner: self.inner.max_items(input),
            handle: self.handle,
        }
    }
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_items(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `ListHealthChecks` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct ListHealthChecks<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::list_health_checks_input::Builder,
}
impl<C> ListHealthChecks<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListHealthChecksOutput,
        smithy_http::result::SdkError<crate::error::ListHealthChecksError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>If the value of <code>IsTruncated</code> in the previous response was <code>true</code>, you have more health checks. To get another group,
    /// submit another <code>ListHealthChecks</code> request. </p>
    /// <p>For the value of <code>marker</code>, specify the value of <code>NextMarker</code> from the previous response,
    /// which is the ID of the first health check that Amazon Route 53 will return if you submit another request.</p>
    /// <p>If the value of <code>IsTruncated</code> in the previous response was <code>false</code>, there are no more health checks to get.</p>
    pub fn marker(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.marker(input),
            handle: self.handle,
        }
    }
    pub fn set_marker(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_marker(input),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of health checks that you want <code>ListHealthChecks</code> to return in response to the current request.
    /// Amazon Route 53 returns a maximum of 100 items. If you set <code>MaxItems</code> to a value greater than 100, Route 53 returns only the first 100 health checks. </p>
    pub fn max_items(self, input: i32) -> Self {
        Self {
            inner: self.inner.max_items(input),
            handle: self.handle,
        }
    }
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_items(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `ListHostedZones` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct ListHostedZones<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::list_hosted_zones_input::Builder,
}
impl<C> ListHostedZones<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListHostedZonesOutput,
        smithy_http::result::SdkError<crate::error::ListHostedZonesError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>If the value of <code>IsTruncated</code> in the previous response was <code>true</code>, you have more hosted zones.
    /// To get more hosted zones, submit another <code>ListHostedZones</code> request. </p>
    /// <p>For the value of <code>marker</code>, specify the value of <code>NextMarker</code> from the previous response,
    /// which is the ID of the first hosted zone that Amazon Route 53 will return if you submit another request.</p>
    /// <p>If the value of <code>IsTruncated</code> in the previous response was <code>false</code>, there are no more hosted zones to get.</p>
    pub fn marker(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.marker(input),
            handle: self.handle,
        }
    }
    pub fn set_marker(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_marker(input),
            handle: self.handle,
        }
    }
    /// <p>(Optional) The maximum number of hosted zones that you want Amazon Route 53 to return. If you have more than <code>maxitems</code>
    /// hosted zones, the value of <code>IsTruncated</code> in the response is <code>true</code>, and the value of <code>NextMarker</code>
    /// is the hosted zone ID of the first hosted zone that Route 53 will return if you submit another request.</p>
    pub fn max_items(self, input: i32) -> Self {
        Self {
            inner: self.inner.max_items(input),
            handle: self.handle,
        }
    }
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_items(input),
            handle: self.handle,
        }
    }
    /// <p>If you're using reusable delegation sets and you want to list all of the hosted zones that are associated
    /// with a reusable delegation set, specify the ID of that reusable delegation set. </p>
    pub fn delegation_set_id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.delegation_set_id(input),
            handle: self.handle,
        }
    }
    pub fn set_delegation_set_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_delegation_set_id(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `ListHostedZonesByName` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct ListHostedZonesByName<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::list_hosted_zones_by_name_input::Builder,
}
impl<C> ListHostedZonesByName<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListHostedZonesByNameOutput,
        smithy_http::result::SdkError<crate::error::ListHostedZonesByNameError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>(Optional) For your first request to <code>ListHostedZonesByName</code>, include the <code>dnsname</code> parameter only if you want to
    /// specify the name of the first hosted zone in the response. If you don't include the <code>dnsname</code> parameter, Amazon Route 53 returns all of
    /// the hosted zones that were created by the current Amazon Web Services account, in ASCII order. For subsequent requests, include both <code>dnsname</code> and
    /// <code>hostedzoneid</code> parameters. For <code>dnsname</code>, specify the value of <code>NextDNSName</code> from the previous response.</p>
    pub fn dns_name(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.dns_name(input),
            handle: self.handle,
        }
    }
    pub fn set_dns_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_dns_name(input),
            handle: self.handle,
        }
    }
    /// <p>(Optional) For your first request to <code>ListHostedZonesByName</code>, do not include the <code>hostedzoneid</code> parameter.</p>
    /// <p>If you have more hosted zones than the value of <code>maxitems</code>, <code>ListHostedZonesByName</code> returns only the first
    /// <code>maxitems</code> hosted zones. To get the next group of <code>maxitems</code> hosted zones, submit another request to
    /// <code>ListHostedZonesByName</code> and include both <code>dnsname</code> and <code>hostedzoneid</code> parameters. For the value of
    /// <code>hostedzoneid</code>, specify the value of the <code>NextHostedZoneId</code> element from the previous response.</p>
    pub fn hosted_zone_id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.hosted_zone_id(input),
            handle: self.handle,
        }
    }
    pub fn set_hosted_zone_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_hosted_zone_id(input),
            handle: self.handle,
        }
    }
    /// <p>The maximum number of hosted zones to be included in the response body for this request. If you have more than <code>maxitems</code>
    /// hosted zones, then the value of the <code>IsTruncated</code> element in the response is true, and the values of <code>NextDNSName</code> and
    /// <code>NextHostedZoneId</code> specify the first hosted zone in the next group of <code>maxitems</code> hosted zones. </p>
    pub fn max_items(self, input: i32) -> Self {
        Self {
            inner: self.inner.max_items(input),
            handle: self.handle,
        }
    }
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_items(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `ListHostedZonesByVPC` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct ListHostedZonesByVPC<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::list_hosted_zones_by_vpc_input::Builder,
}
impl<C> ListHostedZonesByVPC<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListHostedZonesByVpcOutput,
        smithy_http::result::SdkError<crate::error::ListHostedZonesByVPCError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The ID of the Amazon VPC that you want to list hosted zones for.</p>
    pub fn vpc_id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.vpc_id(input),
            handle: self.handle,
        }
    }
    pub fn set_vpc_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_vpc_id(input),
            handle: self.handle,
        }
    }
    /// <p>For the Amazon VPC that you specified for <code>VPCId</code>, the Amazon Web Services Region that you created the VPC in. </p>
    pub fn vpc_region(self, input: crate::model::VpcRegion) -> Self {
        Self {
            inner: self.inner.vpc_region(input),
            handle: self.handle,
        }
    }
    pub fn set_vpc_region(
        self,
        input: std::option::Option<crate::model::VpcRegion>,
    ) -> Self {
        Self {
            inner: self.inner.set_vpc_region(input),
            handle: self.handle,
        }
    }
    /// <p>(Optional) The maximum number of hosted zones that you want Amazon Route 53 to return. If the specified VPC is associated with
    /// more than <code>MaxItems</code> hosted zones, the response includes a <code>NextToken</code> element. <code>NextToken</code> contains
    /// an encrypted token that identifies the first hosted zone that Route 53 will return if you submit another request.</p>
    pub fn max_items(self, input: i32) -> Self {
        Self {
            inner: self.inner.max_items(input),
            handle: self.handle,
        }
    }
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_items(input),
            handle: self.handle,
        }
    }
    /// <p>If the previous response included a <code>NextToken</code> element, the specified VPC is associated with more hosted zones.
    /// To get more hosted zones, submit another <code>ListHostedZonesByVPC</code> request. </p>
    /// <p>For the value of <code>NextToken</code>, specify the value of <code>NextToken</code> from the previous response.</p>
    /// <p>If the previous response didn't include a <code>NextToken</code> element, there are no more hosted zones to get.</p>
    pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.next_token(input),
            handle: self.handle,
        }
    }
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_next_token(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `ListQueryLoggingConfigs` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct ListQueryLoggingConfigs<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::list_query_logging_configs_input::Builder,
}
impl<C> ListQueryLoggingConfigs<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListQueryLoggingConfigsOutput,
        smithy_http::result::SdkError<crate::error::ListQueryLoggingConfigsError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>(Optional) If you want to list the query logging configuration that is associated with a hosted zone, specify the ID in
    /// <code>HostedZoneId</code>. </p>
    /// <p>If you don't specify a hosted zone ID, <code>ListQueryLoggingConfigs</code> returns all of the configurations
    /// that are associated with the current Amazon Web Services account.</p>
    pub fn hosted_zone_id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.hosted_zone_id(input),
            handle: self.handle,
        }
    }
    pub fn set_hosted_zone_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_hosted_zone_id(input),
            handle: self.handle,
        }
    }
    /// <p>(Optional) If the current Amazon Web Services account has more than <code>MaxResults</code> query logging configurations, use <code>NextToken</code>
    /// to get the second and subsequent pages of results.</p>
    /// <p>For the first <code>ListQueryLoggingConfigs</code> request, omit this value.</p>
    /// <p>For the second and subsequent requests, get the value of <code>NextToken</code> from the previous response and specify that value
    /// for <code>NextToken</code> in the request.</p>
    pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.next_token(input),
            handle: self.handle,
        }
    }
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_next_token(input),
            handle: self.handle,
        }
    }
    /// <p>(Optional) The maximum number of query logging configurations that you want Amazon Route 53 to return in response to the current request.
    /// If the current Amazon Web Services account has more than <code>MaxResults</code> configurations, use the value of
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_ListQueryLoggingConfigs.html#API_ListQueryLoggingConfigs_RequestSyntax">NextToken</a>
    /// in the response to get the next page of results.</p>
    /// <p>If you don't specify a value for <code>MaxResults</code>, Route 53 returns up to 100 configurations.</p>
    pub fn max_results(self, input: i32) -> Self {
        Self {
            inner: self.inner.max_results(input),
            handle: self.handle,
        }
    }
    pub fn set_max_results(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_results(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `ListResourceRecordSets` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct ListResourceRecordSets<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::list_resource_record_sets_input::Builder,
}
impl<C> ListResourceRecordSets<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListResourceRecordSetsOutput,
        smithy_http::result::SdkError<crate::error::ListResourceRecordSetsError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>The ID of the hosted zone that contains the resource record sets that you want to list.</p>
    pub fn hosted_zone_id(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.hosted_zone_id(input),
            handle: self.handle,
        }
    }
    pub fn set_hosted_zone_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_hosted_zone_id(input),
            handle: self.handle,
        }
    }
    /// <p>The first name in the lexicographic ordering of resource record sets that you want to list.
    /// If the specified record name doesn't exist, the results begin with the first resource record set that has a name
    /// greater than the value of <code>name</code>.</p>
    pub fn start_record_name(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.start_record_name(input),
            handle: self.handle,
        }
    }
    pub fn set_start_record_name(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_start_record_name(input),
            handle: self.handle,
        }
    }
    /// <p>The type of resource record set to begin the record listing from.</p>
    /// <p>Valid values for basic resource record sets: <code>A</code> | <code>AAAA</code> | <code>CAA</code> | <code>CNAME</code> | <code>MX</code> |
    /// <code>NAPTR</code> | <code>NS</code> | <code>PTR</code> | <code>SOA</code> | <code>SPF</code> | <code>SRV</code> | <code>TXT</code>
    /// </p>
    /// <p>Values for weighted, latency, geolocation, and failover resource record sets: <code>A</code> | <code>AAAA</code> | <code>CAA</code> | <code>CNAME</code> |
    /// <code>MX</code> | <code>NAPTR</code> | <code>PTR</code> | <code>SPF</code> | <code>SRV</code> | <code>TXT</code>
    /// </p>
    /// <p>Values for alias resource record sets: </p>
    /// <ul>
    /// <li>
    /// <p>
    /// <b>API Gateway custom regional API or edge-optimized API</b>: A</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>CloudFront distribution</b>: A or AAAA</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>Elastic Beanstalk environment that has a regionalized subdomain</b>: A</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>Elastic Load Balancing load balancer</b>: A | AAAA</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>S3 bucket</b>: A</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>VPC interface VPC endpoint</b>: A</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>Another resource record set in this hosted zone:</b> The type of the resource record set
    /// that the alias references.</p>
    /// </li>
    /// </ul>
    /// <p>Constraint: Specifying <code>type</code> without specifying <code>name</code> returns an <code>InvalidInput</code> error.</p>
    pub fn start_record_type(self, input: crate::model::RrType) -> Self {
        Self {
            inner: self.inner.start_record_type(input),
            handle: self.handle,
        }
    }
    pub fn set_start_record_type(
        self,
        input: std::option::Option<crate::model::RrType>,
    ) -> Self {
        Self {
            inner: self.inner.set_start_record_type(input),
            handle: self.handle,
        }
    }
    /// <p>
    /// <i>Resource record sets that have a routing policy other than simple:</i> If results were truncated for a given DNS name and type,
    /// specify the value of <code>NextRecordIdentifier</code> from the previous response to get the next resource record set that has the current
    /// DNS name and type.</p>
    pub fn start_record_identifier(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.start_record_identifier(input),
            handle: self.handle,
        }
    }
    pub fn set_start_record_identifier(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self {
            inner: self.inner.set_start_record_identifier(input),
            handle: self.handle,
        }
    }
    /// <p>(Optional) The maximum number of resource records sets to include in the response body for this request. If the response includes
    /// more than <code>maxitems</code> resource record sets, the value of the <code>IsTruncated</code> element in the response is <code>true</code>,
    /// and the values of the <code>NextRecordName</code> and <code>NextRecordType</code> elements in the response identify the first
    /// resource record set in the next group of <code>maxitems</code> resource record sets.</p>
    pub fn max_items(self, input: i32) -> Self {
        Self {
            inner: self.inner.max_items(input),
            handle: self.handle,
        }
    }
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_items(input),
            handle: self.handle,
        }
    }
}
/// Fluent request builder for the `ListReusableDelegationSets` operation.
///
/// Accumulates request parameters via its setter methods and dispatches the
/// request when `send()` is called.
#[derive(std::fmt::Debug)]
pub struct ListReusableDelegationSets<C = aws_hyper::DynConnector> {
    // Shared client/configuration handle used to construct and dispatch the request.
    handle: std::sync::Arc<super::Handle<C>>,
    // Builder collecting the operation's input fields.
    inner: crate::input::list_reusable_delegation_sets_input::Builder,
}
impl<C> ListReusableDelegationSets<C> {
    /// Internal constructor: binds a fresh input builder to the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Consumes the builder, constructs the operation, and dispatches it through
    /// the shared client, returning the operation output or an `SdkError`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListReusableDelegationSetsOutput,
        smithy_http::result::SdkError<crate::error::ListReusableDelegationSetsError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        // Input build and operation construction failures are both surfaced
        // as `ConstructionFailure`.
        let request = self
            .inner
            .build()
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        let operation = request
            .make_operation(&self.handle.conf)
            .map_err(|source| smithy_http::result::SdkError::ConstructionFailure(source.into()))?;
        self.handle.client.call(operation).await
    }
    /// <p>If the value of <code>IsTruncated</code> in the previous response was <code>true</code>, you have more reusable delegation sets.
    /// To get another group, submit another <code>ListReusableDelegationSets</code> request. </p>
    /// <p>For the value of <code>marker</code>, specify the value of <code>NextMarker</code> from the previous response,
    /// which is the ID of the first reusable delegation set that Amazon Route 53 will return if you submit another request.</p>
    /// <p>If the value of <code>IsTruncated</code> in the previous response was <code>false</code>, there are no more reusable delegation sets to get.</p>
    pub fn marker(self, input: impl Into<std::string::String>) -> Self {
        Self {
            inner: self.inner.marker(input),
            handle: self.handle,
        }
    }
    pub fn set_marker(self, input: std::option::Option<std::string::String>) -> Self {
        Self {
            inner: self.inner.set_marker(input),
            handle: self.handle,
        }
    }
    /// <p>The number of reusable delegation sets that you want Amazon Route 53 to return in the response to this request. If you specify a value
    /// greater than 100, Route 53 returns only the first 100 reusable delegation sets.</p>
    pub fn max_items(self, input: i32) -> Self {
        Self {
            inner: self.inner.max_items(input),
            handle: self.handle,
        }
    }
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self {
            inner: self.inner.set_max_items(input),
            handle: self.handle,
        }
    }
}
/// Fluent builder constructing a request to the `ListTagsForResource` operation.
#[derive(std::fmt::Debug)]
pub struct ListTagsForResource<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::list_tags_for_resource_input::Builder,
}
impl<C> ListTagsForResource<C> {
    /// Creates the fluent builder from a shared service handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self { handle, inner: Default::default() }
    }
    /// Builds the accumulated input, assembles the operation against the
    /// handle's configuration, and dispatches it over the handle's client.
    /// Input/operation construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTagsForResourceOutput,
        smithy_http::result::SdkError<crate::error::ListTagsForResourceError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        self.handle.client.call(op).await
    }
    /// The type of the resource: `healthcheck` for health checks,
    /// `hostedzone` for hosted zones.
    pub fn resource_type(self, input: crate::model::TagResourceType) -> Self {
        Self { inner: self.inner.resource_type(input), handle: self.handle }
    }
    /// Sets (or clears) the resource type.
    pub fn set_resource_type(
        self,
        input: std::option::Option<crate::model::TagResourceType>,
    ) -> Self {
        Self { inner: self.inner.set_resource_type(input), handle: self.handle }
    }
    /// The ID of the resource whose tags should be retrieved.
    pub fn resource_id(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.resource_id(input), handle: self.handle }
    }
    /// Sets (or clears) the resource ID.
    pub fn set_resource_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_resource_id(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to the `ListTagsForResources` operation.
#[derive(std::fmt::Debug)]
pub struct ListTagsForResources<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::list_tags_for_resources_input::Builder,
}
impl<C> ListTagsForResources<C> {
    /// Creates the fluent builder from a shared service handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self { handle, inner: Default::default() }
    }
    /// Builds the accumulated input, assembles the operation against the
    /// handle's configuration, and dispatches it over the handle's client.
    /// Input/operation construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTagsForResourcesOutput,
        smithy_http::result::SdkError<crate::error::ListTagsForResourcesError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        self.handle.client.call(op).await
    }
    /// The type of the resources: `healthcheck` for health checks,
    /// `hostedzone` for hosted zones.
    pub fn resource_type(self, input: crate::model::TagResourceType) -> Self {
        Self { inner: self.inner.resource_type(input), handle: self.handle }
    }
    /// Sets (or clears) the resource type.
    pub fn set_resource_type(
        self,
        input: std::option::Option<crate::model::TagResourceType>,
    ) -> Self {
        Self { inner: self.inner.set_resource_type(input), handle: self.handle }
    }
    /// Adds one `ResourceId` to the list of resources whose tags should be
    /// retrieved (forwarded to the input builder's appender).
    pub fn resource_ids(self, inp: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.resource_ids(inp), handle: self.handle }
    }
    /// Replaces (or clears) the full list of resource IDs.
    pub fn set_resource_ids(
        self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        Self { inner: self.inner.set_resource_ids(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to the `ListTrafficPolicies` operation.
#[derive(std::fmt::Debug)]
pub struct ListTrafficPolicies<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::list_traffic_policies_input::Builder,
}
impl<C> ListTrafficPolicies<C> {
    /// Creates the fluent builder from a shared service handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self { handle, inner: Default::default() }
    }
    /// Builds the accumulated input, assembles the operation against the
    /// handle's configuration, and dispatches it over the handle's client.
    /// Input/operation construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTrafficPoliciesOutput,
        smithy_http::result::SdkError<crate::error::ListTrafficPoliciesError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        self.handle.client.call(op).await
    }
    /// (Conditional) Pagination marker. Omit on the first request; when a
    /// previous response was truncated (more policies than `MaxItems`), pass
    /// the `TrafficPolicyIdMarker` value from that response to fetch the next
    /// group of policies.
    pub fn traffic_policy_id_marker(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.traffic_policy_id_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the traffic policy ID marker.
    pub fn set_traffic_policy_id_marker(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_traffic_policy_id_marker(input), handle: self.handle }
    }
    /// (Optional) The maximum number of traffic policies to return. When more
    /// exist, the response's `IsTruncated` is `true` and
    /// `TrafficPolicyIdMarker` identifies the first policy of the next page.
    pub fn max_items(self, input: i32) -> Self {
        Self { inner: self.inner.max_items(input), handle: self.handle }
    }
    /// Sets (or clears) the maximum item count.
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self { inner: self.inner.set_max_items(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to the `ListTrafficPolicyInstances` operation.
#[derive(std::fmt::Debug)]
pub struct ListTrafficPolicyInstances<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::list_traffic_policy_instances_input::Builder,
}
impl<C> ListTrafficPolicyInstances<C> {
    /// Creates the fluent builder from a shared service handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self { handle, inner: Default::default() }
    }
    /// Builds the accumulated input, assembles the operation against the
    /// handle's configuration, and dispatches it over the handle's client.
    /// Input/operation construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTrafficPolicyInstancesOutput,
        smithy_http::result::SdkError<crate::error::ListTrafficPolicyInstancesError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        self.handle.client.call(op).await
    }
    /// Pagination marker: when the previous response had
    /// `IsTruncated == true`, pass its `HostedZoneIdMarker` value — the hosted
    /// zone ID of the first traffic policy instance in the next group. Omit
    /// when `IsTruncated` was `false`.
    pub fn hosted_zone_id_marker(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.hosted_zone_id_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the hosted zone ID marker.
    pub fn set_hosted_zone_id_marker(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_hosted_zone_id_marker(input), handle: self.handle }
    }
    /// Pagination marker: when the previous response had
    /// `IsTruncated == true`, pass its `TrafficPolicyInstanceNameMarker`
    /// value — the name of the first instance in the next group.
    pub fn traffic_policy_instance_name_marker(
        self,
        input: impl Into<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.traffic_policy_instance_name_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the instance name marker.
    pub fn set_traffic_policy_instance_name_marker(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_traffic_policy_instance_name_marker(input), handle: self.handle }
    }
    /// Pagination marker: when the previous response had
    /// `IsTruncated == true`, pass its `TrafficPolicyInstanceTypeMarker`
    /// value — the record type of the first instance in the next group.
    pub fn traffic_policy_instance_type_marker(self, input: crate::model::RrType) -> Self {
        Self { inner: self.inner.traffic_policy_instance_type_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the instance type marker.
    pub fn set_traffic_policy_instance_type_marker(
        self,
        input: std::option::Option<crate::model::RrType>,
    ) -> Self {
        Self { inner: self.inner.set_traffic_policy_instance_type_marker(input), handle: self.handle }
    }
    /// The maximum number of traffic policy instances to return. When more
    /// exist, the response's `IsTruncated` is `true` and the three markers
    /// (`HostedZoneIdMarker`, `TrafficPolicyInstanceNameMarker`,
    /// `TrafficPolicyInstanceTypeMarker`) identify the first instance of the
    /// next group.
    pub fn max_items(self, input: i32) -> Self {
        Self { inner: self.inner.max_items(input), handle: self.handle }
    }
    /// Sets (or clears) the maximum item count.
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self { inner: self.inner.set_max_items(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to the `ListTrafficPolicyInstancesByHostedZone` operation.
#[derive(std::fmt::Debug)]
pub struct ListTrafficPolicyInstancesByHostedZone<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::list_traffic_policy_instances_by_hosted_zone_input::Builder,
}
impl<C> ListTrafficPolicyInstancesByHostedZone<C> {
    /// Creates the fluent builder from a shared service handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self { handle, inner: Default::default() }
    }
    /// Builds the accumulated input, assembles the operation against the
    /// handle's configuration, and dispatches it over the handle's client.
    /// Input/operation construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTrafficPolicyInstancesByHostedZoneOutput,
        smithy_http::result::SdkError<
            crate::error::ListTrafficPolicyInstancesByHostedZoneError,
        >,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        self.handle.client.call(op).await
    }
    /// The ID of the hosted zone whose traffic policy instances should be
    /// listed.
    pub fn hosted_zone_id(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.hosted_zone_id(input), handle: self.handle }
    }
    /// Sets (or clears) the hosted zone ID.
    pub fn set_hosted_zone_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_hosted_zone_id(input), handle: self.handle }
    }
    /// Pagination marker: when the previous response had
    /// `IsTruncated == true`, pass its `TrafficPolicyInstanceNameMarker`
    /// value — the name of the first instance in the next group. Omit when
    /// `IsTruncated` was `false`.
    pub fn traffic_policy_instance_name_marker(
        self,
        input: impl Into<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.traffic_policy_instance_name_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the instance name marker.
    pub fn set_traffic_policy_instance_name_marker(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_traffic_policy_instance_name_marker(input), handle: self.handle }
    }
    /// Pagination marker: when the previous response had
    /// `IsTruncated == true`, pass its `TrafficPolicyInstanceTypeMarker`
    /// value — the record type of the first instance in the next group.
    pub fn traffic_policy_instance_type_marker(self, input: crate::model::RrType) -> Self {
        Self { inner: self.inner.traffic_policy_instance_type_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the instance type marker.
    pub fn set_traffic_policy_instance_type_marker(
        self,
        input: std::option::Option<crate::model::RrType>,
    ) -> Self {
        Self { inner: self.inner.set_traffic_policy_instance_type_marker(input), handle: self.handle }
    }
    /// The maximum number of traffic policy instances per response. When more
    /// exist, `IsTruncated` is `true` and the markers identify the first
    /// instance Route 53 will return on the next request.
    pub fn max_items(self, input: i32) -> Self {
        Self { inner: self.inner.max_items(input), handle: self.handle }
    }
    /// Sets (or clears) the maximum item count.
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self { inner: self.inner.set_max_items(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to the `ListTrafficPolicyInstancesByPolicy` operation.
#[derive(std::fmt::Debug)]
pub struct ListTrafficPolicyInstancesByPolicy<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::list_traffic_policy_instances_by_policy_input::Builder,
}
impl<C> ListTrafficPolicyInstancesByPolicy<C> {
    /// Creates the fluent builder from a shared service handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self { handle, inner: Default::default() }
    }
    /// Builds the accumulated input, assembles the operation against the
    /// handle's configuration, and dispatches it over the handle's client.
    /// Input/operation construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTrafficPolicyInstancesByPolicyOutput,
        smithy_http::result::SdkError<crate::error::ListTrafficPolicyInstancesByPolicyError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        self.handle.client.call(op).await
    }
    /// The ID of the traffic policy whose instances should be listed.
    pub fn traffic_policy_id(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.traffic_policy_id(input), handle: self.handle }
    }
    /// Sets (or clears) the traffic policy ID.
    pub fn set_traffic_policy_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_traffic_policy_id(input), handle: self.handle }
    }
    /// The version of the traffic policy to list instances for; must belong
    /// to the policy identified by `TrafficPolicyId`.
    pub fn traffic_policy_version(self, input: i32) -> Self {
        Self { inner: self.inner.traffic_policy_version(input), handle: self.handle }
    }
    /// Sets (or clears) the traffic policy version.
    pub fn set_traffic_policy_version(self, input: std::option::Option<i32>) -> Self {
        Self { inner: self.inner.set_traffic_policy_version(input), handle: self.handle }
    }
    /// Pagination marker: when the previous response had
    /// `IsTruncated == true`, pass its `HostedZoneIdMarker` value — the
    /// hosted zone ID of the first instance in the next group. Omit when
    /// `IsTruncated` was `false`.
    pub fn hosted_zone_id_marker(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.hosted_zone_id_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the hosted zone ID marker.
    pub fn set_hosted_zone_id_marker(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_hosted_zone_id_marker(input), handle: self.handle }
    }
    /// Pagination marker: when the previous response had
    /// `IsTruncated == true`, pass its `TrafficPolicyInstanceNameMarker`
    /// value — the name of the first instance in the next group.
    pub fn traffic_policy_instance_name_marker(
        self,
        input: impl Into<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.traffic_policy_instance_name_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the instance name marker.
    pub fn set_traffic_policy_instance_name_marker(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_traffic_policy_instance_name_marker(input), handle: self.handle }
    }
    /// Pagination marker: when the previous response had
    /// `IsTruncated == true`, pass its `TrafficPolicyInstanceTypeMarker`
    /// value — the record type of the first instance in the next group.
    pub fn traffic_policy_instance_type_marker(self, input: crate::model::RrType) -> Self {
        Self { inner: self.inner.traffic_policy_instance_type_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the instance type marker.
    pub fn set_traffic_policy_instance_type_marker(
        self,
        input: std::option::Option<crate::model::RrType>,
    ) -> Self {
        Self { inner: self.inner.set_traffic_policy_instance_type_marker(input), handle: self.handle }
    }
    /// The maximum number of traffic policy instances per response. When more
    /// exist, `IsTruncated` is `true` and the markers identify the first
    /// instance Route 53 will return on the next request.
    pub fn max_items(self, input: i32) -> Self {
        Self { inner: self.inner.max_items(input), handle: self.handle }
    }
    /// Sets (or clears) the maximum item count.
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self { inner: self.inner.set_max_items(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to the `ListTrafficPolicyVersions` operation.
#[derive(std::fmt::Debug)]
pub struct ListTrafficPolicyVersions<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::list_traffic_policy_versions_input::Builder,
}
impl<C> ListTrafficPolicyVersions<C> {
    /// Creates the fluent builder from a shared service handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self { handle, inner: Default::default() }
    }
    /// Builds the accumulated input, assembles the operation against the
    /// handle's configuration, and dispatches it over the handle's client.
    /// Input/operation construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTrafficPolicyVersionsOutput,
        smithy_http::result::SdkError<crate::error::ListTrafficPolicyVersionsError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        self.handle.client.call(op).await
    }
    /// The `Id` of the traffic policy whose versions should be listed.
    pub fn id(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.id(input), handle: self.handle }
    }
    /// Sets (or clears) the traffic policy ID.
    pub fn set_id(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_id(input), handle: self.handle }
    }
    /// Pagination marker. Omit on the first request; when a previous
    /// response was truncated (more versions than `MaxItems`), pass its
    /// `TrafficPolicyVersionMarker` value to fetch the next group of
    /// versions.
    pub fn traffic_policy_version_marker(
        self,
        input: impl Into<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.traffic_policy_version_marker(input), handle: self.handle }
    }
    /// Sets (or clears) the version marker.
    pub fn set_traffic_policy_version_marker(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_traffic_policy_version_marker(input), handle: self.handle }
    }
    /// The maximum number of traffic policy versions per response. When more
    /// exist, `IsTruncated` is `true` and `TrafficPolicyVersionMarker` is the
    /// ID of the first version the next request will return.
    pub fn max_items(self, input: i32) -> Self {
        Self { inner: self.inner.max_items(input), handle: self.handle }
    }
    /// Sets (or clears) the maximum item count.
    pub fn set_max_items(self, input: std::option::Option<i32>) -> Self {
        Self { inner: self.inner.set_max_items(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to the `ListVPCAssociationAuthorizations` operation.
#[derive(std::fmt::Debug)]
pub struct ListVPCAssociationAuthorizations<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::list_vpc_association_authorizations_input::Builder,
}
impl<C> ListVPCAssociationAuthorizations<C> {
    /// Creates the fluent builder from a shared service handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self { handle, inner: Default::default() }
    }
    /// Builds the accumulated input, assembles the operation against the
    /// handle's configuration, and dispatches it over the handle's client.
    /// Input/operation construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListVpcAssociationAuthorizationsOutput,
        smithy_http::result::SdkError<crate::error::ListVPCAssociationAuthorizationsError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        self.handle.client.call(op).await
    }
    /// The ID of the hosted zone for which to list the VPCs that can be
    /// associated with it.
    pub fn hosted_zone_id(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.hosted_zone_id(input), handle: self.handle }
    }
    /// Sets (or clears) the hosted zone ID.
    pub fn set_hosted_zone_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_hosted_zone_id(input), handle: self.handle }
    }
    /// *Optional* pagination token: when a response includes a `NextToken`
    /// element, pass that value here on the next request to get the next
    /// page of associable VPCs.
    pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.next_token(input), handle: self.handle }
    }
    /// Sets (or clears) the pagination token.
    pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_next_token(input), handle: self.handle }
    }
    /// *Optional*: the maximum number of VPCs to return. When unset,
    /// Route 53 returns up to 50 VPCs per page.
    pub fn max_results(self, input: i32) -> Self {
        Self { inner: self.inner.max_results(input), handle: self.handle }
    }
    /// Sets (or clears) the maximum result count.
    pub fn set_max_results(self, input: std::option::Option<i32>) -> Self {
        Self { inner: self.inner.set_max_results(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to the `TestDNSAnswer` operation.
#[derive(std::fmt::Debug)]
pub struct TestDNSAnswer<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::test_dns_answer_input::Builder,
}
impl<C> TestDNSAnswer<C> {
    /// Creates the fluent builder from a shared service handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self { handle, inner: Default::default() }
    }
    /// Builds the accumulated input, assembles the operation against the
    /// handle's configuration, and dispatches it over the handle's client.
    /// Input/operation construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::TestDnsAnswerOutput,
        smithy_http::result::SdkError<crate::error::TestDNSAnswerError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(built) => built,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(e) => return Err(smithy_http::result::SdkError::ConstructionFailure(e.into())),
        };
        self.handle.client.call(op).await
    }
    /// The ID of the hosted zone to simulate a DNS query against.
    pub fn hosted_zone_id(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.hosted_zone_id(input), handle: self.handle }
    }
    /// Sets (or clears) the hosted zone ID.
    pub fn set_hosted_zone_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_hosted_zone_id(input), handle: self.handle }
    }
    /// The name of the resource record set to simulate a query for.
    pub fn record_name(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.record_name(input), handle: self.handle }
    }
    /// Sets (or clears) the record name.
    pub fn set_record_name(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_record_name(input), handle: self.handle }
    }
    /// The type of the resource record set.
    pub fn record_type(self, input: crate::model::RrType) -> Self {
        Self { inner: self.inner.record_type(input), handle: self.handle }
    }
    /// Sets (or clears) the record type.
    pub fn set_record_type(self, input: std::option::Option<crate::model::RrType>) -> Self {
        Self { inner: self.inner.set_record_type(input), handle: self.handle }
    }
    /// The IP address of the DNS resolver to simulate the request from.
    /// When omitted, `TestDnsAnswer` uses a resolver in the
    /// us-east-1 (N. Virginia) Region.
    pub fn resolver_ip(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.resolver_ip(input), handle: self.handle }
    }
    /// Sets (or clears) the resolver IP.
    pub fn set_resolver_ip(self, input: std::option::Option<std::string::String>) -> Self {
        Self { inner: self.inner.set_resolver_ip(input), handle: self.handle }
    }
    /// If the resolver supports EDNS0: the IPv4 or IPv6 address of a client
    /// in the applicable location (e.g. `192.0.2.44` or
    /// `2001:db8:85a3::8a2e:370:7334`).
    pub fn edns0_client_subnet_ip(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.edns0_client_subnet_ip(input), handle: self.handle }
    }
    /// Sets (or clears) the EDNS0 client subnet IP.
    pub fn set_edns0_client_subnet_ip(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_edns0_client_subnet_ip(input), handle: self.handle }
    }
    /// With `edns0clientsubnetip` set, optionally limits how many bits of
    /// that address the checking tool includes in the query (e.g. IP
    /// `192.0.2.44` with mask `24` simulates a request from 192.0.2.0/24).
    /// Defaults: 24 bits for IPv4, 64 bits for IPv6. Valid range: 0-32 for
    /// IPv4, 0-128 for IPv6.
    pub fn edns0_client_subnet_mask(self, input: impl Into<std::string::String>) -> Self {
        Self { inner: self.inner.edns0_client_subnet_mask(input), handle: self.handle }
    }
    /// Sets (or clears) the EDNS0 client subnet mask.
    pub fn set_edns0_client_subnet_mask(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        Self { inner: self.inner.set_edns0_client_subnet_mask(input), handle: self.handle }
    }
}
/// Fluent builder constructing a request to the `UpdateHealthCheck` operation.
#[derive(std::fmt::Debug)]
pub struct UpdateHealthCheck<C = aws_hyper::DynConnector> {
    // Shared handle holding the client used to dispatch the call and the
    // service configuration consulted when the operation is built.
    handle: std::sync::Arc<super::Handle<C>>,
    // Accumulates the operation's input parameters until `send` is called.
    inner: crate::input::update_health_check_input::Builder,
}
impl<C> UpdateHealthCheck<C> {
    /// Creates a new `UpdateHealthCheck` fluent builder from the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Builds the operation input, constructs the operation, and dispatches it
    /// with the client stored in the handle.
    ///
    /// Returns `SdkError::ConstructionFailure` if the input cannot be built or
    /// the operation cannot be constructed; otherwise returns the result of
    /// the dispatch.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateHealthCheckOutput,
        smithy_http::result::SdkError<crate::error::UpdateHealthCheckError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = self
            .inner
            .build()
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        let op = input
            .make_operation(&self.handle.conf)
            .map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
        self.handle.client.call(op).await
    }
    /// <p>The ID for the health check for which you want detailed information. When you created the health check,
    /// <code>CreateHealthCheck</code> returned the ID in the response, in the <code>HealthCheckId</code> element.</p>
    pub fn health_check_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.health_check_id(input);
        self
    }
    /// Sets `HealthCheckId` directly from an `Option`; see
    /// [`health_check_id`](Self::health_check_id).
    pub fn set_health_check_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_health_check_id(input);
        self
    }
    /// <p>A sequential counter that Amazon Route 53 sets to <code>1</code> when you create a health check and increments by 1 each time you
    /// update settings for the health check.</p>
    /// <p>We recommend that you use <code>GetHealthCheck</code> or <code>ListHealthChecks</code> to get the current value of
    /// <code>HealthCheckVersion</code> for the health check that you want to update, and that you include that value in your
    /// <code>UpdateHealthCheck</code> request. This prevents Route 53 from overwriting an intervening update:</p>
    /// <ul>
    /// <li>
    /// <p>If the value in the <code>UpdateHealthCheck</code> request matches the value of <code>HealthCheckVersion</code> in the
    /// health check, Route 53 updates the health check with the new settings.</p>
    /// </li>
    /// <li>
    /// <p>If the value of <code>HealthCheckVersion</code> in the health check is greater, the health check was changed after you
    /// got the version number. Route 53 does not update the health check, and it returns a <code>HealthCheckVersionMismatch</code> error.</p>
    /// </li>
    /// </ul>
    pub fn health_check_version(mut self, input: i64) -> Self {
        self.inner = self.inner.health_check_version(input);
        self
    }
    /// Sets `HealthCheckVersion` directly from an `Option`; see
    /// [`health_check_version`](Self::health_check_version).
    pub fn set_health_check_version(mut self, input: std::option::Option<i64>) -> Self {
        self.inner = self.inner.set_health_check_version(input);
        self
    }
    /// <p>The IPv4 or IPv6 IP address for the endpoint that you want Amazon Route 53 to perform health checks on. If you don't specify a value for
    /// <code>IPAddress</code>, Route 53 sends a DNS request to resolve the domain name that you specify in <code>FullyQualifiedDomainName</code>
    /// at the interval that you specify in <code>RequestInterval</code>. Using an IP address that is returned by DNS, Route 53 then
    /// checks the health of the endpoint.</p>
    /// <p>Use one of the following formats for the value of <code>IPAddress</code>: </p>
    /// <ul>
    /// <li>
    /// <p>
    /// <b>IPv4 address</b>: four values between 0 and 255, separated by periods (.),
    /// for example, <code>192.0.2.44</code>.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>IPv6 address</b>: eight groups of four hexadecimal values, separated by colons (:),
    /// for example, <code>2001:0db8:85a3:0000:0000:abcd:0001:2345</code>. You can also shorten IPv6 addresses as described in RFC 5952,
    /// for example, <code>2001:db8:85a3::abcd:1:2345</code>.</p>
    /// </li>
    /// </ul>
    /// <p>If the endpoint is an EC2 instance, we recommend that you create an Elastic IP address, associate it with your EC2 instance, and
    /// specify the Elastic IP address for <code>IPAddress</code>. This ensures that the IP address of your instance never changes. For more information,
    /// see the applicable documentation:</p>
    /// <ul>
    /// <li>
    /// <p>Linux: <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html">Elastic IP Addresses (EIP)</a> in the
    /// <i>Amazon EC2 User Guide for Linux Instances</i>
    /// </p>
    /// </li>
    /// <li>
    /// <p>Windows: <a href="https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-ip-addresses-eip.html">Elastic IP Addresses (EIP)</a> in the
    /// <i>Amazon EC2 User Guide for Windows Instances</i>
    /// </p>
    /// </li>
    /// </ul>
    /// <note>
    /// <p>If a health check already has a value for <code>IPAddress</code>, you can change the value. However, you can't update an
    /// existing health check to add or remove the value of <code>IPAddress</code>. </p>
    /// </note>
    /// <p>For more information, see
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_UpdateHealthCheck.html#Route53-UpdateHealthCheck-request-FullyQualifiedDomainName">FullyQualifiedDomainName</a>.
    /// </p>
    /// <p>Constraints: Route 53 can't check the health of endpoints for which the IP address is in local, private, non-routable, or
    /// multicast ranges. For more information about IP addresses for which you can't create health checks, see the following
    /// documents:</p>
    /// <ul>
    /// <li>
    /// <p>
    /// <a href="https://tools.ietf.org/html/rfc5735">RFC 5735, Special Use IPv4 Addresses</a>
    /// </p>
    /// </li>
    /// <li>
    /// <p>
    /// <a href="https://tools.ietf.org/html/rfc6598">RFC 6598, IANA-Reserved IPv4 Prefix for Shared Address Space</a>
    /// </p>
    /// </li>
    /// <li>
    /// <p>
    /// <a href="https://tools.ietf.org/html/rfc5156">RFC 5156, Special-Use IPv6 Addresses</a>
    /// </p>
    /// </li>
    /// </ul>
    pub fn ip_address(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.ip_address(input);
        self
    }
    /// Sets `IPAddress` directly from an `Option`; see [`ip_address`](Self::ip_address).
    pub fn set_ip_address(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_ip_address(input);
        self
    }
    /// <p>The port on the endpoint that you want Amazon Route 53 to perform health checks on.</p>
    /// <note>
    /// <p>Don't specify a value for <code>Port</code> when you specify a value for <code>Type</code> of <code>CLOUDWATCH_METRIC</code> or
    /// <code>CALCULATED</code>.</p>
    /// </note>
    pub fn port(mut self, input: i32) -> Self {
        self.inner = self.inner.port(input);
        self
    }
    /// Sets `Port` directly from an `Option`; see [`port`](Self::port).
    pub fn set_port(mut self, input: std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_port(input);
        self
    }
    /// <p>The path that you want Amazon Route 53 to request when performing health checks. The path can be any value for which your endpoint
    /// will return an HTTP status code of 2xx or 3xx when the endpoint is healthy, for example the file /docs/route53-health-check.html.
    /// You can also include query string parameters, for example, <code>/welcome.html?language=jp&login=y</code>. </p>
    /// <p>Specify this value only if you want to change it.</p>
    pub fn resource_path(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.resource_path(input);
        self
    }
    /// Sets `ResourcePath` directly from an `Option`; see
    /// [`resource_path`](Self::resource_path).
    pub fn set_resource_path(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_resource_path(input);
        self
    }
    /// <p>Amazon Route 53 behavior depends on whether you specify a value for <code>IPAddress</code>.</p>
    /// <note>
    /// <p>If a health check already has a value for <code>IPAddress</code>, you can change the value. However, you can't update an
    /// existing health check to add or remove the value of <code>IPAddress</code>. </p>
    /// </note>
    /// <p>
    /// <b>If you specify a value for</b>
    /// <code>IPAddress</code>:</p>
    /// <p>Route 53 sends health check requests to the specified IPv4 or IPv6 address and passes the value of <code>FullyQualifiedDomainName</code>
    /// in the <code>Host</code> header for all health checks except TCP health checks. This is typically the fully qualified DNS name of the endpoint
    /// on which you want Route 53 to perform health checks.</p>
    /// <p>When Route 53 checks the health of an endpoint, here is how it constructs the <code>Host</code> header:</p>
    /// <ul>
    /// <li>
    /// <p>If you specify a value of <code>80</code> for <code>Port</code> and <code>HTTP</code> or <code>HTTP_STR_MATCH</code> for
    /// <code>Type</code>, Route 53 passes the value of <code>FullyQualifiedDomainName</code> to the endpoint in the <code>Host</code> header.</p>
    /// </li>
    /// <li>
    /// <p>If you specify a value of <code>443</code> for <code>Port</code> and <code>HTTPS</code> or <code>HTTPS_STR_MATCH</code> for
    /// <code>Type</code>, Route 53 passes the value of <code>FullyQualifiedDomainName</code> to the endpoint in the <code>Host</code> header.</p>
    /// </li>
    /// <li>
    /// <p>If you specify another value for <code>Port</code> and any value except <code>TCP</code> for <code>Type</code>, Route 53 passes
    /// <i>
    /// <code>FullyQualifiedDomainName</code>:<code>Port</code>
    /// </i> to the endpoint in the <code>Host</code> header.</p>
    /// </li>
    /// </ul>
    /// <p>If you don't specify a value for <code>FullyQualifiedDomainName</code>, Route 53 substitutes the value of <code>IPAddress</code>
    /// in the <code>Host</code> header in each of the above cases.</p>
    /// <p>
    /// <b>If you don't specify a value for</b>
    /// <code>IPAddress</code>:</p>
    /// <p>If you don't specify a value for <code>IPAddress</code>, Route 53 sends a DNS request to the domain that you specify in
    /// <code>FullyQualifiedDomainName</code> at the interval you specify in <code>RequestInterval</code>. Using an IPv4 address that is
    /// returned by DNS, Route 53 then checks the health of the endpoint.</p>
    /// <note>
    /// <p>If you don't specify a value for <code>IPAddress</code>, Route 53 uses only IPv4 to send health checks to the endpoint.
    /// If there's no resource record set with a type of A for the name that you specify for <code>FullyQualifiedDomainName</code>,
    /// the health check fails with a "DNS resolution failed" error.</p>
    /// </note>
    /// <p>If you want to check the health of weighted, latency, or failover resource record sets and you choose to specify the endpoint only by
    /// <code>FullyQualifiedDomainName</code>, we recommend that you create a separate health check for each endpoint. For example, create a
    /// health check for each HTTP server that is serving content for www.example.com. For the value of <code>FullyQualifiedDomainName</code>,
    /// specify the domain name of the server (such as <code>us-east-2-www.example.com</code>), not the name of the resource record sets (www.example.com).</p>
    /// <important>
    /// <p>In this configuration, if the value of <code>FullyQualifiedDomainName</code> matches the name of the resource record sets and
    /// you then associate the health check with those resource record sets, health check results will be unpredictable.</p>
    /// </important>
    /// <p>In addition, if the value of <code>Type</code> is <code>HTTP</code>, <code>HTTPS</code>, <code>HTTP_STR_MATCH</code>, or
    /// <code>HTTPS_STR_MATCH</code>, Route 53 passes the value of <code>FullyQualifiedDomainName</code> in the <code>Host</code> header, as it does
    /// when you specify a value for <code>IPAddress</code>. If the value of <code>Type</code> is <code>TCP</code>, Route 53 doesn't pass a
    /// <code>Host</code> header.</p>
    pub fn fully_qualified_domain_name(
        mut self,
        input: impl Into<std::string::String>,
    ) -> Self {
        self.inner = self.inner.fully_qualified_domain_name(input);
        self
    }
    /// Sets `FullyQualifiedDomainName` directly from an `Option`; see
    /// [`fully_qualified_domain_name`](Self::fully_qualified_domain_name).
    pub fn set_fully_qualified_domain_name(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_fully_qualified_domain_name(input);
        self
    }
    /// <p>If the value of <code>Type</code> is <code>HTTP_STR_MATCH</code> or <code>HTTPS_STR_MATCH</code>, the string that you want
    /// Amazon Route 53 to search for in the response body from the specified resource. If the string appears in the response body, Route 53 considers
    /// the resource healthy. (You can't change the value of <code>Type</code> when you update a health check.)</p>
    pub fn search_string(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.search_string(input);
        self
    }
    /// Sets `SearchString` directly from an `Option`; see
    /// [`search_string`](Self::search_string).
    pub fn set_search_string(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_search_string(input);
        self
    }
    /// <p>The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint
    /// from unhealthy to healthy or vice versa. For more information, see
    /// <a href="https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html">How Amazon Route 53 Determines Whether an Endpoint Is Healthy</a>
    /// in the <i>Amazon Route 53 Developer Guide</i>.</p>
    /// <p>If you don't specify a value for <code>FailureThreshold</code>, the default value is three health checks.</p>
    pub fn failure_threshold(mut self, input: i32) -> Self {
        self.inner = self.inner.failure_threshold(input);
        self
    }
    /// Sets `FailureThreshold` directly from an `Option`; see
    /// [`failure_threshold`](Self::failure_threshold).
    pub fn set_failure_threshold(mut self, input: std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_failure_threshold(input);
        self
    }
    /// <p>Specify whether you want Amazon Route 53 to invert the status of a health check, for example, to consider a health check unhealthy when it
    /// otherwise would be considered healthy.</p>
    pub fn inverted(mut self, input: bool) -> Self {
        self.inner = self.inner.inverted(input);
        self
    }
    /// Sets `Inverted` directly from an `Option`; see [`inverted`](Self::inverted).
    pub fn set_inverted(mut self, input: std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_inverted(input);
        self
    }
    /// <p>Stops Route 53 from performing health checks. When you disable a health check, here's what happens:</p>
    /// <ul>
    /// <li>
    /// <p>
    /// <b>Health checks that check the health of endpoints:</b>
    /// Route 53 stops submitting requests to your application, server, or other resource.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>Calculated health checks:</b>
    /// Route 53 stops aggregating the status of the referenced health checks.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <b>Health checks that monitor CloudWatch alarms:</b>
    /// Route 53 stops monitoring the corresponding CloudWatch metrics.</p>
    /// </li>
    /// </ul>
    /// <p>After you disable a health check, Route 53 considers the status of the health check to always be healthy. If you configured DNS failover,
    /// Route 53 continues to route traffic to the corresponding resources. If you want to stop routing traffic to a resource, change the value of
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_UpdateHealthCheck.html#Route53-UpdateHealthCheck-request-Inverted">Inverted</a>.
    /// </p>
    /// <p>Charges for a health check still apply when the health check is disabled. For more information, see
    /// <a href="http://aws.amazon.com/route53/pricing/">Amazon Route 53 Pricing</a>.</p>
    pub fn disabled(mut self, input: bool) -> Self {
        self.inner = self.inner.disabled(input);
        self
    }
    /// Sets `Disabled` directly from an `Option`; see [`disabled`](Self::disabled).
    pub fn set_disabled(mut self, input: std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_disabled(input);
        self
    }
    /// <p>The number of child health checks that are associated with a <code>CALCULATED</code> health that Amazon Route 53 must consider healthy for the
    /// <code>CALCULATED</code> health check to be considered healthy. To specify the child health checks that you want to associate with a
    /// <code>CALCULATED</code> health check, use the <code>ChildHealthChecks</code> and <code>ChildHealthCheck</code> elements.</p>
    /// <p>Note the following:</p>
    /// <ul>
    /// <li>
    /// <p>If you specify a number greater than the number of child health checks, Route 53 always considers this health check to be unhealthy.</p>
    /// </li>
    /// <li>
    /// <p>If you specify <code>0</code>, Route 53 always considers this health check to be healthy.</p>
    /// </li>
    /// </ul>
    pub fn health_threshold(mut self, input: i32) -> Self {
        self.inner = self.inner.health_threshold(input);
        self
    }
    /// Sets `HealthThreshold` directly from an `Option`; see
    /// [`health_threshold`](Self::health_threshold).
    pub fn set_health_threshold(mut self, input: std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_health_threshold(input);
        self
    }
    /// <p>A complex type that contains one <code>ChildHealthCheck</code> element for each health check that you want to associate with a
    /// <code>CALCULATED</code> health check.</p>
    pub fn child_health_checks(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.child_health_checks(inp);
        self
    }
    /// Sets the `ChildHealthChecks` list as a whole from an `Option`; see
    /// [`child_health_checks`](Self::child_health_checks), which appends one element at a time.
    pub fn set_child_health_checks(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_child_health_checks(input);
        self
    }
    /// <p>Specify whether you want Amazon Route 53 to send the value of <code>FullyQualifiedDomainName</code> to the endpoint in the <code>client_hello</code>
    /// message during <code>TLS</code> negotiation. This allows the endpoint to respond to <code>HTTPS</code> health check requests with the applicable
    /// SSL/TLS certificate.</p>
    /// <p>Some endpoints require that HTTPS requests include the host name in the <code>client_hello</code> message. If you don't enable SNI,
    /// the status of the health check will be SSL alert <code>handshake_failure</code>. A health check can also have that status for other reasons.
    /// If SNI is enabled and you're still getting the error, check the SSL/TLS configuration on your endpoint and confirm that your certificate is valid.</p>
    /// <p>The SSL/TLS certificate on your endpoint includes a domain name in the <code>Common Name</code> field and possibly several more
    /// in the <code>Subject Alternative Names</code> field. One of the domain names in the certificate should match the value that you specify for
    /// <code>FullyQualifiedDomainName</code>. If the endpoint responds to the <code>client_hello</code> message with a certificate that does not
    /// include the domain name that you specified in <code>FullyQualifiedDomainName</code>, a health checker will retry the handshake. In the
    /// second attempt, the health checker will omit <code>FullyQualifiedDomainName</code> from the <code>client_hello</code> message.</p>
    pub fn enable_sni(mut self, input: bool) -> Self {
        self.inner = self.inner.enable_sni(input);
        self
    }
    /// Sets `EnableSNI` directly from an `Option`; see [`enable_sni`](Self::enable_sni).
    pub fn set_enable_sni(mut self, input: std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_enable_sni(input);
        self
    }
    /// <p>A complex type that contains one <code>Region</code> element for each region that you want Amazon Route 53 health checkers to check
    /// the specified endpoint from.</p>
    pub fn regions(mut self, inp: impl Into<crate::model::HealthCheckRegion>) -> Self {
        self.inner = self.inner.regions(inp);
        self
    }
    /// Sets the `Regions` list as a whole from an `Option`; see
    /// [`regions`](Self::regions), which appends one element at a time.
    pub fn set_regions(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::HealthCheckRegion>>,
    ) -> Self {
        self.inner = self.inner.set_regions(input);
        self
    }
    /// <p>A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether
    /// the specified health check is healthy.</p>
    pub fn alarm_identifier(mut self, input: crate::model::AlarmIdentifier) -> Self {
        self.inner = self.inner.alarm_identifier(input);
        self
    }
    /// Sets `AlarmIdentifier` directly from an `Option`; see
    /// [`alarm_identifier`](Self::alarm_identifier).
    pub fn set_alarm_identifier(
        mut self,
        input: std::option::Option<crate::model::AlarmIdentifier>,
    ) -> Self {
        self.inner = self.inner.set_alarm_identifier(input);
        self
    }
    /// <p>When CloudWatch has insufficient data about the metric to determine the alarm state, the status that you want Amazon Route 53 to assign
    /// to the health check:</p>
    /// <ul>
    /// <li>
    /// <p>
    /// <code>Healthy</code>: Route 53 considers the health check to be healthy.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <code>Unhealthy</code>: Route 53 considers the health check to be unhealthy.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <code>LastKnownStatus</code>: Route 53 uses the status of the health check from the last time CloudWatch had sufficient data
    /// to determine the alarm state. For new health checks that have no last known status, the default status for the health check is healthy.</p>
    /// </li>
    /// </ul>
    pub fn insufficient_data_health_status(
        mut self,
        input: crate::model::InsufficientDataHealthStatus,
    ) -> Self {
        self.inner = self.inner.insufficient_data_health_status(input);
        self
    }
    /// Sets `InsufficientDataHealthStatus` directly from an `Option`; see
    /// [`insufficient_data_health_status`](Self::insufficient_data_health_status).
    pub fn set_insufficient_data_health_status(
        mut self,
        input: std::option::Option<crate::model::InsufficientDataHealthStatus>,
    ) -> Self {
        self.inner = self.inner.set_insufficient_data_health_status(input);
        self
    }
    /// <p>A complex type that contains one <code>ResettableElementName</code> element for each element that you want to reset to the default value.
    /// Valid values for <code>ResettableElementName</code> include the following:</p>
    /// <ul>
    /// <li>
    /// <p>
    /// <code>ChildHealthChecks</code>: Amazon Route 53 resets
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-ChildHealthChecks">ChildHealthChecks</a>
    /// to null.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <code>FullyQualifiedDomainName</code>: Route 53 resets
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_UpdateHealthCheck.html#Route53-UpdateHealthCheck-request-FullyQualifiedDomainName">FullyQualifiedDomainName</a>.
    /// to null.</p>
    /// </li>
    /// <li>
    /// <p>
    /// <code>Regions</code>: Route 53 resets the
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions">Regions</a>
    /// list to the default set of regions. </p>
    /// </li>
    /// <li>
    /// <p>
    /// <code>ResourcePath</code>: Route 53 resets
    /// <a href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-ResourcePath">ResourcePath</a>
    /// to null.</p>
    /// </li>
    /// </ul>
    pub fn reset_elements(
        mut self,
        inp: impl Into<crate::model::ResettableElementName>,
    ) -> Self {
        self.inner = self.inner.reset_elements(inp);
        self
    }
    /// Sets the `ResetElements` list as a whole from an `Option`; see
    /// [`reset_elements`](Self::reset_elements), which appends one element at a time.
    pub fn set_reset_elements(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::ResettableElementName>>,
    ) -> Self {
        self.inner = self.inner.set_reset_elements(input);
        self
    }
}
/// Fluent builder for the `UpdateHostedZoneComment` operation: collect the
/// operation input with the setter methods, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct UpdateHostedZoneComment<C = aws_hyper::DynConnector> {
    /// Shared client state (connector + configuration) used by `send`.
    handle: std::sync::Arc<super::Handle<C>>,
    /// Accumulates the operation input as setter methods are invoked.
    inner: crate::input::update_hosted_zone_comment_input::Builder,
}
impl<C> UpdateHostedZoneComment<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateHostedZoneCommentOutput,
smithy_http::result::SdkError<crate::error::UpdateHostedZoneCommentError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The ID for the hosted zone that you want to update the comment for.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input);
self
}
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
/// <p>The new comment for the hosted zone. If you don't specify a value for <code>Comment</code>, Amazon Route 53 deletes the existing value of the
/// <code>Comment</code> element, if any.</p>
pub fn comment(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.comment(input);
self
}
pub fn set_comment(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_comment(input);
self
}
}
/// Fluent builder for the `UpdateTrafficPolicyComment` operation: collect the
/// operation input with the setter methods, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct UpdateTrafficPolicyComment<C = aws_hyper::DynConnector> {
    /// Shared client state (connector + configuration) used by `send`.
    handle: std::sync::Arc<super::Handle<C>>,
    /// Accumulates the operation input as setter methods are invoked.
    inner: crate::input::update_traffic_policy_comment_input::Builder,
}
impl<C> UpdateTrafficPolicyComment<C> {
    /// Creates a new `UpdateTrafficPolicyComment` fluent builder from the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Builds the operation input and dispatches the request, returning the
    /// operation output or an `SdkError`. Input-building and
    /// operation-construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateTrafficPolicyCommentOutput,
        smithy_http::result::SdkError<crate::error::UpdateTrafficPolicyCommentError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(smithy_http::result::SdkError::ConstructionFailure(err.into()))
            }
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(err) => {
                return Err(smithy_http::result::SdkError::ConstructionFailure(err.into()))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The value of <code>Id</code> for the traffic policy that you want to update the comment for.</p>
    pub fn id(self, input: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.id(input),
        }
    }
    /// Sets the traffic policy ID directly from an `Option`; see [`id`](Self::id).
    pub fn set_id(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_id(input),
        }
    }
    /// <p>The value of <code>Version</code> for the traffic policy that you want to update the comment for.</p>
    pub fn version(self, input: i32) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.version(input),
        }
    }
    /// Sets the traffic policy version directly from an `Option`; see [`version`](Self::version).
    pub fn set_version(self, input: std::option::Option<i32>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_version(input),
        }
    }
    /// <p>The new comment for the specified traffic policy and version.</p>
    pub fn comment(self, input: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.comment(input),
        }
    }
    /// Sets the comment directly from an `Option`; see [`comment`](Self::comment).
    pub fn set_comment(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_comment(input),
        }
    }
}
/// Fluent builder for the `UpdateTrafficPolicyInstance` operation: collect the
/// operation input with the setter methods, then dispatch it with `send`.
#[derive(std::fmt::Debug)]
pub struct UpdateTrafficPolicyInstance<C = aws_hyper::DynConnector> {
    /// Shared client state (connector + configuration) used by `send`.
    handle: std::sync::Arc<super::Handle<C>>,
    /// Accumulates the operation input as setter methods are invoked.
    inner: crate::input::update_traffic_policy_instance_input::Builder,
}
impl<C> UpdateTrafficPolicyInstance<C> {
    /// Creates a new `UpdateTrafficPolicyInstance` fluent builder from the shared client handle.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
        Self {
            inner: Default::default(),
            handle,
        }
    }
    /// Builds the operation input and dispatches the request, returning the
    /// operation output or an `SdkError`. Input-building and
    /// operation-construction failures surface as
    /// `SdkError::ConstructionFailure`.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateTrafficPolicyInstanceOutput,
        smithy_http::result::SdkError<crate::error::UpdateTrafficPolicyInstanceError>,
    >
    where
        C: aws_hyper::SmithyConnector,
    {
        let input = match self.inner.build() {
            Ok(input) => input,
            Err(err) => {
                return Err(smithy_http::result::SdkError::ConstructionFailure(err.into()))
            }
        };
        let op = match input.make_operation(&self.handle.conf) {
            Ok(op) => op,
            Err(err) => {
                return Err(smithy_http::result::SdkError::ConstructionFailure(err.into()))
            }
        };
        self.handle.client.call(op).await
    }
    /// <p>The ID of the traffic policy instance that you want to update.</p>
    pub fn id(self, input: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.id(input),
        }
    }
    /// Sets the traffic policy instance ID directly from an `Option`; see [`id`](Self::id).
    pub fn set_id(self, input: std::option::Option<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_id(input),
        }
    }
    /// <p>The TTL that you want Amazon Route 53 to assign to all of the updated resource record sets.</p>
    pub fn ttl(self, input: i64) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.ttl(input),
        }
    }
    /// Sets the TTL directly from an `Option`; see [`ttl`](Self::ttl).
    pub fn set_ttl(self, input: std::option::Option<i64>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_ttl(input),
        }
    }
    /// <p>The ID of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.</p>
    pub fn traffic_policy_id(self, input: impl Into<std::string::String>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.traffic_policy_id(input),
        }
    }
    /// Sets the traffic policy ID directly from an `Option`; see
    /// [`traffic_policy_id`](Self::traffic_policy_id).
    pub fn set_traffic_policy_id(
        self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_traffic_policy_id(input),
        }
    }
    /// <p>The version of the traffic policy that you want Amazon Route 53 to use to update resource record sets for the specified traffic policy instance.</p>
    pub fn traffic_policy_version(self, input: i32) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.traffic_policy_version(input),
        }
    }
    /// Sets the traffic policy version directly from an `Option`; see
    /// [`traffic_policy_version`](Self::traffic_policy_version).
    pub fn set_traffic_policy_version(self, input: std::option::Option<i32>) -> Self {
        let Self { handle, inner } = self;
        Self {
            handle,
            inner: inner.set_traffic_policy_version(input),
        }
    }
}
}
| 45.530332 | 475 | 0.582729 |
215aad31ca2e80f0ee6af7518d017e63fb7ae0f6
| 130,392 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateAssessmentTargetOutput {}
impl std::fmt::Debug for UpdateAssessmentTargetOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to record; render just the struct name.
        f.debug_struct("UpdateAssessmentTargetOutput").finish()
    }
}
/// See [`UpdateAssessmentTargetOutput`](crate::output::UpdateAssessmentTargetOutput)
pub mod update_assessment_target_output {
    /// A builder for [`UpdateAssessmentTargetOutput`](crate::output::UpdateAssessmentTargetOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`UpdateAssessmentTargetOutput`](crate::output::UpdateAssessmentTargetOutput)
        pub fn build(self) -> crate::output::UpdateAssessmentTargetOutput {
            // The output carries no data; consume the (empty) builder and
            // produce the empty output value.
            let Self {} = self;
            crate::output::UpdateAssessmentTargetOutput {}
        }
    }
}
impl UpdateAssessmentTargetOutput {
    /// Creates a new builder-style object to manufacture [`UpdateAssessmentTargetOutput`](crate::output::UpdateAssessmentTargetOutput)
    pub fn builder() -> crate::output::update_assessment_target_output::Builder {
        // Resolved to the builder's derived `Default` via the return type.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UnsubscribeFromEventOutput {}
impl std::fmt::Debug for UnsubscribeFromEventOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to record; render just the struct name.
        f.debug_struct("UnsubscribeFromEventOutput").finish()
    }
}
/// See [`UnsubscribeFromEventOutput`](crate::output::UnsubscribeFromEventOutput)
pub mod unsubscribe_from_event_output {
    /// A builder for [`UnsubscribeFromEventOutput`](crate::output::UnsubscribeFromEventOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`UnsubscribeFromEventOutput`](crate::output::UnsubscribeFromEventOutput)
        pub fn build(self) -> crate::output::UnsubscribeFromEventOutput {
            // The output carries no data; consume the (empty) builder and
            // produce the empty output value.
            let Self {} = self;
            crate::output::UnsubscribeFromEventOutput {}
        }
    }
}
impl UnsubscribeFromEventOutput {
    /// Creates a new builder-style object to manufacture [`UnsubscribeFromEventOutput`](crate::output::UnsubscribeFromEventOutput)
    pub fn builder() -> crate::output::unsubscribe_from_event_output::Builder {
        // Resolved to the builder's derived `Default` via the return type.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SubscribeToEventOutput {}
impl std::fmt::Debug for SubscribeToEventOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to record; render just the struct name.
        f.debug_struct("SubscribeToEventOutput").finish()
    }
}
/// See [`SubscribeToEventOutput`](crate::output::SubscribeToEventOutput)
pub mod subscribe_to_event_output {
    /// A builder for [`SubscribeToEventOutput`](crate::output::SubscribeToEventOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`SubscribeToEventOutput`](crate::output::SubscribeToEventOutput)
        pub fn build(self) -> crate::output::SubscribeToEventOutput {
            // The output carries no data; consume the (empty) builder and
            // produce the empty output value.
            let Self {} = self;
            crate::output::SubscribeToEventOutput {}
        }
    }
}
impl SubscribeToEventOutput {
    /// Creates a new builder-style object to manufacture [`SubscribeToEventOutput`](crate::output::SubscribeToEventOutput)
    pub fn builder() -> crate::output::subscribe_to_event_output::Builder {
        // Resolved to the builder's derived `Default` via the return type.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StopAssessmentRunOutput {}
impl std::fmt::Debug for StopAssessmentRunOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to record; render just the struct name.
        f.debug_struct("StopAssessmentRunOutput").finish()
    }
}
/// See [`StopAssessmentRunOutput`](crate::output::StopAssessmentRunOutput)
pub mod stop_assessment_run_output {
    /// A builder for [`StopAssessmentRunOutput`](crate::output::StopAssessmentRunOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`StopAssessmentRunOutput`](crate::output::StopAssessmentRunOutput)
        pub fn build(self) -> crate::output::StopAssessmentRunOutput {
            // The output carries no data; consume the (empty) builder and
            // produce the empty output value.
            let Self {} = self;
            crate::output::StopAssessmentRunOutput {}
        }
    }
}
impl StopAssessmentRunOutput {
    /// Creates a new builder-style object to manufacture [`StopAssessmentRunOutput`](crate::output::StopAssessmentRunOutput)
    pub fn builder() -> crate::output::stop_assessment_run_output::Builder {
        // Resolved to the builder's derived `Default` via the return type.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct StartAssessmentRunOutput {
    /// The ARN of the assessment run that has been started.
    pub assessment_run_arn: Option<String>,
}
impl StartAssessmentRunOutput {
    /// The ARN of the assessment run that has been started.
    pub fn assessment_run_arn(&self) -> Option<&str> {
        self.assessment_run_arn.as_deref()
    }
}
impl std::fmt::Debug for StartAssessmentRunOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("StartAssessmentRunOutput")
            .field("assessment_run_arn", &self.assessment_run_arn)
            .finish()
    }
}
/// See [`StartAssessmentRunOutput`](crate::output::StartAssessmentRunOutput)
pub mod start_assessment_run_output {
    /// A builder for [`StartAssessmentRunOutput`](crate::output::StartAssessmentRunOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {
        pub(crate) assessment_run_arn: Option<String>,
    }
    impl Builder {
        /// The ARN of the assessment run that has been started.
        pub fn assessment_run_arn(mut self, input: impl Into<String>) -> Self {
            self.assessment_run_arn = Some(input.into());
            self
        }
        /// Sets or clears the ARN of the started assessment run.
        pub fn set_assessment_run_arn(mut self, input: Option<String>) -> Self {
            self.assessment_run_arn = input;
            self
        }
        /// Consumes the builder and constructs a [`StartAssessmentRunOutput`](crate::output::StartAssessmentRunOutput)
        pub fn build(self) -> crate::output::StartAssessmentRunOutput {
            crate::output::StartAssessmentRunOutput {
                assessment_run_arn: self.assessment_run_arn,
            }
        }
    }
}
impl StartAssessmentRunOutput {
    /// Creates a new builder-style object to manufacture [`StartAssessmentRunOutput`](crate::output::StartAssessmentRunOutput)
    pub fn builder() -> crate::output::start_assessment_run_output::Builder {
        crate::output::start_assessment_run_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct SetTagsForResourceOutput {}
impl std::fmt::Debug for SetTagsForResourceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to render; emit just the type name.
        f.debug_struct("SetTagsForResourceOutput").finish()
    }
}
/// See [`SetTagsForResourceOutput`](crate::output::SetTagsForResourceOutput)
pub mod set_tags_for_resource_output {
    /// A builder for [`SetTagsForResourceOutput`](crate::output::SetTagsForResourceOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`SetTagsForResourceOutput`](crate::output::SetTagsForResourceOutput)
        pub fn build(self) -> crate::output::SetTagsForResourceOutput {
            crate::output::SetTagsForResourceOutput {}
        }
    }
}
impl SetTagsForResourceOutput {
    /// Creates a new builder-style object to manufacture [`SetTagsForResourceOutput`](crate::output::SetTagsForResourceOutput)
    pub fn builder() -> crate::output::set_tags_for_resource_output::Builder {
        crate::output::set_tags_for_resource_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct RemoveAttributesFromFindingsOutput {
    /// Attribute details that could not be described; each failed item is
    /// mapped to an error code.
    pub failed_items: Option<std::collections::HashMap<String, crate::model::FailedItemDetails>>,
}
impl RemoveAttributesFromFindingsOutput {
    /// Attribute details that could not be described; each failed item is
    /// mapped to an error code.
    pub fn failed_items(
        &self,
    ) -> Option<&std::collections::HashMap<String, crate::model::FailedItemDetails>> {
        self.failed_items.as_ref()
    }
}
impl std::fmt::Debug for RemoveAttributesFromFindingsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RemoveAttributesFromFindingsOutput")
            .field("failed_items", &self.failed_items)
            .finish()
    }
}
/// See [`RemoveAttributesFromFindingsOutput`](crate::output::RemoveAttributesFromFindingsOutput)
pub mod remove_attributes_from_findings_output {
    /// A builder for [`RemoveAttributesFromFindingsOutput`](crate::output::RemoveAttributesFromFindingsOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {
        pub(crate) failed_items:
            Option<std::collections::HashMap<String, crate::model::FailedItemDetails>>,
    }
    impl Builder {
        /// Adds one key-value pair to `failed_items`.
        ///
        /// To replace the whole map, use [`set_failed_items`](Self::set_failed_items).
        pub fn failed_items(
            mut self,
            k: impl Into<String>,
            v: impl Into<crate::model::FailedItemDetails>,
        ) -> Self {
            // Lazily create the map on first insertion.
            self.failed_items
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// Overwrites `failed_items` with the supplied map (or clears it with `None`).
        pub fn set_failed_items(
            mut self,
            input: Option<std::collections::HashMap<String, crate::model::FailedItemDetails>>,
        ) -> Self {
            self.failed_items = input;
            self
        }
        /// Consumes the builder and constructs a [`RemoveAttributesFromFindingsOutput`](crate::output::RemoveAttributesFromFindingsOutput)
        pub fn build(self) -> crate::output::RemoveAttributesFromFindingsOutput {
            crate::output::RemoveAttributesFromFindingsOutput {
                failed_items: self.failed_items,
            }
        }
    }
}
impl RemoveAttributesFromFindingsOutput {
    /// Creates a new builder-style object to manufacture [`RemoveAttributesFromFindingsOutput`](crate::output::RemoveAttributesFromFindingsOutput)
    pub fn builder() -> crate::output::remove_attributes_from_findings_output::Builder {
        crate::output::remove_attributes_from_findings_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct RegisterCrossAccountAccessRoleOutput {}
impl std::fmt::Debug for RegisterCrossAccountAccessRoleOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // No fields to render; emit just the type name.
        f.debug_struct("RegisterCrossAccountAccessRoleOutput").finish()
    }
}
/// See [`RegisterCrossAccountAccessRoleOutput`](crate::output::RegisterCrossAccountAccessRoleOutput)
pub mod register_cross_account_access_role_output {
    /// A builder for [`RegisterCrossAccountAccessRoleOutput`](crate::output::RegisterCrossAccountAccessRoleOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`RegisterCrossAccountAccessRoleOutput`](crate::output::RegisterCrossAccountAccessRoleOutput)
        pub fn build(self) -> crate::output::RegisterCrossAccountAccessRoleOutput {
            crate::output::RegisterCrossAccountAccessRoleOutput {}
        }
    }
}
impl RegisterCrossAccountAccessRoleOutput {
    /// Creates a new builder-style object to manufacture [`RegisterCrossAccountAccessRoleOutput`](crate::output::RegisterCrossAccountAccessRoleOutput)
    pub fn builder() -> crate::output::register_cross_account_access_role_output::Builder {
        crate::output::register_cross_account_access_role_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct PreviewAgentsOutput {
    /// The resulting list of agents.
    pub agent_previews: Option<Vec<crate::model::AgentPreview>>,
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub next_token: Option<String>,
}
impl PreviewAgentsOutput {
    /// The resulting list of agents.
    pub fn agent_previews(&self) -> Option<&[crate::model::AgentPreview]> {
        self.agent_previews.as_deref()
    }
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub fn next_token(&self) -> Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for PreviewAgentsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("PreviewAgentsOutput")
            .field("agent_previews", &self.agent_previews)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`PreviewAgentsOutput`](crate::output::PreviewAgentsOutput)
pub mod preview_agents_output {
    /// A builder for [`PreviewAgentsOutput`](crate::output::PreviewAgentsOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {
        pub(crate) agent_previews: Option<Vec<crate::model::AgentPreview>>,
        pub(crate) next_token: Option<String>,
    }
    impl Builder {
        /// Appends a single agent preview to `agent_previews`.
        ///
        /// To replace the whole collection, use [`set_agent_previews`](Self::set_agent_previews).
        pub fn agent_previews(mut self, input: impl Into<crate::model::AgentPreview>) -> Self {
            // Lazily create the vector on first append.
            self.agent_previews
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// Overwrites `agent_previews` with the supplied collection.
        pub fn set_agent_previews(
            mut self,
            input: Option<Vec<crate::model::AgentPreview>>,
        ) -> Self {
            self.agent_previews = input;
            self
        }
        /// Sets the pagination token to hand back to the caller.
        pub fn next_token(mut self, input: impl Into<String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets or clears the pagination token.
        pub fn set_next_token(mut self, input: Option<String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`PreviewAgentsOutput`](crate::output::PreviewAgentsOutput)
        pub fn build(self) -> crate::output::PreviewAgentsOutput {
            crate::output::PreviewAgentsOutput {
                agent_previews: self.agent_previews,
                next_token: self.next_token,
            }
        }
    }
}
impl PreviewAgentsOutput {
    /// Creates a new builder-style object to manufacture [`PreviewAgentsOutput`](crate::output::PreviewAgentsOutput)
    pub fn builder() -> crate::output::preview_agents_output::Builder {
        crate::output::preview_agents_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct ListTagsForResourceOutput {
    /// A collection of key and value pairs.
    pub tags: Option<Vec<crate::model::Tag>>,
}
impl ListTagsForResourceOutput {
    /// A collection of key and value pairs.
    pub fn tags(&self) -> Option<&[crate::model::Tag]> {
        self.tags.as_deref()
    }
}
impl std::fmt::Debug for ListTagsForResourceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ListTagsForResourceOutput")
            .field("tags", &self.tags)
            .finish()
    }
}
/// See [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
pub mod list_tags_for_resource_output {
    /// A builder for [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {
        pub(crate) tags: Option<Vec<crate::model::Tag>>,
    }
    impl Builder {
        /// Appends a single tag to `tags`.
        ///
        /// To replace the whole collection, use [`set_tags`](Self::set_tags).
        pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
            // Lazily create the vector on first append.
            self.tags.get_or_insert_with(Vec::new).push(input.into());
            self
        }
        /// Overwrites `tags` with the supplied collection.
        pub fn set_tags(mut self, input: Option<Vec<crate::model::Tag>>) -> Self {
            self.tags = input;
            self
        }
        /// Consumes the builder and constructs a [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
        pub fn build(self) -> crate::output::ListTagsForResourceOutput {
            crate::output::ListTagsForResourceOutput { tags: self.tags }
        }
    }
}
impl ListTagsForResourceOutput {
    /// Creates a new builder-style object to manufacture [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
    pub fn builder() -> crate::output::list_tags_for_resource_output::Builder {
        crate::output::list_tags_for_resource_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct ListRulesPackagesOutput {
    /// ARNs of the rules packages returned by the action.
    pub rules_package_arns: Option<Vec<String>>,
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub next_token: Option<String>,
}
impl ListRulesPackagesOutput {
    /// ARNs of the rules packages returned by the action.
    pub fn rules_package_arns(&self) -> Option<&[String]> {
        self.rules_package_arns.as_deref()
    }
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub fn next_token(&self) -> Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for ListRulesPackagesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ListRulesPackagesOutput")
            .field("rules_package_arns", &self.rules_package_arns)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`ListRulesPackagesOutput`](crate::output::ListRulesPackagesOutput)
pub mod list_rules_packages_output {
    /// A builder for [`ListRulesPackagesOutput`](crate::output::ListRulesPackagesOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {
        pub(crate) rules_package_arns: Option<Vec<String>>,
        pub(crate) next_token: Option<String>,
    }
    impl Builder {
        /// Appends a single ARN to `rules_package_arns`.
        ///
        /// To replace the whole collection, use [`set_rules_package_arns`](Self::set_rules_package_arns).
        pub fn rules_package_arns(mut self, input: impl Into<String>) -> Self {
            // Lazily create the vector on first append.
            self.rules_package_arns
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// Overwrites `rules_package_arns` with the supplied collection.
        pub fn set_rules_package_arns(mut self, input: Option<Vec<String>>) -> Self {
            self.rules_package_arns = input;
            self
        }
        /// Sets the pagination token to hand back to the caller.
        pub fn next_token(mut self, input: impl Into<String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets or clears the pagination token.
        pub fn set_next_token(mut self, input: Option<String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListRulesPackagesOutput`](crate::output::ListRulesPackagesOutput)
        pub fn build(self) -> crate::output::ListRulesPackagesOutput {
            crate::output::ListRulesPackagesOutput {
                rules_package_arns: self.rules_package_arns,
                next_token: self.next_token,
            }
        }
    }
}
impl ListRulesPackagesOutput {
    /// Creates a new builder-style object to manufacture [`ListRulesPackagesOutput`](crate::output::ListRulesPackagesOutput)
    pub fn builder() -> crate::output::list_rules_packages_output::Builder {
        crate::output::list_rules_packages_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct ListFindingsOutput {
    /// ARNs of the findings returned by the action.
    pub finding_arns: Option<Vec<String>>,
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub next_token: Option<String>,
}
impl ListFindingsOutput {
    /// ARNs of the findings returned by the action.
    pub fn finding_arns(&self) -> Option<&[String]> {
        self.finding_arns.as_deref()
    }
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub fn next_token(&self) -> Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for ListFindingsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ListFindingsOutput")
            .field("finding_arns", &self.finding_arns)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`ListFindingsOutput`](crate::output::ListFindingsOutput)
pub mod list_findings_output {
    /// A builder for [`ListFindingsOutput`](crate::output::ListFindingsOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {
        pub(crate) finding_arns: Option<Vec<String>>,
        pub(crate) next_token: Option<String>,
    }
    impl Builder {
        /// Appends a single ARN to `finding_arns`.
        ///
        /// To replace the whole collection, use [`set_finding_arns`](Self::set_finding_arns).
        pub fn finding_arns(mut self, input: impl Into<String>) -> Self {
            // Lazily create the vector on first append.
            self.finding_arns
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// Overwrites `finding_arns` with the supplied collection.
        pub fn set_finding_arns(mut self, input: Option<Vec<String>>) -> Self {
            self.finding_arns = input;
            self
        }
        /// Sets the pagination token to hand back to the caller.
        pub fn next_token(mut self, input: impl Into<String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets or clears the pagination token.
        pub fn set_next_token(mut self, input: Option<String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListFindingsOutput`](crate::output::ListFindingsOutput)
        pub fn build(self) -> crate::output::ListFindingsOutput {
            crate::output::ListFindingsOutput {
                finding_arns: self.finding_arns,
                next_token: self.next_token,
            }
        }
    }
}
impl ListFindingsOutput {
    /// Creates a new builder-style object to manufacture [`ListFindingsOutput`](crate::output::ListFindingsOutput)
    pub fn builder() -> crate::output::list_findings_output::Builder {
        crate::output::list_findings_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct ListExclusionsOutput {
    /// A list of exclusions' ARNs returned by the action.
    pub exclusion_arns: Option<Vec<String>>,
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub next_token: Option<String>,
}
impl ListExclusionsOutput {
    /// A list of exclusions' ARNs returned by the action.
    pub fn exclusion_arns(&self) -> Option<&[String]> {
        self.exclusion_arns.as_deref()
    }
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub fn next_token(&self) -> Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for ListExclusionsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ListExclusionsOutput")
            .field("exclusion_arns", &self.exclusion_arns)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`ListExclusionsOutput`](crate::output::ListExclusionsOutput)
pub mod list_exclusions_output {
    /// A builder for [`ListExclusionsOutput`](crate::output::ListExclusionsOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {
        pub(crate) exclusion_arns: Option<Vec<String>>,
        pub(crate) next_token: Option<String>,
    }
    impl Builder {
        /// Appends a single ARN to `exclusion_arns`.
        ///
        /// To replace the whole collection, use [`set_exclusion_arns`](Self::set_exclusion_arns).
        pub fn exclusion_arns(mut self, input: impl Into<String>) -> Self {
            // Lazily create the vector on first append.
            self.exclusion_arns
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// Overwrites `exclusion_arns` with the supplied collection.
        pub fn set_exclusion_arns(mut self, input: Option<Vec<String>>) -> Self {
            self.exclusion_arns = input;
            self
        }
        /// Sets the pagination token to hand back to the caller.
        pub fn next_token(mut self, input: impl Into<String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets or clears the pagination token.
        pub fn set_next_token(mut self, input: Option<String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListExclusionsOutput`](crate::output::ListExclusionsOutput)
        pub fn build(self) -> crate::output::ListExclusionsOutput {
            crate::output::ListExclusionsOutput {
                exclusion_arns: self.exclusion_arns,
                next_token: self.next_token,
            }
        }
    }
}
impl ListExclusionsOutput {
    /// Creates a new builder-style object to manufacture [`ListExclusionsOutput`](crate::output::ListExclusionsOutput)
    pub fn builder() -> crate::output::list_exclusions_output::Builder {
        crate::output::list_exclusions_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct ListEventSubscriptionsOutput {
    /// Details of the returned event subscriptions.
    pub subscriptions: Option<Vec<crate::model::Subscription>>,
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub next_token: Option<String>,
}
impl ListEventSubscriptionsOutput {
    /// Details of the returned event subscriptions.
    pub fn subscriptions(&self) -> Option<&[crate::model::Subscription]> {
        self.subscriptions.as_deref()
    }
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub fn next_token(&self) -> Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for ListEventSubscriptionsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ListEventSubscriptionsOutput")
            .field("subscriptions", &self.subscriptions)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`ListEventSubscriptionsOutput`](crate::output::ListEventSubscriptionsOutput)
pub mod list_event_subscriptions_output {
    /// A builder for [`ListEventSubscriptionsOutput`](crate::output::ListEventSubscriptionsOutput)
    #[non_exhaustive]
    #[derive(Default, Clone, PartialEq, Debug)]
    pub struct Builder {
        pub(crate) subscriptions: Option<Vec<crate::model::Subscription>>,
        pub(crate) next_token: Option<String>,
    }
    impl Builder {
        /// Appends a single subscription to `subscriptions`.
        ///
        /// To replace the whole collection, use [`set_subscriptions`](Self::set_subscriptions).
        pub fn subscriptions(mut self, input: impl Into<crate::model::Subscription>) -> Self {
            // Lazily create the vector on first append.
            self.subscriptions
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// Overwrites `subscriptions` with the supplied collection.
        pub fn set_subscriptions(
            mut self,
            input: Option<Vec<crate::model::Subscription>>,
        ) -> Self {
            self.subscriptions = input;
            self
        }
        /// Sets the pagination token to hand back to the caller.
        pub fn next_token(mut self, input: impl Into<String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets or clears the pagination token.
        pub fn set_next_token(mut self, input: Option<String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListEventSubscriptionsOutput`](crate::output::ListEventSubscriptionsOutput)
        pub fn build(self) -> crate::output::ListEventSubscriptionsOutput {
            crate::output::ListEventSubscriptionsOutput {
                subscriptions: self.subscriptions,
                next_token: self.next_token,
            }
        }
    }
}
impl ListEventSubscriptionsOutput {
    /// Creates a new builder-style object to manufacture [`ListEventSubscriptionsOutput`](crate::output::ListEventSubscriptionsOutput)
    pub fn builder() -> crate::output::list_event_subscriptions_output::Builder {
        crate::output::list_event_subscriptions_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(Clone, PartialEq)]
pub struct ListAssessmentTemplatesOutput {
    /// ARNs of the assessment templates returned by the action.
    pub assessment_template_arns: Option<Vec<String>>,
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub next_token: Option<String>,
}
impl ListAssessmentTemplatesOutput {
    /// ARNs of the assessment templates returned by the action.
    pub fn assessment_template_arns(&self) -> Option<&[String]> {
        self.assessment_template_arns.as_deref()
    }
    /// Pagination token: present when more data is available; pass it as the
    /// `nextToken` parameter of a subsequent request. Null when the listing is
    /// complete.
    pub fn next_token(&self) -> Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for ListAssessmentTemplatesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ListAssessmentTemplatesOutput")
            .field("assessment_template_arns", &self.assessment_template_arns)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`ListAssessmentTemplatesOutput`](crate::output::ListAssessmentTemplatesOutput)
pub mod list_assessment_templates_output {
/// A builder for [`ListAssessmentTemplatesOutput`](crate::output::ListAssessmentTemplatesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) assessment_template_arns:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) next_token: std::option::Option<std::string::String>,
}
impl Builder {
/// Appends an item to `assessment_template_arns`.
///
/// To override the contents of this collection use [`set_assessment_template_arns`](Self::set_assessment_template_arns).
///
/// <p>A list of ARNs that specifies the assessment templates returned by the
/// action.</p>
pub fn assessment_template_arns(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.assessment_template_arns.unwrap_or_default();
v.push(input.into());
self.assessment_template_arns = Some(v);
self
}
/// <p>A list of ARNs that specifies the assessment templates returned by the
/// action.</p>
pub fn set_assessment_template_arns(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.assessment_template_arns = input;
self
}
/// <p> When a response is generated, if there is more data to be listed, this parameter is
/// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
/// data to be listed, this parameter is set to null.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p> When a response is generated, if there is more data to be listed, this parameter is
/// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
/// data to be listed, this parameter is set to null.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// Consumes the builder and constructs a [`ListAssessmentTemplatesOutput`](crate::output::ListAssessmentTemplatesOutput)
pub fn build(self) -> crate::output::ListAssessmentTemplatesOutput {
crate::output::ListAssessmentTemplatesOutput {
assessment_template_arns: self.assessment_template_arns,
next_token: self.next_token,
}
}
}
}
impl ListAssessmentTemplatesOutput {
/// Creates a new builder-style object to manufacture [`ListAssessmentTemplatesOutput`](crate::output::ListAssessmentTemplatesOutput)
pub fn builder() -> crate::output::list_assessment_templates_output::Builder {
crate::output::list_assessment_templates_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListAssessmentTargetsOutput {
    /// <p>The ARNs of the assessment targets that the action returned.</p>
    pub assessment_target_arns: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p> When a response is generated, if there is more data to be listed, this parameter is
    /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
    /// data to be listed, this parameter is set to null.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl ListAssessmentTargetsOutput {
    /// <p>The ARNs of the assessment targets that the action returned.</p>
    pub fn assessment_target_arns(&self) -> std::option::Option<&[std::string::String]> {
        self.assessment_target_arns.as_deref()
    }
    /// <p> When a response is generated, if there is more data to be listed, this parameter is
    /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
    /// data to be listed, this parameter is set to null.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for ListAssessmentTargetsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained `DebugStruct` form; fields appear in declaration order.
        f.debug_struct("ListAssessmentTargetsOutput")
            .field("assessment_target_arns", &self.assessment_target_arns)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`ListAssessmentTargetsOutput`](crate::output::ListAssessmentTargetsOutput)
pub mod list_assessment_targets_output {
    /// A builder for [`ListAssessmentTargetsOutput`](crate::output::ListAssessmentTargetsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) assessment_target_arns: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `assessment_target_arns`.
        ///
        /// To override the contents of this collection use [`set_assessment_target_arns`](Self::set_assessment_target_arns).
        ///
        /// <p>The ARNs of the assessment targets that the action returned.</p>
        pub fn assessment_target_arns(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the backing Vec on the first append.
            self.assessment_target_arns
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>The ARNs of the assessment targets that the action returned.</p>
        pub fn set_assessment_target_arns(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                assessment_target_arns: input,
                ..self
            }
        }
        /// <p> When a response is generated, if there is more data to be listed, this parameter is
        /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
        /// data to be listed, this parameter is set to null.</p>
        pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
            Self {
                next_token: Some(input.into()),
                ..self
            }
        }
        /// <p> When a response is generated, if there is more data to be listed, this parameter is
        /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
        /// data to be listed, this parameter is set to null.</p>
        pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                next_token: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`ListAssessmentTargetsOutput`](crate::output::ListAssessmentTargetsOutput)
        pub fn build(self) -> crate::output::ListAssessmentTargetsOutput {
            crate::output::ListAssessmentTargetsOutput {
                assessment_target_arns: self.assessment_target_arns,
                next_token: self.next_token,
            }
        }
    }
}
impl ListAssessmentTargetsOutput {
    /// Creates a new builder-style object to manufacture [`ListAssessmentTargetsOutput`](crate::output::ListAssessmentTargetsOutput)
    pub fn builder() -> crate::output::list_assessment_targets_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListAssessmentRunsOutput {
    /// <p>The ARNs of the assessment runs that the action returned.</p>
    pub assessment_run_arns: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p> When a response is generated, if there is more data to be listed, this parameter is
    /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
    /// data to be listed, this parameter is set to null.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl ListAssessmentRunsOutput {
    /// <p>The ARNs of the assessment runs that the action returned.</p>
    pub fn assessment_run_arns(&self) -> std::option::Option<&[std::string::String]> {
        self.assessment_run_arns.as_deref()
    }
    /// <p> When a response is generated, if there is more data to be listed, this parameter is
    /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
    /// data to be listed, this parameter is set to null.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for ListAssessmentRunsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained `DebugStruct` form; fields appear in declaration order.
        f.debug_struct("ListAssessmentRunsOutput")
            .field("assessment_run_arns", &self.assessment_run_arns)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`ListAssessmentRunsOutput`](crate::output::ListAssessmentRunsOutput)
pub mod list_assessment_runs_output {
    /// A builder for [`ListAssessmentRunsOutput`](crate::output::ListAssessmentRunsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) assessment_run_arns: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `assessment_run_arns`.
        ///
        /// To override the contents of this collection use [`set_assessment_run_arns`](Self::set_assessment_run_arns).
        ///
        /// <p>The ARNs of the assessment runs that the action returned.</p>
        pub fn assessment_run_arns(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the backing Vec on the first append.
            self.assessment_run_arns
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>The ARNs of the assessment runs that the action returned.</p>
        pub fn set_assessment_run_arns(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                assessment_run_arns: input,
                ..self
            }
        }
        /// <p> When a response is generated, if there is more data to be listed, this parameter is
        /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
        /// data to be listed, this parameter is set to null.</p>
        pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
            Self {
                next_token: Some(input.into()),
                ..self
            }
        }
        /// <p> When a response is generated, if there is more data to be listed, this parameter is
        /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
        /// data to be listed, this parameter is set to null.</p>
        pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                next_token: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`ListAssessmentRunsOutput`](crate::output::ListAssessmentRunsOutput)
        pub fn build(self) -> crate::output::ListAssessmentRunsOutput {
            crate::output::ListAssessmentRunsOutput {
                assessment_run_arns: self.assessment_run_arns,
                next_token: self.next_token,
            }
        }
    }
}
impl ListAssessmentRunsOutput {
    /// Creates a new builder-style object to manufacture [`ListAssessmentRunsOutput`](crate::output::ListAssessmentRunsOutput)
    pub fn builder() -> crate::output::list_assessment_runs_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListAssessmentRunAgentsOutput {
    /// <p>The agents that the action returned.</p>
    pub assessment_run_agents: std::option::Option<std::vec::Vec<crate::model::AssessmentRunAgent>>,
    /// <p> When a response is generated, if there is more data to be listed, this parameter is
    /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
    /// data to be listed, this parameter is set to null.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl ListAssessmentRunAgentsOutput {
    /// <p>The agents that the action returned.</p>
    pub fn assessment_run_agents(
        &self,
    ) -> std::option::Option<&[crate::model::AssessmentRunAgent]> {
        self.assessment_run_agents.as_deref()
    }
    /// <p> When a response is generated, if there is more data to be listed, this parameter is
    /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
    /// data to be listed, this parameter is set to null.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for ListAssessmentRunAgentsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained `DebugStruct` form; fields appear in declaration order.
        f.debug_struct("ListAssessmentRunAgentsOutput")
            .field("assessment_run_agents", &self.assessment_run_agents)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`ListAssessmentRunAgentsOutput`](crate::output::ListAssessmentRunAgentsOutput)
pub mod list_assessment_run_agents_output {
    /// A builder for [`ListAssessmentRunAgentsOutput`](crate::output::ListAssessmentRunAgentsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) assessment_run_agents:
            std::option::Option<std::vec::Vec<crate::model::AssessmentRunAgent>>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `assessment_run_agents`.
        ///
        /// To override the contents of this collection use [`set_assessment_run_agents`](Self::set_assessment_run_agents).
        ///
        /// <p>The agents that the action returned.</p>
        pub fn assessment_run_agents(
            mut self,
            input: impl Into<crate::model::AssessmentRunAgent>,
        ) -> Self {
            // Lazily create the backing Vec on the first append.
            self.assessment_run_agents
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>The agents that the action returned.</p>
        pub fn set_assessment_run_agents(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::AssessmentRunAgent>>,
        ) -> Self {
            Self {
                assessment_run_agents: input,
                ..self
            }
        }
        /// <p> When a response is generated, if there is more data to be listed, this parameter is
        /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
        /// data to be listed, this parameter is set to null.</p>
        pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
            Self {
                next_token: Some(input.into()),
                ..self
            }
        }
        /// <p> When a response is generated, if there is more data to be listed, this parameter is
        /// present in the response and contains the value to use for the <b>nextToken</b> parameter in a subsequent pagination request. If there is no more
        /// data to be listed, this parameter is set to null.</p>
        pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                next_token: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`ListAssessmentRunAgentsOutput`](crate::output::ListAssessmentRunAgentsOutput)
        pub fn build(self) -> crate::output::ListAssessmentRunAgentsOutput {
            crate::output::ListAssessmentRunAgentsOutput {
                assessment_run_agents: self.assessment_run_agents,
                next_token: self.next_token,
            }
        }
    }
}
impl ListAssessmentRunAgentsOutput {
    /// Creates a new builder-style object to manufacture [`ListAssessmentRunAgentsOutput`](crate::output::ListAssessmentRunAgentsOutput)
    pub fn builder() -> crate::output::list_assessment_run_agents_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetTelemetryMetadataOutput {
    /// <p>Telemetry details.</p>
    pub telemetry_metadata: std::option::Option<std::vec::Vec<crate::model::TelemetryMetadata>>,
}
impl GetTelemetryMetadataOutput {
    /// <p>Telemetry details.</p>
    pub fn telemetry_metadata(&self) -> std::option::Option<&[crate::model::TelemetryMetadata]> {
        self.telemetry_metadata.as_deref()
    }
}
impl std::fmt::Debug for GetTelemetryMetadataOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained `DebugStruct` form for the single field.
        f.debug_struct("GetTelemetryMetadataOutput")
            .field("telemetry_metadata", &self.telemetry_metadata)
            .finish()
    }
}
/// See [`GetTelemetryMetadataOutput`](crate::output::GetTelemetryMetadataOutput)
pub mod get_telemetry_metadata_output {
    /// A builder for [`GetTelemetryMetadataOutput`](crate::output::GetTelemetryMetadataOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) telemetry_metadata:
            std::option::Option<std::vec::Vec<crate::model::TelemetryMetadata>>,
    }
    impl Builder {
        /// Appends an item to `telemetry_metadata`.
        ///
        /// To override the contents of this collection use [`set_telemetry_metadata`](Self::set_telemetry_metadata).
        ///
        /// <p>Telemetry details.</p>
        pub fn telemetry_metadata(
            mut self,
            input: impl Into<crate::model::TelemetryMetadata>,
        ) -> Self {
            // Lazily create the backing Vec on the first append.
            self.telemetry_metadata
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>Telemetry details.</p>
        pub fn set_telemetry_metadata(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::TelemetryMetadata>>,
        ) -> Self {
            Self {
                telemetry_metadata: input,
            }
        }
        /// Consumes the builder and constructs a [`GetTelemetryMetadataOutput`](crate::output::GetTelemetryMetadataOutput)
        pub fn build(self) -> crate::output::GetTelemetryMetadataOutput {
            crate::output::GetTelemetryMetadataOutput {
                telemetry_metadata: self.telemetry_metadata,
            }
        }
    }
}
impl GetTelemetryMetadataOutput {
    /// Creates a new builder-style object to manufacture [`GetTelemetryMetadataOutput`](crate::output::GetTelemetryMetadataOutput)
    pub fn builder() -> crate::output::get_telemetry_metadata_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetExclusionsPreviewOutput {
    /// <p>The status of the request to generate an exclusions preview.</p>
    pub preview_status: std::option::Option<crate::model::PreviewStatus>,
    /// <p>Information about the exclusions included in the preview.</p>
    pub exclusion_previews: std::option::Option<std::vec::Vec<crate::model::ExclusionPreview>>,
    /// <p>When a response is generated, if there is more data to be listed, this parameters is
    /// present in the response and contains the value to use for the nextToken parameter in a
    /// subsequent pagination request. If there is no more data to be listed, this parameter is set
    /// to null.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl GetExclusionsPreviewOutput {
    /// <p>The status of the request to generate an exclusions preview.</p>
    pub fn preview_status(&self) -> std::option::Option<&crate::model::PreviewStatus> {
        self.preview_status.as_ref()
    }
    /// <p>Information about the exclusions included in the preview.</p>
    pub fn exclusion_previews(&self) -> std::option::Option<&[crate::model::ExclusionPreview]> {
        self.exclusion_previews.as_deref()
    }
    /// <p>When a response is generated, if there is more data to be listed, this parameters is
    /// present in the response and contains the value to use for the nextToken parameter in a
    /// subsequent pagination request. If there is no more data to be listed, this parameter is set
    /// to null.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for GetExclusionsPreviewOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained `DebugStruct` form; fields appear in declaration order.
        f.debug_struct("GetExclusionsPreviewOutput")
            .field("preview_status", &self.preview_status)
            .field("exclusion_previews", &self.exclusion_previews)
            .field("next_token", &self.next_token)
            .finish()
    }
}
/// See [`GetExclusionsPreviewOutput`](crate::output::GetExclusionsPreviewOutput)
pub mod get_exclusions_preview_output {
    /// A builder for [`GetExclusionsPreviewOutput`](crate::output::GetExclusionsPreviewOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) preview_status: std::option::Option<crate::model::PreviewStatus>,
        pub(crate) exclusion_previews:
            std::option::Option<std::vec::Vec<crate::model::ExclusionPreview>>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The status of the request to generate an exclusions preview.</p>
        pub fn preview_status(self, input: crate::model::PreviewStatus) -> Self {
            Self {
                preview_status: Some(input),
                ..self
            }
        }
        /// <p>The status of the request to generate an exclusions preview.</p>
        pub fn set_preview_status(
            self,
            input: std::option::Option<crate::model::PreviewStatus>,
        ) -> Self {
            Self {
                preview_status: input,
                ..self
            }
        }
        /// Appends an item to `exclusion_previews`.
        ///
        /// To override the contents of this collection use [`set_exclusion_previews`](Self::set_exclusion_previews).
        ///
        /// <p>Information about the exclusions included in the preview.</p>
        pub fn exclusion_previews(
            mut self,
            input: impl Into<crate::model::ExclusionPreview>,
        ) -> Self {
            // Lazily create the backing Vec on the first append.
            self.exclusion_previews
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>Information about the exclusions included in the preview.</p>
        pub fn set_exclusion_previews(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::ExclusionPreview>>,
        ) -> Self {
            Self {
                exclusion_previews: input,
                ..self
            }
        }
        /// <p>When a response is generated, if there is more data to be listed, this parameters is
        /// present in the response and contains the value to use for the nextToken parameter in a
        /// subsequent pagination request. If there is no more data to be listed, this parameter is set
        /// to null.</p>
        pub fn next_token(self, input: impl Into<std::string::String>) -> Self {
            Self {
                next_token: Some(input.into()),
                ..self
            }
        }
        /// <p>When a response is generated, if there is more data to be listed, this parameters is
        /// present in the response and contains the value to use for the nextToken parameter in a
        /// subsequent pagination request. If there is no more data to be listed, this parameter is set
        /// to null.</p>
        pub fn set_next_token(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                next_token: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`GetExclusionsPreviewOutput`](crate::output::GetExclusionsPreviewOutput)
        pub fn build(self) -> crate::output::GetExclusionsPreviewOutput {
            crate::output::GetExclusionsPreviewOutput {
                preview_status: self.preview_status,
                exclusion_previews: self.exclusion_previews,
                next_token: self.next_token,
            }
        }
    }
}
impl GetExclusionsPreviewOutput {
    /// Creates a new builder-style object to manufacture [`GetExclusionsPreviewOutput`](crate::output::GetExclusionsPreviewOutput)
    pub fn builder() -> crate::output::get_exclusions_preview_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetAssessmentReportOutput {
    /// <p>The status of the request to generate an assessment report. </p>
    pub status: std::option::Option<crate::model::ReportStatus>,
    /// <p>The URL where the generated assessment report can be found. This parameter
    /// is only returned if the report is successfully generated.</p>
    pub url: std::option::Option<std::string::String>,
}
impl GetAssessmentReportOutput {
    /// <p>The status of the request to generate an assessment report. </p>
    pub fn status(&self) -> std::option::Option<&crate::model::ReportStatus> {
        self.status.as_ref()
    }
    /// <p>The URL where the generated assessment report can be found. This parameter
    /// is only returned if the report is successfully generated.</p>
    pub fn url(&self) -> std::option::Option<&str> {
        self.url.as_deref()
    }
}
impl std::fmt::Debug for GetAssessmentReportOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained `DebugStruct` form; fields appear in declaration order.
        f.debug_struct("GetAssessmentReportOutput")
            .field("status", &self.status)
            .field("url", &self.url)
            .finish()
    }
}
/// See [`GetAssessmentReportOutput`](crate::output::GetAssessmentReportOutput)
pub mod get_assessment_report_output {
    /// A builder for [`GetAssessmentReportOutput`](crate::output::GetAssessmentReportOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) status: std::option::Option<crate::model::ReportStatus>,
        pub(crate) url: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The status of the request to generate an assessment report. </p>
        pub fn status(self, input: crate::model::ReportStatus) -> Self {
            Self {
                status: Some(input),
                ..self
            }
        }
        /// <p>The status of the request to generate an assessment report. </p>
        pub fn set_status(
            self,
            input: std::option::Option<crate::model::ReportStatus>,
        ) -> Self {
            Self {
                status: input,
                ..self
            }
        }
        /// <p>The URL where the generated assessment report can be found. This parameter
        /// is only returned if the report is successfully generated.</p>
        pub fn url(self, input: impl Into<std::string::String>) -> Self {
            Self {
                url: Some(input.into()),
                ..self
            }
        }
        /// <p>The URL where the generated assessment report can be found. This parameter
        /// is only returned if the report is successfully generated.</p>
        pub fn set_url(self, input: std::option::Option<std::string::String>) -> Self {
            Self { url: input, ..self }
        }
        /// Consumes the builder and constructs a [`GetAssessmentReportOutput`](crate::output::GetAssessmentReportOutput)
        pub fn build(self) -> crate::output::GetAssessmentReportOutput {
            crate::output::GetAssessmentReportOutput {
                status: self.status,
                url: self.url,
            }
        }
    }
}
impl GetAssessmentReportOutput {
    /// Creates a new builder-style object to manufacture [`GetAssessmentReportOutput`](crate::output::GetAssessmentReportOutput)
    pub fn builder() -> crate::output::get_assessment_report_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeRulesPackagesOutput {
    /// <p>Information about the rules package.</p>
    pub rules_packages: std::option::Option<std::vec::Vec<crate::model::RulesPackage>>,
    /// <p>Rules package details that cannot be described. An error code is provided for each
    /// failed item.</p>
    pub failed_items: std::option::Option<
        std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    >,
}
impl DescribeRulesPackagesOutput {
    /// <p>Information about the rules package.</p>
    pub fn rules_packages(&self) -> std::option::Option<&[crate::model::RulesPackage]> {
        self.rules_packages.as_deref()
    }
    /// <p>Rules package details that cannot be described. An error code is provided for each
    /// failed item.</p>
    pub fn failed_items(
        &self,
    ) -> std::option::Option<
        &std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    > {
        self.failed_items.as_ref()
    }
}
impl std::fmt::Debug for DescribeRulesPackagesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained `DebugStruct` form; fields appear in declaration order.
        f.debug_struct("DescribeRulesPackagesOutput")
            .field("rules_packages", &self.rules_packages)
            .field("failed_items", &self.failed_items)
            .finish()
    }
}
/// See [`DescribeRulesPackagesOutput`](crate::output::DescribeRulesPackagesOutput)
pub mod describe_rules_packages_output {
    /// A builder for [`DescribeRulesPackagesOutput`](crate::output::DescribeRulesPackagesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) rules_packages: std::option::Option<std::vec::Vec<crate::model::RulesPackage>>,
        pub(crate) failed_items: std::option::Option<
            std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
        >,
    }
    impl Builder {
        /// Appends an item to `rules_packages`.
        ///
        /// To override the contents of this collection use [`set_rules_packages`](Self::set_rules_packages).
        ///
        /// <p>Information about the rules package.</p>
        pub fn rules_packages(mut self, input: impl Into<crate::model::RulesPackage>) -> Self {
            // Lazily create the backing Vec on the first append.
            self.rules_packages
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>Information about the rules package.</p>
        pub fn set_rules_packages(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::RulesPackage>>,
        ) -> Self {
            Self {
                rules_packages: input,
                ..self
            }
        }
        /// Adds a key-value pair to `failed_items`.
        ///
        /// To override the contents of this collection use [`set_failed_items`](Self::set_failed_items).
        ///
        /// <p>Rules package details that cannot be described. An error code is provided for each
        /// failed item.</p>
        pub fn failed_items(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<crate::model::FailedItemDetails>,
        ) -> Self {
            // Lazily create the backing map; a repeated key overwrites the prior value.
            self.failed_items
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// <p>Rules package details that cannot be described. An error code is provided for each
        /// failed item.</p>
        pub fn set_failed_items(
            self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
            >,
        ) -> Self {
            Self {
                failed_items: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`DescribeRulesPackagesOutput`](crate::output::DescribeRulesPackagesOutput)
        pub fn build(self) -> crate::output::DescribeRulesPackagesOutput {
            crate::output::DescribeRulesPackagesOutput {
                rules_packages: self.rules_packages,
                failed_items: self.failed_items,
            }
        }
    }
}
impl DescribeRulesPackagesOutput {
    /// Creates a new builder-style object to manufacture [`DescribeRulesPackagesOutput`](crate::output::DescribeRulesPackagesOutput)
    pub fn builder() -> crate::output::describe_rules_packages_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeResourceGroupsOutput {
    /// <p>Information about a resource group.</p>
    pub resource_groups: std::option::Option<std::vec::Vec<crate::model::ResourceGroup>>,
    /// <p>Resource group details that cannot be described. An error code is provided for each
    /// failed item.</p>
    pub failed_items: std::option::Option<
        std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    >,
}
impl DescribeResourceGroupsOutput {
    /// <p>Information about a resource group.</p>
    pub fn resource_groups(&self) -> std::option::Option<&[crate::model::ResourceGroup]> {
        self.resource_groups.as_deref()
    }
    /// <p>Resource group details that cannot be described. An error code is provided for each
    /// failed item.</p>
    pub fn failed_items(
        &self,
    ) -> std::option::Option<
        &std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    > {
        self.failed_items.as_ref()
    }
}
impl std::fmt::Debug for DescribeResourceGroupsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chained `DebugStruct` form; fields appear in declaration order.
        f.debug_struct("DescribeResourceGroupsOutput")
            .field("resource_groups", &self.resource_groups)
            .field("failed_items", &self.failed_items)
            .finish()
    }
}
/// See [`DescribeResourceGroupsOutput`](crate::output::DescribeResourceGroupsOutput)
pub mod describe_resource_groups_output {
/// A builder for [`DescribeResourceGroupsOutput`](crate::output::DescribeResourceGroupsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resource_groups: std::option::Option<std::vec::Vec<crate::model::ResourceGroup>>,
pub(crate) failed_items: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
>,
}
impl Builder {
/// Appends an item to `resource_groups`.
///
/// To override the contents of this collection use [`set_resource_groups`](Self::set_resource_groups).
///
/// <p>Information about a resource group.</p>
pub fn resource_groups(mut self, input: impl Into<crate::model::ResourceGroup>) -> Self {
let mut v = self.resource_groups.unwrap_or_default();
v.push(input.into());
self.resource_groups = Some(v);
self
}
/// <p>Information about a resource group.</p>
pub fn set_resource_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ResourceGroup>>,
) -> Self {
self.resource_groups = input;
self
}
/// Adds a key-value pair to `failed_items`.
///
/// To override the contents of this collection use [`set_failed_items`](Self::set_failed_items).
///
/// <p>Resource group details that cannot be described. An error code is provided for each
/// failed item.</p>
pub fn failed_items(
mut self,
k: impl Into<std::string::String>,
v: impl Into<crate::model::FailedItemDetails>,
) -> Self {
let mut hash_map = self.failed_items.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.failed_items = Some(hash_map);
self
}
/// <p>Resource group details that cannot be described. An error code is provided for each
/// failed item.</p>
pub fn set_failed_items(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
>,
) -> Self {
self.failed_items = input;
self
}
/// Consumes the builder and constructs a [`DescribeResourceGroupsOutput`](crate::output::DescribeResourceGroupsOutput)
pub fn build(self) -> crate::output::DescribeResourceGroupsOutput {
crate::output::DescribeResourceGroupsOutput {
resource_groups: self.resource_groups,
failed_items: self.failed_items,
}
}
}
}
impl DescribeResourceGroupsOutput {
/// Creates a new builder-style object to manufacture [`DescribeResourceGroupsOutput`](crate::output::DescribeResourceGroupsOutput)
pub fn builder() -> crate::output::describe_resource_groups_output::Builder {
crate::output::describe_resource_groups_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DescribeFindingsOutput {
    /// <p>Information about the finding.</p>
    pub findings: std::option::Option<std::vec::Vec<crate::model::Finding>>,
    /// <p>Finding details that cannot be described. An error code is provided for each failed
    /// item.</p>
    pub failed_items: std::option::Option<
        std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    >,
}
impl DescribeFindingsOutput {
    /// <p>Information about the finding.</p>
    pub fn findings(&self) -> std::option::Option<&[crate::model::Finding]> {
        self.findings.as_deref()
    }
    /// <p>Finding details that cannot be described. An error code is provided for each failed
    /// item.</p>
    pub fn failed_items(
        &self,
    ) -> std::option::Option<
        &std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    > {
        self.failed_items.as_ref()
    }
    /// Creates a new builder-style object to manufacture [`DescribeFindingsOutput`](crate::output::DescribeFindingsOutput)
    pub fn builder() -> crate::output::describe_findings_output::Builder {
        crate::output::describe_findings_output::Builder::default()
    }
}
/// See [`DescribeFindingsOutput`](crate::output::DescribeFindingsOutput)
pub mod describe_findings_output {
    /// A builder for [`DescribeFindingsOutput`](crate::output::DescribeFindingsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) findings: std::option::Option<std::vec::Vec<crate::model::Finding>>,
        pub(crate) failed_items: std::option::Option<
            std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
        >,
    }
    impl Builder {
        /// Appends an item to `findings`.
        ///
        /// To override the contents of this collection use [`set_findings`](Self::set_findings).
        ///
        /// <p>Information about the finding.</p>
        pub fn findings(mut self, input: impl Into<crate::model::Finding>) -> Self {
            // Lazily create the backing vector on the first append.
            self.findings
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>Information about the finding.</p>
        pub fn set_findings(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::Finding>>,
        ) -> Self {
            Self { findings: input, ..self }
        }
        /// Adds a key-value pair to `failed_items`.
        ///
        /// To override the contents of this collection use [`set_failed_items`](Self::set_failed_items).
        ///
        /// <p>Finding details that cannot be described. An error code is provided for each failed
        /// item.</p>
        pub fn failed_items(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<crate::model::FailedItemDetails>,
        ) -> Self {
            // Lazily create the backing map on the first insert.
            self.failed_items
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// <p>Finding details that cannot be described. An error code is provided for each failed
        /// item.</p>
        pub fn set_failed_items(
            self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
            >,
        ) -> Self {
            Self { failed_items: input, ..self }
        }
        /// Consumes the builder and constructs a [`DescribeFindingsOutput`](crate::output::DescribeFindingsOutput)
        pub fn build(self) -> crate::output::DescribeFindingsOutput {
            crate::output::DescribeFindingsOutput {
                findings: self.findings,
                failed_items: self.failed_items,
            }
        }
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DescribeExclusionsOutput {
    /// <p>Information about the exclusions.</p>
    pub exclusions: std::option::Option<
        std::collections::HashMap<std::string::String, crate::model::Exclusion>,
    >,
    /// <p>Exclusion details that cannot be described. An error code is provided for each failed
    /// item.</p>
    pub failed_items: std::option::Option<
        std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    >,
}
impl DescribeExclusionsOutput {
    /// <p>Information about the exclusions.</p>
    pub fn exclusions(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, crate::model::Exclusion>>
    {
        self.exclusions.as_ref()
    }
    /// <p>Exclusion details that cannot be described. An error code is provided for each failed
    /// item.</p>
    pub fn failed_items(
        &self,
    ) -> std::option::Option<
        &std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    > {
        self.failed_items.as_ref()
    }
    /// Creates a new builder-style object to manufacture [`DescribeExclusionsOutput`](crate::output::DescribeExclusionsOutput)
    pub fn builder() -> crate::output::describe_exclusions_output::Builder {
        crate::output::describe_exclusions_output::Builder::default()
    }
}
/// See [`DescribeExclusionsOutput`](crate::output::DescribeExclusionsOutput)
pub mod describe_exclusions_output {
    /// A builder for [`DescribeExclusionsOutput`](crate::output::DescribeExclusionsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) exclusions: std::option::Option<
            std::collections::HashMap<std::string::String, crate::model::Exclusion>,
        >,
        pub(crate) failed_items: std::option::Option<
            std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
        >,
    }
    impl Builder {
        /// Adds a key-value pair to `exclusions`.
        ///
        /// To override the contents of this collection use [`set_exclusions`](Self::set_exclusions).
        ///
        /// <p>Information about the exclusions.</p>
        pub fn exclusions(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<crate::model::Exclusion>,
        ) -> Self {
            // Lazily create the backing map on the first insert.
            self.exclusions
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// <p>Information about the exclusions.</p>
        pub fn set_exclusions(
            self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, crate::model::Exclusion>,
            >,
        ) -> Self {
            Self { exclusions: input, ..self }
        }
        /// Adds a key-value pair to `failed_items`.
        ///
        /// To override the contents of this collection use [`set_failed_items`](Self::set_failed_items).
        ///
        /// <p>Exclusion details that cannot be described. An error code is provided for each failed
        /// item.</p>
        pub fn failed_items(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<crate::model::FailedItemDetails>,
        ) -> Self {
            // Lazily create the backing map on the first insert.
            self.failed_items
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// <p>Exclusion details that cannot be described. An error code is provided for each failed
        /// item.</p>
        pub fn set_failed_items(
            self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
            >,
        ) -> Self {
            Self { failed_items: input, ..self }
        }
        /// Consumes the builder and constructs a [`DescribeExclusionsOutput`](crate::output::DescribeExclusionsOutput)
        pub fn build(self) -> crate::output::DescribeExclusionsOutput {
            crate::output::DescribeExclusionsOutput {
                exclusions: self.exclusions,
                failed_items: self.failed_items,
            }
        }
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DescribeCrossAccountAccessRoleOutput {
    /// <p>The ARN that specifies the IAM role that Amazon Inspector uses to access your AWS
    /// account.</p>
    pub role_arn: std::option::Option<std::string::String>,
    /// <p>A Boolean value that specifies whether the IAM role has the necessary policies
    /// attached to enable Amazon Inspector to access your AWS account.</p>
    pub valid: std::option::Option<bool>,
    /// <p>The date when the cross-account access role was registered.</p>
    pub registered_at: std::option::Option<aws_smithy_types::Instant>,
}
impl DescribeCrossAccountAccessRoleOutput {
    /// <p>The ARN that specifies the IAM role that Amazon Inspector uses to access your AWS
    /// account.</p>
    pub fn role_arn(&self) -> std::option::Option<&str> {
        self.role_arn.as_deref()
    }
    /// <p>A Boolean value that specifies whether the IAM role has the necessary policies
    /// attached to enable Amazon Inspector to access your AWS account.</p>
    pub fn valid(&self) -> std::option::Option<bool> {
        self.valid
    }
    /// <p>The date when the cross-account access role was registered.</p>
    pub fn registered_at(&self) -> std::option::Option<&aws_smithy_types::Instant> {
        self.registered_at.as_ref()
    }
    /// Creates a new builder-style object to manufacture [`DescribeCrossAccountAccessRoleOutput`](crate::output::DescribeCrossAccountAccessRoleOutput)
    pub fn builder() -> crate::output::describe_cross_account_access_role_output::Builder {
        crate::output::describe_cross_account_access_role_output::Builder::default()
    }
}
/// See [`DescribeCrossAccountAccessRoleOutput`](crate::output::DescribeCrossAccountAccessRoleOutput)
pub mod describe_cross_account_access_role_output {
    /// A builder for [`DescribeCrossAccountAccessRoleOutput`](crate::output::DescribeCrossAccountAccessRoleOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) role_arn: std::option::Option<std::string::String>,
        pub(crate) valid: std::option::Option<bool>,
        pub(crate) registered_at: std::option::Option<aws_smithy_types::Instant>,
    }
    impl Builder {
        /// <p>The ARN that specifies the IAM role that Amazon Inspector uses to access your AWS
        /// account.</p>
        pub fn role_arn(self, input: impl Into<std::string::String>) -> Self {
            Self { role_arn: Some(input.into()), ..self }
        }
        /// <p>The ARN that specifies the IAM role that Amazon Inspector uses to access your AWS
        /// account.</p>
        pub fn set_role_arn(self, input: std::option::Option<std::string::String>) -> Self {
            Self { role_arn: input, ..self }
        }
        /// <p>A Boolean value that specifies whether the IAM role has the necessary policies
        /// attached to enable Amazon Inspector to access your AWS account.</p>
        pub fn valid(self, input: bool) -> Self {
            Self { valid: Some(input), ..self }
        }
        /// <p>A Boolean value that specifies whether the IAM role has the necessary policies
        /// attached to enable Amazon Inspector to access your AWS account.</p>
        pub fn set_valid(self, input: std::option::Option<bool>) -> Self {
            Self { valid: input, ..self }
        }
        /// <p>The date when the cross-account access role was registered.</p>
        pub fn registered_at(self, input: aws_smithy_types::Instant) -> Self {
            Self { registered_at: Some(input), ..self }
        }
        /// <p>The date when the cross-account access role was registered.</p>
        pub fn set_registered_at(
            self,
            input: std::option::Option<aws_smithy_types::Instant>,
        ) -> Self {
            Self { registered_at: input, ..self }
        }
        /// Consumes the builder and constructs a [`DescribeCrossAccountAccessRoleOutput`](crate::output::DescribeCrossAccountAccessRoleOutput)
        pub fn build(self) -> crate::output::DescribeCrossAccountAccessRoleOutput {
            crate::output::DescribeCrossAccountAccessRoleOutput {
                role_arn: self.role_arn,
                valid: self.valid,
                registered_at: self.registered_at,
            }
        }
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DescribeAssessmentTemplatesOutput {
    /// <p>Information about the assessment templates.</p>
    pub assessment_templates: std::option::Option<std::vec::Vec<crate::model::AssessmentTemplate>>,
    /// <p>Assessment template details that cannot be described. An error code is provided for
    /// each failed item.</p>
    pub failed_items: std::option::Option<
        std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    >,
}
impl DescribeAssessmentTemplatesOutput {
    /// <p>Information about the assessment templates.</p>
    pub fn assessment_templates(&self) -> std::option::Option<&[crate::model::AssessmentTemplate]> {
        self.assessment_templates.as_deref()
    }
    /// <p>Assessment template details that cannot be described. An error code is provided for
    /// each failed item.</p>
    pub fn failed_items(
        &self,
    ) -> std::option::Option<
        &std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    > {
        self.failed_items.as_ref()
    }
    /// Creates a new builder-style object to manufacture [`DescribeAssessmentTemplatesOutput`](crate::output::DescribeAssessmentTemplatesOutput)
    pub fn builder() -> crate::output::describe_assessment_templates_output::Builder {
        crate::output::describe_assessment_templates_output::Builder::default()
    }
}
/// See [`DescribeAssessmentTemplatesOutput`](crate::output::DescribeAssessmentTemplatesOutput)
pub mod describe_assessment_templates_output {
    /// A builder for [`DescribeAssessmentTemplatesOutput`](crate::output::DescribeAssessmentTemplatesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) assessment_templates:
            std::option::Option<std::vec::Vec<crate::model::AssessmentTemplate>>,
        pub(crate) failed_items: std::option::Option<
            std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
        >,
    }
    impl Builder {
        /// Appends an item to `assessment_templates`.
        ///
        /// To override the contents of this collection use [`set_assessment_templates`](Self::set_assessment_templates).
        ///
        /// <p>Information about the assessment templates.</p>
        pub fn assessment_templates(
            mut self,
            input: impl Into<crate::model::AssessmentTemplate>,
        ) -> Self {
            // Lazily create the backing vector on the first append.
            self.assessment_templates
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>Information about the assessment templates.</p>
        pub fn set_assessment_templates(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::AssessmentTemplate>>,
        ) -> Self {
            Self { assessment_templates: input, ..self }
        }
        /// Adds a key-value pair to `failed_items`.
        ///
        /// To override the contents of this collection use [`set_failed_items`](Self::set_failed_items).
        ///
        /// <p>Assessment template details that cannot be described. An error code is provided for
        /// each failed item.</p>
        pub fn failed_items(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<crate::model::FailedItemDetails>,
        ) -> Self {
            // Lazily create the backing map on the first insert.
            self.failed_items
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// <p>Assessment template details that cannot be described. An error code is provided for
        /// each failed item.</p>
        pub fn set_failed_items(
            self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
            >,
        ) -> Self {
            Self { failed_items: input, ..self }
        }
        /// Consumes the builder and constructs a [`DescribeAssessmentTemplatesOutput`](crate::output::DescribeAssessmentTemplatesOutput)
        pub fn build(self) -> crate::output::DescribeAssessmentTemplatesOutput {
            crate::output::DescribeAssessmentTemplatesOutput {
                assessment_templates: self.assessment_templates,
                failed_items: self.failed_items,
            }
        }
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DescribeAssessmentTargetsOutput {
    /// <p>Information about the assessment targets.</p>
    pub assessment_targets: std::option::Option<std::vec::Vec<crate::model::AssessmentTarget>>,
    /// <p>Assessment target details that cannot be described. An error code is provided for
    /// each failed item.</p>
    pub failed_items: std::option::Option<
        std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    >,
}
impl DescribeAssessmentTargetsOutput {
    /// <p>Information about the assessment targets.</p>
    pub fn assessment_targets(&self) -> std::option::Option<&[crate::model::AssessmentTarget]> {
        self.assessment_targets.as_deref()
    }
    /// <p>Assessment target details that cannot be described. An error code is provided for
    /// each failed item.</p>
    pub fn failed_items(
        &self,
    ) -> std::option::Option<
        &std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    > {
        self.failed_items.as_ref()
    }
    /// Creates a new builder-style object to manufacture [`DescribeAssessmentTargetsOutput`](crate::output::DescribeAssessmentTargetsOutput)
    pub fn builder() -> crate::output::describe_assessment_targets_output::Builder {
        crate::output::describe_assessment_targets_output::Builder::default()
    }
}
/// See [`DescribeAssessmentTargetsOutput`](crate::output::DescribeAssessmentTargetsOutput)
pub mod describe_assessment_targets_output {
    /// A builder for [`DescribeAssessmentTargetsOutput`](crate::output::DescribeAssessmentTargetsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) assessment_targets:
            std::option::Option<std::vec::Vec<crate::model::AssessmentTarget>>,
        pub(crate) failed_items: std::option::Option<
            std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
        >,
    }
    impl Builder {
        /// Appends an item to `assessment_targets`.
        ///
        /// To override the contents of this collection use [`set_assessment_targets`](Self::set_assessment_targets).
        ///
        /// <p>Information about the assessment targets.</p>
        pub fn assessment_targets(
            mut self,
            input: impl Into<crate::model::AssessmentTarget>,
        ) -> Self {
            // Lazily create the backing vector on the first append.
            self.assessment_targets
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>Information about the assessment targets.</p>
        pub fn set_assessment_targets(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::AssessmentTarget>>,
        ) -> Self {
            Self { assessment_targets: input, ..self }
        }
        /// Adds a key-value pair to `failed_items`.
        ///
        /// To override the contents of this collection use [`set_failed_items`](Self::set_failed_items).
        ///
        /// <p>Assessment target details that cannot be described. An error code is provided for
        /// each failed item.</p>
        pub fn failed_items(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<crate::model::FailedItemDetails>,
        ) -> Self {
            // Lazily create the backing map on the first insert.
            self.failed_items
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// <p>Assessment target details that cannot be described. An error code is provided for
        /// each failed item.</p>
        pub fn set_failed_items(
            self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
            >,
        ) -> Self {
            Self { failed_items: input, ..self }
        }
        /// Consumes the builder and constructs a [`DescribeAssessmentTargetsOutput`](crate::output::DescribeAssessmentTargetsOutput)
        pub fn build(self) -> crate::output::DescribeAssessmentTargetsOutput {
            crate::output::DescribeAssessmentTargetsOutput {
                assessment_targets: self.assessment_targets,
                failed_items: self.failed_items,
            }
        }
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DescribeAssessmentRunsOutput {
    /// <p>Information about the assessment run.</p>
    pub assessment_runs: std::option::Option<std::vec::Vec<crate::model::AssessmentRun>>,
    /// <p>Assessment run details that cannot be described. An error code is provided for each
    /// failed item.</p>
    pub failed_items: std::option::Option<
        std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    >,
}
impl DescribeAssessmentRunsOutput {
    /// <p>Information about the assessment run.</p>
    pub fn assessment_runs(&self) -> std::option::Option<&[crate::model::AssessmentRun]> {
        self.assessment_runs.as_deref()
    }
    /// <p>Assessment run details that cannot be described. An error code is provided for each
    /// failed item.</p>
    pub fn failed_items(
        &self,
    ) -> std::option::Option<
        &std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    > {
        self.failed_items.as_ref()
    }
    /// Creates a new builder-style object to manufacture [`DescribeAssessmentRunsOutput`](crate::output::DescribeAssessmentRunsOutput)
    pub fn builder() -> crate::output::describe_assessment_runs_output::Builder {
        crate::output::describe_assessment_runs_output::Builder::default()
    }
}
/// See [`DescribeAssessmentRunsOutput`](crate::output::DescribeAssessmentRunsOutput)
pub mod describe_assessment_runs_output {
    /// A builder for [`DescribeAssessmentRunsOutput`](crate::output::DescribeAssessmentRunsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) assessment_runs: std::option::Option<std::vec::Vec<crate::model::AssessmentRun>>,
        pub(crate) failed_items: std::option::Option<
            std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
        >,
    }
    impl Builder {
        /// Appends an item to `assessment_runs`.
        ///
        /// To override the contents of this collection use [`set_assessment_runs`](Self::set_assessment_runs).
        ///
        /// <p>Information about the assessment run.</p>
        pub fn assessment_runs(mut self, input: impl Into<crate::model::AssessmentRun>) -> Self {
            // Lazily create the backing vector on the first append.
            self.assessment_runs
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>Information about the assessment run.</p>
        pub fn set_assessment_runs(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::AssessmentRun>>,
        ) -> Self {
            Self { assessment_runs: input, ..self }
        }
        /// Adds a key-value pair to `failed_items`.
        ///
        /// To override the contents of this collection use [`set_failed_items`](Self::set_failed_items).
        ///
        /// <p>Assessment run details that cannot be described. An error code is provided for each
        /// failed item.</p>
        pub fn failed_items(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<crate::model::FailedItemDetails>,
        ) -> Self {
            // Lazily create the backing map on the first insert.
            self.failed_items
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// <p>Assessment run details that cannot be described. An error code is provided for each
        /// failed item.</p>
        pub fn set_failed_items(
            self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
            >,
        ) -> Self {
            Self { failed_items: input, ..self }
        }
        /// Consumes the builder and constructs a [`DescribeAssessmentRunsOutput`](crate::output::DescribeAssessmentRunsOutput)
        pub fn build(self) -> crate::output::DescribeAssessmentRunsOutput {
            crate::output::DescribeAssessmentRunsOutput {
                assessment_runs: self.assessment_runs,
                failed_items: self.failed_items,
            }
        }
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteAssessmentTemplateOutput {}
impl DeleteAssessmentTemplateOutput {
    /// Creates a new builder-style object to manufacture [`DeleteAssessmentTemplateOutput`](crate::output::DeleteAssessmentTemplateOutput)
    pub fn builder() -> crate::output::delete_assessment_template_output::Builder {
        crate::output::delete_assessment_template_output::Builder::default()
    }
}
/// See [`DeleteAssessmentTemplateOutput`](crate::output::DeleteAssessmentTemplateOutput)
pub mod delete_assessment_template_output {
    /// A builder for [`DeleteAssessmentTemplateOutput`](crate::output::DeleteAssessmentTemplateOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteAssessmentTemplateOutput`](crate::output::DeleteAssessmentTemplateOutput)
        pub fn build(self) -> crate::output::DeleteAssessmentTemplateOutput {
            crate::output::DeleteAssessmentTemplateOutput {}
        }
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteAssessmentTargetOutput {}
impl DeleteAssessmentTargetOutput {
    /// Creates a new builder-style object to manufacture [`DeleteAssessmentTargetOutput`](crate::output::DeleteAssessmentTargetOutput)
    pub fn builder() -> crate::output::delete_assessment_target_output::Builder {
        crate::output::delete_assessment_target_output::Builder::default()
    }
}
/// See [`DeleteAssessmentTargetOutput`](crate::output::DeleteAssessmentTargetOutput)
pub mod delete_assessment_target_output {
    /// A builder for [`DeleteAssessmentTargetOutput`](crate::output::DeleteAssessmentTargetOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteAssessmentTargetOutput`](crate::output::DeleteAssessmentTargetOutput)
        pub fn build(self) -> crate::output::DeleteAssessmentTargetOutput {
            crate::output::DeleteAssessmentTargetOutput {}
        }
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteAssessmentRunOutput {}
impl DeleteAssessmentRunOutput {
    /// Creates a new builder-style object to manufacture [`DeleteAssessmentRunOutput`](crate::output::DeleteAssessmentRunOutput)
    pub fn builder() -> crate::output::delete_assessment_run_output::Builder {
        crate::output::delete_assessment_run_output::Builder::default()
    }
}
/// See [`DeleteAssessmentRunOutput`](crate::output::DeleteAssessmentRunOutput)
pub mod delete_assessment_run_output {
    /// A builder for [`DeleteAssessmentRunOutput`](crate::output::DeleteAssessmentRunOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`DeleteAssessmentRunOutput`](crate::output::DeleteAssessmentRunOutput)
        pub fn build(self) -> crate::output::DeleteAssessmentRunOutput {
            crate::output::DeleteAssessmentRunOutput {}
        }
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CreateResourceGroupOutput {
    /// <p>The ARN that specifies the resource group that is created.</p>
    pub resource_group_arn: std::option::Option<std::string::String>,
}
impl CreateResourceGroupOutput {
    /// <p>The ARN that specifies the resource group that is created.</p>
    pub fn resource_group_arn(&self) -> std::option::Option<&str> {
        self.resource_group_arn.as_deref()
    }
}
/// See [`CreateResourceGroupOutput`](crate::output::CreateResourceGroupOutput)
pub mod create_resource_group_output {
/// A builder for [`CreateResourceGroupOutput`](crate::output::CreateResourceGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resource_group_arn: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN that specifies the resource group that is created.</p>
pub fn resource_group_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_group_arn = Some(input.into());
self
}
/// <p>The ARN that specifies the resource group that is created.</p>
pub fn set_resource_group_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.resource_group_arn = input;
self
}
/// Consumes the builder and constructs a [`CreateResourceGroupOutput`](crate::output::CreateResourceGroupOutput)
pub fn build(self) -> crate::output::CreateResourceGroupOutput {
crate::output::CreateResourceGroupOutput {
resource_group_arn: self.resource_group_arn,
}
}
}
}
impl CreateResourceGroupOutput {
/// Creates a new builder-style object to manufacture [`CreateResourceGroupOutput`](crate::output::CreateResourceGroupOutput)
pub fn builder() -> crate::output::create_resource_group_output::Builder {
crate::output::create_resource_group_output::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateExclusionsPreviewOutput {
    /// <p>Specifies the unique identifier of the requested exclusions preview. You can use the
    /// unique identifier to retrieve the exclusions preview when running the GetExclusionsPreview
    /// API.</p>
    pub preview_token: std::option::Option<std::string::String>,
}
impl CreateExclusionsPreviewOutput {
    /// <p>Specifies the unique identifier of the requested exclusions preview. You can use the
    /// unique identifier to retrieve the exclusions preview when running the GetExclusionsPreview
    /// API.</p>
    pub fn preview_token(&self) -> std::option::Option<&str> {
        // Borrow the inner String as &str without cloning.
        self.preview_token.as_ref().map(|token| token.as_str())
    }
}
impl std::fmt::Debug for CreateExclusionsPreviewOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Builder-style chain instead of a named intermediate formatter.
        f.debug_struct("CreateExclusionsPreviewOutput")
            .field("preview_token", &self.preview_token)
            .finish()
    }
}
/// See [`CreateExclusionsPreviewOutput`](crate::output::CreateExclusionsPreviewOutput)
pub mod create_exclusions_preview_output {
    /// A builder for [`CreateExclusionsPreviewOutput`](crate::output::CreateExclusionsPreviewOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) preview_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>Specifies the unique identifier of the requested exclusions preview. You can use the
        /// unique identifier to retrieve the exclusions preview when running the GetExclusionsPreview
        /// API.</p>
        pub fn preview_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.preview_token = Some(input.into());
            self
        }
        /// <p>Specifies the unique identifier of the requested exclusions preview. You can use the
        /// unique identifier to retrieve the exclusions preview when running the GetExclusionsPreview
        /// API.</p>
        pub fn set_preview_token(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.preview_token = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateExclusionsPreviewOutput`](crate::output::CreateExclusionsPreviewOutput)
        pub fn build(self) -> crate::output::CreateExclusionsPreviewOutput {
            crate::output::CreateExclusionsPreviewOutput {
                preview_token: self.preview_token,
            }
        }
    }
}
impl CreateExclusionsPreviewOutput {
    /// Creates a new builder-style object to manufacture [`CreateExclusionsPreviewOutput`](crate::output::CreateExclusionsPreviewOutput)
    pub fn builder() -> crate::output::create_exclusions_preview_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateAssessmentTemplateOutput {
    /// <p>The ARN that specifies the assessment template that is created.</p>
    pub assessment_template_arn: std::option::Option<std::string::String>,
}
impl CreateAssessmentTemplateOutput {
    /// <p>The ARN that specifies the assessment template that is created.</p>
    pub fn assessment_template_arn(&self) -> std::option::Option<&str> {
        // Borrow the inner String as &str without cloning.
        self.assessment_template_arn.as_ref().map(|arn| arn.as_str())
    }
}
impl std::fmt::Debug for CreateAssessmentTemplateOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Builder-style chain instead of a named intermediate formatter.
        f.debug_struct("CreateAssessmentTemplateOutput")
            .field("assessment_template_arn", &self.assessment_template_arn)
            .finish()
    }
}
/// See [`CreateAssessmentTemplateOutput`](crate::output::CreateAssessmentTemplateOutput)
pub mod create_assessment_template_output {
    /// A builder for [`CreateAssessmentTemplateOutput`](crate::output::CreateAssessmentTemplateOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) assessment_template_arn: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The ARN that specifies the assessment template that is created.</p>
        pub fn assessment_template_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.assessment_template_arn = Some(input.into());
            self
        }
        /// <p>The ARN that specifies the assessment template that is created.</p>
        pub fn set_assessment_template_arn(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.assessment_template_arn = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateAssessmentTemplateOutput`](crate::output::CreateAssessmentTemplateOutput)
        pub fn build(self) -> crate::output::CreateAssessmentTemplateOutput {
            crate::output::CreateAssessmentTemplateOutput {
                assessment_template_arn: self.assessment_template_arn,
            }
        }
    }
}
impl CreateAssessmentTemplateOutput {
    /// Creates a new builder-style object to manufacture [`CreateAssessmentTemplateOutput`](crate::output::CreateAssessmentTemplateOutput)
    pub fn builder() -> crate::output::create_assessment_template_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateAssessmentTargetOutput {
    /// <p>The ARN that specifies the assessment target that is created.</p>
    pub assessment_target_arn: std::option::Option<std::string::String>,
}
impl CreateAssessmentTargetOutput {
    /// <p>The ARN that specifies the assessment target that is created.</p>
    pub fn assessment_target_arn(&self) -> std::option::Option<&str> {
        // Borrow the inner String as &str without cloning.
        self.assessment_target_arn.as_ref().map(|arn| arn.as_str())
    }
}
impl std::fmt::Debug for CreateAssessmentTargetOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Builder-style chain instead of a named intermediate formatter.
        f.debug_struct("CreateAssessmentTargetOutput")
            .field("assessment_target_arn", &self.assessment_target_arn)
            .finish()
    }
}
/// See [`CreateAssessmentTargetOutput`](crate::output::CreateAssessmentTargetOutput)
pub mod create_assessment_target_output {
    /// A builder for [`CreateAssessmentTargetOutput`](crate::output::CreateAssessmentTargetOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) assessment_target_arn: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The ARN that specifies the assessment target that is created.</p>
        pub fn assessment_target_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.assessment_target_arn = Some(input.into());
            self
        }
        /// <p>The ARN that specifies the assessment target that is created.</p>
        pub fn set_assessment_target_arn(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.assessment_target_arn = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateAssessmentTargetOutput`](crate::output::CreateAssessmentTargetOutput)
        pub fn build(self) -> crate::output::CreateAssessmentTargetOutput {
            crate::output::CreateAssessmentTargetOutput {
                assessment_target_arn: self.assessment_target_arn,
            }
        }
    }
}
impl CreateAssessmentTargetOutput {
    /// Creates a new builder-style object to manufacture [`CreateAssessmentTargetOutput`](crate::output::CreateAssessmentTargetOutput)
    pub fn builder() -> crate::output::create_assessment_target_output::Builder {
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AddAttributesToFindingsOutput {
    /// <p>Attribute details that cannot be described. An error code is provided for each failed
    /// item.</p>
    pub failed_items: std::option::Option<
        std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    >,
}
impl AddAttributesToFindingsOutput {
    /// <p>Attribute details that cannot be described. An error code is provided for each failed
    /// item.</p>
    pub fn failed_items(
        &self,
    ) -> std::option::Option<
        &std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
    > {
        self.failed_items.as_ref()
    }
}
impl std::fmt::Debug for AddAttributesToFindingsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Builder-style chain instead of a named intermediate formatter.
        f.debug_struct("AddAttributesToFindingsOutput")
            .field("failed_items", &self.failed_items)
            .finish()
    }
}
/// See [`AddAttributesToFindingsOutput`](crate::output::AddAttributesToFindingsOutput)
pub mod add_attributes_to_findings_output {
    /// A builder for [`AddAttributesToFindingsOutput`](crate::output::AddAttributesToFindingsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) failed_items: std::option::Option<
            std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
        >,
    }
    impl Builder {
        /// Adds a key-value pair to `failed_items`.
        ///
        /// To override the contents of this collection use [`set_failed_items`](Self::set_failed_items).
        ///
        /// <p>Attribute details that cannot be described. An error code is provided for each failed
        /// item.</p>
        pub fn failed_items(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<crate::model::FailedItemDetails>,
        ) -> Self {
            // Lazily create the map on first insertion, then insert in place.
            self.failed_items
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// <p>Attribute details that cannot be described. An error code is provided for each failed
        /// item.</p>
        pub fn set_failed_items(
            mut self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, crate::model::FailedItemDetails>,
            >,
        ) -> Self {
            self.failed_items = input;
            self
        }
        /// Consumes the builder and constructs a [`AddAttributesToFindingsOutput`](crate::output::AddAttributesToFindingsOutput)
        pub fn build(self) -> crate::output::AddAttributesToFindingsOutput {
            crate::output::AddAttributesToFindingsOutput {
                failed_items: self.failed_items,
            }
        }
    }
}
impl AddAttributesToFindingsOutput {
    /// Creates a new builder-style object to manufacture [`AddAttributesToFindingsOutput`](crate::output::AddAttributesToFindingsOutput)
    pub fn builder() -> crate::output::add_attributes_to_findings_output::Builder {
        std::default::Default::default()
    }
}
| 47.005047 | 156 | 0.655201 |
38301d2de2f5f87671a3ea3482346c92d4da62b2
| 7,421 |
/*
Library crate for the beejlander package
*/
use std::env;
/// Runtime configuration, populated from environment variables by [`Config::new`].
pub struct Config {
    /// Whether silver-bordered cards may appear in the pool (env var `SILVER`).
    pub include_sb: bool,
    /// Maximum USD price for commons (env var `COMMON`).
    pub cmn_price: f32,
    /// Maximum USD price for uncommons (env var `UNCOMMON`).
    pub unc_price: f32,
    /// Maximum USD price for the rare slot (env var `RARE`).
    pub rare_price: f32,
    /// Maximum USD price for lands (env var `LAND`).
    pub land_price: f32
}
impl Config {
    /// Reads every setting from the environment, falling back to the
    /// documented defaults when a variable is unset or unparseable.
    pub fn new() -> Config {
        Config {
            include_sb: fetch_bool("SILVER"),
            cmn_price: fetch_float("COMMON", 0.10),
            unc_price: fetch_float("UNCOMMON", 0.10),
            rare_price: fetch_float("RARE", 0.25),
            land_price: fetch_float("LAND", 0.20),
        }
    }
}
/// Reads `var` from the environment as a bool; any missing or
/// unparseable value defaults to `true`.
fn fetch_bool(var: &str) -> bool {
    env::var(var)
        .ok()
        .and_then(|raw| raw.parse::<bool>().ok())
        .unwrap_or(true)
}
/// Reads `var` from the environment as an f32; any missing or
/// unparseable value falls back to `default`.
fn fetch_float(var: &str, default: f32) -> f32 {
    match env::var(var).map(|raw| raw.parse::<f32>()) {
        Ok(Ok(value)) => value,
        _ => default,
    }
}
// A single card entry parsed from Scryfall's `format=text` response
// (see `async_methods::fetch_card`).
pub struct Card {
    // Card name, taken from the title line of the response.
    name: String,
    // Mana-cost token from the title line; left empty for lands and
    // single-token titles (see `fetch_card`).
    cmc: String,
    // The full type line, e.g. "Creature — Elf".
    type_line : String,
    // How many copies of this card ended up in the list.
    count: u8
}
/// Builders for the percent-encoded Scryfall search queries used to
/// assemble the card pool.
pub mod query_gen {
    /// The two search queries: one for the single rare slot, one for
    /// everything else (commons, uncommons, and cheap lands).
    pub struct Query{
        pub rare: String,
        pub other: String
    }
    /// Derives both queries from the user's price and silver-border settings.
    pub fn generate_queries(config: &super::Config) -> Query {
        Query {
            rare: rare_query(config.include_sb, config.rare_price),
            other: common_query(config.include_sb, config.cmn_price, config.unc_price, config.land_price)
        }
    }
    fn pes(symbol: &str) -> &str {
        // Percent encodes specific symbols
        match symbol {
            "(" => "%28",
            ")" => "%29",
            "+" => "%2B",
            ":" => "%3A",
            "=" => "%3D",
            _ => "",
        }
    }
    /// Common prefix of every query: excludes conspiracies and
    /// contraptions and, when silver-bordered cards are NOT wanted,
    /// appends the `-border:silver` filter from `sb`.
    fn start_query(include_sb: bool) -> String {
        if include_sb {
            // Silver-bordered cards are allowed, so no border filter is added.
            format!("q=-t{c}conspiracy+-t{c}contraption", c = pes(":"))
        }
        else {
            // BUGFIX: previously the sb() filter was appended only when
            // include_sb was true — where sb() returns "" — so the
            // silver-border exclusion was never actually emitted.
            format!("q=-t{c}conspiracy+-t{c}contraption+{s}", c = pes(":"), s = sb(include_sb))
        }
    }
    /// Suffix requesting Scryfall's plain-text response format.
    fn end_query() -> String {
        String::from("&format=text")
    }
    fn sb(include: bool) -> String {
        // Include silver bordered cards
        if include {
            String::from("")
        }
        else {
            format!("-border{}silver", pes(":"))
        }
    }
    /// Non-land commons under the price cap, e.g. `-t:land+r:c+usd<=0.1`.
    fn cmn(price: f32) -> String {
        format!("-t{c}land+r{c}c+usd<{e}{p}", c = pes(":"), e = pes("="), p = price)
    }
    /// Non-land uncommons under the price cap.
    fn unc(price: f32) -> String {
        format!("-t{c}land+r{c}u+usd<{e}{p}", c = pes(":"), e = pes("="), p = price)
    }
    /// Below-rare lands under the price cap.
    fn land(price: f32) -> String {
        format!("t{}land+r<r+usd<{}{}", pes(":"), pes("="), price)
    }
    /// Rare-or-better cards under the price cap.
    fn rare(price: f32) -> String {
        format!("r>u+usd<{}{}", pes("="), price)
    }
    /// Full query for the non-rare pool: `((common) or (uncommon) or (land))`.
    fn common_query(include_sb: bool, cmn_price: f32, unc_price: f32, land_price: f32) -> String {
        format!(
            "{b}+{l}{l}{c}{r}+or+{l}{u}{r}+or+{l}{d}{r}{r}+{e}",
            b = start_query(include_sb),
            l = pes("("),
            c = cmn(cmn_price),
            r = pes(")"),
            u = unc(unc_price),
            d = land(land_price),
            e = end_query()
        )
    }
    /// Full query for the rare slot.
    fn rare_query(include_sb: bool, price: f32) -> String {
        format!("{b}+{r}+{e}", b = start_query(include_sb), r = rare(price), e = end_query())
    }
}
/// Async helpers that fetch random cards from the Scryfall API and
/// accumulate them into a 100-card list (1 rare + 99 others).
pub mod async_methods {
    use regex::Regex;
    use std::collections::HashMap;
    use std::error::Error;
    use super::Card;
    use super::query_gen::Query;
    // Shared request parameters: endpoint, current query string, and a
    // reusable regex for spotting land type lines.
    struct Params {
        base: String,
        query: String,
        re: Regex
    }
    /// Fetches one rare, then fills the rest of the list with non-rares,
    /// keyed by card name so duplicates can be counted.
    pub async fn fetch_cards(queries: Query) -> Result<HashMap<String, Card>, Box<dyn Error>> {
        let mut list = HashMap::new();
        let mut params = Params{
            base: String::from("https://api.scryfall.com/cards/random"),
            query: format!("{}", queries.rare),
            // Case-insensitive match for type lines containing "land"
            // (used to detect cards whose title line has no mana cost).
            re: Regex::new(r"(?i)(\w+\s{1})?(l{1})and(\s{1}.+)?").unwrap()
        };
        let rare_card = fetch_card(&params).await?;
        list.insert(format!("{}", rare_card.name), rare_card);
        // Reuse the same Params with the non-rare query for the remainder.
        params.query = format!("{}", queries.other);
        fetch_nonrares(&params, &mut list).await?;
        Ok(list)
    }
    /// Draws random non-rare cards until 99 have been accepted.
    /// At most 5 draws may be resolved as duplicates (bumping the count
    /// of an existing entry); after that cap, repeated names are
    /// redrawn without counting toward the total.
    async fn fetch_nonrares(params: &Params, list: &mut HashMap<String, Card>) -> Result<(), Box<dyn Error>> {
        let mut total_count: u8 = 0;
        let mut duplicates: u8 = 0;
        while total_count < 99 {
            let card = fetch_card(&params).await;
            let card = match card {
                Ok(c) => c,
                Err(_e) => panic!("Scryfall timed out while fetching cards"),
            };
            let name = format!("{}", card.name);
            if list.contains_key(&name) && duplicates < 5 {
                // Duplicate within the cap: bump the stored copy's count.
                let prev_card = list.get_mut(&name).unwrap();
                prev_card.count += 1;
                duplicates += 1;
            }
            else if list.contains_key(&name) && duplicates >= 5 {
                // Over the duplicate cap: redraw without advancing the total.
                continue;
            }
            else {
                list.insert(name, card);
            }
            total_count += 1;
            // Progress report every 10 accepted cards (the +1 accounts for
            // the rare fetched before this loop).
            if (total_count + 1) % 10 == 0 {
                println!("{}/100", total_count + 1);
            }
        }
        Ok(())
    }
    /// Fetches a single random card and parses Scryfall's text response.
    /// NOTE(review): assumes the `format=text` layout — line 1 is
    /// "Name {cost}", line 2 is the type line — confirm against the API.
    async fn fetch_card(params: &Params) -> Result<Card, Box<dyn Error>> {
        let url = format!("{}?{}", params.base, params.query);
        let response = surf::get(&url)
            .recv_string()
            .await;
        let response = match response {
            Ok(s) => s,
            Err(_e) => panic!("Scryfall timed out while fetching cards"),
        };
        let mut lines = response.lines();
        let title_line_vec: Vec<&str> = lines.next()
            .unwrap()
            .split(" ")
            .collect();
        let type_line = lines.next().unwrap().to_string();
        let tile_line_len = title_line_vec.len();
        let mut name = String::from("");
        let mut cmc = String::from("");
        if tile_line_len == 1 {
            // Single-token title: the whole line is the name, no cost.
            name.push_str(title_line_vec.get(0).unwrap())
        }
        else if params.re.is_match(&type_line) {
            // Lands have no mana cost, so every title token is the name.
            for name_part in 0..tile_line_len {
                if name_part != 0 {
                    name.push(' ');
                }
                name.push_str(title_line_vec.get(name_part).unwrap());
            }
        }
        else {
            // Otherwise the last token is the mana cost, the rest the name.
            for name_part in 0..(tile_line_len - 1) {
                if name_part != 0 {
                    name.push(' ');
                }
                name.push_str(title_line_vec.get(name_part).unwrap());
            }
            cmc.push_str(title_line_vec.get(tile_line_len - 1).unwrap());
        }
        let card = Card {
            name,
            cmc,
            type_line,
            count: 1
        };
        Ok(card)
    }
}
/// Persistence helper that writes the assembled card list to disk.
pub mod file_io {
    use std::collections::HashMap;
    use std::fs;
    use std::io::Error;
    use super::Card;
    /// Writes the list to `./cards.txt`, one `<count>x <name>` entry per line.
    pub fn save_to_file(list: &HashMap<String, Card>) -> Result<String, Error> {
        let mut file_string = String::new();
        // Only the values matter; the map key duplicates the card name.
        for card in list.values() {
            file_string.push_str(&format!("{}x {}\n", card.count, card.name));
        }
        fs::write("./cards.txt", file_string).expect("Error - Unable to save file");
        Ok(String::from("File saved"))
    }
}
| 28.003774 | 110 | 0.481067 |
5b9bdd95e3c349d96bbd98225b010e38ba616575
| 2,820 |
use super::ETag;
use util::EntityTagRange;
use HeaderValue;
/// `If-Match` header, defined in
/// [RFC7232](https://tools.ietf.org/html/rfc7232#section-3.1)
///
/// The `If-Match` header field makes the request method conditional on
/// the recipient origin server either having at least one current
/// representation of the target resource, when the field-value is "*",
/// or having a current representation of the target resource that has an
/// entity-tag matching a member of the list of entity-tags provided in
/// the field-value.
///
/// An origin server MUST use the strong comparison function when
/// comparing entity-tags for `If-Match`, since the client
/// intends this precondition to prevent the method from being applied if
/// there have been any changes to the representation data.
///
/// # ABNF
///
/// ```text
/// If-Match = "*" / 1#entity-tag
/// ```
///
/// # Example values
///
/// * `"xyzzy"`
/// * `"xyzzy", "r2d2xxxx", "c3piozzzz"`
///
/// # Examples
///
/// ```
/// # extern crate headers;
/// use headers::IfMatch;
///
/// let if_match = IfMatch::any();
/// ```
#[derive(Clone, Debug, PartialEq)]
pub struct IfMatch(EntityTagRange);

// Generates the `Header` impl (name constant + encode/decode) for IfMatch.
derive_header! {
    IfMatch(_),
    name: IF_MATCH
}
impl IfMatch {
    /// Create a new `If-Match: *` header.
    pub fn any() -> IfMatch {
        IfMatch(EntityTagRange::Any)
    }

    /// Returns whether this is `If-Match: *`, matching any entity tag.
    pub fn is_any(&self) -> bool {
        // `matches!` replaces the exhaustive two-arm `match` with the
        // idiomatic single-pattern test; behavior is unchanged.
        matches!(self.0, EntityTagRange::Any)
    }

    /// Checks whether the `ETag` strongly matches.
    ///
    /// Strong comparison is required for `If-Match` (RFC 7232, §3.1).
    pub fn precondition_passes(&self, etag: &ETag) -> bool {
        self.0.matches_strong(&etag.0)
    }
}
impl From<ETag> for IfMatch {
    // A single entity tag converts into a one-tag `If-Match` list.
    fn from(etag: ETag) -> IfMatch {
        IfMatch(EntityTagRange::Tags(HeaderValue::from(etag.0).into()))
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn is_any() {
        assert!(IfMatch::any().is_any());
        assert!(!IfMatch::from(ETag::from_static("\"yolo\"")).is_any());
    }

    #[test]
    fn precondition_fails() {
        let if_match = IfMatch::from(ETag::from_static("\"foo\""));

        let bar = ETag::from_static("\"bar\"");
        let weak_foo = ETag::from_static("W/\"foo\"");

        // Different tag, and a weak tag with the same value, must both
        // fail the strong comparison.
        assert!(!if_match.precondition_passes(&bar));
        assert!(!if_match.precondition_passes(&weak_foo));
    }

    #[test]
    fn precondition_passes() {
        let foo = ETag::from_static("\"foo\"");

        let if_match = IfMatch::from(foo.clone());

        assert!(if_match.precondition_passes(&foo));
    }

    #[test]
    fn precondition_any() {
        let foo = ETag::from_static("\"foo\"");

        let if_match = IfMatch::any();

        assert!(if_match.precondition_passes(&foo));
    }
}
| 25.178571 | 73 | 0.607092 |
0357c89cbf6a0dfdf68b28bcdd9d795df0a0b673
| 4,690 |
use ansi_term::{Colour::*, Style};
use std::error::Error;
use std::fmt;
use std::ops::Range;
// TODO: display the span correctly with the markers
// TODO: remove the optional span, and just have a `with_source` on `SourceError`,
// with the start and end `Position`s in the source error struct.
/// Error that is ready to represent to the user in a nice way,
/// including a line snippet with the error position, and a bunch
/// of contexts to help the user know what the program was up to
/// when the error happened.
#[derive(Debug)]
pub struct SourceError<T, C> {
    pub kind: T,
    // One-line excerpt of the source around the error; populated by
    // `set_source` when the span maps to a single line.
    snippet: Option<Snippet>,
    // Byte range of the error within the original source, if known.
    span: Option<Range<usize>>,
    // Context messages appended via `with_context`, rendered after the error.
    contexts: Vec<C>,
}
impl<T, C> SourceError<T, C> {
    /// Builds an error of the given kind with no span, snippet, or context.
    pub const fn new(kind: T) -> Self {
        Self {
            kind,
            snippet: None,
            span: None,
            contexts: Vec::new(),
        }
    }

    /// Records the byte range of the error within the source text.
    pub fn set_span(&mut self, span: Range<usize>) {
        self.span = Some(span)
    }

    /// Captures a one-line snippet from `source` for the stored span, if any.
    pub fn set_source(&mut self, source: &str) {
        self.snippet = match self.span {
            Some(ref span) => Snippet::from_source(span, source),
            None => None,
        }
    }

    /// Chainable variant of [`set_span`](Self::set_span).
    pub fn with_span(mut self, span: Range<usize>) -> Self {
        self.span = Some(span);
        self
    }

    /// Chainable variant of [`set_source`](Self::set_source).
    pub fn with_source(mut self, source: &str) -> Self {
        self.set_source(source);
        self
    }

    /// Appends a context message shown beneath the rendered error.
    pub fn with_context(mut self, ctx: C) -> Self {
        self.contexts.push(ctx);
        self
    }
}
// A single source line plus the position where the error starts,
// used by the Display impl to render the annotated excerpt.
#[derive(Debug)]
struct Snippet {
    // Position of the error's first byte (line and column).
    start: Position,
    // Full text of the line containing the error.
    line: String,
}
impl Snippet {
    /// Extracts the single source line covered by `span`; returns `None`
    /// when the span crosses a line boundary.
    fn from_source(span: &Range<usize>, source: &str) -> Option<Self> {
        let start = Position::from_offset(span.start, source);
        let end = Position::from_offset(span.end, source);
        if start.line != end.line {
            return None;
        }
        // NOTE(review): the unwrap assumes `from_offset` never reports a
        // line index past the last line of `source` — confirm for
        // out-of-bounds spans.
        let line = source.lines().nth(start.line).unwrap().to_string();
        Some(Self { start, line })
    }
}
/// A zero-based (line, column) location within a source string.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Position {
    /// Zero-based byte column within the line.
    pub col: usize,
    /// Zero-based line index.
    pub line: usize,
}

impl Position {
    /// Maps a byte offset into `source` to a line/column position.
    ///
    /// Offsets at or just past a line's terminating '\n' are attributed
    /// to that line; offsets beyond the end of `source` fall back to the
    /// start of the last line seen.
    fn from_offset(target_offset: usize, source: &str) -> Self {
        let mut line_start = 0; // byte offset where the current line begins
        let mut last_line = 0; // index of the last line visited
        for (i, line) in source.split_terminator('\n').enumerate() {
            // +1 accounts for the '\n' terminator itself.
            let line_end = line_start + line.len() + 1;
            if line_end >= target_offset {
                return Self {
                    line: i,
                    col: target_offset - line_start,
                };
            }
            line_start = line_end;
            last_line = i;
        }
        Self {
            line: last_line,
            col: 0,
        }
    }
}
impl<T, C> Error for SourceError<T, C>
where
    T: Error + 'static,
    C: fmt::Debug + fmt::Display,
{
    // Expose the underlying error kind as this error's source, so
    // error-chain walkers can reach it.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.kind)
    }
}
impl<T: fmt::Display, C: fmt::Display> fmt::Display for SourceError<T, C> {
    // Renders a rustc-style diagnostic: header, location arrow, the
    // offending line, and caret markers — with ANSI colors throughout.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(ref snippet) = self.snippet {
            write!(
                f,
                r#"
{error}: {kind}
 {arrow} {location}
  {separator}
{line:3} {separator} {snip}
  {separator} {red}{markers}{end}"#,
                error = Red.bold().paint("error"),
                kind = Style::new().bold().paint(self.kind.to_string()),
                arrow = Blue.bold().paint("-->"),
                // 1-based line:col for display; positions are stored 0-based.
                location = Yellow.bold().paint(format!(
                    "{line}:{col}",
                    line = snippet.start.line + 1,
                    col = snippet.start.col + 1,
                )),
                line = Blue.bold().paint((snippet.start.line + 1).to_string()),
                separator = Blue.bold().paint("|"),
                snip = &snippet.line,
                red = Red.bold().prefix(),
                end = Red.bold().suffix(),
                // A snippet is only built from a Some(span) in set_source,
                // so the unwrap here holds.
                markers =
                    " ".repeat(snippet.start.col) + &"^".repeat(self.span.as_ref().unwrap().len())
            )
        } else if let Some(ref span) = self.span {
            // Span known but no snippet (multi-line span or no source given).
            write!(
                f,
                "error at {span:?}: {kind}",
                span = span,
                kind = self.kind
            )
        } else {
            write!(f, "error: <no position info>: {kind}", kind = self.kind)
        }?;
        // Append each context on its own line, innermost last.
        for ctx in &self.contexts {
            write!(
                f,
                "{str}",
                str = Purple.italic().paint(format!("\n => while {ctx}"))
            )?;
        }
        Ok(())
    }
}
| 28.597561 | 98 | 0.499787 |
8a88d2c6a15c1dc3ec3aa5c0e2030c2dad5c480a
| 1,216 |
// hashmap1.rs
// A basket of fruits in the form of a hash map needs to be defined.
// The key represents the name of the fruit and the value represents
// how many of that particular fruit is in the basket. You have to put
// at least three different types of fruits (e.g apple, banana, mango)
// in the basket and the total count of all the fruits should be at
// least five.
//
// Make me compile and pass the tests!
//
// Execute the command `rustlings hint hashmap1` if you need
// hints.
use std::collections::HashMap;
/// Builds the fruit basket: three kinds of fruit, five fruits in total.
fn fruit_basket() -> HashMap<String, u32> {
    // Name → count pairs, collected straight into the map.
    let stock = [("banana", 2u32), ("apple", 2), ("orange", 1)];
    stock
        .iter()
        .map(|&(name, count)| (name.to_string(), count))
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;

    // The exercise requires at least three distinct fruit types.
    #[test]
    fn at_least_three_types_of_fruits() {
        let basket = fruit_basket();
        assert!(basket.len() >= 3);
    }

    // ...and at least five fruits in total across all types.
    #[test]
    fn at_least_five_fruits() {
        let basket = fruit_basket();
        assert!(basket.values().sum::<u32>() >= 5);
    }
}
| 27.022222 | 73 | 0.645559 |
7aa0139253832063f39d1cb76af3c663d6fa27f9
| 5,566 |
// Copyright 2020 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(not(feature = "std"))]
use alloc::{borrow::ToOwned, boxed::Box, string::String, vec::Vec};
use bytes::{Bytes, BytesMut};
use core::iter::{empty, once};
use core::{mem, str};
use crate::error::DecoderError;
use crate::rlpin::Rlp;
use crate::stream::RlpStream;
use crate::traits::{Decodable, Encodable};
/// Decodes a big-endian, canonically-encoded unsigned integer from `bytes`.
///
/// # Errors
/// * `RlpInvalidIndirection` if the encoding has a leading zero byte
///   (non-canonical).
/// * `RlpIsTooBig` if `bytes` is longer than `usize` on this platform.
pub fn decode_usize(bytes: &[u8]) -> Result<usize, DecoderError> {
    match bytes.len() {
        // An empty byte string is the canonical encoding of zero
        // (mirrors `u8::decode`). Previously the empty slice fell into
        // the general arm and panicked on the `bytes[0]` index.
        0 => Ok(0),
        l if l <= mem::size_of::<usize>() => {
            if bytes[0] == 0 {
                return Err(DecoderError::RlpInvalidIndirection);
            }
            // Accumulate the big-endian bytes into the result.
            let mut res = 0usize;
            for (i, byte) in bytes.iter().enumerate().take(l) {
                let shift = (l - 1 - i) * 8;
                res += (*byte as usize) << shift;
            }
            Ok(res)
        }
        _ => Err(DecoderError::RlpIsTooBig),
    }
}
// Boxed values encode/decode exactly like the value they wrap.
impl<T: Encodable + ?Sized> Encodable for Box<T> {
    fn rlp_append(&self, s: &mut RlpStream) {
        Encodable::rlp_append(&**self, s)
    }
}

impl<T: Decodable> Decodable for Box<T> {
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        T::decode(rlp).map(Box::new)
    }
}
// `true` encodes as the single byte 1, `false` as the byte 0.
impl Encodable for bool {
    fn rlp_append(&self, s: &mut RlpStream) {
        // `bool as u8` yields exactly 1 for true and 0 for false.
        s.encoder().encode_iter(once(*self as u8));
    }
}

impl Decodable for bool {
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        rlp.decoder().decode_value(|bytes| match bytes.len() {
            // Empty payload decodes as false; any nonzero byte as true.
            0 => Ok(false),
            1 => Ok(bytes[0] != 0),
            _ => Err(DecoderError::RlpIsTooBig),
        })
    }
}
// Byte slices and byte vectors encode as plain RLP string values.
impl<'a> Encodable for &'a [u8] {
    fn rlp_append(&self, s: &mut RlpStream) {
        s.encoder().encode_value(self);
    }
}

impl Encodable for Vec<u8> {
    fn rlp_append(&self, s: &mut RlpStream) {
        s.encoder().encode_value(self);
    }
}

impl Decodable for Vec<u8> {
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        // Copies the payload out of the decoder's buffer.
        rlp.decoder().decode_value(|bytes| Ok(bytes.to_vec()))
    }
}
// `bytes::Bytes` / `bytes::BytesMut` encode like any other byte string.
impl Encodable for Bytes {
    fn rlp_append(&self, s: &mut RlpStream) {
        s.encoder().encode_value(self);
    }
}

impl Decodable for Bytes {
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        // Copies the payload into a freshly allocated Bytes.
        rlp.decoder().decode_value(|bytes| Ok(Bytes::copy_from_slice(bytes)))
    }
}

impl Encodable for BytesMut {
    fn rlp_append(&self, s: &mut RlpStream) {
        s.encoder().encode_value(self);
    }
}

impl Decodable for BytesMut {
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        rlp.decoder().decode_value(|bytes| Ok(bytes.into()))
    }
}
// `None` encodes as an empty list, `Some(v)` as a one-element list.
impl<T> Encodable for Option<T>
where
    T: Encodable,
{
    fn rlp_append(&self, s: &mut RlpStream) {
        match *self {
            None => {
                s.begin_list(0);
            }
            Some(ref value) => {
                s.begin_list(1);
                s.append(value);
            }
        }
    }
}

impl<T> Decodable for Option<T>
where
    T: Decodable,
{
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        let items = rlp.item_count()?;
        // Anything other than a 0- or 1-element list is malformed.
        match items {
            1 => rlp.val_at(0).map(Some),
            0 => Ok(None),
            _ => Err(DecoderError::RlpIncorrectListLen),
        }
    }
}
// Zero encodes as an empty value (canonical form); any other u8 as one byte.
impl Encodable for u8 {
    fn rlp_append(&self, s: &mut RlpStream) {
        if *self != 0 {
            s.encoder().encode_iter(once(*self));
        } else {
            s.encoder().encode_iter(empty());
        }
    }
}

impl Decodable for u8 {
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        rlp.decoder().decode_value(|bytes| match bytes.len() {
            1 if bytes[0] != 0 => Ok(bytes[0]),
            0 => Ok(0),
            // A single zero byte is non-canonical: zero must be empty.
            1 => Err(DecoderError::RlpInvalidIndirection),
            _ => Err(DecoderError::RlpIsTooBig),
        })
    }
}
// Generates an Encodable impl that emits the big-endian bytes of the
// value with leading zero bytes stripped (canonical RLP integers).
macro_rules! impl_encodable_for_u {
    ($name: ident) => {
        impl Encodable for $name {
            fn rlp_append(&self, s: &mut RlpStream) {
                let leading_empty_bytes = self.leading_zeros() as usize / 8;
                let buffer = self.to_be_bytes();
                s.encoder().encode_value(&buffer[leading_empty_bytes..]);
            }
        }
    };
}

// Generates the matching Decodable impl: big-endian accumulation with a
// leading-zero check, delegating 0/1-byte payloads to `u8::decode`.
macro_rules! impl_decodable_for_u {
    ($name: ident) => {
        impl Decodable for $name {
            fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
                rlp.decoder().decode_value(|bytes| match bytes.len() {
                    0 | 1 => u8::decode(rlp).map(|v| v as $name),
                    l if l <= mem::size_of::<$name>() => {
                        if bytes[0] == 0 {
                            return Err(DecoderError::RlpInvalidIndirection);
                        }
                        let mut res = 0 as $name;
                        for (i, byte) in bytes.iter().enumerate().take(l) {
                            let shift = (l - 1 - i) * 8;
                            res += (*byte as $name) << shift;
                        }
                        Ok(res)
                    }
                    _ => Err(DecoderError::RlpIsTooBig),
                })
            }
        }
    };
}
// Instantiate the canonical big-endian codecs for the wider unsigned
// integer types.
impl_encodable_for_u!(u16);
impl_encodable_for_u!(u32);
impl_encodable_for_u!(u64);
impl_encodable_for_u!(u128);

impl_decodable_for_u!(u16);
impl_decodable_for_u!(u32);
impl_decodable_for_u!(u64);
impl_decodable_for_u!(u128);
// `usize` shares the `u64` wire representation for platform-independent
// output.
impl Encodable for usize {
    fn rlp_append(&self, s: &mut RlpStream) {
        (*self as u64).rlp_append(s);
    }
}

impl Decodable for usize {
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        // NOTE(review): `as usize` silently truncates on 32-bit targets —
        // confirm values above u32::MAX cannot occur here.
        u64::decode(rlp).map(|value| value as usize)
    }
}
// Strings encode as their raw UTF-8 bytes.
impl<'a> Encodable for &'a str {
    fn rlp_append(&self, s: &mut RlpStream) {
        s.encoder().encode_value(self.as_bytes());
    }
}

impl Encodable for String {
    fn rlp_append(&self, s: &mut RlpStream) {
        s.encoder().encode_value(self.as_bytes());
    }
}

impl Decodable for String {
    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
        rlp.decoder().decode_value(|bytes| {
            str::from_utf8(bytes)
                .map(|s| s.to_owned())
                // invalid UTF-8 is reported as a data-shape error;
                // consider a dedicated error variant here
                .map_err(|_| DecoderError::RlpExpectedToBeData)
        })
    }
}
| 23.191667 | 71 | 0.636004 |
1c13ed6e02915484dad69443f6445570d2b8622c
| 8,346 |
use crate::{
consts,
error::ApiError,
filters::{handle, with_options},
libra::{self, Libra},
options::Options,
types::{AccountIdentifier, Amount, Block, BlockIdentifier, BlockRequest, BlockResponse, Currency, Operation, OperationIdentifier, Transaction, TransactionIdentifier},
};
use libra_json_rpc_client::views::{AmountView, EventDataView, TransactionDataView};
use log::debug;
use warp::Filter;
/// Builds the `POST /block` route: decodes the JSON body (a
/// `BlockRequest`) and dispatches to the `block` handler together with
/// the shared `Options`.
pub fn routes(options: Options) -> impl Filter<Extract=impl warp::Reply, Error=warp::Rejection> + Clone {
    warp::post()
        .and(
            warp::path!("block")
                .and(warp::body::json())
                .and(with_options(options.clone()))
                .and_then(handle(block))
        )
}
/// Handler for Rosetta's `POST /block`: fetch a block (by version) from the
/// Libra node and translate its transaction and events into Rosetta
/// operations.
///
/// Errors with `ApiError::BadNetwork` when the request targets a different
/// blockchain/network, and `ApiError::BadBlockRequest` when no block index
/// was supplied.
async fn block(block_request: BlockRequest, options: Options) -> Result<BlockResponse, ApiError> {
    debug!("/block");
    // Reject requests addressed to another chain or network.
    let network_identifier = block_request.network_identifier;
    if network_identifier.blockchain != consts::BLOCKCHAIN || network_identifier.network != options.network {
        return Err(ApiError::BadNetwork);
    }
    let libra = Libra::new(&options.libra_endpoint);
    // Only lookup by index is supported; a missing index is a bad request.
    let block_version = block_request
        .block_identifier
        .index
        .ok_or_else(|| ApiError::BadBlockRequest)?;
    let metadata = libra.get_metadata(Some(block_version)).await?;
    // tx[0] is the parent block's transaction, tx[1] is the requested one.
    // NOTE(review): the indexing below panics if the node returns fewer
    // transactions than requested — confirm that is acceptable here.
    let tx = if block_version == 0 {
        // For the genesis block, we populate parent_block_identifier with the
        // same genesis block. Refer to
        // https://www.rosetta-api.org/docs/common_mistakes.html#malformed-genesis-block
        let one_tx = libra.get_transactions(block_version, 1, true).await?;
        vec![one_tx[0].clone(), one_tx[0].clone()]
    } else {
        libra.get_transactions(block_version - 1, 2, true).await?
    };
    let block_identifier = BlockIdentifier {
        index: tx[1].version,
        hash: tx[1].hash.clone(),
    };
    let parent_block_identifier = BlockIdentifier {
        index: tx[0].version,
        hash: tx[0].hash.clone(),
    };
    // block timestamp is in usecs, and Rosetta wants millis
    // Note that this timestamp is 0 for genesis block and any following timeout blocks
    let timestamp = metadata.timestamp / 1000;
    let status = libra::vmstatus_to_str(&tx[1].vm_status);
    // Turn each relevant event of the requested transaction into one Rosetta
    // operation; events with a better sent/recv representation (or no balance
    // effect we can express) are filtered out first.
    let mut operations = tx[1]
        .events
        .iter()
        .filter(|event| {
            // NOTE: mint, preburn, burn, and cancelburn emit extra sent/recv
            // payment events, which are used instead, so we filter thse
            // out. We also filter out some other events we don't care about.
            match &event.data {
                // events that are represented better with sent/recv payment
                EventDataView::Mint { .. } |
                EventDataView::ReceivedMint { .. } |
                EventDataView::Preburn { .. } |
                EventDataView::Burn { .. } |
                EventDataView::CancelBurn { .. } => false,
                // events that we don't care about
                EventDataView::ComplianceKeyRotation { .. } |
                EventDataView::BaseUrlRotation { .. } |
                EventDataView::Unknown { .. } => false,
                _ => true,
            }
        })
        .enumerate()
        .map(|(index, event)| {
            let index = index as u64;
            let operation_identifier = OperationIdentifier {
                index,
                network_index: None,
            };
            // Credit/Debit records whether the amount is added to or taken
            // from the account; the sign is applied when formatting below.
            #[derive(Debug)]
            enum AmountKind<'a> {
                Credit(&'a AmountView),
                Debit(&'a AmountView),
            }
            use AmountKind::*;
            let (type_, amount, account) = match &event.data {
                // NOTE: mint, preburn, burn, and cancelburn are filtered out above.
                EventDataView::Mint { .. } => unreachable!(),
                EventDataView::ReceivedMint { .. } => unreachable!(),
                EventDataView::Preburn { .. } => unreachable!(),
                EventDataView::Burn { .. } => unreachable!(),
                EventDataView::CancelBurn { .. } => unreachable!(),
                EventDataView::ComplianceKeyRotation { .. } => unreachable!(),
                EventDataView::BaseUrlRotation { .. } => unreachable!(),
                EventDataView::Unknown { .. } => unreachable!(),
                EventDataView::ToLBRExchangeRateUpdate { .. } =>
                    ("to_lbr_exchange_rate_update", None, None),
                EventDataView::AdminTransaction { .. } =>
                    ("upgrade", None, None),
                EventDataView::NewEpoch { .. } =>
                    ("newepoch", None, None),
                EventDataView::NewBlock { .. } =>
                    ("newblock", None, None),
                EventDataView::CreateAccount { .. } =>
                    ("createaccount", None, None),
                EventDataView::ReceivedPayment { amount, receiver, .. } =>
                    ("receivedpayment", Some(Credit(amount)), Some(receiver)),
                EventDataView::SentPayment { amount, sender, .. } =>
                    ("sentpayment", Some(Debit(amount)), Some(sender)),
            };
            let type_ = type_.to_string();
            let status = status.to_string();
            let account = account.map(|account| {
                AccountIdentifier {
                    address: account.0.to_lowercase(),
                    sub_account: None,
                }
            });
            // Rosetta represents amounts as signed decimal strings: credits
            // positive, debits negative.
            let amount = amount.map(|amount| {
                let (currency, value) = match amount {
                    Credit(amount) => (amount.currency.clone(), format!("{}", amount.amount)),
                    Debit(amount) => (amount.currency.clone(), format!("-{}", amount.amount)),
                };
                Amount {
                    value,
                    currency: Currency {
                        symbol: currency,
                        decimals: 6, // TODO: use get_currencies instead of hardcoding
                    },
                }
            });
            Operation {
                operation_identifier,
                related_operations: None,
                type_,
                status,
                account,
                amount,
            }
        })
        .collect::<Vec<_>>();
    // Handle transcation fees
    // There are no events for transaction fees, since gas is used regardless
    // of transaction status. We append the sent_fee operation to represent fee
    // payment. Fee receipt is not represented, since the fees do not live in a "balance"
    // under the association account, so are not visible to Rosetta.
    if let TransactionDataView::UserTransaction { sender, gas_unit_price, gas_currency, .. } = &tx[1].transaction {
        if *gas_unit_price > 0 {
            let value = gas_unit_price * tx[1].gas_used;
            let currency = Currency {
                symbol: gas_currency.clone(),
                decimals: 6, // TODO: use get_currencies instead of hardcoding
            };
            let status = "executed".to_string(); // NOTE: tx fees are always charged
            let sent_fee_op = Operation {
                operation_identifier: OperationIdentifier {
                    // placed after all event-derived operations
                    index: tx[1].events.len() as u64,
                    network_index: None,
                },
                related_operations: None,
                type_: "sentfee".to_string(),
                status: status.clone(),
                account: Some(AccountIdentifier {
                    address: sender.to_lowercase(),
                    sub_account: None,
                }),
                amount: Some(Amount {
                    value: format!("-{}", value),
                    currency: currency.clone(),
                }),
            };
            operations.push(sent_fee_op);
        }
    }
    // A Libra "block" at this granularity carries exactly one transaction.
    let transactions = vec![
        Transaction {
            transaction_identifier: TransactionIdentifier {
                hash: tx[1].hash.clone(),
            },
            operations,
        }
    ];
    let block = Block {
        block_identifier,
        parent_block_identifier,
        timestamp,
        transactions,
    };
    let response = BlockResponse {
        block,
    };
    Ok(response)
}
| 37.426009 | 170 | 0.529715 |
e5d6c46dedc74cef62796ffdc70455e96a7be87c
| 352 |
use blake3::keyed_hash;
use crate::DiscoveryKey;
/// Seed for the discovery key hash.
const DISCOVERY_NS_BUF: &[u8] = b"hypercore";
/// Calculate the discovery key of a key.
///
/// The discovery key is a 32 byte namespaced hash of the key.
pub fn discovery_key(key: &[u8; 32]) -> DiscoveryKey {
*keyed_hash(key, &DISCOVERY_NS_BUF).as_bytes()
}
| 25.142857 | 62 | 0.704545 |
e593ca2940433312a00a4b7c00025953874f848d
| 58 |
// Private implementation module; its public items are re-exported below.
mod choose;
// Presumably game color-preference helpers — name-based, confirm.
pub mod game_color_prefs;
pub use choose::*;
| 11.6 | 25 | 0.741379 |
3adc76926da2dd9139546be2452975bf3a3da92d
| 3,969 |
//! You'll need to compile multi-echo-server-worker before running this example:
//!
//! ```bash
//! cargo build --example multi-echo-server-worker
//! ```
//!
//! Then run:
//!
//! ```bash
//! cargo run --example multi-echo-server
//! ```
//!
//! Now you can connect to localhost:7000:
//!
//! ```bash
//! nc localhost 7000
//! ```
//!
//! Anything you type will be echoed back. You'll see that every time you connect, you'll get the
//! next worker, round-robin style.
extern crate libuv;
use libuv::prelude::*;
use libuv::{
cpu_info, exepath, Buf, PipeHandle, ProcessHandle, ProcessOptions, StdioContainer, StdioFlags,
StdioType, TcpBindFlags,
};
use std::net::Ipv4Addr;
use std::path::PathBuf;
/// Round-robin pool of spawned worker processes, addressed via their IPC pipes.
struct Workers {
    workers: Vec<PipeHandle>, // one pipe per spawned worker process
    next: usize,              // index of the worker to hand out next
}
impl Workers {
    /// Create an empty pool with room for `capacity` worker pipes.
    fn new(capacity: usize) -> Workers {
        Workers {
            workers: Vec::with_capacity(capacity),
            next: 0,
        }
    }
    /// Register a worker's IPC pipe with the pool.
    fn add(&mut self, worker: PipeHandle) {
        self.workers.push(worker);
    }
    /// Hand out the next worker, round-robin.
    ///
    /// As before, this panics if the pool is empty (modulo by zero).
    fn next(&mut self) -> &mut PipeHandle {
        let current = self.next;
        self.next = (current + 1) % self.workers.len();
        // Safe indexing replaces the previous unjustified `get_unchecked_mut`:
        // `current` is always `x % len` from a prior call (or 0), the pool
        // never shrinks, and the bounds check is negligible next to the I/O
        // this pipe feeds.
        &mut self.workers[current]
    }
}
/// Exit callback for spawned workers: report how the child ended, then
/// release its process handle.
fn close_process_handle(mut handle: ProcessHandle, exit_status: i64, term_signal: i32) {
    let summary = format!(
        "Process exited with status {}, signal {}",
        exit_status, term_signal
    );
    println!("{}", summary);
    handle.close(());
}
/// Listener callback: accept an incoming TCP client and hand its socket to
/// the next worker (round-robin) by writing a one-byte message with the
/// client stream attached via `write2`.
fn on_new_connection(mut server: StreamHandle, status: libuv::Result<u32>, workers: &mut Workers) {
    if let Err(e) = status {
        eprintln!("Error with new connection: {}", e);
        return;
    }
    // A fresh TCP handle is required to accept the pending connection.
    if let Ok(mut client) = server.get_loop().tcp() {
        match server.accept(&mut client.to_stream()) {
            Ok(_) => {
                // "a" is a dummy payload; the client stream rides along as
                // the handle passed to the worker.
                if let Ok(buf) = Buf::new("a") {
                    let worker = workers.next();
                    if let Err(e) = worker.write2(&client.to_stream(), &[buf], ()) {
                        eprintln!("Could not write to worker: {}", e);
                    }
                }
                // NOTE(review): if Buf::new fails, the accepted client is
                // never closed here — confirm this leak is acceptable.
            }
            Err(_) => {
                client.close(());
            }
        }
    }
}
/// Spawn one worker process per CPU, each connected to this process through
/// an IPC pipe on the child's stdin; stdout/stderr are inherited.
///
/// The worker binary is expected to sit next to the current executable
/// (see the build note in the file header).
fn setup_workers(r#loop: &mut Loop) -> Result<Workers, Box<dyn std::error::Error>> {
    // Derive the worker path from our own executable's location.
    let mut path: PathBuf = exepath()?.into();
    if cfg!(windows) {
        path.set_file_name("multi-echo-server-worker.exe");
    } else {
        path.set_file_name("multi-echo-server-worker");
    }
    let path = path.to_string_lossy().into_owned();
    let args: [&str; 1] = [&path];
    // One worker per CPU reported by libuv.
    let info = cpu_info()?;
    let mut workers = Workers::new(info.len());
    for _ in 0..info.len() {
        // ipc=true: the pipe can pass handles (the client sockets) over.
        let pipe = r#loop.pipe(true)?;
        workers.add(pipe);
        let child_stdio = [
            // stdin: our end of the IPC pipe, readable by the child
            StdioContainer {
                flags: StdioFlags::CREATE_PIPE | StdioFlags::READABLE_PIPE,
                data: StdioType::Stream(pipe.to_stream()),
            },
            // stdout: inherit ours
            StdioContainer {
                flags: StdioFlags::INHERIT_FD,
                data: StdioType::Fd(1),
            },
            // stderr: inherit ours
            StdioContainer {
                flags: StdioFlags::INHERIT_FD,
                data: StdioType::Fd(2),
            },
        ];
        let mut options = ProcessOptions::new(&args);
        options.exit_cb = close_process_handle.into();
        options.stdio = &child_stdio;
        let process = r#loop.spawn_process(options)?;
        println!("Started worker {}", process.pid());
    }
    Ok(workers)
}
/// Entry point: spawn the workers, then listen on 0.0.0.0:7000 and dispatch
/// each accepted connection to the next worker round-robin.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut r#loop = Loop::default()?;
    let mut workers = setup_workers(&mut r#loop)?;
    let mut server = r#loop.tcp()?;
    let addr = (Ipv4Addr::UNSPECIFIED, 7000).into();
    server.bind(&addr, TcpBindFlags::empty())?;
    // 128 = connection backlog; the closure owns the worker pool.
    server.listen(128, move |server, status| {
        on_new_connection(server, status, &mut workers)
    })?;
    // Blocks until the loop has no more active handles.
    r#loop.run(RunMode::Default)?;
    Ok(())
}
| 27.184932 | 99 | 0.555304 |
16a0fd090ad04533e0fc1fde345d8c11ea930b5d
| 294 |
// run-rustfix
#![warn(clippy::all, clippy::pedantic)]
#![allow(clippy::missing_docs_in_private_items)]
#![allow(clippy::map_identity)]
// NOTE(review): this is a clippy `run-rustfix` fixture — the
// `map(...).flatten()` chains below are deliberate lint triggers
// (presumably for `map_flatten`; confirm against the test's expected
// output) and must not be "fixed" in place.
fn main() {
    let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| 0..x).flatten().collect();
    let _: Option<_> = (Some(Some(1))).map(|x| x).flatten();
}
| 26.727273 | 80 | 0.622449 |
23e2135c67894a7d2a99e6268ce9871fd8d65211
| 3,263 |
use {
mundis_ledger::{
blockstore::Blockstore,
shred::{Nonce, SIZE_OF_NONCE},
},
mundis_sdk::{clock::Slot, packet::Packet},
std::{io, net::SocketAddr},
};
/// Build a repair response packet for the data shred at (`slot`,
/// `shred_index`), addressed to `dest` and tagged with `nonce`.
///
/// Returns `None` when the shred is absent or the payload (plus nonce) would
/// not fit in a packet. Panics if the blockstore lookup itself fails.
pub fn repair_response_packet(
    blockstore: &Blockstore,
    slot: Slot,
    shred_index: u64,
    dest: &SocketAddr,
    nonce: Nonce,
) -> Option<Packet> {
    let shred = blockstore
        .get_data_shred(slot, shred_index)
        .expect("Blockstore could not get data shred");
    // `.and_then` replaces the previous `.map(...).unwrap_or(None)`, which
    // is the non-idiomatic spelling of the same flattening (clippy
    // `option_map_or_none` / `map_flatten`).
    shred.and_then(|shred| repair_response_packet_from_bytes(shred, dest, nonce))
}
/// Wrap a shred payload in a `Packet` addressed to `dest`, appending the
/// repair `nonce` (bincode-encoded) directly after the payload bytes.
/// Returns `None` if payload + nonce exceed the packet buffer.
pub fn repair_response_packet_from_bytes(
    bytes: Vec<u8>,
    dest: &SocketAddr,
    nonce: Nonce,
) -> Option<Packet> {
    let payload_len = bytes.len();
    let total_len = payload_len + SIZE_OF_NONCE;
    let mut packet = Packet::default();
    if total_len > packet.data.len() {
        return None;
    }
    packet.meta.size = total_len;
    packet.meta.set_addr(dest);
    packet.data[..payload_len].copy_from_slice(&bytes);
    // Serialize the nonce into the bytes immediately after the payload.
    let mut wr = io::Cursor::new(&mut packet.data[payload_len..]);
    bincode::serialize_into(&mut wr, &nonce).expect("Buffer not large enough to fit nonce");
    Some(packet)
}
/// Read back the repair nonce stored at the tail of a repair response packet.
/// Returns `None` when the packet is too small to hold a nonce or the bytes
/// fail to deserialize.
pub fn nonce(packet: &Packet) -> Option<Nonce> {
    packet
        .meta
        .size
        .checked_sub(SIZE_OF_NONCE)
        .and_then(|nonce_start| packet.deserialize_slice(nonce_start..).ok())
}
#[cfg(test)]
mod test {
    use {
        super::*,
        mundis_ledger::{
            shred::{Shred, Shredder},
            sigverify_shreds::verify_shred_cpu,
        },
        mundis_sdk::{
            packet::PacketFlags,
            signature::{Keypair, Signer},
        },
        std::{
            collections::HashMap,
            net::{IpAddr, Ipv4Addr},
        },
    };
    // Builds a signed shred, wraps it in a repair response packet, and checks
    // CPU shred signature verification against a correct leader key (accept),
    // a wrong key (reject), and no leader entry at all (None).
    fn run_test_sigverify_shred_cpu_repair(slot: Slot) {
        mundis_logger::setup();
        let mut shred = Shred::new_from_data(
            slot,
            0xc0de,
            0xdead,
            Some(&[1, 2, 3, 4]),
            true,
            true,
            0,
            0,
            0xc0de,
        );
        assert_eq!(shred.slot(), slot);
        let keypair = Keypair::new();
        Shredder::sign_shred(&keypair, &mut shred);
        trace!("signature {}", shred.common_header.signature);
        let nonce = 9;
        // Route the shred through the repair-response path under test.
        let mut packet = repair_response_packet_from_bytes(
            shred.payload,
            &SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080),
            nonce,
        )
        .unwrap();
        packet.meta.flags |= PacketFlags::REPAIR;
        // Correct leader key for the slot: verification succeeds (Some(1)).
        let leader_slots = [(slot, keypair.pubkey().to_bytes())]
            .iter()
            .cloned()
            .collect();
        let rv = verify_shred_cpu(&packet, &leader_slots);
        assert_eq!(rv, Some(1));
        // Wrong leader key: verification fails (Some(0)).
        let wrong_keypair = Keypair::new();
        let leader_slots = [(slot, wrong_keypair.pubkey().to_bytes())]
            .iter()
            .cloned()
            .collect();
        let rv = verify_shred_cpu(&packet, &leader_slots);
        assert_eq!(rv, Some(0));
        // No leader entry for the slot: verification cannot run (None).
        let leader_slots = HashMap::new();
        let rv = verify_shred_cpu(&packet, &leader_slots);
        assert_eq!(rv, None);
    }
    #[test]
    fn test_sigverify_shred_cpu_repair() {
        run_test_sigverify_shred_cpu_repair(0xdead_c0de);
    }
}
| 28.12931 | 92 | 0.563592 |
c1dcfdc4ee4a3a1eb43d69839d8fdae18cb3f388
| 20,764 |
/**
* Copyright (c) 2016, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree. An additional
* directory.
*
**
*
* THIS FILE IS @generated; DO NOT EDIT IT
* To regenerate this file, run
*
* buck run //hphp/hack/src:generate_full_fidelity
*
**
*
*/
use parser_core_types::token_factory::TokenFactory;
use parser_core_types::lexable_token::LexableToken;
/// The concrete token type produced by a smart constructor's token factory.
pub type Token<S> = <<S as SmartConstructors>::TF as TokenFactory>::Token;
/// The trivia type associated with that token.
pub type Trivia<S> = <Token<S> as LexableToken>::Trivia;
/// Node-factory interface used by the parser: `R` is the constructed result
/// (node) type, `State` is state threaded through construction, and `TF`
/// produces tokens. One `make_*` method exists per syntax-node kind.
///
/// NOTE(review): this trait lives in an `@generated` file (see the header);
/// any edits here, including these comments, will be lost on regeneration.
pub trait SmartConstructors: Clone {
    type TF: TokenFactory;
    type State;
    type R;
    // Accessors for the threaded state and token factory.
    fn state_mut(&mut self) -> &mut Self::State;
    fn into_state(self) -> Self::State;
    fn token_factory_mut(&mut self) -> &mut Self::TF;
    // Structural constructors: missing node, token wrapper, node list.
    fn make_missing(&mut self, offset : usize) -> Self::R;
    fn make_token(&mut self, arg0: Token<Self>) -> Self::R;
    fn make_list(&mut self, arg0: Vec<Self::R>, offset: usize) -> Self::R;
    // Optional hooks invoked before parsing certain declarators; no-ops by default.
    fn begin_enumerator(&mut self) {}
    fn begin_enum_class_enumerator(&mut self) {}
    fn begin_constant_declarator(&mut self) {}
    // One constructor per syntax-node kind; argument order mirrors the
    // node's child order.
    fn make_end_of_file(&mut self, arg0 : Self::R) -> Self::R;
    fn make_script(&mut self, arg0 : Self::R) -> Self::R;
    fn make_qualified_name(&mut self, arg0 : Self::R) -> Self::R;
    fn make_simple_type_specifier(&mut self, arg0 : Self::R) -> Self::R;
    fn make_literal_expression(&mut self, arg0 : Self::R) -> Self::R;
    fn make_prefixed_string_expression(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_prefixed_code_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_variable_expression(&mut self, arg0 : Self::R) -> Self::R;
    fn make_pipe_variable_expression(&mut self, arg0 : Self::R) -> Self::R;
    fn make_file_attribute_specification(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_enum_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R, arg9 : Self::R, arg10 : Self::R) -> Self::R;
    fn make_enum_use(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_enumerator(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_enum_class_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R, arg9 : Self::R, arg10 : Self::R, arg11 : Self::R) -> Self::R;
    fn make_enum_class_enumerator(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_alias_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R) -> Self::R;
    fn make_context_alias_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R) -> Self::R;
    fn make_property_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_property_declarator(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_namespace_declaration(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_namespace_declaration_header(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_namespace_body(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_namespace_empty_body(&mut self, arg0 : Self::R) -> Self::R;
    fn make_namespace_use_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_namespace_group_use_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R) -> Self::R;
    fn make_namespace_use_clause(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_function_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_function_declaration_header(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R, arg9 : Self::R, arg10 : Self::R, arg11 : Self::R) -> Self::R;
    fn make_contexts(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_where_clause(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_where_constraint(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_methodish_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_methodish_trait_resolution(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_classish_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R, arg9 : Self::R, arg10 : Self::R, arg11 : Self::R) -> Self::R;
    fn make_classish_body(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_trait_use(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_require_clause(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_const_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R) -> Self::R;
    fn make_constant_declarator(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_type_const_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R, arg9 : Self::R) -> Self::R;
    fn make_context_const_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R) -> Self::R;
    fn make_decorated_expression(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_parameter_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R) -> Self::R;
    fn make_variadic_parameter(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_old_attribute_specification(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_attribute_specification(&mut self, arg0 : Self::R) -> Self::R;
    fn make_attribute(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_inclusion_expression(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_inclusion_directive(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_compound_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_expression_statement(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_markup_section(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_markup_suffix(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_unset_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_using_statement_block_scoped(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R) -> Self::R;
    fn make_using_statement_function_scoped(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_while_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_if_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R) -> Self::R;
    fn make_else_clause(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_try_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_catch_clause(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R) -> Self::R;
    fn make_finally_clause(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_do_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R) -> Self::R;
    fn make_for_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R) -> Self::R;
    fn make_foreach_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R, arg9 : Self::R) -> Self::R;
    fn make_switch_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R) -> Self::R;
    fn make_switch_section(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_switch_fallthrough(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_case_label(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_default_label(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_return_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_yield_break_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_throw_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_break_statement(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_continue_statement(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_echo_statement(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_concurrent_statement(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_simple_initializer(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_anonymous_class(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R) -> Self::R;
    fn make_anonymous_function(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R, arg9 : Self::R, arg10 : Self::R, arg11 : Self::R) -> Self::R;
    fn make_anonymous_function_use_clause(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_lambda_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_lambda_signature(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R) -> Self::R;
    fn make_cast_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_scope_resolution_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_member_selection_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_safe_member_selection_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_embedded_member_selection_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_yield_expression(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_prefix_unary_expression(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_postfix_unary_expression(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_binary_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_is_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_as_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_nullable_as_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_upcast_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_conditional_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_eval_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_isset_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_function_call_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_function_pointer_expression(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_parenthesized_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_braced_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_et_splice_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_embedded_braced_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_list_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_collection_literal_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_object_creation_expression(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_constructor_call(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_darray_intrinsic_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_dictionary_intrinsic_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_keyset_intrinsic_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_varray_intrinsic_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_vector_intrinsic_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_element_initializer(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_subscript_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_embedded_subscript_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_awaitable_creation_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_xhp_children_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_xhp_children_parenthesized_list(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_xhp_category_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_xhp_enum_type(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_xhp_lateinit(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_xhp_required(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_xhp_class_attribute_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_xhp_class_attribute(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_xhp_simple_class_attribute(&mut self, arg0 : Self::R) -> Self::R;
    fn make_xhp_simple_attribute(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_xhp_spread_attribute(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_xhp_open(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_xhp_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_xhp_close(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_type_constant(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_vector_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_keyset_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_tuple_type_explicit_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_varray_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_function_ctx_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_type_parameter(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R) -> Self::R;
    fn make_type_constraint(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_context_constraint(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_darray_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R) -> Self::R;
    fn make_dictionary_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_closure_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R, arg6 : Self::R, arg7 : Self::R, arg8 : Self::R, arg9 : Self::R, arg10 : Self::R) -> Self::R;
    fn make_closure_parameter_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_classname_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_field_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_field_initializer(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_shape_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R) -> Self::R;
    fn make_shape_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_tuple_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R) -> Self::R;
    fn make_generic_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_nullable_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_like_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_soft_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_attributized_specifier(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_reified_type_argument(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_type_arguments(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_type_parameters(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_tuple_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_union_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_intersection_type_specifier(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_error(&mut self, arg0 : Self::R) -> Self::R;
    fn make_list_item(&mut self, arg0 : Self::R, arg1 : Self::R) -> Self::R;
    fn make_enum_class_label_expression(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
    fn make_module_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R, arg3 : Self::R, arg4 : Self::R, arg5 : Self::R) -> Self::R;
    fn make_module_membership_declaration(&mut self, arg0 : Self::R, arg1 : Self::R, arg2 : Self::R) -> Self::R;
}
| 98.407583 | 256 | 0.627673 |
62ae30da98e7f9ed9c35749bb53003a49181f347
| 1,113 |
use crate::lc::Solution;
/// #String
impl Solution {
    /// Zigzag-convert `s` over `num_rows` rows and read it back row by row
    /// (LeetCode #6).
    ///
    /// Walks each row: positions in a full zigzag cycle repeat every
    /// `2 * num_rows - 2` characters; middle rows also pick up the diagonal
    /// character at `cycle - row` within each cycle.
    ///
    /// Fixed: the previous version indexed raw bytes and pushed
    /// `bytes[j] as char`, which mangled any non-ASCII UTF-8 input; this
    /// version works on `char`s (identical output for ASCII).
    pub fn convert(s: String, num_rows: i32) -> String {
        // A single row (or fewer) is already in reading order.
        if num_rows == 1 {
            return s;
        }
        // Length of one full zigzag cycle (down num_rows, then up num_rows-2).
        let cycle = (2 * num_rows - 2) as usize;
        let chars: Vec<char> = s.chars().collect();
        let mut res = String::with_capacity(s.len());
        for row in 0..num_rows {
            let mut j = row as usize;
            // Position of the diagonal (up-stroke) char paired with `j`.
            let mut diag = (2 * num_rows - 2 - row) as usize;
            while j < chars.len() {
                res.push(chars[j]);
                // Only middle rows have a diagonal character per cycle.
                if row > 0 && row < num_rows - 1 && diag < chars.len() {
                    res.push(chars[diag]);
                }
                j += cycle;
                diag += cycle;
            }
        }
        res
    }
}
#[test]
fn test() {
assert_eq!("PAHNAPLSIIGYIR", Solution::convert("PAYPALISHIRING".to_string(), 3));
assert_eq!("PINALSIGYAHRPI", Solution::convert("PAYPALISHIRING".to_string(), 4));
assert_eq!("A", Solution::convert("A".to_string(), 1));
assert_eq!("AB", Solution::convert("AB".to_string(), 2));
assert_eq!("ACB", Solution::convert("ABC".to_string(), 2));
}
| 31.8 | 85 | 0.49416 |
9b576bbb0868d2b2d8eb59072de5744cd05e591e
| 709 |
// ignore-windows: No libc on Windows
// error-pattern: the main thread terminated without waiting for all remaining threads
// Check that we terminate the program when the main thread terminates.
#![feature(rustc_private)]
extern crate libc;
use std::{mem, ptr};
extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void {
loop {}
}
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
}
}
| 29.541667 | 106 | 0.672779 |
01d882e0421e8f696e5f9156b4ceed6d857029f2
| 2,794 |
use colored::*;
use std::sync::{Arc, Mutex};
use crate::util;
pub enum State {
Continue,
Stop,
}
#[derive(Debug)]
pub struct Success {
pub start: u64,
pub request: String,
pub duration: i64,
pub code: u16,
}
impl Success {
pub fn new(start: u64, request: String, duration: i64, code: u16) -> Result<Success, Failure> {
Ok(Success { start, request, duration, code })
}
}
pub struct Failure {
pub start: u64,
pub request: String,
pub duration: i64,
pub code: u16,
pub reason: String,
}
impl Failure {
pub fn http(start: u64, request: String, duration: i64, code: u16, reason: String) -> Result<Success, Failure> {
Err(Failure {
start,
request,
duration,
code,
reason,
})
}
pub fn global(start: u64, request: String, duration: i64, reason: String) -> Result<Success, Failure> {
Err(Failure {
start,
request,
duration,
code: 0,
reason,
})
}
}
pub fn process(results: &Arc<Mutex<Vec<Result<Success, Failure>>>>) {
let data = results.lock().unwrap();
let success = data
.iter()
.filter(|x| x.is_ok())
.map(|x| match x {
Ok(Success { duration, .. }) => *duration,
_ => 0,
})
.collect::<Vec<i64>>();
println!();
let mut histogram = histogram::Histogram::new();
for v in &success {
if *v >= 0 {
histogram.increment(*v as u64).unwrap();
}
}
if histogram.entries() > 0 {
let kv = vec![
("Requests count", format!("{}", data.len())),
("Success count:", format!("{}", success.len())),
("Error count:", format!("{}", data.len() - success.len())),
("Success rate:", format!("{:.2}%", format_rate(success.len(), data.len()))),
("Mean:", format_res_ms(histogram.mean())),
("Std. dev.:", format_opt_ms(histogram.stddev())),
("90th percentile:", format_res_ms(histogram.percentile(90.0))),
("95th percentile:", format_res_ms(histogram.percentile(95.0))),
("99th percentile:", format_res_ms(histogram.percentile(99.0))),
];
println!("{}", "STATISTICS:".blue().bold());
util::print_kv(kv);
} else {
util::info("no results");
}
}
fn format_rate(value: usize, total: usize) -> f64 {
value as f64 / total as f64 * 100.0
}
fn format_res_ms(value: Result<u64, &'static str>) -> String {
match value {
Ok(value) => {
if value < 1000 {
format!("{}ms", value)
} else {
format!("{}s", value as f64 / 1000.0)
}
}
Err(_) => String::from("N/A"),
}
}
fn format_opt_ms(value: Option<u64>) -> String {
match value {
Some(value) => {
if value < 1000 {
format!("{}ms", value)
} else {
format!("{}s", value as f64 / 1000.0)
}
}
None => String::from("N/A"),
}
}
| 22 | 114 | 0.560129 |
f4b8b7fbe39b12073a36ba40867f5c76b0399387
| 2,219 |
// Copyright 2019-2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use crate::embedded_assets::{EmbeddedAssets, EmbeddedAssetsError};
use proc_macro2::TokenStream;
use quote::quote;
use std::path::PathBuf;
use tauri_utils::config::Config;
/// Necessary data needed by [`context_codegen`] to generate code for a Tauri application context.
pub struct ContextData {
pub dev: bool,
pub config: Config,
pub config_parent: PathBuf,
pub root: TokenStream,
}
/// Build a `tauri::Context` for including in application code.
pub fn context_codegen(data: ContextData) -> Result<TokenStream, EmbeddedAssetsError> {
let ContextData {
dev,
config,
config_parent,
root,
} = data;
let assets_path = if dev {
// if dev_path is a dev server, we don't have any assets to embed
if config.build.dev_path.starts_with("http") {
None
} else {
Some(config_parent.join(&config.build.dev_path))
}
} else {
Some(config_parent.join(&config.build.dist_dir))
};
// generate the assets inside the dist dir into a perfect hash function
let assets = if let Some(assets_path) = assets_path {
EmbeddedAssets::new(&assets_path)?
} else {
Default::default()
};
// handle default window icons for Windows targets
let default_window_icon = if cfg!(windows) {
let icon_path = config_parent.join("icons/icon.ico").display().to_string();
quote!(Some(include_bytes!(#icon_path).to_vec()))
} else {
quote!(None)
};
let package_name = if let Some(product_name) = &config.package.product_name {
quote!(#product_name.to_string())
} else {
quote!(env!("CARGO_PKG_NAME").to_string())
};
let package_version = if let Some(version) = &config.package.version {
quote!(#version.to_string())
} else {
quote!(env!("CARGO_PKG_VERSION").to_string())
};
// double braces are purposeful to force the code into a block expression
Ok(quote!(#root::Context {
config: #config,
assets: #assets,
default_window_icon: #default_window_icon,
package_info: #root::api::PackageInfo {
name: #package_name,
version: #package_version,
},
}))
}
| 29.586667 | 98 | 0.691753 |
f4bf820556339fb0a14a0683258ba4df3507aca3
| 8,023 |
use std::cmp::min;
use std::mem;
use crossfont::Metrics;
use glutin::event::{ElementState, ModifiersState};
use urlocator::{UrlLocation, UrlLocator};
use alacritty_terminal::index::{Column, Point};
use alacritty_terminal::term::cell::Flags;
use alacritty_terminal::term::color::Rgb;
use alacritty_terminal::term::render::RenderableCell;
use alacritty_terminal::term::SizeInfo;
use crate::config::Config;
use crate::event::Mouse;
use crate::renderer::rects::{RenderLine, RenderRect};
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Url {
lines: Vec<RenderLine>,
end_offset: u16,
num_cols: Column,
}
impl Url {
pub fn rects(&self, metrics: &Metrics, size: &SizeInfo) -> Vec<RenderRect> {
let end = self.end();
self.lines
.iter()
.filter(|line| line.start <= end)
.map(|line| {
let mut rect_line = *line;
rect_line.end = min(line.end, end);
rect_line.rects(Flags::UNDERLINE, metrics, size)
})
.flatten()
.collect()
}
pub fn start(&self) -> Point {
self.lines[0].start
}
pub fn end(&self) -> Point {
self.lines[self.lines.len() - 1].end.sub(self.num_cols, self.end_offset as usize)
}
}
pub struct Urls {
locator: UrlLocator,
urls: Vec<Url>,
scheme_buffer: Vec<(Point, Rgb)>,
last_point: Option<Point>,
state: UrlLocation,
}
impl Default for Urls {
fn default() -> Self {
Self {
locator: UrlLocator::new(),
scheme_buffer: Vec::new(),
urls: Vec::new(),
state: UrlLocation::Reset,
last_point: None,
}
}
}
impl Urls {
pub fn new() -> Self {
Self::default()
}
// Update tracked URLs.
pub fn update(&mut self, num_cols: Column, cell: &RenderableCell) {
let point: Point = cell.into();
let mut end = point;
// Include the following wide char spacer.
if cell.flags.contains(Flags::WIDE_CHAR) {
end.col += 1;
}
// Reset URL when empty cells have been skipped.
if point != Point::default() && Some(point.sub(num_cols, 1)) != self.last_point {
self.reset();
}
self.last_point = Some(end);
// Extend current state if a leading wide char spacer is encountered.
if cell.flags.intersects(Flags::LEADING_WIDE_CHAR_SPACER) {
if let UrlLocation::Url(_, mut end_offset) = self.state {
if end_offset != 0 {
end_offset += 1;
}
self.extend_url(point, end, cell.fg, end_offset);
}
return;
}
// Advance parser.
let last_state = mem::replace(&mut self.state, self.locator.advance(cell.character));
match (self.state, last_state) {
(UrlLocation::Url(_length, end_offset), UrlLocation::Scheme) => {
// Create empty URL.
self.urls.push(Url { lines: Vec::new(), end_offset, num_cols });
// Push schemes into URL.
for (scheme_point, scheme_fg) in self.scheme_buffer.split_off(0) {
self.extend_url(scheme_point, scheme_point, scheme_fg, end_offset);
}
// Push the new cell into URL.
self.extend_url(point, end, cell.fg, end_offset);
},
(UrlLocation::Url(_length, end_offset), UrlLocation::Url(..)) => {
self.extend_url(point, end, cell.fg, end_offset);
},
(UrlLocation::Scheme, _) => self.scheme_buffer.push((cell.into(), cell.fg)),
(UrlLocation::Reset, _) => self.reset(),
_ => (),
}
// Reset at un-wrapped linebreak.
if cell.column + 1 == num_cols && !cell.flags.contains(Flags::WRAPLINE) {
self.reset();
}
}
/// Extend the last URL.
fn extend_url(&mut self, start: Point, end: Point, color: Rgb, end_offset: u16) {
let url = self.urls.last_mut().unwrap();
// If color changed, we need to insert a new line.
if url.lines.last().map(|last| last.color) == Some(color) {
url.lines.last_mut().unwrap().end = end;
} else {
url.lines.push(RenderLine { color, start, end });
}
// Update excluded cells at the end of the URL.
url.end_offset = end_offset;
}
/// Find URL below the mouse cursor.
pub fn highlighted(
&self,
config: &Config,
mouse: &Mouse,
mods: ModifiersState,
mouse_mode: bool,
selection: bool,
) -> Option<Url> {
// Require additional shift in mouse mode.
let mut required_mods = config.ui_config.mouse.url.mods();
if mouse_mode {
required_mods |= ModifiersState::SHIFT;
}
// Make sure all prerequisites for highlighting are met.
if selection
|| !mouse.inside_text_area
|| config.ui_config.mouse.url.launcher.is_none()
|| required_mods != mods
|| mouse.left_button_state == ElementState::Pressed
{
return None;
}
self.find_at(Point::new(mouse.line, mouse.column))
}
/// Find URL at location.
pub fn find_at(&self, point: Point) -> Option<Url> {
for url in &self.urls {
if (url.start()..=url.end()).contains(&point) {
return Some(url.clone());
}
}
None
}
fn reset(&mut self) {
self.locator = UrlLocator::new();
self.state = UrlLocation::Reset;
self.scheme_buffer.clear();
}
}
#[cfg(test)]
mod tests {
use super::*;
use alacritty_terminal::index::{Column, Line};
fn text_to_cells(text: &str) -> Vec<RenderableCell> {
text.chars()
.enumerate()
.map(|(i, character)| RenderableCell {
character,
zerowidth: None,
line: Line(0),
column: Column(i),
fg: Default::default(),
bg: Default::default(),
bg_alpha: 0.,
flags: Flags::empty(),
is_match: false,
})
.collect()
}
#[test]
fn multi_color_url() {
let mut input = text_to_cells("test https://example.org ing");
let num_cols = input.len();
input[10].fg = Rgb { r: 0xff, g: 0x00, b: 0xff };
let mut urls = Urls::new();
for cell in input {
urls.update(Column(num_cols), &cell);
}
let url = urls.urls.first().unwrap();
assert_eq!(url.start().col, Column(5));
assert_eq!(url.end().col, Column(23));
}
#[test]
fn multiple_urls() {
let input = text_to_cells("test git:a git:b git:c ing");
let num_cols = input.len();
let mut urls = Urls::new();
for cell in input {
urls.update(Column(num_cols), &cell);
}
assert_eq!(urls.urls.len(), 3);
assert_eq!(urls.urls[0].start().col, Column(5));
assert_eq!(urls.urls[0].end().col, Column(9));
assert_eq!(urls.urls[1].start().col, Column(11));
assert_eq!(urls.urls[1].end().col, Column(15));
assert_eq!(urls.urls[2].start().col, Column(17));
assert_eq!(urls.urls[2].end().col, Column(21));
}
#[test]
fn wide_urls() {
let input = text_to_cells("test https://こんにちは (http:여보세요) ing");
let num_cols = input.len() + 9;
let mut urls = Urls::new();
for cell in input {
urls.update(Column(num_cols), &cell);
}
assert_eq!(urls.urls.len(), 2);
assert_eq!(urls.urls[0].start().col, Column(5));
assert_eq!(urls.urls[0].end().col, Column(17));
assert_eq!(urls.urls[1].start().col, Column(20));
assert_eq!(urls.urls[1].end().col, Column(28));
}
}
| 28.963899 | 93 | 0.538452 |
56c28e433936ed52e0cdedeb73027d8a2b3d26a7
| 13,796 |
#[doc = "Register `SEQCFG` reader"]
pub struct R(crate::R<SEQCFG_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<SEQCFG_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<SEQCFG_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<SEQCFG_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `SEQCFG` writer"]
pub struct W(crate::W<SEQCFG_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<SEQCFG_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<SEQCFG_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<SEQCFG_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `HWLA` reader - Half word left adjust"]
pub struct HWLA_R(crate::FieldReader<bool, bool>);
impl HWLA_R {
pub(crate) fn new(bits: bool) -> Self {
HWLA_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for HWLA_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `HWLA` writer - Half word left adjust"]
pub struct HWLA_W<'a> {
w: &'a mut W,
}
impl<'a> HWLA_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
#[doc = "Field `BIPOLAR` reader - Bipolar Mode"]
pub struct BIPOLAR_R(crate::FieldReader<bool, bool>);
impl BIPOLAR_R {
pub(crate) fn new(bits: bool) -> Self {
BIPOLAR_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for BIPOLAR_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `BIPOLAR` writer - Bipolar Mode"]
pub struct BIPOLAR_W<'a> {
w: &'a mut W,
}
impl<'a> BIPOLAR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
self.w
}
}
#[doc = "Field `GAIN` reader - Gain factor"]
pub struct GAIN_R(crate::FieldReader<u8, u8>);
impl GAIN_R {
pub(crate) fn new(bits: u8) -> Self {
GAIN_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for GAIN_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `GAIN` writer - Gain factor"]
pub struct GAIN_W<'a> {
w: &'a mut W,
}
impl<'a> GAIN_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 4)) | ((value as u32 & 0x07) << 4);
self.w
}
}
#[doc = "Field `GCOMP` reader - Gain Compensation"]
pub struct GCOMP_R(crate::FieldReader<bool, bool>);
impl GCOMP_R {
pub(crate) fn new(bits: bool) -> Self {
GCOMP_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for GCOMP_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `GCOMP` writer - Gain Compensation"]
pub struct GCOMP_W<'a> {
w: &'a mut W,
}
impl<'a> GCOMP_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7);
self.w
}
}
#[doc = "Field `TRGSEL` reader - Trigger selection"]
pub struct TRGSEL_R(crate::FieldReader<u8, u8>);
impl TRGSEL_R {
pub(crate) fn new(bits: u8) -> Self {
TRGSEL_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for TRGSEL_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `TRGSEL` writer - Trigger selection"]
pub struct TRGSEL_W<'a> {
w: &'a mut W,
}
impl<'a> TRGSEL_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 8)) | ((value as u32 & 0x07) << 8);
self.w
}
}
#[doc = "Field `RES` reader - Resolution"]
pub struct RES_R(crate::FieldReader<bool, bool>);
impl RES_R {
pub(crate) fn new(bits: bool) -> Self {
RES_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RES_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RES` writer - Resolution"]
pub struct RES_W<'a> {
w: &'a mut W,
}
impl<'a> RES_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12);
self.w
}
}
#[doc = "Field `INTERNAL` reader - Internal Voltage Source Selection"]
pub struct INTERNAL_R(crate::FieldReader<u8, u8>);
impl INTERNAL_R {
pub(crate) fn new(bits: u8) -> Self {
INTERNAL_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for INTERNAL_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `INTERNAL` writer - Internal Voltage Source Selection"]
pub struct INTERNAL_W<'a> {
w: &'a mut W,
}
impl<'a> INTERNAL_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 14)) | ((value as u32 & 0x03) << 14);
self.w
}
}
#[doc = "Field `MUXPOS` reader - MUX selection on Positive ADC input channel"]
pub struct MUXPOS_R(crate::FieldReader<u8, u8>);
impl MUXPOS_R {
pub(crate) fn new(bits: u8) -> Self {
MUXPOS_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for MUXPOS_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `MUXPOS` writer - MUX selection on Positive ADC input channel"]
pub struct MUXPOS_W<'a> {
w: &'a mut W,
}
impl<'a> MUXPOS_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 16)) | ((value as u32 & 0x0f) << 16);
self.w
}
}
#[doc = "Field `MUXNEG` reader - MUX selection on Negative ADC input channel"]
pub struct MUXNEG_R(crate::FieldReader<u8, u8>);
impl MUXNEG_R {
pub(crate) fn new(bits: u8) -> Self {
MUXNEG_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for MUXNEG_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `MUXNEG` writer - MUX selection on Negative ADC input channel"]
pub struct MUXNEG_W<'a> {
w: &'a mut W,
}
impl<'a> MUXNEG_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 20)) | ((value as u32 & 0x07) << 20);
self.w
}
}
#[doc = "Field `ZOOMRANGE` reader - Zoom shift/unipolar reference source selection"]
pub struct ZOOMRANGE_R(crate::FieldReader<u8, u8>);
impl ZOOMRANGE_R {
pub(crate) fn new(bits: u8) -> Self {
ZOOMRANGE_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for ZOOMRANGE_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `ZOOMRANGE` writer - Zoom shift/unipolar reference source selection"]
pub struct ZOOMRANGE_W<'a> {
w: &'a mut W,
}
impl<'a> ZOOMRANGE_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 28)) | ((value as u32 & 0x07) << 28);
self.w
}
}
impl R {
#[doc = "Bit 0 - Half word left adjust"]
#[inline(always)]
pub fn hwla(&self) -> HWLA_R {
HWLA_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 2 - Bipolar Mode"]
#[inline(always)]
pub fn bipolar(&self) -> BIPOLAR_R {
BIPOLAR_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bits 4:6 - Gain factor"]
#[inline(always)]
pub fn gain(&self) -> GAIN_R {
GAIN_R::new(((self.bits >> 4) & 0x07) as u8)
}
#[doc = "Bit 7 - Gain Compensation"]
#[inline(always)]
pub fn gcomp(&self) -> GCOMP_R {
GCOMP_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bits 8:10 - Trigger selection"]
#[inline(always)]
pub fn trgsel(&self) -> TRGSEL_R {
TRGSEL_R::new(((self.bits >> 8) & 0x07) as u8)
}
#[doc = "Bit 12 - Resolution"]
#[inline(always)]
pub fn res(&self) -> RES_R {
RES_R::new(((self.bits >> 12) & 0x01) != 0)
}
#[doc = "Bits 14:15 - Internal Voltage Source Selection"]
#[inline(always)]
pub fn internal(&self) -> INTERNAL_R {
INTERNAL_R::new(((self.bits >> 14) & 0x03) as u8)
}
#[doc = "Bits 16:19 - MUX selection on Positive ADC input channel"]
#[inline(always)]
pub fn muxpos(&self) -> MUXPOS_R {
MUXPOS_R::new(((self.bits >> 16) & 0x0f) as u8)
}
#[doc = "Bits 20:22 - MUX selection on Negative ADC input channel"]
#[inline(always)]
pub fn muxneg(&self) -> MUXNEG_R {
MUXNEG_R::new(((self.bits >> 20) & 0x07) as u8)
}
#[doc = "Bits 28:30 - Zoom shift/unipolar reference source selection"]
#[inline(always)]
pub fn zoomrange(&self) -> ZOOMRANGE_R {
ZOOMRANGE_R::new(((self.bits >> 28) & 0x07) as u8)
}
}
impl W {
#[doc = "Bit 0 - Half word left adjust"]
#[inline(always)]
pub fn hwla(&mut self) -> HWLA_W {
HWLA_W { w: self }
}
#[doc = "Bit 2 - Bipolar Mode"]
#[inline(always)]
pub fn bipolar(&mut self) -> BIPOLAR_W {
BIPOLAR_W { w: self }
}
#[doc = "Bits 4:6 - Gain factor"]
#[inline(always)]
pub fn gain(&mut self) -> GAIN_W {
GAIN_W { w: self }
}
#[doc = "Bit 7 - Gain Compensation"]
#[inline(always)]
pub fn gcomp(&mut self) -> GCOMP_W {
GCOMP_W { w: self }
}
#[doc = "Bits 8:10 - Trigger selection"]
#[inline(always)]
pub fn trgsel(&mut self) -> TRGSEL_W {
TRGSEL_W { w: self }
}
#[doc = "Bit 12 - Resolution"]
#[inline(always)]
pub fn res(&mut self) -> RES_W {
RES_W { w: self }
}
#[doc = "Bits 14:15 - Internal Voltage Source Selection"]
#[inline(always)]
pub fn internal(&mut self) -> INTERNAL_W {
INTERNAL_W { w: self }
}
#[doc = "Bits 16:19 - MUX selection on Positive ADC input channel"]
#[inline(always)]
pub fn muxpos(&mut self) -> MUXPOS_W {
MUXPOS_W { w: self }
}
#[doc = "Bits 20:22 - MUX selection on Negative ADC input channel"]
#[inline(always)]
pub fn muxneg(&mut self) -> MUXNEG_W {
MUXNEG_W { w: self }
}
#[doc = "Bits 28:30 - Zoom shift/unipolar reference source selection"]
#[inline(always)]
pub fn zoomrange(&mut self) -> ZOOMRANGE_W {
ZOOMRANGE_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Sequencer Configuration Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [seqcfg](index.html) module"]
pub struct SEQCFG_SPEC;
impl crate::RegisterSpec for SEQCFG_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [seqcfg::R](R) reader structure"]
impl crate::Readable for SEQCFG_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [seqcfg::W](W) writer structure"]
impl crate::Writable for SEQCFG_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets SEQCFG to value 0"]
impl crate::Resettable for SEQCFG_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 29.541756 | 419 | 0.57089 |
feadbc5fc6e2a84074938215991955b59b524367
| 144,055 |
//! Support for the "compact unwinding format" used by Apple platforms,
//! which can be found in __unwind_info sections of binaries.
//!
//! The primary type of interest is [`CompactUnwindInfoIter`], which can be
//! constructed directly from a section of memory.
//!
//! The [`CompactUnwindInfoIter]` lets you iterate through all of the mappings
//! from instruction addresses to unwinding instructions, or lookup a specific
//! mapping by instruction address (unimplemented).
//!
//!
//!
//! # Examples
//!
//! If you want to process all the Compact Unwind Info at once, do something like this:
//!
//! ```
//! use symbolic_debuginfo::macho::{
//! CompactCfiOp, CompactCfiRegister, CompactUnwindOp,
//! CompactUnwindInfoIter, MachError, MachObject,
//! };
//!
//! fn read_compact_unwind_info<'d>(mut iter: CompactUnwindInfoIter<'d>)
//! -> Result<(), MachError>
//! {
//! // Iterate through the entries
//! while let Some(entry) = iter.next()? {
//! match entry.instructions(&iter) {
//! CompactUnwindOp::None => {
//! // No instructions for this region, will need to use
//! // stack scanning or frame-pointer techniques to unwind.
//! }
//! CompactUnwindOp::UseDwarfFde { offset_in_eh_frame } => {
//! // Need to grab the CFI info from the eh_frame section
//!
//! // process_eh_frame_fde_at(offset_in_eh_frame)
//! }
//! CompactUnwindOp::CfiOps(ops) => {
//! // Emit a new entry with the following operations
//! let start_addr = entry.instruction_address;
//! let length = entry.len;
//!
//! for instruction in ops {
//! match instruction {
//! CompactCfiOp::RegisterAt {
//! dest_reg,
//! src_reg,
//! offset_from_src,
//! } => {
//! let dest_reg_name = dest_reg.name(&iter);
//! let src_reg_name = src_reg.name(&iter);
//!
//! // Emit something to the effect of
//! // $dest_reg_name = *($src_reg_name + offset_from_src);
//! }
//! CompactCfiOp::RegisterIs {
//! dest_reg,
//! src_reg,
//! offset_from_src,
//! } => {
//! let dest_reg_name = dest_reg.name(&iter);
//! let src_reg_name = src_reg.name(&iter);
//!
//! // Emit something to the effect of
//! // $dest_reg_name = $src_reg_name + offset_from_src;
//! }
//! };
//! }
//! }
//! }
//! }
//! Ok(())
//! }
//! ```
//!
//! If you want to unwind from a specific location, do something like this
//! (API not yet implemented!):
//!
//! ```rust,ignore
//! use symbolic_debuginfo::macho::{
//! CompactCfiOp, CompactCfiRegister, CompactUnwindOp,
//! CompactUnwindInfoIter, MachError, MachObject,
//! };
//!
//! fn unwind_one_frame<'d>(mut iter: CompactUnwindInfoIter<'d>, current_address_in_module: u32)
//! -> Result<(), MachError>
//! {
//! if let Some(entry) = iter.entry_for_address(current_address_in_module)? {
//! match entry.instructions(&iter) {
//! CompactUnwindOp::None => {
//! // No instructions for this region, will need to use
//! // stack scanning or frame-pointer techniques to unwind.
//! }
//! CompactUnwindOp::UseDwarfFde { offset_in_eh_frame } => {
//! // Need to grab the CFI info from the eh_frame section
//!
//! // process_eh_frame_fde_at(offset_in_eh_frame)
//! }
//! CompactUnwindOp::CfiOps(ops) => {
//! // Emit a new entry with the following operations
//! let start_addr = entry.instruction_address;
//! let length = entry.len;
//!
//! for instruction in ops {
//! match instruction {
//! CompactCfiOp::RegisterAt {
//! dest_reg,
//! src_reg,
//! offset_from_src,
//! } => {
//! let dest_reg_name = dest_reg.name(&iter);
//! let src_reg_name = src_reg.name(&iter);
//!
//! // Emit something to the effect of
//! // $dest_reg_name = *($src_reg_name + offset_from_src);
//! }
//! CompactCfiOp::RegisterIs {
//! dest_reg,
//! src_reg,
//! offset_from_src,
//! } => {
//! let dest_reg_name = dest_reg.name(&iter);
//! let src_reg_name = src_reg.name(&iter);
//!
//! // Emit something to the effect of
//! // $dest_reg_name = $src_reg_name + offset_from_src;
//! }
//! };
//! }
//! }
//! }
//! }
//! Ok(())
//! }
//! ```
//!
//!
//! # Unimplemented Features (TODO)
//!
//! * Personality/LSDA lookup (for runtime unwinders)
//! * Entry lookup by address (for runtime unwinders)
//! * x86/x64 Stackless-Indirect mode decoding (for stack frames > 2KB)
//!
//!
//! # The Compact Unwinding Format
//!
//! This format is defined only by its implementation in llvm. Notably these two
//! files include lots of useful comments and definitions:
//!
//! * [Header describing layout of the format](https://github.com/llvm/llvm-project/blob/main/libunwind/include/mach-o/compact_unwind_encoding.h)
//! * [Implementation that outputs the format](https://github.com/llvm/llvm-project/blob/main/lld/MachO/UnwindInfoSection.cpp)
//! * [Implementation of lldb interpreting that format (CreateUnwindPlan_x86_64 especially useful)](https://github.com/llvm/llvm-project/blob/main/lldb/source/Symbol/CompactUnwindInfo.cpp)
//!
//! This implementation is based on those files at commit `d480f968ad8b56d3ee4a6b6df5532d485b0ad01e`.
//!
//! Unfortunately the description of the format in those files elides some important
//! details, and it uses some naming conventions that are confusing, so this document
//! will attempt to define this format more completely, and with more clear terms.
//!
//! Some notable terminology changes from llvm:
//!
//! * "encoding" or "encoding type" => opcode
//! * "function offset" => instruction address
//!
//! Like all unwinding info formats, the goal of the compact unwinding format
//! is to create a mapping from addresses in the binary to opcodes describing
//! how to unwind from that location.
//!
//! These opcodes describe:
//!
//! * How to recover the return pointer for the current frame
//! * How to recover some of the registers for the current frame
//! * How to run destructors / catch the unwind at runtime (personality/LSDA)
//!
//! A user of the compact unwinding format would:
//!
//! 1. Get the current instruction pointer (e.g. `%rip`).
//! 2. Lookup the corresponding opcode in the compact unwinding structure.
//! 3. Follow the instructions of that opcode to recover the current frame.
//! 4. Optionally perform runtime unwinding tasks for the current frame (destructors).
//! 5. Use that information to recover the instruction pointer of the previous frame.
//! 6. Repeat until unwinding is complete.
//!
//! The compact unwinding format can be understood as two separate pieces:
//!
//! * An architecture-agnostic "page-table" structure for finding opcode entries
//! * Architecture-specific opcode formats (x86, x64, and ARM64)
//!
//! Unlike DWARF CFI, compact unwinding doesn't have facilities for incrementally
//! updating how to recover certain registers as the function progresses.
//!
//! Empirical analysis suggests that there tends to only be one opcode for
//! an entire function (which explains why llvm refers to instruction addresses
//! as "function offsets"), although nothing in the format seems to *require*
//! this to be the case.
//!
//! One consequence of only having one opcode for a whole function is that
//! functions will generally have incorrect instructions for the function's
//! prologue (where callee-saved registers are individually PUSHed onto the
//! stack before the rest of the stack space is allocated), and epilogue
//! (where callee-saved registers are individually POPed back into registers).
//!
//! Presumably this isn't a very big deal, since there's very few situations
//! where unwinding would involve a function still executing its prologue/epilogue.
//! This might matter when handling a stack overflow that occurred while
//! saving the registers, or when processing a non-crashing thread in a minidump
//! that happened to be in its prologue/epilogue.
//!
//! Similarly, the way ranges of instructions are mapped means that Compact
//! Unwinding will generally incorrectly map the padding bytes between functions
//! (attributing them to the previous function), while DWARF CFI tends to
//! more carefully exclude those addresses. Presumably also not a big deal.
//!
//! Both of these things mean that if DWARF CFI and Compact Unwinding are
//! available for a function, the DWARF CFI is expected to be more precise.
//!
//! It's possible that LSDA entries have addresses decoupled from the primary
//! opcode so that instructions on how to run destructors can vary more
//! granularly, but LSDA support is still TODO as it's not needed for
//! backtraces.
//!
//!
//! # Page Tables
//!
//! This section describes the architecture-agnostic layout of the compact
//! unwinding format. The layout of the format is a two-level page-table
//! with one root first-level node pointing to arbitrarily many second-level
//! nodes, which in turn can hold several hundred opcode entries.
//!
//! There are two high-level concepts in this format that enable significant
//! compression of the tables:
//!
//! 1. Eliding duplicate instruction addresses
//! 2. Palettizing the opcodes
//!
//!
//!
//! Trick 1 is standard for unwinders: the table of mappings is sorted by
//! address, and any entries that would have the same opcode as the
//! previous one are elided. So for instance the following:
//!
//! ```text
//! address: 1, opcode: 1
//! address: 2, opcode: 1
//! address: 3, opcode: 2
//! ```
//!
//! Is just encoded like this:
//!
//! ```text
//! address: 1, opcode: 1
//! address: 3, opcode: 2
//! ```
//!
//! We have found a few places with "zero-length" entries, where the same
//! address gets repeated, such as the following in `libsystem_kernel.dylib`:
//!
//! ```text
//! address: 0x000121c3, opcode: 0x00000000
//! address: 0x000121c3, opcode: 0x04000680
//! ```
//!
//! In this case you can just discard the zero-length one (the first one).
//!
//!
//!
//! Trick 2 is more novel: At the first level a global palette of up to 127 opcodes
//! is defined. Each second-level "compressed" (leaf) page can also define up to 128 local
//! opcodes. Then the entries mapping instruction addresses to opcodes can use 8-bit
//! indices into those palettes instead of entire 32-bit opcodes. If an index is
//! smaller than the number of global opcodes, it's global, otherwise it's local
//! (subtract the global count to get the local index).
//!
//! > Unclear detail: If the global palette is smaller than 127, can the local
//! palette be larger than 128?
//!
//! To compress these entries into a single 32-bit value, the address is truncated
//! to 24 bits and packed with the index. The addresses stored in these entries
//! are also relative to a base address that each second-level page defines.
//! (This will be made more clear below).
//!
//! There are also non-palettized "regular" second-level pages with absolute
//! 32-bit addresses, but those are fairly rare. llvm seems to only want to emit
//! them in the final page.
//!
//! The root page also stores the first address mapped by each second-level
//! page, allowing for more efficient binary search for a particular function
//! offset entry. (This is the base address the compressed pages use.)
//!
//! The root page always has a final sentinel entry which has a null pointer
//! to its second-level page while still specifying a first address. This
//! makes it easy to lookup the maximum mapped address (the sentinel will store
//! that value +1), and just generally makes everything Work Nicer.
//!
//!
//!
//! ## Layout of the Page Table
//!
//! The page table starts at the very beginning of the `__unwind_info` section
//! with the root page:
//!
//! ```rust,ignore
//! struct RootPage {
//! /// Only version 1 is currently defined
//! version: u32 = 1,
//!
//! /// The array of u32 global opcodes (offset relative to start of root page).
//! ///
//! /// These may be indexed by "compressed" second-level pages.
//! global_opcodes_offset: u32,
//! global_opcodes_len: u32,
//!
//! /// The array of u32 global personality codes
//! /// (offset relative to start of root page).
//! ///
//! /// Personalities define the style of unwinding that an unwinder should
//! /// use, and how to interpret the LSDA entries for a function (see below).
//! personalities_offset: u32,
//! personalities_len: u32,
//!
//! /// The array of FirstLevelPageEntry's describing the second-level pages
//! /// (offset relative to start of root page).
//! pages_offset: u32,
//! pages_len: u32,
//!
//! // After this point there are several dynamically-sized arrays whose
//! // precise order and positioning don't matter, because they are all
//! // accessed using offsets like the ones above. The arrays are:
//!
//! global_opcodes: [u32; global_opcodes_len],
//! personalities: [u32; personalities_len],
//! pages: [FirstLevelPageEntry; pages_len],
//!
//! /// An array of LSDA pointers (Language Specific Data Areas).
//! ///
//! /// LSDAs are tables that an unwinder's personality function will use to
//! /// find what destructors should be run and whether unwinding should
//! /// be caught and normal execution resumed. We can treat them opaquely.
//! ///
//!     /// Second-level pages have addresses into this array so that it
//!     /// can be indexed, the root page doesn't need to know about them.
//! lsdas: [LsdaEntry; unknown_len],
//! }
//!
//!
//! struct FirstLevelPageEntry {
//! /// The first address mapped by this page.
//! ///
//! /// This is useful for binary-searching for the page that can map
//! /// a specific address in the binary (the primary kind of lookup
//! /// performed by an unwinder).
//! first_address: u32,
//!
//! /// Offset to the second-level page (offset relative to start of root page).
//! ///
//! /// This may point to a RegularSecondLevelPage or a CompressedSecondLevelPage.
//! /// Which it is can be determined by the 32-bit "kind" value that is at
//! /// the start of both layouts.
//! second_level_page_offset: u32,
//!
//! /// Base offset into the lsdas array that entries in this page will be
//! /// relative to (offset relative to start of root page).
//! lsda_index_offset: u32,
//! }
//!
//!
//! struct RegularSecondLevelPage {
//! /// Always 2 (use to distinguish from CompressedSecondLevelPage).
//! kind: u32 = 2,
//!
//! /// The Array of RegularEntry's (offset relative to **start of this page**).
//! entries_offset: u16,
//! entries_len: u16,
//! }
//!
//!
//! struct RegularEntry {
//! /// The address in the binary for this entry (absolute).
//! instruction_address: u32,
//! /// The opcode for this address.
//! opcode: u32,
//! }
//!
//!
//! struct CompressedSecondLevelPage {
//! /// Always 3 (use to distinguish from RegularSecondLevelPage).
//! kind: u32 = 3,
//!
//! /// The array of compressed u32 entries
//! /// (offset relative to **start of this page**).
//! ///
//! /// Entries are a u32 that contains two packed values (from high to low):
//! /// * 8 bits: opcode index
//! /// * 0..global_opcodes_len => index into global palette
//! /// * global_opcodes_len..255 => index into local palette
//! /// (subtract global_opcodes_len to get the real local index)
//! /// * 24 bits: instruction address
//! /// * address is relative to this page's first_address!
//! entries_offset: u16,
//! entries_len: u16,
//!
//! /// The array of u32 local opcodes for this page
//! /// (offset relative to **start of this page**).
//! local_opcodes_offset: u16,
//! local_opcodes_len: u16,
//! }
//!
//!
//! // TODO: why do these have instruction_addresses? Are they not in sync
//! // with the second-level entries?
//! struct LsdaEntry {
//! instruction_address: u32,
//! lsda_address: u32,
//! }
//! ```
//!
//!
//!
//! # Opcode Format
//!
//! There are 3 architecture-specific opcode formats: x86, x64, and ARM64.
//!
//! All 3 formats have a "null opcode" (`0x0000_0000`) which indicates that
//! there is no unwinding information for this range of addresses. This happens
//! with things like hand-written assembly subroutines. This implementation
//! will yield it as a valid opcode that converts into [`CompactUnwindOp::None`].
//!
//! All 3 formats share a common header in the top 8 bits (from high to low):
//!
//! ```rust,ignore
//! /// Whether this instruction is the start of a function.
//! is_start: u1,
//!
//! /// Whether there is an lsda entry for this instruction.
//! has_lsda: u1,
//!
//! /// An index into the global personalities array
//! /// (TODO: ignore if has_lsda == false?)
//! personality_index: u2,
//!
//! /// The architecture-specific kind of opcode this is, specifying how to
//! /// interpret the remaining 24 bits of the opcode.
//! opcode_kind: u4,
//! ```
//!
//!
//!
//! ## x86 and x64 Opcodes
//!
//! x86 and x64 use the same opcode layout, differing only in the registers
//! being restored. Registers are numbered 0-6, with the following mappings:
//!
//! x86:
//! * 0 => no register (like `Option::None`)
//! * 1 => `ebx`
//! * 2 => `ecx`
//! * 3 => `edx`
//! * 4 => `edi`
//! * 5 => `esi`
//! * 6 => `ebp`
//!
//! x64:
//! * 0 => no register (like `Option::None`)
//! * 1 => `rbx`
//! * 2 => `r12`
//! * 3 => `r13`
//! * 4 => `r14`
//! * 5 => `r15`
//! * 6 => `rbp`
//!
//! Note also that encoded sizes/offsets are generally divided by the pointer size
//! (since all values we are interested in are pointer-aligned), which of course differs
//! between x86 and x64.
//!
//! There are 4 kinds of x86/x64 opcodes (specified by opcode_kind):
//!
//! (One of the llvm headers refers to a 5th "0=old" opcode. Apparently this
//! was used for initial development of the format, and is basically just
//! reserved to prevent the testing data from ever getting mixed with real
//! data. Nothing should produce or handle it. It does incidentally match
//! the "null opcode", but it's fine to regard that as an unknown opcode
//! and do nothing.)
//!
//!
//! ### x86/x64 Opcode 1: Frame-Based
//!
//! The function has the standard frame pointer (`bp`) prelude which:
//!
//! * Pushes the caller's `bp` to the stack
//! * Sets `bp := sp` (new frame pointer is the current top of the stack)
//!
//! `bp` has been preserved, and any callee-saved registers that need to be restored
//! are saved on the stack at a known offset from `bp`. The return address is
//! stored just before the caller's `bp`. The caller's stack pointer should
//! point before where the return address is saved.
//!
//! So to unwind you just need to do:
//!
//! ```text
//! %sp := %bp + 2*POINTER_SIZE
//! %ip := *(%bp + POINTER_SIZE)
//! %bp := *(%bp)
//!
//! (and restore all the other callee-saved registers as described below)
//! ```
//!
//! Registers are stored in increasing order (so `reg1` comes before `reg2`).
//! If a register has the "no register" value, continue iterating the offset
//! forward. This lets the registers be stored slightly-non-contiguously on the
//! stack.
//!
//! The remaining 24 bits of the opcode are interpreted as follows (from high to low):
//!
//! ```rust,ignore
//! /// The offset from bp that the registers to restore are saved at,
//! /// divided by pointer size.
//! stack_offset: u8,
//!
//! _unused: u1,
//!
//! /// Registers to restore (see register mapping in previous section)
//! reg1: u3,
//! reg2: u3,
//! reg3: u3,
//! reg4: u3,
//! reg5: u3,
//! ```
//!
//!
//!
//! ### x86/x64 Opcode 2: Frameless (Stack-Immediate)
//!
//!
//! The callee's stack frame has a known size, so we can find the start
//! of the frame by offsetting from sp (the stack pointer). The return
//! address is saved immediately after that location. Any callee-saved
//! registers that need to be restored are saved immediately after that.
//!
//! So to unwind you just need to do:
//!
//! ```text
//! %sp := %sp + stack_size * POINTER_SIZE
//!   %ip := *(%sp - POINTER_SIZE)
//!
//! (and restore all the other callee-saved registers as described below)
//! ```
//!
//! Registers are stored in *reverse* order on the stack from the order the
//! decoding algorithm outputs (so `reg[1]` comes before `reg[0]`).
//!
//! If a register has the "no register" value, *do not* continue iterating the
//! offset forward -- registers are strictly contiguous (it's possible
//! "no register" can only be trailing due to the encoding, but I haven't
//! verified this).
//!
//! The remaining 24 bits of the opcode are interpreted as follows (from high to low):
//!
//! ```rust,ignore
//! /// How big the stack frame is, divided by pointer size.
//! stack_size: u8,
//!
//! _unused: u3,
//!
//! /// The number of registers that are saved on the stack.
//! register_count: u3,
//!
//! /// The permutation encoding of the registers that are saved
//! /// on the stack (see below).
//! register_permutations: u10,
//! ```
//!
//! The register permutation encoding is a Lehmer code sequence encoded into a
//! single variable-base number so we can encode the ordering of up to
//! six registers in a 10-bit space.
//!
//! This can't really be described well with anything but code, so
//! just read this implementation or llvm's implementation for how to
//! encode/decode this.
//!
//!
//!
//! ### x86/x64 Opcode 3: Frameless (Stack-Indirect)
//!
//! (Currently Unimplemented)
//!
//! Stack-Indirect is exactly the same situation as Stack-Immediate, but
//! the stack-frame size is too large for Stack-Immediate to encode. However,
//! the function prereserved the size of the frame in its prologue, so we can
//! extract the size of the frame from a `sub` instruction at a known
//! offset from the start of the function (`subl $nnnnnnnn,ESP` in x86,
//! `subq $nnnnnnnn,RSP` in x64).
//!
//! This requires being able to find the first instruction of the function
//! (TODO: presumably the first is_start entry <= this one?).
//!
//! TODO: describe how to extract the value from the `sub` instruction.
//!
//!
//! ```rust,ignore
//! /// Offset from the start of the function where the `sub` instruction
//! /// we need is stored. (NOTE: not divided by anything!)
//! instruction_offset: u8,
//!
//! /// An offset to add to the loaded stack size, divided by pointer size.
//! /// This allows the stack size to differ slightly from the `sub`, to
//! /// compensate for any function prologue that pushes a bunch of
//! /// pointer-sized registers.
//! stack_adjust: u3,
//!
//! /// The number of registers that are saved on the stack.
//! register_count: u3,
//!
//! /// The permutation encoding of the registers that are saved on the stack
//! /// (see Stack-Immediate for a description of this format).
//! register_permutations: u10,
//! ```
//!
//! **Note**: apparently binaries generated by the clang in Xcode 6 generated
//! corrupted versions of this opcode, but this was fixed in Xcode 7
//! (released in September 2015), so *presumably* this isn't something we're
//! likely to encounter. But if you encounter messed up opcodes this might be why.
//!
//!
//!
//! ### x86/x64 Opcode 4: Dwarf
//!
//! There is no compact unwind info here, and you should instead use the
//! DWARF CFI in `.eh_frame` for this line. The remaining 24 bits of the opcode
//! are an offset into the `.eh_frame` section that should hold the DWARF FDE
//! for this instruction address.
//!
//!
//!
//! ## ARM64 Opcodes
//!
//! ARM64 (AKA AArch64) is a lot more strict about the ABI of functions, and
//! as such it has fairly simple opcodes. There are 3 kinds of ARM64 opcode:
//!
//! (Yes there's no Opcode 1, I don't know why.)
//!
//!
//! ### ARM64 Opcode 2: Frameless
//!
//! This is a "frameless" leaf function. The caller is responsible for
//! saving/restoring all of its general purpose registers. The frame pointer
//! is still the caller's frame pointer and doesn't need to be touched. The
//! return address is stored in the link register (`x30`).
//!
//! So to unwind you just need to do:
//!
//! ```text
//! %sp := %sp + stack_size * 16
//! %pc := %x30
//!
//! (no other registers to restore)
//! ```
//!
//! The remaining 24 bits of the opcode are interpreted as follows (from high to low):
//!
//! ```rust,ignore
//! /// How big the stack frame is, divided by 16.
//! stack_size: u12,
//!
//! _unused: u12,
//! ```
//!
//!
//!
//! ### ARM64 Opcode 3: Dwarf
//!
//! There is no compact unwind info here, and you should instead use the
//! DWARF CFI in `.eh_frame` for this line. The remaining 24 bits of the opcode
//! are an offset into the `.eh_frame` section that should hold the DWARF FDE
//! for this instruction address.
//!
//!
//!
//! ### ARM64 Opcode 4: Frame-Based
//!
//! This is a function with the standard prologue. The return address (`pc`) and the
//! frame pointer (`x29`) were pushed onto the stack in a pair and in that order
//! (ARM64 registers are saved/restored in pairs), and then the frame pointer was updated
//! to the current stack pointer.
//!
//! So to unwind you just need to do:
//!
//! ```text
//! %sp := %x29 + 16
//! %pc := *(%x29 + 8)
//! %x29 := *(%x29)
//!
//! (and restore all the other callee-saved registers as described below)
//! ```
//!
//! Any callee-saved registers that need to be restored were then pushed
//! onto the stack in pairs in the following order (if they were pushed at
//! all, see below):
//!
//! 1. `x19`, `x20`
//! 2. `x21`, `x22`
//! 3. `x23`, `x24`
//! 4. `x25`, `x26`
//! 5. `x27`, `x28`
//! 6. `d8`, `d9`
//! 7. `d10`, `d11`
//! 8. `d12`, `d13`
//! 9. `d14`, `d15`
//!
//! The remaining 24 bits of the opcode are interpreted as follows (from high to low):
//!
//! ```rust,ignore
//! _unused: u15,
//!
//! // Whether each register pair was pushed
//! d14_and_d15_saved: u1,
//! d12_and_d13_saved: u1,
//! d10_and_d11_saved: u1,
//! d8_and_d9_saved: u1,
//!
//! x27_and_x28_saved: u1,
//! x25_and_x26_saved: u1,
//! x23_and_x24_saved: u1,
//! x21_and_x22_saved: u1,
//! x19_and_x20_saved: u1,
//! ```
//!
//!
//!
//! # Notable Corners
//!
//! Here's some notable corner cases and esoterica of the format. Behaviour in
//! these situations is not strictly guaranteed (as in we may decide to
//! make the implementation more strict or liberal if it is deemed necessary
//! or desirable). But current behaviour *is* documented here for the sake of
//! maintenance/debugging. Hopefully it also helps highlight all the ways things
//! can go wrong for anyone using this documentation to write their own tooling.
//!
//! For all these cases, if an Error is reported during iteration/search, the
//! [`CompactUnwindInfoIter`] will be in an unspecified state for future queries.
//! It will never violate memory safety but it may start yielding chaotic
//! values.
//!
//! If this implementation ever panics, that should be regarded as an
//! implementation bug.
//!
//!
//! Things we allow:
//!
//! * The personalities array has a 32-bit length, but all indices into
//! it are only 2 bits. As such, it is theoretically possible for there
//! to be unindexable personalities. In practice that Shouldn't Happen,
//! and this implementation won't report an error if it does, because it
//! can be benign (although we have no way to tell if indices were truncated).
//!
//! * The llvm headers say that at most there should be 127 global opcodes
//! and 128 local opcodes, but since local index translation is based on
//! the actual number of global opcodes and *not* 127/128, there's no
//! reason why each palette should be individually limited like this.
//! This implementation doesn't report an error if this happens, and should
//! work fine if it does.
//!
//! * The llvm headers say that second-level pages are *actual* pages at
//! a fixed size of 4096 bytes. It's unclear what advantage this provides,
//! perhaps there's a situation where you're mapping in the pages on demand?
//! This puts a practical limit on the number of entries each second-level
//! page can hold -- regular pages can fit 511 entries, while compressed
//! pages can hold 1021 entries+local_opcodes (they have to share). This
//! implementation does not report an error if a second-level page has more
//! values than that, and should work fine if it does.
//!
//! * If a [`CompactUnwindInfoIter`] is created for an architecture it wasn't
//! designed for, it is assumed that the layout of the page tables will
//! remain the same, and entry iteration/lookup should still work and
//! produce results. However [`CompactUnwindInfoEntry::instructions`]
//! will always return [`CompactUnwindOp::None`].
//!
//! * If an opcode kind is encountered that this implementation wasn't
//! designed for, `Opcode::instructions` will return [`CompactUnwindOp::None`].
//!
//! * If two entries have the same address (making the first have zero-length),
//! we silently discard the first one in favour of the second.
//!
//! * Only 7 register mappings are provided for x86/x64 opcodes, but the
//! 3-bit encoding allows for 8. This implementation will just map the
//! 8th encoding to "no register" as well.
//!
//! * Only 6 registers can be restored by the x86/x64 stackless modes, but
//! the 3-bit encoding of the register count allows for 7. This implementation
//! clamps the value to 6.
//!
//!
//! Things we produce errors for:
//!
//! * If the root page has a version this implementation wasn't designed for,
//! [`CompactUnwindInfoIter::new`] will return an Error.
//!
//! * A corrupt unwind_info section may have its entries out of order. Since
//! the next entry's instruction_address is always needed to compute the
//! number of bytes the current entry covers, the implementation will report
//! an error if it encounters this. However it does not attempt to fully
//! validate the ordering during an `entry_for_address` query, as this would
//! significantly slow down the binary search. In this situation
//! you may get chaotic results (same guarantees as `BTreeMap` with an
//! inconsistent `Ord` implementation).
//!
//! * A corrupt unwind_info section may attempt to index out of bounds either
//! with out-of-bounds offset values (e.g. personalities_offset) or with out
//! of bounds indices (e.g. a local opcode index). When an array length is
//! provided, this implementation will return an error if an index is out
//! out of bounds. Offsets are only restricted to the unwind_info
//! section itself, as this implementation does not assume arrays are
//! placed in any particular place, and does not try to prevent aliasing.
//! Trying to access outside the `.unwind_info` section will return an error.
//!
//! * If an unknown second-level page type is encountered, iteration/lookup will
//! return an error.
//!
//!
//! Things that cause chaos:
//!
//! * If the null page was missing, there would be no way to identify the
//! number of instruction bytes the last entry in the table covers. As such,
//! this implementation assumes that it exists, and currently does not validate
//! it ahead of time. If the null page *is* missing, the last entry or page
//! may be treated as the null page, and won't be emitted. (Perhaps we should
//! provide more reliable behaviour here?)
//!
//! * If there are multiple null pages, or if there is a page with a defined
//! second-level page but no entries of its own, behaviour is unspecified.
//!
use crate::macho::MachError;
use goblin::error::Error;
use goblin::mach::segment::SectionData;
use scroll::{Endian, Pread};
use std::mem;
// Hacky glue types to keep exposure of the containing library minimal.
// This will help with transplanting this code into goblin.
/// Local result alias that pins the error type to [`MachError`].
type Result<T> = std::result::Result<T, MachError>;
/// The CPU architecture the binary targets, which determines how opcodes
/// are decoded (see the module docs for the per-architecture formats).
#[derive(Debug, Clone)]
enum Arch {
    /// 32-bit x86 (4-byte pointers, x86 register numbering).
    X86,
    /// 64-bit x86-64 (8-byte pointers, x64 register numbering).
    X64,
    /// 64-bit ARM (AArch64).
    Arm64,
    /// Any other architecture: page-table iteration still works, but
    /// entries will not decode into unwinding instructions.
    Other,
}
// Types marked with repr(C) indicate their layout precisely matches the
// layout of the format. In theory we could point directly into the binary
// of the unwind_info section with these types, but we avoid doing so for
// endianness/safety.
/// The root ("first-level") page of the section, minus the leading version
/// field which is read and validated separately by [`CompactUnwindInfoIter::new`].
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct FirstLevelPage {
    // Only version 1 is currently defined
    // version: u32 = 1,
    /// The array of u32 global opcodes (offset relative to start of root page).
    ///
    /// These may be indexed by "compressed" second-level pages.
    global_opcodes_offset: u32,
    global_opcodes_len: u32,
    /// The array of u32 global personality codes (offset relative to start of root page).
    ///
    /// Personalities define the style of unwinding that an unwinder should use,
    /// and how to interpret the LSDA entries for a function (see below).
    personalities_offset: u32,
    personalities_len: u32,
    /// The array of [`FirstLevelPageEntry`]'s describing the second-level pages
    /// (offset relative to start of root page).
    pages_offset: u32,
    pages_len: u32,
    // After this point there are several dynamically-sized arrays whose precise
    // order and positioning don't matter, because they are all accessed using
    // offsets like the ones above. The arrays are:
    // global_opcodes: [u32; global_opcodes_len],
    // personalities: [u32; personalities_len],
    // pages: [FirstLevelPageEntry; pages_len],
    // lsdas: [LsdaEntry; unknown_len],
}
/// An element of the root page's `pages` array, locating one second-level
/// page and recording the first instruction address that page maps.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct FirstLevelPageEntry {
    /// The first address mapped by this page.
    ///
    /// This is useful for binary-searching for the page that can map
    /// a specific address in the binary (the primary kind of lookup
    /// performed by an unwinder).
    first_address: u32,
    /// Offset to the second-level page (offset relative to start of root page).
    ///
    /// This may point to either a [`RegularSecondLevelPage`] or a [`CompressedSecondLevelPage`].
    /// Which it is can be determined by the 32-bit "kind" value that is at
    /// the start of both layouts. A zero offset marks the sentinel entry at
    /// the end of the listing (see the module docs).
    second_level_page_offset: u32,
    /// Base offset into the lsdas array that entries in this page will be relative
    /// to (offset relative to start of root page).
    lsda_index_offset: u32,
}
/// A "regular" second-level page: a plain array of absolute-address
/// [`RegularEntry`]'s. The leading 32-bit `kind` tag (always 2) has already
/// been consumed by the time this struct is parsed.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct RegularSecondLevelPage {
    // Always 2 (use to distinguish from CompressedSecondLevelPage).
    // kind: u32 = 2,
    /// The Array of [`RegularEntry`]'s (offset relative to **start of this page**).
    entries_offset: u16,
    entries_len: u16,
}
/// A "compressed" second-level page: palettized entries packing an 8-bit
/// opcode index and a 24-bit page-relative address into each u32. The
/// leading 32-bit `kind` tag (always 3) has already been consumed by the
/// time this struct is parsed.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct CompressedSecondLevelPage {
    // Always 3 (use to distinguish from RegularSecondLevelPage).
    // kind: u32 = 3,
    /// The array of compressed u32 entries (offset relative to **start of this page**).
    ///
    /// Entries are a u32 that contains two packed values (from highest to lowest bits):
    /// * 8 bits: opcode index
    ///   * 0..global_opcodes_len => index into global palette
    ///   * global_opcodes_len..255 => index into local palette (subtract global_opcodes_len)
    /// * 24 bits: instruction address
    ///   * address is relative to this page's first_address!
    entries_offset: u16,
    entries_len: u16,
    /// The array of u32 local opcodes for this page (offset relative to **start of this page**).
    local_opcodes_offset: u16,
    local_opcodes_len: u16,
}
/// A single (address, opcode) pair from a [`RegularSecondLevelPage`].
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct RegularEntry {
    /// The address in the binary for this entry (absolute).
    instruction_address: u32,
    /// The opcode for this address.
    opcode: u32,
}
/// An entry of the LSDA array, associating an instruction address with its
/// Language Specific Data Area.
///
/// LSDA support is still TODO as it's not needed for backtraces (see the
/// module docs), so this is currently only parsed, not interpreted.
#[derive(Debug, Clone)]
#[repr(C)]
struct LsdaEntry {
    /// The address in the binary this LSDA applies to.
    instruction_address: u32,
    /// The address of the LSDA data itself.
    lsda_address: u32,
}
/// Either a fully-resolved 32-bit opcode, or an index into an opcode
/// palette (global or local) that still needs to be looked up.
#[derive(Debug, Clone)]
enum OpcodeOrIndex {
    /// A complete opcode value (e.g. the null opcode `0` used for the
    /// sentinel entry in [`CompactUnwindInfoIter::next_raw`]).
    Opcode(u32),
    /// A palette index taken from a compressed second-level page entry.
    Index(u32),
}
/// A minimally parsed entry, holding just the pieces that are cheap to
/// read ahead of time (see [`CompactUnwindInfoIter::next`] for why entries
/// must be peeked before they can be fully completed).
#[derive(Debug, Clone)]
struct RawCompactUnwindInfoEntry {
    /// The address of the first instruction this entry applies to
    /// (may apply to later instructions as well).
    instruction_address: u32,
    /// Either an opcode or the index into an opcode palette
    opcode_or_index: OpcodeOrIndex,
}
/// An iterator over the [`CompactUnwindInfoEntry`]'s of a `.unwind_info` section.
#[derive(Debug, Clone)]
pub struct CompactUnwindInfoIter<'a> {
    /// Parent .unwind_info metadata.
    /// The architecture of the binary (drives opcode decoding).
    arch: Arch,
    /// Byte order used when reading integers out of the section.
    endian: Endian,
    /// The raw bytes of the `.unwind_info` section.
    section: SectionData<'a>,
    /// Parsed root page.
    root: FirstLevelPage,
    // Iterator state
    /// Current index in the root node.
    first_idx: u32,
    /// Current index in the second-level node.
    second_idx: u32,
    /// Parsed version of the current pages.
    page_of_next_entry: Option<(FirstLevelPageEntry, SecondLevelPage)>,
    /// Minimally parsed version of the next entry, which we need to have
    /// already loaded to know how many instructions the previous entry covered.
    next_entry: Option<RawCompactUnwindInfoEntry>,
    /// Whether every entry of the current second-level page has been
    /// consumed, so the next `next_raw` call must load the following page.
    done_page: bool,
}
impl<'a> CompactUnwindInfoIter<'a> {
/// Creates a new [`CompactUnwindInfoIter`] for the given section.
pub fn new(
section: SectionData<'a>,
little_endian: bool,
arch: symbolic_common::Arch,
) -> Result<Self> {
const UNWIND_SECTION_VERSION: u32 = 1;
use symbolic_common::CpuFamily;
let arch = match arch.cpu_family() {
CpuFamily::Intel32 => Arch::X86,
CpuFamily::Amd64 => Arch::X64,
CpuFamily::Arm64 => Arch::Arm64,
_ => Arch::Other,
};
let endian = if little_endian {
Endian::Little
} else {
Endian::Big
};
let offset = &mut 0;
// Grab all the fields from the header
let version: u32 = section.gread_with(offset, endian)?;
if version != UNWIND_SECTION_VERSION {
return Err(MachError::from(Error::Malformed(format!(
"Unknown Compact Unwinding Info version {}",
version
))));
}
let root = section.gread_with(offset, endian)?;
let iter = CompactUnwindInfoIter {
arch,
endian,
section,
root,
first_idx: 0,
second_idx: 0,
page_of_next_entry: None,
next_entry: None,
done_page: true,
};
Ok(iter)
}
/// Gets the next entry in the iterator.
#[allow(clippy::should_implement_trait)]
pub fn next(&mut self) -> Result<Option<CompactUnwindInfoEntry>> {
// Iteration is slightly more complex here because we want to be able to
// report how many instructions an entry covers, and knowing this requires us
// to parse the *next* entry's instruction_address value. Also, there's
// a sentinel page at the end of the listing with a null second_level_page_offset
// which requires some special handling.
//
// To handle this, we split iteration into two phases:
//
// * next_raw minimally parses the next entry so we can extract the opcode,
// while also ensuring page_of_next_entry is set to match it.
//
// * next uses next_raw to "peek" the instruction_address of the next entry,
// and then saves the result as `next_entry`, to avoid doing a bunch of
// repeated work.
// If this is our first iteration next_entry will be empty, try to get it.
if self.next_entry.is_none() {
self.next_entry = self.next_raw()?;
}
if let Some(cur_entry) = self.next_entry.take() {
// Copy the first and second page data, as it may get overwritten
// by next_raw, then peek the next entry.
let (first_page, second_page) = self.page_of_next_entry.clone().unwrap();
self.next_entry = self.next_raw()?;
if let Some(next_entry) = self.next_entry.as_ref() {
let result = self.complete_entry(
&cur_entry,
next_entry.instruction_address,
&first_page,
&second_page,
)?;
Ok(Some(result))
} else {
// If there's no next_entry, then cur_entry is the sentinel, which
// we shouldn't yield.
Ok(None)
}
} else {
// next_raw still yielded nothing, we're done.
Ok(None)
}
}
    // Yields a minimally parsed version of the next entry, and sets
    // page_of_next_entry to the page matching it (so it can be further
    // parsed when needed).
    //
    // Returns Ok(None) once iteration has moved past the sentinel page.
    // The sentinel itself is yielded as a dummy entry with the null opcode,
    // so that `next` can use its first_address to size the last real entry.
    fn next_raw(&mut self) -> Result<Option<RawCompactUnwindInfoEntry>> {
        // First, load up the page for this value if needed
        if self.done_page {
            // Only advance the indices if we've already loaded up a page
            // (so it's not the first iteration) and we have pages left.
            if self.page_of_next_entry.is_some() && self.first_idx != self.root.pages_len {
                self.first_idx += 1;
                self.second_idx = 0;
            }
            if let Some(entry) = self.first_level_entry(self.first_idx)? {
                if entry.second_level_page_offset == 0 {
                    // sentinel page at the end of the list, create a dummy entry
                    // and advance past this page (don't reset done_page).
                    return Ok(Some(RawCompactUnwindInfoEntry {
                        instruction_address: entry.first_address,
                        opcode_or_index: OpcodeOrIndex::Opcode(0),
                    }));
                }
                let second_level_page = self.second_level_page(entry.second_level_page_offset)?;
                self.page_of_next_entry = Some((entry, second_level_page));
                self.done_page = false;
            } else {
                // Couldn't load a page, so we're at the end of our iteration.
                return Ok(None);
            }
        }
        // If we get here, we must have loaded a page
        let (first_level_entry, second_level_page) = self.page_of_next_entry.as_ref().unwrap();
        let entry =
            self.second_level_entry(first_level_entry, second_level_page, self.second_idx)?;
        // Advance to the next entry
        self.second_idx += 1;
        // If we reach the end of the page, setup for the next page
        if self.second_idx == second_level_page.len() {
            self.done_page = true;
        }
        Ok(Some(entry))
    }
/*
/// Gets the entry associated with a particular address.
pub fn entry_for_address(&mut self, _address: u32) -> Result<Option<CompactUnwindInfoEntry>> {
// TODO: this would be nice for an actual unwinding implementation, but
// dumping all of the entries doesn't need this.
}
*/
fn first_level_entry(&self, idx: u32) -> Result<Option<FirstLevelPageEntry>> {
if idx < self.root.pages_len {
let idx_offset = mem::size_of::<FirstLevelPageEntry>() * idx as usize;
let offset = self.root.pages_offset as usize + idx_offset;
Ok(Some(self.section.pread_with(offset, self.endian)?))
} else {
Ok(None)
}
}
fn second_level_page(&self, offset: u32) -> Result<SecondLevelPage> {
const SECOND_LEVEL_REGULAR: u32 = 2;
const SECOND_LEVEL_COMPRESSED: u32 = 3;
let mut offset = offset as usize;
let kind: u32 = self.section.gread_with(&mut offset, self.endian)?;
if kind == SECOND_LEVEL_REGULAR {
Ok(SecondLevelPage::Regular(
self.section.gread_with(&mut offset, self.endian)?,
))
} else if kind == SECOND_LEVEL_COMPRESSED {
Ok(SecondLevelPage::Compressed(
self.section.gread_with(&mut offset, self.endian)?,
))
} else {
Err(MachError::from(Error::Malformed(format!(
"Unknown second-level page kind: {}",
kind
))))
}
}
    /// Minimally parses the `second_level_idx`th entry of a second-level page.
    ///
    /// Compressed entries pack an address offset (low 24 bits, relative to the
    /// page's base address) and an opcode-table index (high 8 bits) into one
    /// u32; regular entries store an absolute address and a full opcode as two
    /// u32s.
    fn second_level_entry(
        &self,
        first_level_entry: &FirstLevelPageEntry,
        second_level_page: &SecondLevelPage,
        second_level_idx: u32,
    ) -> Result<RawCompactUnwindInfoEntry> {
        match *second_level_page {
            SecondLevelPage::Compressed(ref page) => {
                // 4 bytes per compressed entry.
                let offset = first_level_entry.second_level_page_offset as usize
                    + page.entries_offset as usize
                    + second_level_idx as usize * 4;
                let compressed_entry: u32 = self.section.pread_with(offset, self.endian)?;

                // Low 24 bits are relative to the page's first address.
                let instruction_address =
                    (compressed_entry & 0x00FFFFFF) + first_level_entry.first_address;
                // High 8 bits index into the global/local opcode tables;
                // resolved later by `complete_entry`.
                let opcode_idx = (compressed_entry >> 24) & 0xFF;
                Ok(RawCompactUnwindInfoEntry {
                    instruction_address,
                    opcode_or_index: OpcodeOrIndex::Index(opcode_idx),
                })
            }
            SecondLevelPage::Regular(ref page) => {
                // 8 bytes per regular entry (address + opcode).
                let offset = first_level_entry.second_level_page_offset as usize
                    + page.entries_offset as usize
                    + second_level_idx as usize * 8;

                let entry: RegularEntry = self.section.pread_with(offset, self.endian)?;

                Ok(RawCompactUnwindInfoEntry {
                    instruction_address: entry.instruction_address,
                    opcode_or_index: OpcodeOrIndex::Opcode(entry.opcode),
                })
            }
        }
    }
    /// Fully resolves a raw entry into a [`CompactUnwindInfoEntry`].
    ///
    /// Opcode indices (from compressed pages) are resolved through the global
    /// opcode table or the page-local table; the entry's length is the gap up
    /// to the following entry's address.
    fn complete_entry(
        &self,
        entry: &RawCompactUnwindInfoEntry,
        next_entry_instruction_address: u32,
        first_level_entry: &FirstLevelPageEntry,
        second_level_page: &SecondLevelPage,
    ) -> Result<CompactUnwindInfoEntry> {
        // Entries must be sorted by address, or the length computation at the
        // bottom would underflow.
        if entry.instruction_address > next_entry_instruction_address {
            return Err(MachError::from(Error::Malformed(format!(
                "Entry addresses are not monotonic! ({} > {})",
                entry.instruction_address, next_entry_instruction_address
            ))));
        }
        let opcode = match entry.opcode_or_index {
            OpcodeOrIndex::Opcode(opcode) => opcode,
            OpcodeOrIndex::Index(opcode_idx) => {
                if let SecondLevelPage::Compressed(ref page) = second_level_page {
                    // Indices below the global table's length refer to the
                    // global table; the remainder index this page's local
                    // table.
                    if opcode_idx < self.root.global_opcodes_len {
                        self.global_opcode(opcode_idx)?
                    } else {
                        let opcode_idx = opcode_idx - self.root.global_opcodes_len;
                        if opcode_idx >= page.local_opcodes_len as u32 {
                            return Err(MachError::from(Error::Malformed(format!(
                                "Local opcode index too large ({} >= {})",
                                opcode_idx, page.local_opcodes_len
                            ))));
                        }
                        // 4 bytes per local opcode, relative to the page base.
                        let offset = first_level_entry.second_level_page_offset as usize
                            + page.local_opcodes_offset as usize
                            + opcode_idx as usize * 4;
                        let opcode: u32 = self.section.pread_with(offset, self.endian)?;
                        opcode
                    }
                } else {
                    // OpcodeOrIndex::Index is only ever produced from
                    // compressed pages (see `second_level_entry`).
                    unreachable!()
                }
            }
        };
        let opcode = Opcode(opcode);

        Ok(CompactUnwindInfoEntry {
            instruction_address: entry.instruction_address,
            len: next_entry_instruction_address - entry.instruction_address,
            opcode,
        })
    }
fn global_opcode(&self, opcode_idx: u32) -> Result<u32> {
if opcode_idx >= self.root.global_opcodes_len {
return Err(MachError::from(Error::Malformed(format!(
"Global opcode index too large ({} >= {})",
opcode_idx, self.root.global_opcodes_len
))));
}
let offset = self.root.global_opcodes_offset as usize + opcode_idx as usize * 4;
let opcode: u32 = self.section.pread_with(offset, self.endian)?;
Ok(opcode)
}
fn personality(&self, personality_idx: u32) -> Result<u32> {
if personality_idx >= self.root.personalities_len {
return Err(MachError::from(Error::Malformed(format!(
"Personality index too large ({} >= {})",
personality_idx, self.root.personalities_len
))));
}
let offset = self.root.personalities_offset as usize + personality_idx as usize * 4;
let personality: u32 = self.section.pread_with(offset, self.endian)?;
Ok(personality)
}
    /// Dumps similar output to `llvm-objdump --unwind-info`, for debugging.
    ///
    /// Prints the section header fields, both opcode/personality tables, the
    /// first-level index, and every second-level entry. Iterates over a clone
    /// so this iterator's own state is left untouched.
    pub fn dump(&self) -> Result<()> {
        println!("Contents of __unwind_info section:");
        println!(" Version: 0x1");
        println!(
            " Common encodings array section offset: 0x{:x}",
            self.root.global_opcodes_offset
        );
        println!(
            " Number of common encodings in array: 0x{:x}",
            self.root.global_opcodes_len
        );
        println!(
            " Personality function array section offset: 0x{:x}",
            self.root.personalities_offset
        );
        println!(
            " Number of personality functions in array: 0x{:x}",
            self.root.personalities_len
        );
        println!(
            " Index array section offset: 0x{:x}",
            self.root.pages_offset
        );
        println!(
            " Number of indices in array: 0x{:x}",
            self.root.pages_len
        );
        println!(
            " Common encodings: (count = {})",
            self.root.global_opcodes_len
        );
        for i in 0..self.root.global_opcodes_len {
            let opcode = self.global_opcode(i)?;
            println!(" encoding[{}]: 0x{:08x}", i, opcode);
        }
        println!(
            " Personality functions: (count = {})",
            self.root.personalities_len
        );
        for i in 0..self.root.personalities_len {
            let personality = self.personality(i)?;
            println!(" personality[{}]: 0x{:08x}", i, personality);
        }
        println!(" Top level indices: (count = {})", self.root.pages_len);
        for i in 0..self.root.pages_len {
            let entry = self.first_level_entry(i)?.unwrap();
            println!(" [{}]: function offset=0x{:08x}, 2nd level page offset=0x{:08x}, LSDA offset=0x{:08x}",
                i,
                entry.first_address,
                entry.second_level_page_offset,
                entry.lsda_index_offset);
        }

        // TODO: print LSDA info
        println!(" LSDA descriptors:");
        println!(" Second level indices:");

        // Walk a clone with next_raw so we can observe page state
        // (page_of_next_entry / second_idx) as we go.
        let mut iter = (*self).clone();
        while let Some(raw_entry) = iter.next_raw()? {
            let (first, second) = iter.page_of_next_entry.clone().unwrap();

            // Always observing the index after the step, so subtract 1
            let second_idx = iter.second_idx - 1;

            // If this is the first entry of this page, dump the page
            if second_idx == 0 {
                println!(" Second level index[{}]: offset in section=0x{:08x}, base function=0x{:08x}",
                    iter.first_idx,
                    first.second_level_page_offset,
                    first.first_address);
            }

            // Dump the entry

            // Feed in own instruction_address as a dummy value (we don't need it for this format)
            let entry =
                iter.complete_entry(&raw_entry, raw_entry.instruction_address, &first, &second)?;
            if let OpcodeOrIndex::Index(opcode_idx) = raw_entry.opcode_or_index {
                println!(
                    " [{}]: function offset=0x{:08x}, encoding[{}]=0x{:08x}",
                    second_idx, entry.instruction_address, opcode_idx, entry.opcode.0
                );
            } else {
                println!(
                    " [{}]: function offset=0x{:08x}, encoding=0x{:08x}",
                    second_idx, entry.instruction_address, entry.opcode.0
                );
            }
        }

        Ok(())
    }
}
// A parsed second-level page header, in either of the two on-disk layouts.
#[derive(Debug, Clone)]
enum SecondLevelPage {
    Compressed(CompressedSecondLevelPage),
    Regular(RegularSecondLevelPage),
}
impl SecondLevelPage {
    /// Number of entries stored in this second-level page.
    fn len(&self) -> u32 {
        match self {
            SecondLevelPage::Compressed(page) => page.entries_len as u32,
            SecondLevelPage::Regular(page) => page.entries_len as u32,
        }
    }
}
/// A Compact Unwind Info entry.
#[derive(Debug, Clone)]
pub struct CompactUnwindInfoEntry {
    /// The first instruction this entry covers.
    pub instruction_address: u32,
    /// How many addresses this entry covers.
    pub len: u32,
    /// The opcode for this entry.
    // Kept opaque; decode it with `instructions()`.
    opcode: Opcode,
}
impl CompactUnwindInfoEntry {
    /// Gets cfi instructions associated with this entry.
    ///
    /// The iterator is needed to know the target architecture, which decides
    /// how the opcode bits are interpreted.
    pub fn instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
        self.opcode.instructions(iter)
    }
}
/// A Compact Unwinding Operation
#[derive(Debug)]
pub enum CompactUnwindOp {
    /// The instructions can be described with simple CFI operations.
    CfiOps(CompactCfiOpIter),
    /// Instructions can't be encoded by Compact Unwinding, but an FDE
    /// with real DWARF CFI instructions is stored in the eh_frame section.
    UseDwarfFde {
        /// The offset in the eh_frame section where the FDE is.
        offset_in_eh_frame: u32,
    },
    /// Nothing to do (may be unimplemented features or an unknown encoding)
    None,
}
/// Minimal set of CFI ops needed to express Compact Unwinding semantics:
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CompactCfiOp {
    /// The value of `dest_reg` is *stored at* `src_reg + offset_from_src`.
    // i.e. a memory load is needed to recover `dest_reg`.
    RegisterAt {
        /// Destination
        dest_reg: CompactCfiRegister,
        /// Source
        src_reg: CompactCfiRegister,
        /// Offset
        offset_from_src: i32,
    },
    /// The value of `dest_reg` *is* `src_reg + offset_from_src`.
    // i.e. pure arithmetic, no memory access.
    RegisterIs {
        /// Destination
        dest_reg: CompactCfiRegister,
        /// Source
        src_reg: CompactCfiRegister,
        /// Offset
        offset_from_src: i32,
    },
}
// Unwinding strategies an x86/x64 compact unwind opcode can select
// (see `Opcode::x86_mode`).
#[derive(Debug, Clone)]
enum X86UnwindingMode {
    RbpFrame,
    StackImmediate,
    StackIndirect,
    Dwarf,
}
// Unwinding strategies an ARM64 compact unwind opcode can select
// (see `Opcode::arm64_mode`).
#[derive(Debug, Clone)]
enum Arm64UnwindingMode {
    Frameless,
    Dwarf,
    Frame,
}
// A raw 32-bit compact unwinding encoding; interpretation depends on the
// target architecture.
#[derive(Debug, Clone)]
struct Opcode(u32);
// Arch-generic stuff
impl Opcode {
    // Lowers this opcode into a `CompactUnwindOp` for the iterator's
    // architecture; unsupported architectures yield `CompactUnwindOp::None`.
    fn instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
        match iter.arch {
            Arch::X86 | Arch::X64 => self.x86_instructions(iter),
            Arch::Arm64 => self.arm64_instructions(iter),
            _ => CompactUnwindOp::None,
        }
    }

    // Size of a pointer (and general-purpose register) in bytes. Only
    // reachable for the architectures handled by `instructions` above.
    fn pointer_size(&self, iter: &CompactUnwindInfoIter) -> u32 {
        match iter.arch {
            Arch::X86 => 4,
            Arch::X64 => 8,
            Arch::Arm64 => 8,
            _ => unimplemented!(),
        }
    }

    /*
    // potentially needed for future work:

    fn is_start(&self) -> bool {
        let offset = 32 - 1;
        (self.0 & (1 << offset)) != 0
    }
    fn has_lsda(&self) -> bool{
        let offset = 32 - 2;
        (self.0 & (1 << offset)) != 0
    }
    fn personality_index(&self) -> u32 {
        let offset = 32 - 4;
        (self.0 >> offset) & 0b11
    }
    */
}
// x86/x64 implementation
impl Opcode {
    // Lowers an x86/x64 opcode into CFI operations (or a DWARF fallback).
    fn x86_instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
        let pointer_size = self.pointer_size(iter) as i32;
        match self.x86_mode() {
            Some(X86UnwindingMode::RbpFrame) => {
                // This function has the standard function prelude and rbp
                // has been preserved. Additionally, any callee-saved registers
                // that haven't been preserved (x86_rbp_registers) are saved on
                // the stack at x86_rbp_stack_offset.
                let mut ops = CompactCfiOpIter::new();

                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::frame_pointer(),
                    offset_from_src: 2 * pointer_size,
                });
                // The saved frame pointer and return address sit just below
                // the CFA (pushed by the standard prologue).
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::frame_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                });

                // This implementation here is in line with whatever llvm does here:
                // https://github.com/llvm/llvm-project/blob/d21a35ac0a958fd4cff0b8f424a2706b8785b89d/lldb/source/Symbol/CompactUnwindInfo.cpp#L766-L788

                // These offsets are relative to the frame pointer, but
                // cfi prefers things to be relative to the cfa, so apply
                // the same offset here too.
                let offset = self.x86_rbp_stack_offset() as i32 + 2;
                // Offset advances even if there's no register here
                for (i, reg) in self.x86_rbp_registers().iter().enumerate() {
                    if let Some(reg) = *reg {
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: reg,
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: -(offset - i as i32) * pointer_size,
                        });
                    }
                }
                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            Some(X86UnwindingMode::StackImmediate) => {
                // This function doesn't have the standard rbp-based prelude,
                // but we know how large its stack frame is (x86_frameless_stack_size),
                // and any callee-saved registers that haven't been preserved are
                // saved *immediately* after the location at rip.
                let mut ops = CompactCfiOpIter::new();

                let stack_size = self.x86_frameless_stack_size();
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::stack_pointer(),
                    offset_from_src: stack_size as i32 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                });

                let mut offset = 2;
                // offset only advances if there's a register here.
                // also note registers are in reverse order.
                for reg in self.x86_frameless_registers().iter().rev() {
                    if let Some(reg) = *reg {
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: reg,
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: -offset * pointer_size,
                        });
                        offset += 1;
                    }
                }
                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            Some(X86UnwindingMode::StackIndirect) => {
                // TODO: implement this? Perhaps there is no reasonable implementation
                // since this involves parsing a value out of a machine instruction
                // in the binary? Or can we just do that work here and it just
                // becomes a constant in the CFI output?
                //
                // Either way it's not urgent, since this mode is only needed for
                // stack frames that are bigger than ~2KB.
                CompactUnwindOp::None
            }
            Some(X86UnwindingMode::Dwarf) => {
                // Oops! It was in the eh_frame all along.
                let offset_in_eh_frame = self.x86_dwarf_fde();
                CompactUnwindOp::UseDwarfFde { offset_in_eh_frame }
            }
            None => CompactUnwindOp::None,
        }
    }

    // Decodes the mode from bits 24-27 of the opcode; `None` for unknown
    // mode values.
    fn x86_mode(&self) -> Option<X86UnwindingMode> {
        const X86_MODE_MASK: u32 = 0x0F00_0000;
        const X86_MODE_RBP_FRAME: u32 = 0x0100_0000;
        const X86_MODE_STACK_IMMD: u32 = 0x0200_0000;
        const X86_MODE_STACK_IND: u32 = 0x0300_0000;
        const X86_MODE_DWARF: u32 = 0x0400_0000;

        let masked = self.0 & X86_MODE_MASK;

        match masked {
            X86_MODE_RBP_FRAME => Some(X86UnwindingMode::RbpFrame),
            X86_MODE_STACK_IMMD => Some(X86UnwindingMode::StackImmediate),
            X86_MODE_STACK_IND => Some(X86UnwindingMode::StackIndirect),
            X86_MODE_DWARF => Some(X86UnwindingMode::Dwarf),
            _ => None,
        }
    }

    // RbpFrame mode: five saved registers packed into the low 15 bits,
    // 3 bits each (0 meaning "no register in this slot").
    fn x86_rbp_registers(&self) -> [Option<CompactCfiRegister>; 5] {
        let mask = 0b111;
        [
            CompactCfiRegister::from_x86_encoded(self.0 & mask),
            CompactCfiRegister::from_x86_encoded((self.0 >> 3) & mask),
            CompactCfiRegister::from_x86_encoded((self.0 >> 6) & mask),
            CompactCfiRegister::from_x86_encoded((self.0 >> 9) & mask),
            CompactCfiRegister::from_x86_encoded((self.0 >> 12) & mask),
        ]
    }

    // RbpFrame mode: stack offset of the saved registers, in pointer-sized
    // units relative to rbp (bits 16-23).
    fn x86_rbp_stack_offset(&self) -> u32 {
        let offset = 32 - 8 - 8;
        (self.0 >> offset) & 0b1111_1111
    }

    // StackImmediate mode: frame size in pointer-sized units (bits 16-23).
    fn x86_frameless_stack_size(&self) -> u32 {
        let offset = 32 - 8 - 8;
        (self.0 >> offset) & 0b1111_1111
    }

    // Frameless modes: number of saved registers (bits 10-12), clamped to
    // the representable maximum of 6.
    fn x86_frameless_register_count(&self) -> u32 {
        let offset = 32 - 8 - 8 - 3 - 3;
        let register_count = (self.0 >> offset) & 0b111;
        if register_count > 6 {
            6
        } else {
            register_count
        }
    }

    // Frameless modes: unpacks up to 6 saved registers from a 10-bit
    // factorial-base permutation encoding (the inverse is implemented by
    // `pack_x86_stackless_registers` in the tests).
    fn x86_frameless_registers(&self) -> [Option<CompactCfiRegister>; 6] {
        let mut permutation = self.0 & 0b11_1111_1111;
        let mut permunreg = [0; 6];
        let register_count = self.x86_frameless_register_count();

        // I honestly haven't looked into what the heck this is doing, I
        // just copied this implementation from llvm since it honestly doesn't
        // matter much. Magically unpack 6 values from 10 bits!
        match register_count {
            6 => {
                permunreg[0] = permutation / 120; // 120 == 5!
                permutation -= permunreg[0] * 120;
                permunreg[1] = permutation / 24; // 24 == 4!
                permutation -= permunreg[1] * 24;
                permunreg[2] = permutation / 6; // 6 == 3!
                permutation -= permunreg[2] * 6;
                permunreg[3] = permutation / 2; // 2 == 2!
                permutation -= permunreg[3] * 2;
                permunreg[4] = permutation; // 1 == 1!
                permunreg[5] = 0;
            }
            5 => {
                permunreg[0] = permutation / 120;
                permutation -= permunreg[0] * 120;
                permunreg[1] = permutation / 24;
                permutation -= permunreg[1] * 24;
                permunreg[2] = permutation / 6;
                permutation -= permunreg[2] * 6;
                permunreg[3] = permutation / 2;
                permutation -= permunreg[3] * 2;
                permunreg[4] = permutation;
            }
            4 => {
                permunreg[0] = permutation / 60;
                permutation -= permunreg[0] * 60;
                permunreg[1] = permutation / 12;
                permutation -= permunreg[1] * 12;
                permunreg[2] = permutation / 3;
                permutation -= permunreg[2] * 3;
                permunreg[3] = permutation;
            }
            3 => {
                permunreg[0] = permutation / 20;
                permutation -= permunreg[0] * 20;
                permunreg[1] = permutation / 4;
                permutation -= permunreg[1] * 4;
                permunreg[2] = permutation;
            }
            2 => {
                permunreg[0] = permutation / 5;
                permutation -= permunreg[0] * 5;
                permunreg[1] = permutation;
            }
            1 => {
                permunreg[0] = permutation;
            }
            _ => {
                // Do nothing
            }
        }

        // Re-expand the dense renumbered values back into register numbers
        // 1..=6, consuming each number at most once.
        let mut registers = [0u32; 6];
        let mut used = [false; 7];
        for i in 0..register_count {
            let mut renum = 0;
            for j in 1u32..7 {
                if !used[j as usize] {
                    if renum == permunreg[i as usize] {
                        registers[i as usize] = j;
                        used[j as usize] = true;
                        break;
                    }
                    renum += 1;
                }
            }
        }
        [
            CompactCfiRegister::from_x86_encoded(registers[0]),
            CompactCfiRegister::from_x86_encoded(registers[1]),
            CompactCfiRegister::from_x86_encoded(registers[2]),
            CompactCfiRegister::from_x86_encoded(registers[3]),
            CompactCfiRegister::from_x86_encoded(registers[4]),
            CompactCfiRegister::from_x86_encoded(registers[5]),
        ]
    }

    // Dwarf mode: offset of the FDE within eh_frame (low 24 bits).
    fn x86_dwarf_fde(&self) -> u32 {
        self.0 & 0x00FF_FFFF
    }

    /*
    // potentially needed for future work:
    fn x86_frameless_stack_adjust(&self) -> u32 {
        let offset = 32 - 8 - 8 - 3;
        (self.0 >> offset) & 0b111
    }
    */
}
// ARM64 implementation
impl Opcode {
    // Decodes the mode from bits 24-27 of the opcode; `None` for unknown
    // mode values.
    fn arm64_mode(&self) -> Option<Arm64UnwindingMode> {
        const ARM64_MODE_MASK: u32 = 0x0F000000;
        const ARM64_MODE_FRAMELESS: u32 = 0x02000000;
        const ARM64_MODE_DWARF: u32 = 0x03000000;
        const ARM64_MODE_FRAME: u32 = 0x04000000;

        let masked = self.0 & ARM64_MODE_MASK;

        match masked {
            ARM64_MODE_FRAMELESS => Some(Arm64UnwindingMode::Frameless),
            ARM64_MODE_DWARF => Some(Arm64UnwindingMode::Dwarf),
            ARM64_MODE_FRAME => Some(Arm64UnwindingMode::Frame),
            _ => None,
        }
    }

    // Lowers an ARM64 opcode into CFI operations (or a DWARF fallback).
    fn arm64_instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
        let pointer_size = self.pointer_size(iter) as i32;
        match self.arm64_mode() {
            Some(Arm64UnwindingMode::Frameless) => {
                // This is a "frameless" leaf function. All there is to
                // do is pop the stack and move the return address from
                // the link register to the instruction pointer.

                // Stack size is divided by 16.
                let stack_size = self.arm64_frameless_stack_size() * 16;
                let mut ops = CompactCfiOpIter::new();

                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::stack_pointer(),
                    offset_from_src: stack_size as i32,
                });
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::link_register(),
                    offset_from_src: 0,
                });

                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            Some(Arm64UnwindingMode::Dwarf) => {
                let offset_in_eh_frame = self.arm64_dwarf_fde();
                CompactUnwindOp::UseDwarfFde { offset_in_eh_frame }
            }
            Some(Arm64UnwindingMode::Frame) => {
                let mut ops = CompactCfiOpIter::new();

                // This function has the standard ARM64 prologue, where
                // the frame pointer and instruction pointer are immediately
                // pushed as a pair onto the stack, and then the frame
                // pointer is updated to be the current stack pointer.
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::frame_pointer(),
                    offset_from_src: 2 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::frame_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                });

                // Then the X19-X28 registers that need to be restored
                // are pushed onto the stack in pairs in ascending order.
                // This is followed by the D8-D15 registers that need
                // to be restored.

                // The registers that were pushed are just represented
                // by a simple bit set covering bits 0-9 (**low-to-high**):
                let num_reg_pairs = 9;
                let mut pairs_saved = 0;
                for pair_num in 0..num_reg_pairs {
                    let has_pair = (self.0 & (1 << pair_num)) != 0;
                    if has_pair {
                        // Although ARM64 wants to restore these registers in pairs,
                        // we specify them individually since CFI likes it that way.
                        let first_reg = ARM64_REG_BASE + pair_num * 2;
                        let second_reg = ARM64_REG_BASE + pair_num * 2 + 1;

                        // Pairs sit below the fp/lr pair saved at the top of
                        // the frame, hence the -3/-4 base offsets.
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: CompactCfiRegister::from_arm64_encoded(first_reg),
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: (-2 * pairs_saved - 3) * pointer_size,
                        });
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: CompactCfiRegister::from_arm64_encoded(second_reg),
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: (-2 * pairs_saved - 4) * pointer_size,
                        });
                        pairs_saved += 1;
                    }
                }

                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            None => CompactUnwindOp::None,
        }
    }

    // Frameless mode: stack size in 16-byte units (bits 12-23).
    fn arm64_frameless_stack_size(&self) -> u32 {
        let offset = 32 - 8 - 12;
        (self.0 >> offset) & 0xFFF
    }

    // Dwarf mode: offset of the FDE within eh_frame (low 24 bits).
    fn arm64_dwarf_fde(&self) -> u32 {
        self.0 & 0x00FF_FFFF
    }
}
// The x86 encoding includes the frame pointer as value 6, while
// the ARM64 encoding doesn't encode it (but needs it for output).
// To avoid the register number of the frame pointer being dependent
// on the target architecture, we start ARM64 register numbers
// *after* 6, so that value can still be used. This is potentially
// needlessly cute, but it makes usage a bit cleaner.
const REG_FRAME: u8 = 6;
const ARM64_REG_BASE: u32 = REG_FRAME as u32 + 1;
// These registers aren't ever encoded explicitly, so we make
// up some arbitrary values for reporting them in our outputs.
const REG_LINK: u8 = 252;
const REG_INSTRUCTION: u8 = 253;
const REG_STACK: u8 = 254;
const REG_CFA: u8 = 255;
/// A register for a [`CompactCfiOp`], as used by Compact Unwinding.
///
/// You should just treat this opaquely and use its methods to make sense of it.
// Internally a single arch-independent numbering (see the REG_* constants
// and ARM64_REG_BASE above).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct CompactCfiRegister(u8);
impl CompactCfiRegister {
    /// Decodes a 3-bit x86 register slot; 0 (and anything out of range)
    /// means "no register".
    fn from_x86_encoded(val: u32) -> Option<Self> {
        match val {
            1..=6 => Some(CompactCfiRegister(val as u8)),
            _ => None,
        }
    }

    /// Wraps an internal ARM64 register number (x19-x28, d8-d15).
    fn from_arm64_encoded(val: u32) -> Self {
        // Assert shouldn't happen as we're processing trusted input here, but
        // good to validate this in tests.
        debug_assert!(val >= ARM64_REG_BASE && val < ARM64_REG_BASE + 18);
        CompactCfiRegister(val as u8)
    }

    /// Whether this register is the cfa register.
    pub fn is_cfa(&self) -> bool {
        matches!(self.0, REG_CFA)
    }

    /// The name of this register that cfi wants.
    pub fn name(&self, iter: &CompactUnwindInfoIter) -> Option<&'static str> {
        if self.0 == REG_CFA {
            Some("cfa")
        } else {
            name_of_other_reg(self.0, iter)
        }
    }

    /// Gets the pseudo-register holding the Canonical Frame Address.
    pub fn cfa() -> Self {
        CompactCfiRegister(REG_CFA)
    }

    /// Gets the register for the frame pointer (e.g. rbp).
    pub fn frame_pointer() -> Self {
        CompactCfiRegister(REG_FRAME)
    }

    /// Gets the register for the instruction pointer (e.g. rip).
    pub fn instruction_pointer() -> Self {
        CompactCfiRegister(REG_INSTRUCTION)
    }

    /// Gets the register for the stack pointer (e.g. rsp).
    pub fn stack_pointer() -> Self {
        CompactCfiRegister(REG_STACK)
    }

    /// Get the ARM64 link register (x30).
    pub fn link_register() -> Self {
        CompactCfiRegister(REG_LINK)
    }
}
/// Maps an internal register number to its architecture-specific name;
/// `None` for numbers that have no name on the given architecture.
fn name_of_other_reg(reg: u8, iter: &CompactUnwindInfoIter) -> Option<&'static str> {
    match iter.arch {
        Arch::X86 => match reg {
            1 => Some("ebx"),
            2 => Some("ecx"),
            3 => Some("edx"),
            4 => Some("edi"),
            5 => Some("esi"),
            6 => Some("ebp"),
            // Not part of the compact format, but needed to describe opcode behaviours
            REG_INSTRUCTION => Some("eip"),
            REG_STACK => Some("esp"),
            _ => None,
        },
        Arch::X64 => match reg {
            1 => Some("rbx"),
            2 => Some("r12"),
            3 => Some("r13"),
            4 => Some("r14"),
            5 => Some("r15"),
            6 => Some("rbp"),
            // Not part of the compact format, but needed to describe opcode behaviours
            REG_INSTRUCTION => Some("rip"),
            REG_STACK => Some("rsp"),
            _ => None,
        },
        Arch::Arm64 => match reg {
            7 => Some("x19"),
            8 => Some("x20"),
            9 => Some("x21"),
            10 => Some("x22"),
            11 => Some("x23"),
            12 => Some("x24"),
            13 => Some("x25"),
            14 => Some("x26"),
            15 => Some("x27"),
            16 => Some("x28"),
            17 => Some("d8"),
            18 => Some("d9"),
            19 => Some("d10"),
            20 => Some("d11"),
            21 => Some("d12"),
            22 => Some("d13"),
            23 => Some("d14"),
            24 => Some("d15"),
            // Not part of the compact format, but needed to describe opcode behaviours
            REG_FRAME => Some("x29"),
            REG_LINK => Some("x30"),
            REG_INSTRUCTION => Some("pc"),
            REG_STACK => Some("sp"),
            _ => None,
        },
        _ => None,
    }
}
/// An iterator over the [`CompactCfiOp`]s yielded by [`CompactUnwindOp::CfiOps`].
#[derive(Debug, Clone)]
pub struct CompactCfiOpIter {
    // This is just a hacky impl of an ArrayVec to avoid depending on it, and
    // avoid allocating. This ends up storing 20 u64's if enum optimizations
    // work the way I expect.
    //
    // Sized for the worst case: the ARM64 frame mode pushes 3 base ops plus
    // 2 ops for each of up to 9 register pairs (3 + 18 = 21).
    items: [Option<CompactCfiOp>; 21],
    // Next write position while building; next read position while iterating.
    cur_idx: usize,
}
impl CompactCfiOpIter {
    /// Creates an empty buffer/iterator.
    fn new() -> Self {
        // An array-repeat expression needs a const operand for non-`Copy`
        // element types, so spell the empty slot out as one.
        const EMPTY: Option<CompactCfiOp> = None;
        Self {
            items: [EMPTY; 21],
            cur_idx: 0,
        }
    }

    /// Appends `item`.
    ///
    /// Panics on overflow, which would mean `items` is sized too small for
    /// some opcode's payload — a bug, since that payload is bounded.
    fn push(&mut self, item: CompactCfiOp) {
        self.items[self.cur_idx] = Some(item);
        self.cur_idx += 1;
    }

    /// Resets `cur_idx` so the same storage doubles as iterator state,
    /// because I'm too lazy to make *another* type for this.
    fn into_iter(mut self) -> Self {
        self.cur_idx = 0;
        self
    }
}
impl Iterator for CompactCfiOpIter {
    type Item = CompactCfiOp;

    /// Yields the stored ops in insertion order, leaving `None` behind;
    /// returns `None` once past the end of the buffer.
    fn next(&mut self) -> Option<Self::Item> {
        let item = self.items.get_mut(self.cur_idx)?.take();
        self.cur_idx += 1;
        item
    }
}
#[cfg(test)]
mod test {
use super::{
CompactCfiOp, CompactCfiRegister, CompactUnwindInfoIter, CompactUnwindOp, Opcode,
ARM64_REG_BASE,
};
use crate::macho::MachError;
use scroll::Pwrite;
use symbolic_common::Arch;
// All Second-level pages have this much memory to work with, let's stick to that
const PAGE_SIZE: usize = 4096;
const REGULAR_PAGE_HEADER_LEN: usize = 8;
const COMPRESSED_PAGE_HEADER_LEN: usize = 12;
const MAX_REGULAR_SECOND_LEVEL_ENTRIES: usize = (PAGE_SIZE - REGULAR_PAGE_HEADER_LEN) / 8;
const MAX_COMPRESSED_SECOND_LEVEL_ENTRIES: usize = (PAGE_SIZE - COMPRESSED_PAGE_HEADER_LEN) / 4;
const MAX_COMPRESSED_SECOND_LEVEL_ENTRIES_WITH_MAX_LOCALS: usize =
(PAGE_SIZE - COMPRESSED_PAGE_HEADER_LEN - MAX_LOCAL_OPCODES_LEN as usize * 4) / 4;
// Mentioned by headers, but seems to have no real significance
const MAX_GLOBAL_OPCODES_LEN: u32 = 127;
const MAX_LOCAL_OPCODES_LEN: u32 = 128;
// Only 2 bits are allocated to this index
const MAX_PERSONALITIES_LEN: u32 = 4;
const X86_MODE_RBP_FRAME: u32 = 0x0100_0000;
const X86_MODE_STACK_IMMD: u32 = 0x0200_0000;
const X86_MODE_STACK_IND: u32 = 0x0300_0000;
const X86_MODE_DWARF: u32 = 0x0400_0000;
const ARM64_MODE_FRAMELESS: u32 = 0x02000000;
const ARM64_MODE_DWARF: u32 = 0x03000000;
const ARM64_MODE_FRAME: u32 = 0x04000000;
const REGULAR_PAGE_KIND: u32 = 2;
const COMPRESSED_PAGE_KIND: u32 = 3;
fn align(offset: u32, align: u32) -> u32 {
// Adding `align - 1` to a value push unaligned values to the next multiple,
// and integer division + multiplication can then remove the remainder.
((offset + align - 1) / align) * align
}
fn pack_x86_rbp_registers(regs: [u8; 5]) -> u32 {
let mut result: u32 = 0;
let base_offset = 0;
for (idx, ®) in regs.iter().enumerate() {
assert!(reg <= 6);
result |= (reg as u32 & 0b111) << (base_offset + idx * 3);
}
result
}
    /// Packs up to `num_regs` registers (stored in the *last* `num_regs`
    /// slots of `registers`) into the 10-bit permutation encoding used by
    /// stackless frames — the inverse of `Opcode::x86_frameless_registers`.
    fn pack_x86_stackless_registers(num_regs: u32, registers: [u8; 6]) -> u32 {
        for &reg in &registers {
            assert!(reg <= 6);
        }

        // Also copied from llvm implementation
        //
        // Renumber each used register into a dense 0-based value by counting
        // how many smaller registers precede it, then combine the values in
        // a factorial number system.
        let mut renumregs = [0u32; 6];
        for i in 6 - num_regs..6 {
            let mut countless = 0;
            for j in 6 - num_regs..i {
                if registers[j as usize] < registers[i as usize] {
                    countless += 1;
                }
            }
            renumregs[i as usize] = registers[i as usize] as u32 - countless - 1;
        }
        let mut permutation_encoding: u32 = 0;
        match num_regs {
            6 => {
                permutation_encoding |= 120 * renumregs[0]
                    + 24 * renumregs[1]
                    + 6 * renumregs[2]
                    + 2 * renumregs[3]
                    + renumregs[4];
            }
            5 => {
                permutation_encoding |= 120 * renumregs[1]
                    + 24 * renumregs[2]
                    + 6 * renumregs[3]
                    + 2 * renumregs[4]
                    + renumregs[5];
            }
            4 => {
                permutation_encoding |=
                    60 * renumregs[2] + 12 * renumregs[3] + 3 * renumregs[4] + renumregs[5];
            }
            3 => {
                permutation_encoding |= 20 * renumregs[3] + 4 * renumregs[4] + renumregs[5];
            }
            2 => {
                permutation_encoding |= 5 * renumregs[4] + renumregs[5];
            }
            1 => {
                permutation_encoding |= renumregs[5];
            }
            0 => {
                // do nothing
            }
            _ => unreachable!(),
        }
        permutation_encoding
    }
fn assert_opcodes_match<A, B>(mut a: A, mut b: B)
where
A: Iterator<Item = CompactCfiOp>,
B: Iterator<Item = CompactCfiOp>,
{
while let (Some(a_op), Some(b_op)) = (a.next(), b.next()) {
assert_eq!(a_op, b_op);
}
assert!(b.next().is_none());
assert!(a.next().is_none());
}
#[test]
// Make sure we error out for an unknown version of this section
fn test_compact_unknown_version() -> Result<(), MachError> {
{
let offset = &mut 0;
let mut section = vec![0u8; 1024];
// Version 0 doesn't exist
section.gwrite(0u32, offset)?;
assert!(CompactUnwindInfoIter::new(§ion, true, Arch::Amd64).is_err());
}
{
let offset = &mut 0;
let mut section = vec![0; 1024];
// Version 2 doesn't exist
section.gwrite(2u32, offset)?;
assert!(CompactUnwindInfoIter::new(§ion, true, Arch::X86).is_err());
}
Ok(())
}
#[test]
// Make sure we handle a section with no entries reasonably
fn test_compact_empty() -> Result<(), MachError> {
let offset = &mut 0;
let mut section = vec![0u8; 1024];
// Just set the version, everything else is 0
section.gwrite(1u32, offset)?;
let mut iter = CompactUnwindInfoIter::new(§ion, true, Arch::Amd64)?;
assert!(iter.next()?.is_none());
assert!(iter.next()?.is_none());
Ok(())
}
#[test]
// Create a reasonable structure that has both kinds of second-level pages
// and poke at some corner cases. opcode values are handled opaquely, just
// checking that they roundtrip correctly.
fn test_compact_structure() -> Result<(), MachError> {
    // First-level shared tables: the global opcode palette and the
    // personality-function pointers (both referenced by index from entries).
    let global_opcodes: Vec<u32> = vec![0, 2, 4, 7];
    assert!(global_opcodes.len() <= MAX_GLOBAL_OPCODES_LEN as usize);
    let personalities: Vec<u32> = vec![7, 12, 3];
    assert!(personalities.len() <= MAX_PERSONALITIES_LEN as usize);

    // instruction_address, lsda_address
    let lsdas: Vec<(u32, u32)> = vec![(0, 1), (7, 3), (18, 5)];

    // first_instruction_address, second_page_offset, lsda_offset
    // (filled in later, once page offsets are known)
    let mut first_entries: Vec<(u32, u32, u32)> = vec![];

    /////////////////////////////////////////////////
    //          Values we will be testing           //
    /////////////////////////////////////////////////

    // page entries are instruction_address, opcode
    let mut regular_entries: Vec<Vec<(u32, u32)>> = vec![
        // Some data
        vec![(1, 7), (3, 8), (6, 10), (10, 4)],
        vec![(20, 5), (21, 2), (24, 7), (25, 0)],
        // Page len 1
        vec![(29, 8)],
    ];
    let mut compressed_entries: Vec<Vec<(u32, u32)>> = vec![
        // Some data
        vec![(10001, 7), (10003, 8), (10006, 10), (10010, 4)],
        vec![(10020, 5), (10021, 2), (10024, 7), (10025, 0)],
        // Page len 1
        vec![(10029, 8)],
    ];

    // max-len regular page
    let mut temp = vec![];
    let base_instruction = 100;
    for i in 0..MAX_REGULAR_SECOND_LEVEL_ENTRIES {
        temp.push((base_instruction + i as u32, i as u32))
    }
    regular_entries.push(temp);

    // max-len compact page (only global entries)
    // opcode 2 is in `global_opcodes`, so no local palette is needed
    let mut temp = vec![];
    let base_instruction = 10100;
    for i in 0..MAX_COMPRESSED_SECOND_LEVEL_ENTRIES {
        temp.push((base_instruction + i as u32, 2))
    }
    compressed_entries.push(temp);

    // max-len compact page (max local entries)
    // opcodes 100..100+MAX_LOCAL_OPCODES_LEN are all distinct from the
    // global palette, forcing the local palette to fill up completely
    let mut temp = vec![];
    let base_instruction = 14100;
    for i in 0..MAX_COMPRESSED_SECOND_LEVEL_ENTRIES_WITH_MAX_LOCALS {
        temp.push((
            base_instruction + i as u32,
            100 + (i as u32 % MAX_LOCAL_OPCODES_LEN),
        ))
    }
    compressed_entries.push(temp);

    ///////////////////////////////////////////////////////
    //               Compute the format                  //
    ///////////////////////////////////////////////////////

    // First temporarily write the second level pages into other buffers
    let mut second_level_pages: Vec<[u8; PAGE_SIZE]> = vec![];
    for page in &regular_entries {
        second_level_pages.push([0; PAGE_SIZE]);
        let buf = second_level_pages.last_mut().unwrap();
        let buf_offset = &mut 0;
        // kind
        buf.gwrite(REGULAR_PAGE_KIND, buf_offset)?;
        // entry array offset + len
        buf.gwrite(REGULAR_PAGE_HEADER_LEN as u16, buf_offset)?;
        buf.gwrite(page.len() as u16, buf_offset)?;
        // regular entries are full (address, opcode) pairs
        for &(insruction_address, opcode) in page {
            buf.gwrite(insruction_address, buf_offset)?;
            buf.gwrite(opcode, buf_offset)?;
        }
    }
    for page in &compressed_entries {
        second_level_pages.push([0; PAGE_SIZE]);
        let buf = second_level_pages.last_mut().unwrap();
        let buf_offset = &mut 0;

        // Compute a palette of local opcodes: each opcode resolves to either
        // a global palette index or a local one (offset by global len).
        // (this is semi-quadratic in that it can do 255 * 1000 iterations, it's fine)
        let mut local_opcodes = vec![];
        let mut indices = vec![];
        for &(_, opcode) in page {
            if let Some((idx, _)) = global_opcodes
                .iter()
                .enumerate()
                .find(|&(_, &global_opcode)| global_opcode == opcode)
            {
                indices.push(idx);
            } else if let Some((idx, _)) = local_opcodes
                .iter()
                .enumerate()
                .find(|&(_, &global_opcode)| global_opcode == opcode)
            {
                indices.push(global_opcodes.len() + idx);
            } else {
                local_opcodes.push(opcode);
                indices.push(global_opcodes.len() + local_opcodes.len() - 1);
            }
        }
        assert!(local_opcodes.len() <= MAX_LOCAL_OPCODES_LEN as usize);

        let entries_offset = COMPRESSED_PAGE_HEADER_LEN + local_opcodes.len() * 4;
        let first_address = page.first().unwrap().0;

        // kind
        buf.gwrite(COMPRESSED_PAGE_KIND, buf_offset)?;
        // entry array offset + len
        buf.gwrite(entries_offset as u16, buf_offset)?;
        buf.gwrite(page.len() as u16, buf_offset)?;
        // local opcodes array + len
        buf.gwrite(COMPRESSED_PAGE_HEADER_LEN as u16, buf_offset)?;
        buf.gwrite(local_opcodes.len() as u16, buf_offset)?;
        for opcode in local_opcodes {
            buf.gwrite(opcode, buf_offset)?;
        }
        // Compressed entry: low 24 bits = address delta from the page's first
        // address, high 8 bits = palette index.
        for (&(instruction_address, _opcode), idx) in page.iter().zip(indices) {
            let compressed_address = (instruction_address - first_address) & 0x00FF_FFFF;
            let compressed_idx = (idx as u32) << 24;
            // Sanity-check that neither field was truncated by the packing
            assert_eq!(compressed_address + first_address, instruction_address);
            assert_eq!(idx & 0xFFFF_FF00, 0);
            let compressed_opcode: u32 = compressed_address | compressed_idx;
            buf.gwrite(compressed_opcode, buf_offset)?;
        }
    }

    // Lay out the section: header, then the flat arrays, then the
    // page-aligned second-level pages. `+ 1` accounts for the null sentinel
    // entry appended to `first_entries` below.
    let header_size: u32 = 4 * 7;
    let global_opcodes_offset: u32 = header_size;
    let personalities_offset: u32 = global_opcodes_offset + global_opcodes.len() as u32 * 4;
    let first_entries_offset: u32 = personalities_offset + personalities.len() as u32 * 4;
    let lsdas_offset: u32 = first_entries_offset + (second_level_pages.len() + 1) as u32 * 12;
    let second_level_pages_offset: u32 =
        align(lsdas_offset + lsdas.len() as u32 * 8, PAGE_SIZE as u32);
    let final_size: u32 =
        second_level_pages_offset + second_level_pages.len() as u32 * PAGE_SIZE as u32;

    // Validate that we have strictly monotonically increasing addresses,
    // and build the first-level entries.
    let mut cur_address = 0;
    for (idx, page) in regular_entries
        .iter()
        .chain(compressed_entries.iter())
        .enumerate()
    {
        let first_address = page.first().unwrap().0;
        let page_offset = second_level_pages_offset + PAGE_SIZE as u32 * idx as u32;
        first_entries.push((first_address, page_offset, lsdas_offset));
        for &(address, _) in page {
            assert!(address > cur_address);
            cur_address = address;
        }
    }
    assert_eq!(second_level_pages.len(), first_entries.len());
    // Push the null page into our first_entries
    first_entries.push((cur_address + 1, 0, 0));

    ///////////////////////////////////////////////////////
    //                 Emit the binary                    //
    ///////////////////////////////////////////////////////

    let offset = &mut 0;
    let mut section = vec![0u8; final_size as usize];

    // Write the header
    section.gwrite(1u32, offset)?;
    section.gwrite(global_opcodes_offset, offset)?;
    section.gwrite(global_opcodes.len() as u32, offset)?;
    section.gwrite(personalities_offset, offset)?;
    section.gwrite(personalities.len() as u32, offset)?;
    section.gwrite(first_entries_offset, offset)?;
    section.gwrite(first_entries.len() as u32, offset)?;

    // Write the arrays, double-checking each lands at its computed offset
    assert_eq!(*offset as u32, global_opcodes_offset);
    for &opcode in &global_opcodes {
        section.gwrite(opcode, offset)?;
    }
    assert_eq!(*offset as u32, personalities_offset);
    for &personality in &personalities {
        section.gwrite(personality, offset)?;
    }
    assert_eq!(*offset as u32, first_entries_offset);
    for &entry in &first_entries {
        section.gwrite(entry.0, offset)?;
        section.gwrite(entry.1, offset)?;
        section.gwrite(entry.2, offset)?;
    }
    assert_eq!(*offset as u32, lsdas_offset);
    for &lsda in &lsdas {
        section.gwrite(lsda.0, offset)?;
        section.gwrite(lsda.1, offset)?;
    }

    // Write the pages (skip ahead past the alignment padding)
    *offset = second_level_pages_offset as usize;
    for second_level_page in &second_level_pages {
        for byte in second_level_page {
            section.gwrite(byte, offset)?;
        }
    }

    ///////////////////////////////////////////////////////
    //           Test that everything roundtrips          //
    ///////////////////////////////////////////////////////

    let mut iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;
    let mut orig_entries = regular_entries
        .iter()
        .chain(compressed_entries.iter())
        .flatten();
    while let (Some(entry), Some((orig_address, orig_opcode))) =
        (iter.next()?, orig_entries.next())
    {
        assert_eq!(entry.instruction_address, *orig_address);
        assert_eq!(entry.opcode.0, *orig_opcode);
    }

    // Confirm both were completely exhausted at the same time
    assert!(iter.next()?.is_none());
    assert_eq!(orig_entries.next(), None);

    Ok(())
}
#[test]
// Decode x86 (32-bit) compact-unwind opcodes into CFI instruction sequences
// and check the emitted register/offset expectations for the rbp-frame and
// stack-immediate modes, including their edge cases.
fn test_compact_opcodes_x86() -> Result<(), MachError> {
    // Make an empty but valid section to initialize the CompactUnwindInfoIter
    let pointer_size = 4;
    // Bit positions within the 32-bit opcode (below the 8-bit mode field):
    // register count for frameless mode sits at bits [10..13], the
    // frameless stack size at bits [16..24].
    let frameless_reg_count_offset = 32 - 8 - 8 - 3 - 3;
    let stack_size_offset = 32 - 8 - 8;
    let offset = &mut 0;
    let mut section = vec![0u8; 1024];
    // Just set the version, everything else is 0
    section.gwrite(1u32, offset)?;
    let iter = CompactUnwindInfoIter::new(&section, true, Arch::X86)?;

    // Check that the null opcode is handled reasonably
    {
        let opcode = Opcode(0);
        assert!(matches!(opcode.instructions(&iter), CompactUnwindOp::None));
    }

    // Check that dwarf opcodes work: low 24 bits are an eh_frame offset
    {
        let opcode = Opcode(X86_MODE_DWARF | 0x00123456);
        assert!(matches!(
            opcode.instructions(&iter),
            CompactUnwindOp::UseDwarfFde {
                offset_in_eh_frame: 0x00123456
            }
        ));
    }

    // Check that rbp opcodes work
    {
        // Simple, no general registers to restore
        let stack_size: i32 = 0xa1;
        let registers = [0, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        // Frame mode always yields the CFA/frame-pointer/return-address triple
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // One general register to restore
        let stack_size: i32 = 0x13;
        let registers = [1, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            // Saved registers sit `stack_size` slots below the frame pointer
            // (which itself is at cfa - 2 slots)
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // All general register slots used
        let stack_size: i32 = 0xc2;
        let registers = [2, 3, 4, 5, 6];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            // Each successive slot is one pointer closer to the frame pointer
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 1) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 3) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 4) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // Holes in the general registers (0 entries are skipped, but their
        // slot positions are still consumed)
        let stack_size: i32 = 0xa7;
        let registers = [2, 0, 4, 0, 6];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 4) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Check that stack-immediate opcodes work
    {
        // Simple, no general registers to restore
        let stack_size: i32 = 0xa1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 0;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        // Frameless mode: the CFA is stack-pointer-relative instead
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // One general register to restore
        let stack_size: i32 = 0x13;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 1;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 0, 0, 1];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            // Saved registers sit immediately below the return address
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // All general register slots used; note the expected restore order is
        // the reverse of the packed array
        let stack_size: i32 = 0xc1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 6;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [1, 2, 3, 4, 5, 6];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -5 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -6 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -7 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // Some general registers
        let stack_size: i32 = 0xf1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 3;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 2, 4, 6];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Check that stack-indirect opcodes work (feature unimplemented)
    {
        let _opcode = Opcode(X86_MODE_STACK_IND);
        // ... tests
    }

    Ok(())
}
#[test]
// Same coverage as `test_compact_opcodes_x86`, but for x86-64: 8-byte
// pointers and the Amd64 architecture selector. Bit layouts of the opcodes
// are identical to the 32-bit case.
fn test_compact_opcodes_x64() -> Result<(), MachError> {
    // Make an empty but valid section to initialize the CompactUnwindInfoIter
    let pointer_size = 8;
    // Bit positions within the 32-bit opcode (below the 8-bit mode field)
    let frameless_reg_count_offset = 32 - 8 - 8 - 3 - 3;
    let stack_size_offset = 32 - 8 - 8;
    let offset = &mut 0;
    let mut section = vec![0u8; 1024];
    // Just set the version, everything else is 0
    section.gwrite(1u32, offset)?;
    let iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;

    // Check that the null opcode is handled reasonably
    {
        let opcode = Opcode(0);
        assert!(matches!(opcode.instructions(&iter), CompactUnwindOp::None));
    }

    // Check that dwarf opcodes work: low 24 bits are an eh_frame offset
    {
        let opcode = Opcode(X86_MODE_DWARF | 0x00123456);
        assert!(matches!(
            opcode.instructions(&iter),
            CompactUnwindOp::UseDwarfFde {
                offset_in_eh_frame: 0x00123456
            }
        ));
    }

    // Check that rbp opcodes work
    {
        // Simple, no general registers to restore
        let stack_size: i32 = 0xa1;
        let registers = [0, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        // Frame mode always yields the CFA/frame-pointer/return-address triple
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // One general register to restore
        let stack_size: i32 = 0x13;
        let registers = [1, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            // Saved registers sit `stack_size` slots below the frame pointer
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // All general register slots used
        let stack_size: i32 = 0xc2;
        let registers = [2, 3, 4, 5, 6];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            // Each successive slot is one pointer closer to the frame pointer
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 1) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 3) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 4) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // Holes in the general registers (0 entries are skipped, but their
        // slot positions are still consumed)
        let stack_size: i32 = 0xa7;
        let registers = [2, 0, 4, 0, 6];
        let opcode = Opcode(
            X86_MODE_RBP_FRAME
                | pack_x86_rbp_registers(registers)
                | (stack_size as u32) << stack_size_offset,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::frame_pointer(),
                offset_from_src: 2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::frame_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 2) * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -(stack_size + 2 - 4) * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Check that stack-immediate opcodes work
    {
        // Simple, no general registers to restore
        let stack_size: i32 = 0xa1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 0;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 0, 0, 0];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        // Frameless mode: the CFA is stack-pointer-relative instead
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // One general register to restore
        let stack_size: i32 = 0x13;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 1;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 0, 0, 1];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            // Saved registers sit immediately below the return address
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // All general register slots used; note the expected restore order is
        // the reverse of the packed array
        let stack_size: i32 = 0xc1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 6;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [1, 2, 3, 4, 5, 6];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -5 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -6 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -7 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }
    {
        // Some general registers
        let stack_size: i32 = 0xf1;
        let packed_stack_size = (stack_size as u32) << stack_size_offset;
        let num_regs = 3;
        let packed_num_regs = num_regs << frameless_reg_count_offset;
        let registers = [0, 0, 0, 2, 4, 6];
        let opcode = Opcode(
            X86_MODE_STACK_IMMD
                | pack_x86_stackless_registers(num_regs, registers)
                | packed_num_regs
                | packed_stack_size,
        );
        let expected = vec![
            CompactCfiOp::RegisterIs {
                dest_reg: CompactCfiRegister::cfa(),
                src_reg: CompactCfiRegister::stack_pointer(),
                offset_from_src: stack_size * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::instruction_pointer(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -2 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -3 * pointer_size,
            },
            CompactCfiOp::RegisterAt {
                dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                src_reg: CompactCfiRegister::cfa(),
                offset_from_src: -4 * pointer_size,
            },
        ];
        match opcode.instructions(&iter) {
            CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
            _ => unreachable!(),
        }
    }

    // Check that stack-indirect opcodes work (feature unimplemented)
    {
        let _opcode = Opcode(X86_MODE_STACK_IND);
        // ... tests
    }

    Ok(())
}
#[test]
fn test_compact_opcodes_arm64() -> Result<(), MachError> {
// Make an empty but valid section to initialize the CompactUnwindInfoIter
let pointer_size = 8;
let frameless_stack_size_offset = 32 - 8 - 12;
let offset = &mut 0;
let mut section = vec![0u8; 1024];
// Just set the version, everything else is 0
section.gwrite(1u32, offset)?;
let iter = CompactUnwindInfoIter::new(§ion, true, Arch::Arm64)?;
// Check that the null opcode is handled reasonably
{
let opcode = Opcode(0);
assert!(matches!(opcode.instructions(&iter), CompactUnwindOp::None));
}
// Check that dwarf opcodes work
{
let opcode = Opcode(ARM64_MODE_DWARF | 0x00123456);
assert!(matches!(
opcode.instructions(&iter),
CompactUnwindOp::UseDwarfFde {
offset_in_eh_frame: 0x00123456
}
));
}
// Check that frame opcodes work
{
// Simple, no general registers to restore
let registers = 0b0_0000_0000;
let opcode = Opcode(ARM64_MODE_FRAME | registers);
let expected = vec![
CompactCfiOp::RegisterIs {
dest_reg: CompactCfiRegister::cfa(),
src_reg: CompactCfiRegister::frame_pointer(),
offset_from_src: 2 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::frame_pointer(),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -2 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::instruction_pointer(),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -pointer_size,
},
];
match opcode.instructions(&iter) {
CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
_ => unreachable!(),
}
}
{
// One general register pair to restore
let registers = 0b0_0100_0000;
let opcode = Opcode(ARM64_MODE_FRAME | registers);
let expected = vec![
CompactCfiOp::RegisterIs {
dest_reg: CompactCfiRegister::cfa(),
src_reg: CompactCfiRegister::frame_pointer(),
offset_from_src: 2 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::frame_pointer(),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -2 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::instruction_pointer(),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 12),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -3 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 13),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -4 * pointer_size,
},
];
match opcode.instructions(&iter) {
CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
_ => unreachable!(),
}
}
{
// All general purpose registers restored
let registers = 0b1_1111_1111;
let opcode = Opcode(ARM64_MODE_FRAME | registers);
let expected = vec![
CompactCfiOp::RegisterIs {
dest_reg: CompactCfiRegister::cfa(),
src_reg: CompactCfiRegister::frame_pointer(),
offset_from_src: 2 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::frame_pointer(),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -2 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::instruction_pointer(),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -3 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 1),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -4 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 2),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -5 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 3),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -6 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 4),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -7 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 5),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -8 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 6),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -9 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 7),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -10 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 8),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -11 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 9),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -12 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 10),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -13 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 11),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -14 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 12),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -15 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 13),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -16 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 14),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -17 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 15),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -18 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 16),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -19 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 17),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -20 * pointer_size,
},
];
match opcode.instructions(&iter) {
CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
_ => unreachable!(),
}
}
{
// Holes between the registers
let registers = 0b1_0101_0101;
let opcode = Opcode(ARM64_MODE_FRAME | registers);
let expected = vec![
CompactCfiOp::RegisterIs {
dest_reg: CompactCfiRegister::cfa(),
src_reg: CompactCfiRegister::frame_pointer(),
offset_from_src: 2 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::frame_pointer(),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -2 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::instruction_pointer(),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -3 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 1),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -4 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 4),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -5 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 5),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -6 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 8),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -7 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 9),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -8 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 12),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -9 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 13),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -10 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 16),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -11 * pointer_size,
},
CompactCfiOp::RegisterAt {
dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 17),
src_reg: CompactCfiRegister::cfa(),
offset_from_src: -12 * pointer_size,
},
];
match opcode.instructions(&iter) {
CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
_ => unreachable!(),
}
}
// Check that the frameless opcode works
{
let stack_size = 0xae1;
let packed_stack_size = stack_size << frameless_stack_size_offset;
let opcode = Opcode(ARM64_MODE_FRAMELESS | packed_stack_size);
let expected = vec![
CompactCfiOp::RegisterIs {
dest_reg: CompactCfiRegister::cfa(),
src_reg: CompactCfiRegister::stack_pointer(),
offset_from_src: stack_size as i32 * 16,
},
CompactCfiOp::RegisterIs {
dest_reg: CompactCfiRegister::instruction_pointer(),
src_reg: CompactCfiRegister::link_register(),
offset_from_src: 0,
},
];
match opcode.instructions(&iter) {
CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
_ => unreachable!(),
}
}
Ok(())
}
#[test]
fn test_compact_register_naming() -> Result<(), MachError> {
// Just guarding against special register names breaking
let offset = &mut 0;
let mut section = vec![0u8; 1024];
// Just set the version, everything else is 0
section.gwrite(1u32, offset)?;
{
// ARM64 register names
let iter = CompactUnwindInfoIter::new(§ion, true, Arch::Arm64)?;
assert_eq!(CompactCfiRegister::cfa().name(&iter), Some("cfa"));
assert_eq!(CompactCfiRegister::stack_pointer().name(&iter), Some("sp"));
assert_eq!(
CompactCfiRegister::instruction_pointer().name(&iter),
Some("pc")
);
assert_eq!(CompactCfiRegister::frame_pointer().name(&iter), Some("x29"));
assert_eq!(CompactCfiRegister::link_register().name(&iter), Some("x30"));
}
{
// x86 register names
let iter = CompactUnwindInfoIter::new(§ion, true, Arch::X86)?;
assert_eq!(CompactCfiRegister::cfa().name(&iter), Some("cfa"));
assert_eq!(CompactCfiRegister::stack_pointer().name(&iter), Some("esp"));
assert_eq!(
CompactCfiRegister::instruction_pointer().name(&iter),
Some("eip")
);
assert_eq!(CompactCfiRegister::frame_pointer().name(&iter), Some("ebp"));
}
{
// x64 register names
let iter = CompactUnwindInfoIter::new(§ion, true, Arch::Amd64)?;
assert_eq!(CompactCfiRegister::cfa().name(&iter), Some("cfa"));
assert_eq!(CompactCfiRegister::stack_pointer().name(&iter), Some("rsp"));
assert_eq!(
CompactCfiRegister::instruction_pointer().name(&iter),
Some("rip")
);
assert_eq!(CompactCfiRegister::frame_pointer().name(&iter), Some("rbp"));
}
Ok(())
}
}
| 39.805195 | 188 | 0.556544 |
9c91d3215c0b3953e23c028e6ce0c7aa8e3af5b1
| 21,041 |
use super::{
has_comments::HasComments,
helpers::{group, space},
name::{gen_name, gen_qualified_name, gen_qualified_proper_name, gen_unused_name},
r#type::gen_type,
syntax::{gen_braces_list, gen_brackets_list, gen_parens, gen_parens_list, gen_parens_list1},
token::{
gen_close_brace, gen_colon, gen_do_keyword, gen_dot, gen_else_keyword, gen_equals,
gen_false_keyword, gen_if_keyword, gen_left_arrow, gen_match_keyword, gen_open_brace,
gen_pipe, gen_return_keyword, gen_right_arrow, gen_right_pizza_operator, gen_semicolon,
gen_string_token, gen_then_keyword, gen_true_keyword, gen_unit_keyword, gen_with_keyword,
},
};
use ditto_cst::{
BinOp, Effect, Expression, FunctionParameter, MatchArm, Pattern, RecordField, StringToken,
TypeAnnotation,
};
use dprint_core::formatting::{
condition_helpers, conditions, ir_helpers, ConditionResolver, ConditionResolverContext, Info,
PrintItems, Signal,
};
use std::rc::Rc;
/// Render a CST [`Expression`] into dprint [`PrintItems`].
///
/// This is the main dispatch point of the expression formatter: each CST
/// variant is turned into a sequence of print items (text, signals, infos
/// and conditions) which dprint later resolves into final text.
pub fn gen_expression(expr: Expression) -> PrintItems {
    match expr {
        // TODO remove redundant parens?
        Expression::Parens(parens) => gen_parens(parens, |box expr| gen_expression(expr)),
        // Keyword/atom expressions delegate straight to their token printers.
        Expression::True(keyword) => gen_true_keyword(keyword),
        Expression::False(keyword) => gen_false_keyword(keyword),
        Expression::Unit(keyword) => gen_unit_keyword(keyword),
        Expression::Constructor(constructor) => gen_qualified_proper_name(constructor),
        Expression::Variable(variable) => gen_qualified_name(variable),
        Expression::Float(token) => gen_string_token(token),
        Expression::Int(token) => gen_string_token(token),
        // String literals are stored unquoted in the CST, so the quotes are
        // re-added here before printing.
        Expression::String(token) => gen_string_token(StringToken {
            span: token.span,
            leading_comments: token.leading_comments,
            trailing_comment: token.trailing_comment,
            value: format!("\"{}\"", token.value),
        }),
        Expression::Array(brackets) => gen_brackets_list(brackets, |box expr| {
            ir_helpers::new_line_group(gen_expression(expr))
        }),
        Expression::If {
            if_keyword,
            box condition,
            then_keyword,
            box true_clause,
            else_keyword,
            box false_clause,
        } => {
            // NOTE that we insert this start info _after_ the `if` keyword
            // because we don't want to force multi-line layout for
            //
            // ```ditto
            // -- comment
            // if true then yes else no
            // ```
            let start_info = Info::new("start");
            let end_info = Info::new("end");
            let force_use_new_lines = if_keyword.0.has_trailing_comment();
            // Resolves to `true` (multi-line layout) when the `if` keyword has
            // a trailing comment, otherwise defers to whether the printed span
            // between start/end turned out to occupy multiple lines.
            let is_multiple_lines: ConditionResolver =
                Rc::new(move |ctx: &mut ConditionResolverContext| -> Option<bool> {
                    if force_use_new_lines {
                        return Some(true);
                    }
                    condition_helpers::is_multiple_lines(ctx, &start_info, &end_info)
                });
            let mut items: PrintItems = conditions::if_true_or(
                "multiLineConditionalIfMultipleLines",
                is_multiple_lines,
                {
                    // Multiline
                    //
                    // ```ditto
                    // if true then
                    //     yes
                    // else if true then
                    //     yes_again
                    // else
                    //     no
                    // ```
                    let mut items = PrintItems::new();
                    items.extend(gen_if_keyword(if_keyword.clone()));
                    items.push_info(start_info);
                    items.extend(space());
                    items.extend(gen_expression(condition.clone()));
                    items.extend(space());
                    items.extend(gen_then_keyword(then_keyword.clone()));
                    items.push_signal(Signal::NewLine);
                    items.extend(ir_helpers::with_indent(gen_expression(true_clause.clone())));
                    items.push_signal(Signal::ExpectNewLine);
                    items.extend(gen_else_keyword(else_keyword.clone()));
                    // `else if` chains stay on the same line as the `else`.
                    if matches!(false_clause, Expression::If { .. }) {
                        items.extend(space());
                        items.extend(gen_expression(false_clause.clone()));
                    } else {
                        items.push_signal(Signal::NewLine);
                        items.extend(ir_helpers::with_indent(gen_expression(
                            false_clause.clone(),
                        )));
                    }
                    items
                },
                {
                    // Inline
                    //
                    // ```ditto
                    // if true then 5 else 5
                    // ```
                    let mut items = PrintItems::new();
                    items.extend(gen_if_keyword(if_keyword));
                    items.push_info(start_info);
                    items.push_signal(Signal::SpaceOrNewLine);
                    items.extend(gen_expression(condition));
                    items.push_signal(Signal::SpaceOrNewLine);
                    items.extend(gen_then_keyword(then_keyword));
                    items.push_signal(Signal::SpaceOrNewLine);
                    items.extend(gen_expression(true_clause));
                    items.push_signal(Signal::SpaceOrNewLine);
                    items.extend(gen_else_keyword(else_keyword));
                    items.push_signal(Signal::SpaceOrNewLine);
                    items.extend(gen_expression(false_clause));
                    items
                },
            )
            .into();
            items.push_info(end_info);
            items
        }
        // `do { ... }` blocks: the effect body is always indented and the
        // closing brace always lands on its own line.
        Expression::Effect {
            do_keyword,
            open_brace,
            effect,
            close_brace,
        } => {
            let mut items = PrintItems::new();
            items.extend(gen_do_keyword(do_keyword));
            items.extend(space());
            items.extend(gen_open_brace(open_brace));
            let mut effect_items = PrintItems::new();
            gen_effect(effect, &mut effect_items);
            items.extend(ir_helpers::with_indent(effect_items));
            items.push_signal(Signal::ExpectNewLine);
            items.extend(gen_close_brace(close_brace));
            items
        }
        Expression::Function {
            box parameters,
            box return_type_annotation,
            right_arrow,
            box body,
        } => {
            let mut items = PrintItems::new();
            // Parameter list: each parameter is a (name, optional annotation).
            items.extend(gen_parens_list(parameters, |(param, type_annotation)| {
                let mut items = PrintItems::new();
                match param {
                    FunctionParameter::Name(name) => {
                        items.extend(gen_name(name));
                    }
                    FunctionParameter::Unused(unused_name) => {
                        items.extend(gen_unused_name(unused_name));
                    }
                }
                if let Some(type_annotation) = type_annotation {
                    items.extend(gen_type_annotation(type_annotation));
                }
                items
            }));
            if let Some(return_type_annotation) = return_type_annotation {
                items.extend(gen_type_annotation(return_type_annotation));
            }
            items.extend(space());
            // A comment trailing the `->` forces the body onto a new line.
            let right_arrow_has_trailing_comment = right_arrow.0.has_trailing_comment();
            items.extend(gen_right_arrow(right_arrow));
            items.extend(gen_body_expression(body, right_arrow_has_trailing_comment));
            items
        }
        Expression::Call {
            box function,
            arguments,
        } => {
            let mut items = PrintItems::new();
            items.extend(gen_expression(function));
            items.extend(gen_parens_list(arguments, |box expr| {
                ir_helpers::new_line_group(gen_expression(expr))
            }));
            items
        }
        Expression::Match {
            match_keyword,
            box expression,
            with_keyword,
            box head_arm,
            tail_arms,
        } => {
            let mut items = PrintItems::new();
            // REVIEW: do we want to support an inline format for single-arm matches?
            //
            // e.g. `match x with | foo -> bar`
            //
            // If so, we should probably make that leading `|` optional in the parser
            // like we do for type declarations.
            items.extend(gen_match_keyword(match_keyword));
            items.extend(space());
            items.extend(gen_expression(expression));
            items.extend(space());
            items.extend(gen_with_keyword(with_keyword));
            items.extend(gen_match_arm(head_arm));
            for match_arm in tail_arms {
                items.extend(gen_match_arm(match_arm));
            }
            items
        }
        // Pipe (`|>`) chains: each right-hand side starts on a fresh line.
        Expression::BinOp {
            box lhs,
            operator: BinOp::RightPizza(right_pizza_operator),
            box rhs,
        } => {
            let mut items = PrintItems::new();
            items.extend(gen_expression(lhs));
            items.push_signal(Signal::ExpectNewLine);
            items.extend(gen_right_pizza_operator(right_pizza_operator));
            items.extend(space());
            items.extend(gen_expression(rhs));
            items
        }
        Expression::RecordAccess {
            box target,
            dot,
            label,
        } => {
            let mut items = PrintItems::new();
            items.extend(gen_expression(target));
            items.extend(gen_dot(dot));
            items.extend(gen_name(label));
            items
        }
        // Record literals: `{ label = value, ... }`, one field per closure call.
        Expression::Record(braces) => gen_braces_list(
            braces,
            |RecordField {
                 label,
                 equals,
                 box value,
             }| {
                let mut items = PrintItems::new();
                items.extend(gen_name(label));
                items.extend(space());
                items.extend(gen_equals(equals));
                let force_use_new_lines = value.has_leading_comments();
                items.extend(group(gen_expression(value), force_use_new_lines));
                items
            },
        ),
    }
}
/// Render an [`Effect`] chain (the body of a `do { ... }` block) into `items`.
///
/// Each statement in the chain starts on its own line; `Bind` and
/// `Expression`-with-rest recurse into the remainder of the chain.
fn gen_effect(effect: Effect, items: &mut PrintItems) {
    // Every effect statement begins on a fresh line inside the `do` block.
    items.push_signal(Signal::ExpectNewLine);
    match effect {
        // `return expr` terminates the chain.
        Effect::Return {
            return_keyword,
            box expression,
        } => {
            items.extend(gen_return_keyword(return_keyword));
            items.extend(space());
            items.extend(gen_expression(expression));
        }
        // `name <- expr; rest`
        Effect::Bind {
            name,
            left_arrow,
            box expression,
            semicolon,
            box rest,
        } => {
            items.extend(gen_name(name));
            items.extend(space());
            // A comment after `<-` or before the expression forces the bound
            // expression onto its own (indented) line.
            let force_use_newlines =
                left_arrow.0.has_trailing_comment() || expression.has_leading_comments();
            items.extend(gen_left_arrow(left_arrow));
            items.extend(gen_body_expression(expression, force_use_newlines));
            items.extend(gen_semicolon(semicolon));
            gen_effect(rest, items)
        }
        // A bare expression, optionally followed by `; rest`.
        Effect::Expression {
            box expression,
            rest,
        } => {
            items.extend(gen_expression(expression));
            if let Some((semicolon, box rest)) = rest {
                items.extend(gen_semicolon(semicolon));
                gen_effect(rest, items)
            }
        }
    }
}
/// Render a single `| pattern -> expression` arm of a `match` expression.
fn gen_match_arm(arm: MatchArm) -> PrintItems {
    let mut out = PrintItems::new();
    // Every arm starts on its own line.
    out.push_signal(Signal::ExpectNewLine);
    out.extend(gen_pipe(arm.pipe));
    out.extend(space());
    out.extend(gen_pattern(arm.pattern));
    out.extend(space());
    // Capture this before the arrow token is consumed: a trailing comment on
    // `->` forces the arm body onto a new line.
    let arrow_has_trailing_comment = arm.right_arrow.0.has_trailing_comment();
    out.extend(gen_right_arrow(arm.right_arrow));
    out.extend(gen_body_expression(*arm.expression, arrow_has_trailing_comment));
    out
}
/// Render a `match` pattern.
fn gen_pattern(pattern: Pattern) -> PrintItems {
    match pattern {
        // Simple bindings print as their (possibly unused) name.
        Pattern::Variable { name } => gen_name(name),
        Pattern::Unused { unused_name } => gen_unused_name(unused_name),
        // A constructor with no arguments prints without parens.
        Pattern::NullaryConstructor { constructor } => gen_qualified_proper_name(constructor),
        // `Constructor(pat, ...)` — arguments recurse through gen_pattern.
        Pattern::Constructor {
            constructor,
            arguments,
        } => {
            let mut out = gen_qualified_proper_name(constructor);
            let args = gen_parens_list1(arguments, |box pattern| gen_pattern(pattern), false);
            out.extend(args);
            out
        }
    }
}
/// Generates a "body" expression, i.e. an expression on the right-hand-side
/// of an `=` or `->`.
///
/// The body is pushed onto a new (indented) line when `force_use_new_lines`
/// is set, when the expression has leading comments, or when an
/// `if`/`match`/pipe expression ends up spanning multiple lines; otherwise it
/// stays inline after the `=`/`->`.
pub fn gen_body_expression(expr: Expression, force_use_new_lines: bool) -> PrintItems {
    let mut items = PrintItems::new();
    let start_info = Info::new("start");
    let end_info = Info::new("end");
    let has_leading_comments = expr.has_leading_comments();
    // These expression kinds look better on their own line when they end up
    // being printed across multiple lines.
    let deserves_new_line_if_multi_lines = matches!(
        expr,
        Expression::If { .. }
            | Expression::Match { .. }
            | Expression::BinOp {
                operator: BinOp::RightPizza(_),
                ..
            }
    );
    let expression_should_be_on_new_line: ConditionResolver =
        Rc::new(move |ctx: &mut ConditionResolverContext| -> Option<bool> {
            if force_use_new_lines || has_leading_comments {
                return Some(true);
            }
            if deserves_new_line_if_multi_lines {
                return condition_helpers::is_multiple_lines(ctx, &start_info, &end_info);
            }
            // return Some(false);
            None // NOTE I'm not sure what the implications are of None vs Some(false) ?
        });
    items.push_condition(conditions::if_true_or(
        "bodyExpressionOnNewLine",
        expression_should_be_on_new_line,
        {
            // New-line layout: body is grouped with forced newlines.
            let mut items = PrintItems::new();
            items.push_info(start_info);
            items.extend(group(gen_expression(expr.clone()), true));
            items.push_info(end_info);
            items
        },
        {
            // Inline layout.
            let mut items = PrintItems::new();
            items.push_info(start_info);
            items.extend(group(gen_expression(expr), false));
            items.push_info(end_info);
            items
        },
    ));
    items
}
pub fn gen_type_annotation(type_annotation: TypeAnnotation) -> PrintItems {
let mut items = PrintItems::new();
items.extend(gen_colon(type_annotation.0));
items.extend(space());
items.extend(gen_type(type_annotation.1));
items
}
#[cfg(test)]
mod tests {
    // Formatting snapshot tests. `assert_fmt!(input)` checks that `input`
    // formats to itself (i.e. it is already in canonical form);
    // `assert_fmt!(input, expected)` checks the formatted output; the
    // optional trailing integer appears to set the maximum line width —
    // TODO confirm against `crate::test_macros`.
    use crate::test_macros::assert_expression_fmt as assert_fmt;
    // Empty `[]` arrays, including comment placement inside the brackets.
    #[test]
    fn it_formats_empty_arrays() {
        assert_fmt!("[]");
        assert_fmt!("[ ]", "[]");
        assert_fmt!("-- comment\n[]");
        assert_fmt!("[\n\t-- comment\n]");
        assert_fmt!("[-- comment\n ]", "[ -- comment\n]");
        assert_fmt!("[\n-- comment\n ]", "[\n\t-- comment\n]");
    }
    // Arrays that fit on one line: spacing and trailing-comma removal.
    #[test]
    fn it_formats_single_line_arrays() {
        assert_fmt!("[ true ]", "[true]");
        assert_fmt!("[ true , true ]", "[true, true]");
        assert_fmt!("[ true, true, true, ]", "[true, true, true]");
        assert_fmt!("[true,true,]", "[true, true]");
        assert_fmt!("-- comment\n[ true , true ]", "-- comment\n[true, true]");
    }
    // Arrays forced multi-line by narrow widths or comments.
    #[test]
    fn it_formats_multi_line_arrays() {
        assert_fmt!("[true,true]", "[\n\ttrue,\n\ttrue,\n]", 6);
        assert_fmt!("[true,true]", "[\n\ttrue,\n\ttrue,\n]", 11);
        assert_fmt!("[true,true]", "[true, true]", 12);
        assert_fmt!("[ -- comment\n\ttrue,\n]");
        assert_fmt!("[\n\t-- comment\n\ttrue,\n]");
        assert_fmt!(
            "[true, -- comment\ntrue]",
            "[\n\ttrue, -- comment\n\ttrue,\n]"
        );
        assert_fmt!(
            "[true,true, -- comment\n]",
            "[\n\ttrue,\n\ttrue, -- comment\n]"
        );
        assert_fmt!(
            "[ true, true, true, -- comment\n ]",
            "[\n\ttrue,\n\ttrue,\n\ttrue, -- comment\n]"
        );
    }
    // Nested arrays: inner lists wrap independently of the outer list.
    #[test]
    fn it_formats_nested_arrays() {
        assert_fmt!("[[]]");
        assert_fmt!(
            "[[true, true]]",
            "[\n\t[\n\t\ttrue,\n\t\ttrue,\n\t],\n]",
            13
        );
        assert_fmt!(
            "[[looooong], [\n--comment\n[[looooooong]]]]",
            "[\n\t[looooong],\n\t[\n\t\t--comment\n\t\t[[looooooong]],\n\t],\n]",
            5
        );
    }
    // Literal tokens pass through unchanged.
    #[test]
    fn it_formats_literals() {
        assert_fmt!("\"test\"");
        assert_fmt!("12345");
        assert_fmt!("12345.00");
    }
    // Call expressions: argument wrapping and nested calls.
    #[test]
    fn it_formats_calls() {
        assert_fmt!("foo()");
        assert_fmt!("(foo)()");
        assert_fmt!("foo()()()");
        assert_fmt!("foo(\n\t-- comment\n\ta,\n)");
        assert_fmt!(
            "foo(aaaaa, bbbbbbb, ccccccc)",
            "foo(\n\taaaaa,\n\tbbbbbbb,\n\tccccccc,\n)",
            5
        );
        assert_fmt!(
            "foo(bar(a), baz(bbbbbbb, ccccc))",
            "foo(\n\tbar(a),\n\tbaz(\n\t\tbbbbbbb,\n\t\tccccc,\n\t),\n)",
            8
        );
        assert_fmt!(
            "foo([aaaaa, bbbbbbb, ccccccc], ddddddd)",
            "foo(\n\t[\n\t\taaaaa,\n\t\tbbbbbbb,\n\t\tccccccc,\n\t],\n\tddddddd,\n)",
            8
        );
    }
    // Function literals: parameters, annotations, and body placement.
    #[test]
    fn it_formats_functions() {
        assert_fmt!("() -> foo");
        assert_fmt!(
            "(really_long_argument) -> foo",
            "(really_long_argument) ->\n\tfoo",
            20
        );
        assert_fmt!("() ->\n\t-- comment\n\tfoo");
        assert_fmt!(
            "(foo, -- comment\n) -> foo",
            "(\n\tfoo, -- comment\n) -> foo"
        );
        assert_fmt!("(): Int \n-> foo", "(): Int -> foo");
        assert_fmt!("(): Int -- comment\n -> foo");
        assert_fmt!("(a: Int): Int -> foo");
        assert_fmt!("(a: Int, b: Bool): Float -> unit");
        assert_fmt!(
            "(\n -- comment\na: Int): Int -> foo",
            "(\n\t-- comment\n\ta: Int,\n): Int -> foo"
        );
        assert_fmt!("() -> [\n\t-- comment\n]");
        assert_fmt!("() ->\n\t-- comment\n\t[5]");
        assert_fmt!("() -> if true then yeh else nah");
        assert_fmt!(
            "() -> if loooooooooong then x else y",
            "() ->\n\tif loooooooooong then\n\t\tx\n\telse\n\t\ty",
            20
        );
    }
    // `if/then/else`: inline vs multi-line layout and comment handling.
    #[test]
    fn it_formats_conditionals() {
        assert_fmt!("if true then 5 else 5");
        assert_fmt!("-- comment\nif true then 5 else 5");
        assert_fmt!("if -- comment\n true then\n\t5\nelse\n\t5");
        assert_fmt!("if true then\n\t--comment\n\t5\nelse\n\t5");
        assert_fmt!("if -- comment\n true then\n\t5\nelse\n\t5");
        assert_fmt!(
            "if true then loooooooooooooooooong else 5",
            "if true then\n\tloooooooooooooooooong\nelse\n\t5",
            20
        );
    }
    // `match ... with` arms always get their own `|`-prefixed lines.
    #[test]
    fn it_formats_matches() {
        assert_fmt!("match foo with\n| var -> 5");
        assert_fmt!("-- comment\nmatch foo with\n| var -> 5");
        assert_fmt!("match foo with\n-- comment\n| var -> 5");
        assert_fmt!("match foo with\n| a -> 5\n| b -> 5\n| c -> 5");
        assert_fmt!("match foo with\n| Foo.Bar -> -- comment\n\t5");
        assert_fmt!("match Foo with\n| Foo(a, b, c) -> a");
        assert_fmt!("match Foo with\n| Foo(\n\t--comment\n\ta,\n\tb,\n\tc,\n) -> a");
    }
    // `do { ... }` blocks: one statement per line, indented body.
    #[test]
    fn it_formats_effects() {
        assert_fmt!("do {\n\treturn 5\n}");
        assert_fmt!("do {\n\tsome_effect()\n}");
        assert_fmt!("do {\n\tx <- some_effect();\n\treturn x\n}");
        assert_fmt!("do {\n\tsome_effect();\n\treturn 5\n}");
    }
    // Pipe (`|>`) chains: each stage on its own line.
    #[test]
    fn it_formats_pipes() {
        assert_fmt!("x\n|> y");
        assert_fmt!("-- comment\nx\n|> y");
        assert_fmt!("x\n|> y\n|> z");
        assert_fmt!("(x |> y) |> z", "(\n\tx\n\t|> y\n)\n|> z");
    }
    // Record literals: inline when short, one field per line when long.
    #[test]
    fn it_formats_record_literals() {
        assert_fmt!("{}");
        assert_fmt!("{\n\t-- comment\n}");
        assert_fmt!("{ -- comment\n}");
        assert_fmt!("{ foo = true }");
        assert_fmt!("{ foo = true, bar = false, baz = () -> true }");
        assert_fmt!("{\n\t-- comment\n\tfoo = Foo,\n\tbar = Bar,\n\tbaz = {},\n}");
        assert_fmt!("{\n\t-- comment\n\tfoo =\n\t\t-- comment\n\t\tFoo,\n}");
    }
    // Record access chains print without extra spacing.
    #[test]
    fn it_formats_record_access() {
        assert_fmt!("foo.bar");
        assert_fmt!("foo.bar.baz");
    }
}
| 36.090909 | 97 | 0.522741 |
08848aa529b0090995be475e0b89c806e7f2b762
| 1,884 |
use error::Error;
/// Trait to create an instance of some type from an HTTP form. The
/// [Form](struct.Form.html) type requires that its generic parameter implements
/// this trait.
///
/// This trait can be automatically derived via the
/// [rocket_codegen](/rocket_codegen) plugin:
///
/// ```rust
/// #![feature(plugin, custom_derive)]
/// #![plugin(rocket_codegen)]
///
/// extern crate rocket;
///
/// #[derive(FromForm)]
/// struct TodoTask {
///     description: String,
///     completed: bool
/// }
/// ```
///
/// The type can then be parsed from incoming form data via the `data`
/// parameter and `Form` type.
///
/// ```rust
/// # #![feature(plugin, custom_derive)]
/// # #![plugin(rocket_codegen)]
/// # extern crate rocket;
/// # use rocket::request::Form;
/// # #[derive(FromForm)]
/// # struct TodoTask { description: String, completed: bool }
/// #[post("/submit", data = "<task>")]
/// fn submit_task(task: Form<TodoTask>) -> String {
///     format!("New task: {}", task.get().description)
/// }
/// # fn main() { }
/// ```
///
/// When deriving `FromForm`, every field in the structure must implement
/// [FromFormValue](trait.FromFormValue.html). If you implement `FromForm`
/// yourself, use the [FormItems](struct.FormItems.html) iterator to iterate
/// through the form key/value pairs.
pub trait FromForm<'f>: Sized {
    /// The associated error to be returned when parsing fails.
    type Error;
    /// Parses an instance of `Self` from a raw HTTP form string
    /// (`application/x-www-form-urlencoded` data) or returns an `Error` if one
    /// cannot be parsed.
    fn from_form_string(form_string: &'f str) -> Result<Self, Self::Error>;
}
/// This implementation should only be used during debugging!
impl<'f> FromForm<'f> for &'f str {
type Error = Error;
fn from_form_string(s: &'f str) -> Result<Self, Error> {
Ok(s)
}
}
| 30.387097 | 80 | 0.640658 |
8a0d50ef1f001a62f7b43d24e3608da421578967
| 22,896 |
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
use super::analysis;
use super::documents;
use super::documents::Document;
use super::documents::Documents;
use super::language_server;
use super::tsc;
use crate::diagnostics;
use crate::fs_util::specifier_to_file_path;
use deno_core::anyhow::anyhow;
use deno_core::error::AnyError;
use deno_core::resolve_url;
use deno_core::serde_json::json;
use deno_core::ModuleSpecifier;
use deno_runtime::tokio_util::create_basic_runtime;
use log::error;
use lspower::lsp;
use std::collections::HashMap;
use std::collections::HashSet;
use std::mem;
use std::sync::Arc;
use std::thread;
use tokio::sync::mpsc;
use tokio::sync::Mutex;
use tokio::time::sleep;
use tokio::time::Duration;
use tokio::time::Instant;
/// A single diagnostics result: the document's specifier, the document
/// version the diagnostics were produced against (if known), and the
/// diagnostics themselves.
pub type DiagnosticRecord =
  (ModuleSpecifier, Option<i32>, Vec<lsp::Diagnostic>);
/// A batch of per-document diagnostic records.
pub type DiagnosticVec = Vec<DiagnosticRecord>;
// Raw TypeScript diagnostics keyed by a string — presumably the document
// specifier string; confirm against the tsc request that produces it.
type TsDiagnosticsMap = HashMap<String, Vec<diagnostics::Diagnostic>>;
/// The origin of a set of diagnostics for a document; diagnostics from
/// different sources are stored and versioned independently.
#[derive(Debug, Hash, Clone, PartialEq, Eq)]
pub(crate) enum DiagnosticSource {
  /// Diagnostics produced by Deno itself.
  Deno,
  /// Diagnostics produced by the lint rules.
  DenoLint,
  /// Diagnostics produced by the TypeScript compiler.
  TypeScript,
}
/// In-memory store of the latest diagnostics per (document, source) pair.
#[derive(Debug, Default)]
struct DiagnosticCollection {
  // Latest diagnostics keyed by document specifier and diagnostic source.
  map: HashMap<(ModuleSpecifier, DiagnosticSource), Vec<lsp::Diagnostic>>,
  // For each document, the document version each source's diagnostics were
  // generated against (used to skip re-publishing stale results).
  versions: HashMap<ModuleSpecifier, HashMap<DiagnosticSource, i32>>,
  // Documents whose diagnostics changed since `take_changes` was last called.
  changes: HashSet<ModuleSpecifier>,
}
impl DiagnosticCollection {
  /// Iterate over the stored diagnostics for `specifier` from one `source`.
  /// Yields nothing if no diagnostics have been recorded for the pair.
  pub fn get(
    &self,
    specifier: &ModuleSpecifier,
    source: DiagnosticSource,
  ) -> impl Iterator<Item = &lsp::Diagnostic> {
    let key = (specifier.clone(), source);
    self.map.get(&key).into_iter().flatten()
  }
  /// The document version that `source`'s diagnostics for `specifier` were
  /// generated against, if one was recorded.
  pub fn get_version(
    &self,
    specifier: &ModuleSpecifier,
    source: &DiagnosticSource,
  ) -> Option<i32> {
    self
      .versions
      .get(specifier)
      .and_then(|source_versions| source_versions.get(source).copied())
  }
  /// Store a diagnostics record from `source`, remembering the document
  /// version when known and marking the specifier as changed.
  pub fn set(&mut self, source: DiagnosticSource, record: DiagnosticRecord) {
    let (specifier, maybe_version, diagnostics) = record;
    if let Some(version) = maybe_version {
      self
        .versions
        .entry(specifier.clone())
        .or_default()
        .insert(source.clone(), version);
    }
    self.map.insert((specifier.clone(), source), diagnostics);
    self.changes.insert(specifier);
  }
  /// Take the set of specifiers whose diagnostics changed since the last
  /// call, or `None` if nothing changed.
  pub fn take_changes(&mut self) -> Option<HashSet<ModuleSpecifier>> {
    if self.changes.is_empty() {
      return None;
    }
    Some(mem::take(&mut self.changes))
  }
}
/// Owns the diagnostics collection and the background worker that publishes
/// updated diagnostics to the client.
#[derive(Debug, Default)]
pub(crate) struct DiagnosticsServer {
  // Sender used to nudge the background worker; `None` until `start` is called.
  channel: Option<mpsc::UnboundedSender<()>>,
  // Shared diagnostics store, also handed to the background worker.
  collection: Arc<Mutex<DiagnosticCollection>>,
}
impl DiagnosticsServer {
  /// Get a copy of the stored diagnostics for `specifier` from `source`.
  pub(crate) async fn get(
    &self,
    specifier: &ModuleSpecifier,
    source: DiagnosticSource,
  ) -> Vec<lsp::Diagnostic> {
    self
      .collection
      .lock()
      .await
      .get(specifier, source)
      .cloned()
      .collect()
  }
  /// Drop the recorded versions for the given documents so their diagnostics
  /// get regenerated on the next update pass.
  pub(crate) async fn invalidate(&self, specifiers: Vec<ModuleSpecifier>) {
    let mut collection = self.collection.lock().await;
    for specifier in &specifiers {
      collection.versions.remove(specifier);
    }
  }
  /// Drop all recorded versions, forcing regeneration for every document.
  pub(crate) async fn invalidate_all(&self) {
    let mut collection = self.collection.lock().await;
    collection.versions.clear();
  }
  /// Spawn the background diagnostics worker: a dedicated thread running a
  /// tokio runtime that debounces `update` requests and publishes results.
  pub(crate) fn start(
    &mut self,
    language_server: Arc<Mutex<language_server::Inner>>,
    client: lspower::Client,
    ts_server: Arc<tsc::TsServer>,
  ) {
    let (tx, mut rx) = mpsc::unbounded_channel::<()>();
    self.channel = Some(tx);
    let collection = self.collection.clone();
    // NOTE(review): the join handle is intentionally discarded — the worker
    // runs for the life of the process / until the channel closes.
    let _join_handle = thread::spawn(move || {
      let runtime = create_basic_runtime();
      runtime.block_on(async {
        // Debounce timer delay. 150ms between keystrokes is about 45 WPM, so we
        // want something that is longer than that, but not too long to
        // introduce detectable UI delay; 200ms is a decent compromise.
        const DELAY: Duration = Duration::from_millis(200);
        // If the debounce timer isn't active, it will be set to expire "never",
        // which is actually just 1 year in the future.
        const NEVER: Duration = Duration::from_secs(365 * 24 * 60 * 60);
        // A flag that is set whenever something has changed that requires the
        // diagnostics collection to be updated.
        let mut dirty = false;
        let debounce_timer = sleep(NEVER);
        tokio::pin!(debounce_timer);
        loop {
          // "race" the next message off the rx queue or the debounce timer.
          // The debounce timer gets reset every time a message comes off the
          // queue. When the debounce timer expires, a snapshot of the most
          // up-to-date state is used to produce diagnostics.
          tokio::select! {
            maybe_request = rx.recv() => {
              match maybe_request {
                // channel has closed
                None => break,
                Some(_) => {
                  dirty = true;
                  debounce_timer.as_mut().reset(Instant::now() + DELAY);
                }
              }
            }
            _ = debounce_timer.as_mut(), if dirty => {
              dirty = false;
              debounce_timer.as_mut().reset(Instant::now() + NEVER);
              let snapshot = language_server.lock().await.snapshot().unwrap();
              update_diagnostics(
                &client,
                collection.clone(),
                snapshot,
                &ts_server
              ).await;
            }
          }
        }
      })
    });
  }
  /// Request a diagnostics refresh; the worker debounces bursts of requests.
  ///
  /// Returns an error if `start` has not been called yet.
  pub(crate) fn update(&self) -> Result<(), AnyError> {
    if let Some(tx) = &self.channel {
      tx.send(()).map_err(|err| err.into())
    } else {
      Err(anyhow!("diagnostics server not started"))
    }
  }
}
impl<'a> From<&'a diagnostics::DiagnosticCategory> for lsp::DiagnosticSeverity {
fn from(category: &'a diagnostics::DiagnosticCategory) -> Self {
match category {
diagnostics::DiagnosticCategory::Error => lsp::DiagnosticSeverity::Error,
diagnostics::DiagnosticCategory::Warning => {
lsp::DiagnosticSeverity::Warning
}
diagnostics::DiagnosticCategory::Suggestion => {
lsp::DiagnosticSeverity::Hint
}
diagnostics::DiagnosticCategory::Message => {
lsp::DiagnosticSeverity::Information
}
}
}
}
impl<'a> From<&'a diagnostics::Position> for lsp::Position {
  fn from(pos: &'a diagnostics::Position) -> Self {
    // LSP positions use u32 fields; the narrowing casts mirror the
    // upstream behavior (positions are assumed to fit in u32).
    let line = pos.line as u32;
    let character = pos.character as u32;
    Self { line, character }
  }
}
/// Produce a human-readable message for a diagnostic: prefer the plain
/// message text, fall back to the formatted message chain, and finally a
/// placeholder when neither is present.
fn get_diagnostic_message(diagnostic: &diagnostics::Diagnostic) -> String {
  diagnostic
    .message_text
    .clone()
    .or_else(|| {
      diagnostic
        .message_chain
        .clone()
        .map(|chain| chain.format_message(0))
    })
    .unwrap_or_else(|| "[missing message]".to_string())
}
/// Build an LSP range from a pair of TypeScript positions.
fn to_lsp_range(
  start: &diagnostics::Position,
  end: &diagnostics::Position,
) -> lsp::Range {
  let (start, end) = (lsp::Position::from(start), lsp::Position::from(end));
  lsp::Range { start, end }
}
/// Convert optional related-information diagnostics into LSP form.
///
/// Entries missing a source, start, or end are silently dropped. Returns
/// `None` only when the input itself is `None`.
fn to_lsp_related_information(
  related_information: &Option<Vec<diagnostics::Diagnostic>>,
) -> Option<Vec<lsp::DiagnosticRelatedInformation>> {
  let related = related_information.as_ref()?;
  let collected = related
    .iter()
    .filter_map(|ri| match (&ri.source, &ri.start, &ri.end) {
      (Some(source), Some(start), Some(end)) => {
        // NOTE(review): `source` is assumed to always be a parseable URL
        // here (the unwrap mirrors the original behavior) — confirm against
        // the producer of these diagnostics.
        let uri = lsp::Url::parse(source).unwrap();
        Some(lsp::DiagnosticRelatedInformation {
          location: lsp::Location {
            uri,
            range: to_lsp_range(start, end),
          },
          message: get_diagnostic_message(ri),
        })
      }
      _ => None,
    })
    .collect();
  Some(collected)
}
/// Converts tsc JSON diagnostics into LSP diagnostics tagged with the
/// `"deno-ts"` source. Diagnostics without both a start and an end position
/// are dropped.
fn ts_json_to_diagnostics(
  diagnostics: Vec<diagnostics::Diagnostic>,
) -> Vec<lsp::Diagnostic> {
  diagnostics
    .iter()
    .filter_map(|d| {
      if let (Some(start), Some(end)) = (&d.start, &d.end) {
        Some(lsp::Diagnostic {
          range: to_lsp_range(start, end),
          severity: Some((&d.category).into()),
          code: Some(lsp::NumberOrString::Number(d.code as i32)),
          code_description: None,
          source: Some("deno-ts".to_string()),
          message: get_diagnostic_message(d),
          related_information: to_lsp_related_information(
            &d.related_information,
          ),
          // Map well-known tsc error codes onto LSP diagnostic tags so
          // clients can render them (e.g. faded-out unused variables).
          tags: match d.code {
            // These are codes that indicate the variable is unused.
            2695 | 6133 | 6138 | 6192 | 6196 | 6198 | 6199 | 6205 | 7027
            | 7028 => Some(vec![lsp::DiagnosticTag::Unnecessary]),
            // These are codes that indicated the variable is deprecated.
            2789 | 6385 | 6387 => Some(vec![lsp::DiagnosticTag::Deprecated]),
            _ => None,
          },
          data: None,
        })
      } else {
        None
      }
    })
    .collect()
}
// Filters documents according to the `include` and the `exclude` lists (from `StateSnapshot::maybe_lint_config`).
// If a document is in the `exclude` list - then it be removed.
// If the `include` list is not empty, and a document is not in - then it be removed too.
fn filter_lint_documents(
  snapshot: &language_server::StateSnapshot,
  documents: &mut Vec<Document>,
) {
  // Without a lint config there is nothing to filter on.
  let lint_config = match &snapshot.maybe_lint_config {
    Some(config) => config,
    None => return,
  };
  documents.retain(|doc| {
    // Documents without a local file path cannot be matched against the
    // path-based include/exclude lists, so they are removed.
    let path = if let Ok(file_path) = specifier_to_file_path(doc.specifier()) {
      file_path
    } else {
      return false;
    };
    // Skip files which is in the exclude list.
    if lint_config
      .files
      .exclude
      .iter()
      .any(|i| path.starts_with(i))
    {
      return false;
    }
    // Early return if the include list is empty.
    if lint_config.files.include.is_empty() {
      return true;
    }
    // Ignore files not in the include list.
    lint_config
      .files
      .include
      .iter()
      .any(|i| path.starts_with(i))
  });
}
/// Generates lint diagnostics for all lint-eligible open documents whose
/// cached lint diagnostics are stale (LSP version differs from the version
/// recorded in `collection`). The actual linting runs on a spawned tokio
/// task.
async fn generate_lint_diagnostics(
  snapshot: &language_server::StateSnapshot,
  collection: Arc<Mutex<DiagnosticCollection>>,
) -> Result<DiagnosticVec, AnyError> {
  let mut documents = snapshot.documents.documents(true, true);
  let workspace_settings = snapshot.config.settings.workspace.clone();
  let maybe_lint_config = snapshot.maybe_lint_config.clone();
  filter_lint_documents(snapshot, &mut documents);
  tokio::task::spawn(async move {
    let mut diagnostics_vec = Vec::new();
    if workspace_settings.lint {
      for document in documents {
        let version = document.maybe_lsp_version();
        // Only re-lint when the stored diagnostics version is out of date.
        let current_version = collection
          .lock()
          .await
          .get_version(document.specifier(), &DiagnosticSource::DenoLint);
        if version != current_version {
          let diagnostics = match document.maybe_parsed_source() {
            Some(Ok(parsed_source)) => {
              if let Ok(references) = analysis::get_lint_references(
                &parsed_source,
                maybe_lint_config.as_ref(),
              ) {
                references
                  .into_iter()
                  .map(|r| r.to_diagnostic())
                  .collect::<Vec<_>>()
              } else {
                Vec::new()
              }
            }
            // A document that failed to parse yields no lint diagnostics
            // (the parse error is surfaced elsewhere).
            Some(Err(_)) => Vec::new(),
            None => {
              error!("Missing file contents for: {}", document.specifier());
              Vec::new()
            }
          };
          diagnostics_vec.push((
            document.specifier().clone(),
            version,
            diagnostics,
          ));
        }
      }
    }
    Ok(diagnostics_vec)
  })
  .await
  .unwrap()
}
/// Generates TypeScript diagnostics, via the tsc server, for any open
/// document whose cached diagnostics version differs from its current LSP
/// version.
async fn generate_ts_diagnostics(
  snapshot: Arc<language_server::StateSnapshot>,
  collection: Arc<Mutex<DiagnosticCollection>>,
  ts_server: &tsc::TsServer,
) -> Result<DiagnosticVec, AnyError> {
  let mut diagnostics_vec = Vec::new();
  // Collect only the specifiers that actually need refreshing; the lock on
  // the collection is held just for this snapshot of versions.
  let specifiers: Vec<ModuleSpecifier> = {
    let collection = collection.lock().await;
    snapshot
      .documents
      .documents(true, true)
      .iter()
      .filter_map(|d| {
        let version = d.maybe_lsp_version();
        let current_version =
          collection.get_version(d.specifier(), &DiagnosticSource::TypeScript);
        if version != current_version {
          Some(d.specifier().clone())
        } else {
          None
        }
      })
      .collect()
  };
  if !specifiers.is_empty() {
    let req = tsc::RequestMethod::GetDiagnostics(specifiers);
    let ts_diagnostics_map: TsDiagnosticsMap =
      ts_server.request(snapshot.clone(), req).await?;
    for (specifier_str, ts_diagnostics) in ts_diagnostics_map {
      let specifier = resolve_url(&specifier_str)?;
      // `and_then` instead of the former `map(..).flatten()`
      // (clippy: `map_flatten`).
      let version = snapshot
        .documents
        .get(&specifier)
        .and_then(|d| d.maybe_lsp_version());
      diagnostics_vec.push((
        specifier,
        version,
        ts_json_to_diagnostics(ts_diagnostics),
      ));
    }
  }
  Ok(diagnostics_vec)
}
/// Maps a `deno_graph` resolution error onto the string diagnostic code the
/// client sees.
fn resolution_error_as_code(
  err: &deno_graph::ResolutionError,
) -> lsp::NumberOrString {
  use deno_graph::ResolutionError;
  use deno_graph::SpecifierError;
  // Small helper to cut down on the repeated string conversion noise.
  let code = |name: &str| lsp::NumberOrString::String(name.to_string());
  match err {
    ResolutionError::InvalidDowngrade(_, _) => code("invalid-downgrade"),
    ResolutionError::InvalidLocalImport(_, _) => code("invalid-local-import"),
    ResolutionError::InvalidSpecifier(err, _) => match err {
      SpecifierError::ImportPrefixMissing(_, _) => {
        code("import-prefix-missing")
      }
      SpecifierError::InvalidUrl(_) => code("invalid-url"),
    },
    ResolutionError::ResolverError(_, _, _) => code("resolver-error"),
  }
}
/// Appends diagnostics for one resolved dependency slot (code or type) of a
/// module to `diagnostics`.
fn diagnose_dependency(
  diagnostics: &mut Vec<lsp::Diagnostic>,
  documents: &Documents,
  resolved: &deno_graph::Resolved,
) {
  match resolved {
    Some(Ok((specifier, range))) => {
      if let Some(doc) = documents.get(specifier) {
        // The dependency is cached; surface any warning attached to the
        // cached document (e.g. from a module registry).
        if let Some(message) = doc.maybe_warning() {
          diagnostics.push(lsp::Diagnostic {
            range: documents::to_lsp_range(range),
            severity: Some(lsp::DiagnosticSeverity::Warning),
            code: Some(lsp::NumberOrString::String("deno-warn".to_string())),
            source: Some("deno".to_string()),
            message,
            ..Default::default()
          })
        }
      } else {
        // The dependency could not be found; emit a scheme-specific
        // "not cached / missing" error so clients can offer a fix.
        let (code, message) = match specifier.scheme() {
          "file" => (Some(lsp::NumberOrString::String("no-local".to_string())), format!("Unable to load a local module: \"{}\".\n  Please check the file path.", specifier)),
          "data" => (Some(lsp::NumberOrString::String("no-cache-data".to_string())), "Uncached data URL.".to_string()),
          "blob" => (Some(lsp::NumberOrString::String("no-cache-blob".to_string())), "Uncached blob URL.".to_string()),
          _ => (Some(lsp::NumberOrString::String("no-cache".to_string())), format!("Uncached or missing remote URL: \"{}\".", specifier)),
        };
        diagnostics.push(lsp::Diagnostic {
          range: documents::to_lsp_range(range),
          severity: Some(lsp::DiagnosticSeverity::Error),
          code,
          source: Some("deno".to_string()),
          message,
          // The specifier is carried in `data` so a code action can use it.
          data: Some(json!({ "specifier": specifier })),
          ..Default::default()
        });
      }
    }
    // The specifier itself failed to resolve.
    Some(Err(err)) => diagnostics.push(lsp::Diagnostic {
      range: documents::to_lsp_range(err.range()),
      severity: Some(lsp::DiagnosticSeverity::Error),
      code: Some(resolution_error_as_code(err)),
      source: Some("deno".to_string()),
      message: err.to_string(),
      ..Default::default()
    }),
    _ => (),
  }
}
/// Generate diagnostics for dependencies of a module, attempting to resolve
/// dependencies on the local file system or in the DENO_DIR cache.
async fn generate_deps_diagnostics(
  snapshot: Arc<language_server::StateSnapshot>,
  collection: Arc<Mutex<DiagnosticCollection>>,
) -> Result<DiagnosticVec, AnyError> {
  tokio::task::spawn(async move {
    let mut diagnostics_vec = Vec::new();
    for document in snapshot.documents.documents(true, true) {
      // Dependency diagnostics are only produced for documents enabled for
      // the language server.
      if !snapshot.config.specifier_enabled(document.specifier()) {
        continue;
      }
      let version = document.maybe_lsp_version();
      // Skip documents whose cached Deno diagnostics are still current.
      let current_version = collection
        .lock()
        .await
        .get_version(document.specifier(), &DiagnosticSource::Deno);
      if version != current_version {
        let mut diagnostics = Vec::new();
        // Check both the code and the type dependency of every import.
        for (_, dependency) in document.dependencies() {
          diagnose_dependency(
            &mut diagnostics,
            &snapshot.documents,
            &dependency.maybe_code,
          );
          diagnose_dependency(
            &mut diagnostics,
            &snapshot.documents,
            &dependency.maybe_type,
          );
        }
        diagnostics_vec.push((
          document.specifier().clone(),
          version,
          diagnostics,
        ));
      }
    }
    Ok(diagnostics_vec)
  })
  .await
  .unwrap()
}
/// Publishes diagnostics to the client.
/// Publishes the changed diagnostics, merged from all sources, to the
/// client, tagged with the document's current LSP version.
async fn publish_diagnostics(
  client: &lspower::Client,
  collection: &mut DiagnosticCollection,
  snapshot: &language_server::StateSnapshot,
) {
  if let Some(changes) = collection.take_changes() {
    for specifier in changes {
      // Lint diagnostics are only included when linting is enabled in the
      // workspace settings.
      let mut diagnostics: Vec<lsp::Diagnostic> =
        if snapshot.config.settings.workspace.lint {
          collection
            .get(&specifier, DiagnosticSource::DenoLint)
            .cloned()
            .collect()
        } else {
          Vec::new()
        };
      // TypeScript and Deno diagnostics are only included for documents
      // enabled for the language server.
      if snapshot.config.specifier_enabled(&specifier) {
        diagnostics.extend(
          collection
            .get(&specifier, DiagnosticSource::TypeScript)
            .cloned(),
        );
        diagnostics
          .extend(collection.get(&specifier, DiagnosticSource::Deno).cloned());
      }
      // `and_then` instead of the former `map(..).flatten()`
      // (clippy: `map_flatten`).
      let version = snapshot
        .documents
        .get(&specifier)
        .and_then(|d| d.maybe_lsp_version());
      // `specifier` is moved here (its last use); the previous
      // `specifier.clone()` was redundant.
      client
        .publish_diagnostics(specifier, diagnostics, version)
        .await;
    }
  }
}
/// Updates diagnostics for any specifiers that don't have the correct version
/// generated and publishes the diagnostics to the client.
async fn update_diagnostics(
  client: &lspower::Client,
  collection: Arc<Mutex<DiagnosticCollection>>,
  snapshot: Arc<language_server::StateSnapshot>,
  ts_server: &tsc::TsServer,
) {
  let mark = snapshot.performance.mark("update_diagnostics", None::<()>);
  // The three diagnostic sources below are generated concurrently via
  // `tokio::join!`; each one locks the shared collection only after its
  // generation step has completed, then stores the records and publishes.
  let lint = async {
    let mark = snapshot
      .performance
      .mark("update_diagnostics_lint", None::<()>);
    let collection = collection.clone();
    // Generation failures are logged and treated as "no diagnostics".
    let diagnostics = generate_lint_diagnostics(&snapshot, collection.clone())
      .await
      .map_err(|err| {
        error!("Error generating lint diagnostics: {}", err);
      })
      .unwrap_or_default();
    let mut collection = collection.lock().await;
    for diagnostic_record in diagnostics {
      collection.set(DiagnosticSource::DenoLint, diagnostic_record);
    }
    publish_diagnostics(client, &mut collection, &snapshot).await;
    snapshot.performance.measure(mark);
  };
  let ts = async {
    let mark = snapshot
      .performance
      .mark("update_diagnostics_ts", None::<()>);
    let collection = collection.clone();
    let diagnostics =
      generate_ts_diagnostics(snapshot.clone(), collection.clone(), ts_server)
        .await
        .map_err(|err| {
          error!("Error generating TypeScript diagnostics: {}", err);
        })
        .unwrap_or_default();
    let mut collection = collection.lock().await;
    for diagnostic_record in diagnostics {
      collection.set(DiagnosticSource::TypeScript, diagnostic_record);
    }
    publish_diagnostics(client, &mut collection, &snapshot).await;
    snapshot.performance.measure(mark);
  };
  let deps = async {
    let mark = snapshot
      .performance
      .mark("update_diagnostics_deps", None::<()>);
    let collection = collection.clone();
    let diagnostics =
      generate_deps_diagnostics(snapshot.clone(), collection.clone())
        .await
        .map_err(|err| {
          error!("Error generating Deno diagnostics: {}", err);
        })
        .unwrap_or_default();
    let mut collection = collection.lock().await;
    for diagnostic_record in diagnostics {
      collection.set(DiagnosticSource::Deno, diagnostic_record);
    }
    publish_diagnostics(client, &mut collection, &snapshot).await;
    snapshot.performance.measure(mark);
  };
  tokio::join!(lint, ts, deps);
  snapshot.performance.measure(mark);
}
#[cfg(test)]
mod tests {
  use super::*;
  use crate::lsp::config::ConfigSnapshot;
  use crate::lsp::config::Settings;
  use crate::lsp::config::WorkspaceSettings;
  use crate::lsp::documents::LanguageId;
  use crate::lsp::language_server::StateSnapshot;
  use std::path::Path;
  use std::path::PathBuf;
  use tempfile::TempDir;
  /// Builds a `StateSnapshot` with the given fixture documents opened and
  /// with both the language server and linting enabled.
  fn mock_state_snapshot(
    fixtures: &[(&str, &str, i32, LanguageId)],
    location: &Path,
  ) -> StateSnapshot {
    let mut documents = Documents::new(location);
    for (specifier, source, version, language_id) in fixtures {
      let specifier =
        resolve_url(specifier).expect("failed to create specifier");
      documents.open(
        specifier.clone(),
        *version,
        language_id.clone(),
        Arc::new(source.to_string()),
      );
    }
    let config = ConfigSnapshot {
      settings: Settings {
        workspace: WorkspaceSettings {
          enable: true,
          lint: true,
          ..Default::default()
        },
        ..Default::default()
      },
      ..Default::default()
    };
    StateSnapshot {
      config,
      documents,
      ..Default::default()
    }
  }
  /// Returns a snapshot, an empty diagnostics collection and the temporary
  /// dependency-cache location for the given fixture sources.
  fn setup(
    sources: &[(&str, &str, i32, LanguageId)],
  ) -> (StateSnapshot, Arc<Mutex<DiagnosticCollection>>, PathBuf) {
    let temp_dir = TempDir::new().expect("could not create temp dir");
    let location = temp_dir.path().join("deps");
    let state_snapshot = mock_state_snapshot(sources, &location);
    let collection = Arc::new(Mutex::new(DiagnosticCollection::default()));
    (state_snapshot, collection, location)
  }
  #[tokio::test]
  async fn test_generate_lint_diagnostics() {
    let (snapshot, collection, _) = setup(&[(
      "file:///a.ts",
      r#"import * as b from "./b.ts";
      let a = "a";
      console.log(a);
      "#,
      1,
      LanguageId::TypeScript,
    )]);
    let result = generate_lint_diagnostics(&snapshot, collection).await;
    assert!(result.is_ok());
    let diagnostics = result.unwrap();
    // One document was opened, so one record is expected.
    assert_eq!(diagnostics.len(), 1);
    let (_, _, diagnostics) = &diagnostics[0];
    // The fixture is expected to yield exactly two lint findings.
    assert_eq!(diagnostics.len(), 2);
  }
}
| 30.325828 | 173 | 0.615086 |
f8889380b2abf9341ddb420215c4840b605bd766
| 11,731 |
/*!
* Methods for the various MIR types. These are intended for use after
* building is complete.
*/
use crate::mir::*;
use crate::ty::subst::Subst;
use crate::ty::{self, Ty, TyCtxt};
use crate::ty::layout::VariantIdx;
use crate::hir;
use crate::ty::util::IntTypeExt;
#[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx> {
    /// The type of the place before any downcast projection is applied.
    pub ty: Ty<'tcx>,
    /// Downcast to a particular variant of an enum, if included.
    pub variant_index: Option<VariantIdx>,
}
// At least on 64 bit systems, `PlaceTy` should not be larger than two or three pointers.
#[cfg(target_arch = "x86_64")]
static_assert_size!(PlaceTy<'_>, 16);
impl<'tcx> PlaceTy<'tcx> {
    /// Creates a `PlaceTy` with no downcast variant selected.
    pub fn from_ty(ty: Ty<'tcx>) -> PlaceTy<'tcx> {
        PlaceTy { ty, variant_index: None }
    }
    /// `place_ty.field_ty(tcx, f)` computes the type at a given field
    /// of a record or enum-variant. (Most clients of `PlaceTy` can
    /// instead just extract the relevant type directly from their
    /// `PlaceElem`, but some instances of `ProjectionElem<V, T>` do
    /// not carry a `Ty` for `T`.)
    ///
    /// Note that the resulting type has not been normalized.
    pub fn field_ty(self, tcx: TyCtxt<'tcx>, f: &Field) -> Ty<'tcx> {
        let answer = match self.ty.sty {
            ty::Adt(adt_def, substs) => {
                // For enums a prior downcast projection selects the variant;
                // structs/unions use their single (non-enum) variant.
                let variant_def = match self.variant_index {
                    None => adt_def.non_enum_variant(),
                    Some(variant_index) => {
                        assert!(adt_def.is_enum());
                        &adt_def.variants[variant_index]
                    }
                };
                let field_def = &variant_def.fields[f.index()];
                field_def.ty(tcx, substs)
            }
            ty::Tuple(ref tys) => tys[f.index()].expect_ty(),
            _ => bug!("extracting field of non-tuple non-adt: {:?}", self),
        };
        debug!("field_ty self: {:?} f: {:?} yields: {:?}", self, f, answer);
        answer
    }
    /// Convenience wrapper around `projection_ty_core` for
    /// `PlaceElem`, where we can just use the `Ty` that is already
    /// stored inline on field projection elems.
    pub fn projection_ty(self, tcx: TyCtxt<'tcx>, elem: &PlaceElem<'tcx>) -> PlaceTy<'tcx> {
        self.projection_ty_core(tcx, ty::ParamEnv::empty(), elem, |_, _, ty| ty)
    }
    /// `place_ty.projection_ty_core(tcx, elem, |...| { ... })`
    /// projects `place_ty` onto `elem`, returning the appropriate
    /// `Ty` or downcast variant corresponding to that projection.
    /// The `handle_field` callback must map a `Field` to its `Ty`,
    /// (which should be trivial when `T` = `Ty`).
    pub fn projection_ty_core<V, T>(
        self,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        elem: &ProjectionElem<V, T>,
        mut handle_field: impl FnMut(&Self, &Field, &T) -> Ty<'tcx>,
    ) -> PlaceTy<'tcx>
    where
        V: ::std::fmt::Debug,
        T: ::std::fmt::Debug,
    {
        let answer = match *elem {
            ProjectionElem::Deref => {
                let ty = self.ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| {
                        bug!("deref projection of non-dereferencable ty {:?}", self)
                    })
                    .ty;
                PlaceTy::from_ty(ty)
            }
            ProjectionElem::Index(_) | ProjectionElem::ConstantIndex { .. } =>
                PlaceTy::from_ty(self.ty.builtin_index().unwrap()),
            ProjectionElem::Subslice { from, to } => {
                PlaceTy::from_ty(match self.ty.sty {
                    ty::Array(inner, size) => {
                        // Subslicing an array yields an array shortened by
                        // the elements trimmed from both ends.
                        let size = size.eval_usize(tcx, param_env);
                        let len = size - (from as u64) - (to as u64);
                        tcx.mk_array(inner, len)
                    }
                    // Subslicing a slice keeps the (unsized) slice type.
                    ty::Slice(..) => self.ty,
                    _ => {
                        bug!("cannot subslice non-array type: `{:?}`", self)
                    }
                })
            }
            ProjectionElem::Downcast(_name, index) =>
                PlaceTy { ty: self.ty, variant_index: Some(index) },
            ProjectionElem::Field(ref f, ref fty) =>
                PlaceTy::from_ty(handle_field(&self, f, fty)),
        };
        debug!("projection_ty self: {:?} elem: {:?} yields: {:?}", self, elem, answer);
        answer
    }
}
// Derive `TypeFoldable` for `PlaceTy`, folding/visiting both fields.
BraceStructTypeFoldableImpl! {
    impl<'tcx> TypeFoldable<'tcx> for PlaceTy<'tcx> {
        ty,
        variant_index,
    }
}
impl<'tcx> Place<'tcx> {
    /// Computes the `PlaceTy` of a place given as a base plus a projection
    /// chain, by starting from the base type and applying each projection
    /// element in order.
    pub fn ty_from<D>(
        base: &PlaceBase<'tcx>,
        projection: &Option<Box<Projection<'tcx>>>,
        local_decls: &D,
        tcx: TyCtxt<'tcx>
    ) -> PlaceTy<'tcx>
        where D: HasLocalDecls<'tcx>
    {
        Place::iterate_over(base, projection, |place_base, place_projections| {
            let mut place_ty = place_base.ty(local_decls);
            for proj in place_projections {
                place_ty = place_ty.projection_ty(tcx, &proj.elem);
            }
            place_ty
        })
    }
    /// Computes the `PlaceTy` of this place.
    pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
    where
        D: HasLocalDecls<'tcx>,
    {
        Place::ty_from(&self.base, &self.projection, local_decls, tcx)
    }
}
impl<'tcx> PlaceBase<'tcx> {
    /// Computes the `PlaceTy` of this place base (a local's declared type
    /// or a static's type), with no downcast variant selected.
    pub fn ty<D>(&self, local_decls: &D) -> PlaceTy<'tcx>
        where D: HasLocalDecls<'tcx>
    {
        let ty = match self {
            PlaceBase::Local(index) => local_decls.local_decls()[*index].ty,
            PlaceBase::Static(data) => data.ty,
        };
        PlaceTy::from_ty(ty)
    }
}
/// How much of the destination an rvalue initializes.
pub enum RvalueInitializationState {
    /// Only partially initialized (used for `NullOp::Box` below, where the
    /// pointed-to memory is not yet initialized).
    Shallow,
    /// Fully initialized.
    Deep
}
impl<'tcx> Rvalue<'tcx> {
    /// Computes the type that evaluating this rvalue produces.
    pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
    where
        D: HasLocalDecls<'tcx>,
    {
        match *self {
            Rvalue::Use(ref operand) => operand.ty(local_decls, tcx),
            Rvalue::Repeat(ref operand, count) => {
                tcx.mk_array(operand.ty(local_decls, tcx), count)
            }
            Rvalue::Ref(reg, bk, ref place) => {
                let place_ty = place.ty(local_decls, tcx).ty;
                tcx.mk_ref(reg,
                    ty::TypeAndMut {
                        ty: place_ty,
                        mutbl: bk.to_mutbl_lossy()
                    }
                )
            }
            Rvalue::Len(..) => tcx.types.usize,
            Rvalue::Cast(.., ty) => ty,
            Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs_ty = lhs.ty(local_decls, tcx);
                let rhs_ty = rhs.ty(local_decls, tcx);
                op.ty(tcx, lhs_ty, rhs_ty)
            }
            // Checked ops additionally yield an overflow flag, hence the
            // `(result, bool)` tuple.
            Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs_ty = lhs.ty(local_decls, tcx);
                let rhs_ty = rhs.ty(local_decls, tcx);
                let ty = op.ty(tcx, lhs_ty, rhs_ty);
                tcx.intern_tup(&[ty, tcx.types.bool])
            }
            Rvalue::UnaryOp(UnOp::Not, ref operand) |
            Rvalue::UnaryOp(UnOp::Neg, ref operand) => {
                operand.ty(local_decls, tcx)
            }
            Rvalue::Discriminant(ref place) => {
                let ty = place.ty(local_decls, tcx).ty;
                match ty.sty {
                    ty::Adt(adt_def, _) => adt_def.repr.discr_type().to_ty(tcx),
                    ty::Generator(_, substs, _) => substs.discr_ty(tcx),
                    _ => {
                        // This can only be `0`, for now, so `u8` will suffice.
                        tcx.types.u8
                    }
                }
            }
            Rvalue::NullaryOp(NullOp::Box, t) => tcx.mk_box(t),
            Rvalue::NullaryOp(NullOp::SizeOf, _) => tcx.types.usize,
            Rvalue::Aggregate(ref ak, ref ops) => {
                match **ak {
                    AggregateKind::Array(ty) => {
                        tcx.mk_array(ty, ops.len() as u64)
                    }
                    AggregateKind::Tuple => {
                        tcx.mk_tup(ops.iter().map(|op| op.ty(local_decls, tcx)))
                    }
                    AggregateKind::Adt(def, _, substs, _, _) => {
                        tcx.type_of(def.did).subst(tcx, substs)
                    }
                    AggregateKind::Closure(did, substs) => {
                        tcx.mk_closure(did, substs)
                    }
                    AggregateKind::Generator(did, substs, movability) => {
                        tcx.mk_generator(did, substs, movability)
                    }
                }
            }
        }
    }
    #[inline]
    /// Returns whether this rvalue leaves its destination deeply
    /// initialized (most rvalues) or only shallowly initialized
    /// (`Rvalue::NullaryOp(NullOp::Box, _)`).
    pub fn initialization_state(&self) -> RvalueInitializationState {
        match *self {
            Rvalue::NullaryOp(NullOp::Box, _) => RvalueInitializationState::Shallow,
            _ => RvalueInitializationState::Deep
        }
    }
}
impl<'tcx> Operand<'tcx> {
    /// Computes the type of this operand: the type of the copied/moved
    /// place, or the type of the constant.
    pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
    where
        D: HasLocalDecls<'tcx>,
    {
        match *self {
            Operand::Copy(ref place) | Operand::Move(ref place) => {
                place.ty(local_decls, tcx).ty
            }
            Operand::Constant(ref constant) => constant.ty,
        }
    }
}
impl<'tcx> BinOp {
    /// Computes the result type of applying this binary operator to
    /// operands of the given types.
    pub fn ty(&self, tcx: TyCtxt<'tcx>, lhs_ty: Ty<'tcx>, rhs_ty: Ty<'tcx>) -> Ty<'tcx> {
        // FIXME: handle SIMD correctly
        match self {
            &BinOp::Add | &BinOp::Sub | &BinOp::Mul | &BinOp::Div | &BinOp::Rem |
            &BinOp::BitXor | &BinOp::BitAnd | &BinOp::BitOr => {
                // these should be integers or floats of the same size.
                assert_eq!(lhs_ty, rhs_ty);
                lhs_ty
            }
            &BinOp::Shl | &BinOp::Shr | &BinOp::Offset => {
                lhs_ty // lhs_ty can be != rhs_ty
            }
            // Comparisons always produce `bool`.
            &BinOp::Eq | &BinOp::Lt | &BinOp::Le |
            &BinOp::Ne | &BinOp::Ge | &BinOp::Gt => {
                tcx.types.bool
            }
        }
    }
}
impl BorrowKind {
    /// Approximates this borrow kind with a HIR mutability; lossy because
    /// `Unique` and `Shallow` have no exact HIR counterpart.
    pub fn to_mutbl_lossy(self) -> hir::Mutability {
        match self {
            BorrowKind::Mut { .. } => hir::MutMutable,
            BorrowKind::Shared => hir::MutImmutable,
            // We have no type corresponding to a unique imm borrow, so
            // use `&mut`. It gives all the capabilities of an `&uniq`
            // and hence is a safe "over approximation".
            BorrowKind::Unique => hir::MutMutable,
            // We have no type corresponding to a shallow borrow, so use
            // `&` as an approximation.
            BorrowKind::Shallow => hir::MutImmutable,
        }
    }
}
impl BinOp {
pub fn to_hir_binop(self) -> hir::BinOpKind {
match self {
BinOp::Add => hir::BinOpKind::Add,
BinOp::Sub => hir::BinOpKind::Sub,
BinOp::Mul => hir::BinOpKind::Mul,
BinOp::Div => hir::BinOpKind::Div,
BinOp::Rem => hir::BinOpKind::Rem,
BinOp::BitXor => hir::BinOpKind::BitXor,
BinOp::BitAnd => hir::BinOpKind::BitAnd,
BinOp::BitOr => hir::BinOpKind::BitOr,
BinOp::Shl => hir::BinOpKind::Shl,
BinOp::Shr => hir::BinOpKind::Shr,
BinOp::Eq => hir::BinOpKind::Eq,
BinOp::Ne => hir::BinOpKind::Ne,
BinOp::Lt => hir::BinOpKind::Lt,
BinOp::Gt => hir::BinOpKind::Gt,
BinOp::Le => hir::BinOpKind::Le,
BinOp::Ge => hir::BinOpKind::Ge,
BinOp::Offset => unreachable!()
}
}
}
| 36.431677 | 94 | 0.498849 |
deb08a4608cc0358c533531e7b2d1fff79289caa
| 775 |
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() {
    // NOTE(review): this is pre-1.0 Rust syntax (`0i` integer suffixes,
    // `vec!(..)` with parentheses, `to_string()` on vectors/slices); it
    // will not compile on modern rustc and is kept as a historical test.
    assert_eq!((vec!(0i, 1)).to_string(), "[0, 1]".to_string());
    assert_eq!((&[1i, 2]).to_string(), "[1, 2]".to_string());
    let foo = vec!(3i, 4);
    let bar = &[4i, 5];
    assert_eq!(foo.to_string(), "[3, 4]".to_string());
    assert_eq!(bar.to_string(), "[4, 5]".to_string());
}
| 36.904762 | 68 | 0.656774 |
673191699c4f0934e81329db135f58543e50992d
| 616 |
#![no_std]
/// proxy file for when no_std support is enabled. Simply defines aliases for std modules so we don't need to
/// deal with different names elsewhere
/// Stand-in for `std::boxed`, backed by the `alloc` crate.
pub mod boxed {
    pub use alloc::boxed::Box;
}
/// Stand-in for `std::convert`, backed by `core`.
pub mod convert {
    pub use core::convert::From;
    pub use core::convert::TryInto;
}
/// Stand-in for `std::collections`; `HashMap` comes from `hashbrown`
/// since std's `HashMap` is unavailable under `no_std`.
pub mod collections {
    pub use hashbrown::HashMap;
}
/// Stand-in for `std::default`, backed by `core`.
pub mod default {
    pub use core::default::Default;
}
/// Stand-in for `std::iter`, backed by `core`.
pub mod iter {
    pub use core::iter::Peekable;
}
/// Stand-in for `std::fmt`; `Debug` is re-exported via `alloc::fmt`.
pub mod fmt {
    pub use alloc::fmt::Debug;
}
/// Stand-in for `std::vec`, backed by the `alloc` crate.
pub mod vec {
    pub use alloc::vec::Vec;
}
/// Stand-in for `std::string`, backed by the `alloc` crate.
pub mod string {
    pub use alloc::string::String;
}
| 16.648649 | 109 | 0.660714 |
3378d7988454b725e72e00ee3acc0a1b7a494b35
| 10,374 |
use std::{
os::unix::{io::AsRawFd, net::UnixStream},
path::{Path, PathBuf},
process::{Child, Command},
};
use ic_canister_sandbox_backend_lib::RUN_AS_CANISTER_SANDBOX_FLAG;
use ic_types::CanisterId;
use once_cell::sync::OnceCell;
use std::os::unix::process::CommandExt;
use std::sync::Arc;
use ic_canister_sandbox_common::{
protocol, protocol::ctlsvc, rpc, sandbox_client_stub::SandboxClientStub,
sandbox_service::SandboxService, transport,
};
/// File name of the stand-alone sandbox binary that is looked up next to
/// the current executable (see `create_sandbox_argv`).
const SANDBOX_EXECUTABLE_NAME: &str = "canister_sandbox";
// These binaries support running in the canister sandbox mode.
const RUNNABLE_AS_SANDBOX: &[&str] = &["drun", "ic-replay"];
/// A spawned child process plus the controller's end of the control socket
/// that was handed to the child.
pub struct SocketedProcess {
    /// Handle to the spawned child process.
    pub child_handle: Child,
    /// Our end of the socketpair; the child receives the other end as fd 3
    /// (see `spawn_socketed_process`).
    pub control_stream: UnixStream,
}
/// Spawns a subprocess and passes an (anonymous) unix domain socket to
/// it for control. The socket will arrive as file descriptor 3 in the
/// target process, and the other end of the socket will be returned
/// in this call to control the process.
pub fn spawn_socketed_process(
    exec_path: &str,
    argv: &[String],
) -> std::io::Result<SocketedProcess> {
    let (sock_controller, sock_sandbox) = std::os::unix::net::UnixStream::pair()?;
    let mut cmd = Command::new(exec_path);
    cmd.args(argv);
    // In case of Command we inherit the current process's environment. This should
    // particularly include things such as Rust backtrace flags. It might be
    // advisable to filter/configure that (in case there might be information in
    // env that the sandbox process should not be privy to).
    // The following block duplicates sock_sandbox fd under fd 3, errors are
    // handled.
    // SAFETY: the closure runs after fork and before exec in the child; it
    // only calls `dup2` (async-signal-safe) on a captured fd and reads errno.
    unsafe {
        cmd.pre_exec(move || {
            let fd = libc::dup2(sock_sandbox.as_raw_fd(), 3);
            if fd != 3 {
                return Err(std::io::Error::last_os_error());
            }
            Ok(())
        })
    };
    let child_handle = cmd.spawn()?;
    Ok(SocketedProcess {
        child_handle,
        control_stream: sock_controller,
    })
}
/// Spawn a canister sandbox process and yield RPC interface object to
/// communicate with it. When the socket is closed by the other side,
/// we check if the safe_shutdown flag was set. If not this function
/// will initiate an exit (or a panic during testing).
///
/// # Panics & exit
///
/// This function panics upon socket close if safe_shutdown flag is
/// unset. The caller of the function is expected to set/unset the flag.
pub fn spawn_canister_sandbox_process(
    exec_path: &str,
    argv: &[String],
    controller_service: Arc<dyn rpc::DemuxServer<ctlsvc::Request, ctlsvc::Reply> + Send + Sync>,
) -> std::io::Result<(Arc<dyn SandboxService>, Child, std::thread::JoinHandle<()>)> {
    let SocketedProcess {
        child_handle,
        control_stream: socket,
    } = spawn_socketed_process(exec_path, argv)?;
    // The socket is shared between the writer below and the reader thread.
    let socket = Arc::new(socket);
    // Set up outgoing channel.
    let out = transport::UnixStreamMuxWriter::<protocol::transport::ControllerToSandbox>::new(
        Arc::clone(&socket),
    );
    // Construct RPC client to sandbox process.
    let reply_handler = Arc::new(rpc::ReplyManager::<protocol::sbxsvc::Reply>::new());
    let svc = Arc::new(SandboxClientStub::new(rpc::Channel::new(
        out.make_sink::<protocol::sbxsvc::Request>(),
        reply_handler.clone(),
    )));
    // Set up thread to handle incoming channel -- replies are routed
    // to reply buffer, requests to the RPC request handler given.
    let thread_handle = std::thread::spawn(move || {
        let demux = transport::Demux::<_, _, protocol::transport::SandboxToController>::new(
            Arc::new(rpc::ServerStub::new(
                controller_service,
                out.make_sink::<protocol::ctlsvc::Reply>(),
            )),
            reply_handler,
        );
        // Blocks until the socket is closed by the other side.
        transport::socket_read_messages::<_, _>(
            move |message| {
                demux.handle(message);
            },
            socket,
        );
    });
    Ok((svc, child_handle, thread_handle))
}
/// Spawns a sandbox process for the given canister.
///
/// `argv` must hold at least the sandbox executable path; the textual
/// canister id is appended as the final argument.
///
/// # Errors
///
/// Returns the underlying I/O error when the sandbox process cannot be
/// spawned.
pub fn create_sandbox_process(
    controller_service: Arc<dyn rpc::DemuxServer<ctlsvc::Request, ctlsvc::Reply> + Send + Sync>,
    canister_id: &CanisterId,
    mut argv: Vec<String>,
) -> std::io::Result<(Arc<dyn SandboxService>, Child)> {
    assert!(!argv.is_empty());
    argv.push(canister_id.to_string());
    // Spawn failures are now propagated through the `io::Result` return
    // type instead of panicking via `.expect(..)` (which made the error
    // path of the declared return type unreachable). The redundant
    // `Arc::clone(&controller_service)` was dropped as well: the value is
    // consumed here and never used again.
    let (sandbox_handle, child_handle, _recv_thread_handle) =
        spawn_canister_sandbox_process(&argv[0], &argv[1..], controller_service)?;
    Ok((sandbox_handle, child_handle))
}
/// Get the path of the current running binary, i.e. the first element of
/// `argv`. Returns `None` if the process was started with an empty argv.
fn current_binary_path() -> Option<PathBuf> {
    let first_arg = std::env::args().next()?;
    Some(PathBuf::from(first_arg))
}
/// Gets the executable and arguments for spawning a canister sandbox.
///
/// Returns `None` when no suitable sandbox executable can be located.
pub(super) fn create_sandbox_argv() -> Option<Vec<String>> {
    let current_binary_path = current_binary_path()?;
    let current_binary_name = current_binary_path.file_name()?.to_str()?;
    // The order of checks performed in this function is important.
    // Please do not reorder.
    //
    // 1. If the current binary supports running the sandbox mode, then use it.
    //    This is important for `ic-replay` and `drun` where we do not control
    //    the location of the sandbox binary.
    // NOTE: the argument below repairs a mojibake-corrupted token
    // (`¤t_binary_name`) that was originally `&current_binary_name`.
    if RUNNABLE_AS_SANDBOX.contains(&current_binary_name) {
        let exec_path = current_binary_path.to_str()?.to_string();
        return Some(vec![exec_path, RUN_AS_CANISTER_SANDBOX_FLAG.to_string()]);
    }
    // 2. If the sandbox binary is in the same folder as the current binary, then
    //    use it.
    let current_binary_folder = current_binary_path.parent()?;
    let sandbox_executable_path = current_binary_folder.join(SANDBOX_EXECUTABLE_NAME);
    if Path::exists(&sandbox_executable_path) {
        let exec_path = sandbox_executable_path.to_str()?.to_string();
        return Some(vec![exec_path]);
    }
    // 3. The two checks above cover all production use cases.
    //    Find the sandbox binary for testing and local development.
    create_sandbox_argv_for_testing()
}
/// Only for testing purposes.
/// Gets executable and arguments when running in CI or in a dev environment.
fn create_sandbox_argv_for_testing() -> Option<Vec<String>> {
    // In CI we expect the sandbox executable to be in our path so this should
    // succeed.
    if let Ok(exec_path) = which::which(SANDBOX_EXECUTABLE_NAME) {
        println!("Running sandbox with executable {:?}", exec_path);
        return Some(vec![exec_path.to_str().unwrap().to_string()]);
    }
    // Ensures the cargo build below runs at most once per process.
    static SANDBOX_COMPILED: OnceCell<()> = OnceCell::new();
    // When running in a dev environment we expect `cargo` to be in our path and
    // we should be able to find the `canister_sandbox` cargo manifest so this
    // should succeed.
    match (
        which::which("cargo"),
        canister_sandbox_cargo_manifest_for_testing(),
    ) {
        (Ok(path), Some(manifest_path)) => {
            println!(
                "Building sandbox with cargo {:?} and manifest {:?}",
                path, manifest_path
            );
            let path = path.to_str().unwrap().to_string();
            SANDBOX_COMPILED
                .get_or_init(|| build_sandbox_with_cargo_for_testing(&path, &manifest_path));
            // Run `canister_sandbox` using `cargo run` so that we don't need to find the
            // executable in the target folder.
            Some(make_cargo_argv_for_testing(
                &path,
                &manifest_path,
                CargoCommandType::Run,
            ))
        }
        _ => None,
    }
}
/// Only for testing purposes.
/// Locates the `canister_sandbox` crate's `Cargo.toml` relative to the
/// topmost workspace manifest above the current crate's manifest directory.
fn canister_sandbox_cargo_manifest_for_testing() -> Option<PathBuf> {
    // Walk from CARGO_MANIFEST_DIR up to the filesystem root, remembering
    // the last (i.e. topmost) Cargo.toml encountered — that is the
    // top-level workspace manifest.
    let mut current_manifest = None;
    if let Ok(manifest_dir) = std::env::var("CARGO_MANIFEST_DIR") {
        for ancestor in Path::new(&manifest_dir).ancestors() {
            let candidate = ancestor.join("Cargo.toml");
            if candidate.exists() {
                current_manifest = Some(candidate);
            }
        }
    }
    // Using the top-level cargo manifest would also be correct, but that would
    // ignore the `dev-dependencies` resulting in a different metadata hash,
    // which causes rebuilding of all dependencies that have already been
    // built by `cargo test`. Hence resolve the `canister_sandbox` crate
    // manifest that sits next to the workspace manifest instead.
    let canister_sandbox = current_manifest?
        .parent()?
        .join("canister_sandbox")
        .join("Cargo.toml");
    if canister_sandbox.exists() {
        Some(canister_sandbox)
    } else {
        None
    }
}
/// Only for testing purposes.
/// Builds the `canister_sandbox` binary via `cargo build`, panicking on any
/// failure (this is test-only bootstrap code, so panicking is acceptable).
fn build_sandbox_with_cargo_for_testing(cargo_path: &str, manifest_path: &Path) {
    let argv = make_cargo_argv_for_testing(cargo_path, manifest_path, CargoCommandType::Build);
    let (program, args) = argv.split_first().expect("cargo argv is never empty");
    let output = Command::new(program)
        .args(args)
        .output()
        .expect("Failed to build canister_sandbox with cargo");
    assert!(
        output.status.success(),
        "Failed to build canister_sandbox with cargo\nError: {:?}\nstderr: {:?}",
        output.status,
        output.stderr
    );
}
/// Which cargo subcommand `make_cargo_argv_for_testing` should assemble.
enum CargoCommandType {
    /// `cargo build` — compile the sandbox binary.
    Build,
    /// `cargo run` — run the sandbox binary directly via cargo.
    Run,
}
/// Only for testing purposes.
/// Assembles the full `cargo` command line used to either build or run the
/// sandbox binary during tests.
fn make_cargo_argv_for_testing(
    cargo_path: &str,
    manifest_path: &Path,
    cargo_command_type: CargoCommandType,
) -> Vec<String> {
    let common_args = vec![
        "--quiet",
        "--manifest-path",
        manifest_path.to_str().unwrap(),
        "--bin",
        SANDBOX_EXECUTABLE_NAME,
    ];
    // `cargo run` gets a trailing `--` so that the sandbox's own arguments
    // are not interpreted by cargo itself.
    let argv = match cargo_command_type {
        CargoCommandType::Run => vec![vec![cargo_path, "run"], common_args, vec!["--"]],
        CargoCommandType::Build => vec![vec![cargo_path, "build"], common_args],
    };
    // `flat_map` replaces the former `map(..).flatten()` chain
    // (clippy: `map_flatten`).
    argv.into_iter()
        .flat_map(|group| group.into_iter().map(|s| s.to_string()))
        .collect()
}
| 35.527397 | 96 | 0.648834 |
9117d829bf0ae423db256607b1d30db2b46e16a5
| 44,778 |
// Used to simulate a fairly large number of options/flags and parsing with thousands of positional
// args
//
// CLI used is adapted from ripgrep 48a8a3a691220f9e5b2b08f4051abe8655ea7e8a
use clap::{App, AppSettings, Arg, ArgSettings};
use criterion::{criterion_group, criterion_main, Criterion};
use std::collections::HashMap;
use std::io::Cursor;
use lazy_static::lazy_static;
pub fn build_rg_with_short_help(c: &mut Criterion) {
c.bench_function("build_rg_with_short_help", |b| b.iter(app_short));
}
pub fn build_rg_with_long_help(c: &mut Criterion) {
c.bench_function("build_rg_with_long_help", |b| b.iter(app_long));
}
/// Benchmark: render the short help text of a pre-built `App`.
pub fn write_rg_short_help(c: &mut Criterion) {
    // Build the app once outside the timed loop; only rendering is measured.
    let mut app = app_short();
    c.bench_function("write_rg_short_help", |b| {
        b.iter(|| build_help(&mut app))
    });
}
/// Benchmark: render the long help text of a pre-built `App`.
pub fn write_rg_long_help(c: &mut Criterion) {
    // Build the app once outside the timed loop; only rendering is measured.
    let mut app = app_long();
    c.bench_function("write_rg_long_help", |b| {
        b.iter(|| build_help(&mut app))
    });
}
/// Benchmark: parse the minimal command line `rg pat`.
pub fn parse_rg(c: &mut Criterion) {
    c.bench_function("parse_rg", |b| {
        b.iter(|| {
            let args = vec!["rg", "pat"];
            app_short().get_matches_from(args)
        })
    });
}
/// Benchmark: parse a command line that mixes grouped short flags, attached
/// values (`-pqr=some`, `-C5`) and long options.
pub fn parse_rg_with_complex(c: &mut Criterion) {
    c.bench_function("parse_rg_with_complex", |b| {
        b.iter(|| {
            let args = vec![
                "rg",
                "pat",
                "-cFlN",
                "-pqr=some",
                "--null",
                "--no-filename",
                "--no-messages",
                "-SH",
                "-C5",
                "--follow",
                "-e some",
            ];
            app_short().get_matches_from(args)
        })
    });
}
/// Benchmark: parse a command line with thousands of positional arguments.
pub fn parse_rg_with_lots(c: &mut Criterion) {
    // The original benchmark spelled out the argument list by hand: `rg pat`
    // followed by 1754 repetitions of the positional value "some". Generating
    // the list keeps the parsed input identical while making the count
    // explicit and the function maintainable. (Like the hand-written `vec!`,
    // the vector is rebuilt on every iteration, so the allocation profile of
    // the timed closure is unchanged.)
    c.bench_function("parse_rg_with_lots", |b| {
        b.iter(|| {
            let mut args = vec!["rg", "pat"];
            args.extend(std::iter::repeat("some").take(1754));
            app_short().get_matches_from(args)
        })
    });
}
/// Long "about" text shown at the top of ripgrep's help output. The literal
/// deliberately starts with a newline so it renders as-is in the template.
const ABOUT: &str = "
ripgrep (rg) recursively searches your current directory for a regex pattern.
ripgrep's regex engine uses finite automata and guarantees linear time
searching. Because of this, features like backreferences and arbitrary
lookaround are not supported.
Project home page: https://github.com/BurntSushi/ripgrep
Use -h for short descriptions and --help for more details.";
/// Hand-written usage section (clap's auto-generated usage is overridden).
const USAGE: &str = "
    rg [OPTIONS] <pattern> [<path> ...]
    rg [OPTIONS] [-e PATTERN | -f FILE ]... [<path> ...]
    rg [OPTIONS] --files [<path> ...]
    rg [OPTIONS] --type-list";
/// Help template; `{...}` placeholders are substituted by clap when the help
/// text is rendered.
const TEMPLATE: &str = "\
{bin} {version}
{author}
{about}
USAGE:{usage}
ARGS:
{positionals}
OPTIONS:
{unified}";
/// Build a clap application with short help strings.
fn app_short() -> App<'static> {
    app(false, |key| USAGES[key].short)
}
/// Build a clap application with long help strings.
fn app_long() -> App<'static> {
    app(true, |key| USAGES[key].long)
}
/// Build the help text of an application.
fn build_help(app: &mut App) -> String {
let mut buf = Cursor::new(Vec::with_capacity(50));
app.write_help(&mut buf).unwrap();
let content = buf.into_inner();
String::from_utf8(content).unwrap()
}
/// Build a clap application parameterized by usage strings.
///
/// The function given should take a clap argument name and return a help
/// string. `app` will panic if a usage string is not defined.
///
/// This is an intentionally stand-alone module so that it can be used easily
/// in a `build.rs` script to build shell completion files.
fn app<F>(_next_line_help: bool, doc: F) -> App<'static>
where
    F: Fn(&'static str) -> &'static str,
{
    // Local constructors: a positional argument / a `--long` flag whose help
    // text is looked up through `doc` (panics if the usage entry is missing).
    let arg = |name| Arg::new(name).about(doc(name));
    let flag = |name| arg(name).long(name);
    App::new("ripgrep")
        .author("BurntSushi") // simulating since it's only a bench
        .version("0.4.0") // Simulating
        .about(ABOUT)
        .max_term_width(100)
        .setting(AppSettings::UnifiedHelpMessage)
        .override_usage(USAGE)
        .help_template(TEMPLATE)
        // Handle help/version manually to make their output formatting
        // consistent with short/long views.
        .arg(arg("help-short").short('h'))
        .arg(flag("help"))
        .arg(flag("version").short('V'))
        // First, set up primary positional/flag arguments.
        .arg(arg("pattern").required_unless_eq_any(&[
            "file",
            "files",
            "help-short",
            "help",
            "regexp",
            "type-list",
            "version",
        ]))
        .arg(
            arg("path")
                .setting(ArgSettings::MultipleValues)
                .setting(ArgSettings::MultipleOccurrences),
        )
        .arg(
            flag("regexp")
                .short('e')
                .settings(&[
                    ArgSettings::AllowHyphenValues,
                    ArgSettings::MultipleOccurrences,
                    ArgSettings::TakesValue,
                ])
                .value_name("pattern"),
        )
        .arg(
            flag("files")
                // This should also conflict with `pattern`, but the first file
                // path will actually be in `pattern`.
                .conflicts_with_all(&["file", "regexp", "type-list"]),
        )
        .arg(flag("type-list").conflicts_with_all(&["file", "files", "pattern", "regexp"]))
        // Second, set up common flags.
        .arg(flag("text").short('a'))
        .arg(flag("count").short('c'))
        .arg(
            flag("color")
                .value_name("WHEN")
                .setting(ArgSettings::HidePossibleValues)
                .possible_values(&["never", "auto", "always", "ansi"]),
        )
        .arg(
            flag("colors")
                .value_name("SPEC")
                .settings(&[ArgSettings::MultipleOccurrences, ArgSettings::TakesValue]),
        )
        .arg(flag("fixed-strings").short('F'))
        .arg(
            flag("glob")
                .short('g')
                .settings(&[ArgSettings::MultipleOccurrences, ArgSettings::TakesValue])
                .value_name("GLOB"),
        )
        .arg(flag("ignore-case").short('i'))
        .arg(flag("line-number").short('n'))
        .arg(flag("no-line-number").short('N'))
        .arg(flag("quiet").short('q'))
        .arg(
            flag("type")
                .short('t')
                .settings(&[ArgSettings::MultipleOccurrences, ArgSettings::TakesValue])
                .value_name("TYPE"),
        )
        .arg(
            flag("type-not")
                .short('T')
                .settings(&[ArgSettings::MultipleOccurrences, ArgSettings::TakesValue])
                .value_name("TYPE"),
        )
        .arg(
            flag("unrestricted")
                .short('u')
                .setting(ArgSettings::MultipleOccurrences),
        )
        .arg(flag("invert-match").short('v'))
        .arg(flag("word-regexp").short('w'))
        // Third, set up less common flags.
        .arg(
            flag("after-context")
                .short('A')
                .value_name("NUM")
                .validator(validate_number),
        )
        .arg(
            flag("before-context")
                .short('B')
                .value_name("NUM")
                .validator(validate_number),
        )
        .arg(
            flag("context")
                .short('C')
                .value_name("NUM")
                .validator(validate_number),
        )
        .arg(flag("column"))
        .arg(flag("context-separator").value_name("SEPARATOR"))
        .arg(flag("debug"))
        .arg(
            flag("file")
                .short('f')
                .value_name("FILE")
                .setting(ArgSettings::MultipleOccurrences),
        )
        .arg(flag("files-with-matches").short('l'))
        .arg(flag("files-without-match"))
        .arg(flag("with-filename").short('H'))
        .arg(flag("no-filename"))
        // heading/no-heading override each other; the last one given wins.
        .arg(flag("heading").overrides_with("no-heading"))
        .arg(flag("no-heading").overrides_with("heading"))
        .arg(flag("hidden"))
        .arg(
            flag("ignore-file")
                .value_name("FILE")
                .setting(ArgSettings::MultipleOccurrences),
        )
        .arg(flag("follow").short('L'))
        .arg(
            flag("max-count")
                .short('m')
                .value_name("NUM")
                .validator(validate_number),
        )
        .arg(
            flag("maxdepth")
                .value_name("NUM")
                .validator(validate_number),
        )
        .arg(flag("mmap"))
        .arg(flag("no-messages"))
        .arg(flag("no-mmap"))
        .arg(flag("no-ignore"))
        .arg(flag("no-ignore-parent"))
        .arg(flag("no-ignore-vcs"))
        .arg(flag("null"))
        .arg(flag("path-separator").value_name("SEPARATOR"))
        .arg(flag("pretty").short('p'))
        .arg(flag("replace").short('r').value_name("ARG"))
        .arg(flag("case-sensitive").short('s'))
        .arg(flag("smart-case").short('S'))
        .arg(flag("sort-files"))
        .arg(
            flag("threads")
                .short('j')
                .value_name("ARG")
                .validator(validate_number),
        )
        .arg(flag("vimgrep"))
        .arg(
            flag("type-add")
                .value_name("TYPE")
                .setting(ArgSettings::MultipleOccurrences),
        )
        .arg(
            flag("type-clear")
                .value_name("TYPE")
                .setting(ArgSettings::MultipleOccurrences),
        )
}
/// Pair of help strings for one argument: the terse text shown by `-h` and
/// the verbose text shown by `--help`.
struct Usage {
    // One-line help shown by `-h`.
    short: &'static str,
    // Multi-paragraph help shown by `--help`.
    long: &'static str,
}
/// Insert a `Usage` entry into the map. With three arguments the short help
/// doubles as the long help; with four they are given separately. A trailing
/// newline plus indent is appended to the long text so entries are visually
/// separated in `--help` output.
macro_rules! doc {
    ($map:expr, $name:expr, $short:expr) => {
        doc!($map, $name, $short, $short)
    };
    ($map:expr, $name:expr, $short:expr, $long:expr) => {
        $map.insert(
            $name,
            Usage {
                short: $short,
                long: concat!($long, "\n    "),
            },
        );
    };
}
// Global table mapping each clap argument name to its short/long help text.
// Built lazily on first access; `app` indexes it (via the `doc` closure) and
// panics on a missing key, so every argument defined in `app` must appear here.
lazy_static! {
    static ref USAGES: HashMap<&'static str, Usage> = {
        let mut h = HashMap::new();
        doc!(
            h,
            "help-short",
            "Show short help output.",
            "Show short help output. Use --help to show more details."
        );
        doc!(
            h,
            "help",
            "Show verbose help output.",
            "When given, more details about flags are provided."
        );
        doc!(h, "version", "Prints version information.");
        doc!(
            h,
            "pattern",
            "A regular expression used for searching.",
            "A regular expression used for searching. Multiple patterns \
             may be given. To match a pattern beginning with a -, use [-]."
        );
        doc!(
            h,
            "regexp",
            "A regular expression used for searching.",
            "A regular expression used for searching. Multiple patterns \
             may be given. To match a pattern beginning with a -, use [-]."
        );
        doc!(
            h,
            "path",
            "A file or directory to search.",
            "A file or directory to search. Directories are searched \
             recursively."
        );
        doc!(
            h,
            "files",
            "Print each file that would be searched.",
            "Print each file that would be searched without actually \
             performing the search. This is useful to determine whether a \
             particular file is being searched or not."
        );
        doc!(
            h,
            "type-list",
            "Show all supported file types.",
            "Show all supported file types and their corresponding globs."
        );
        doc!(h, "text", "Search binary files as if they were text.");
        doc!(h, "count", "Only show count of matches for each file.");
        doc!(
            h,
            "color",
            "When to use color. [default: auto]",
            "When to use color in the output. The possible values are \
             never, auto, always or ansi. The default is auto. When always \
             is used, coloring is attempted based on your environment. When \
             ansi used, coloring is forcefully done using ANSI escape color \
             codes."
        );
        doc!(
            h,
            "colors",
            "Configure color settings and styles.",
            "This flag specifies color settings for use in the output. \
             This flag may be provided multiple times. Settings are applied \
             iteratively. Colors are limited to one of eight choices: \
             red, blue, green, cyan, magenta, yellow, white and black. \
             Styles are limited to nobold, bold, nointense or intense.\n\n\
             The format of the flag is {type}:{attribute}:{value}. {type} \
             should be one of path, line or match. {attribute} can be fg, bg \
             or style. {value} is either a color (for fg and bg) or a text \
             style. A special format, {type}:none, will clear all color \
             settings for {type}.\n\nFor example, the following command will \
             change the match color to magenta and the background color for \
             line numbers to yellow:\n\n\
             rg --colors 'match:fg:magenta' --colors 'line:bg:yellow' foo."
        );
        doc!(
            h,
            "fixed-strings",
            "Treat the pattern as a literal string.",
            "Treat the pattern as a literal string instead of a regular \
             expression. When this flag is used, special regular expression \
             meta characters such as (){}*+. do not need to be escaped."
        );
        doc!(
            h,
            "glob",
            "Include or exclude files/directories.",
            "Include or exclude files/directories for searching that \
             match the given glob. This always overrides any other \
             ignore logic. Multiple glob flags may be used. Globbing \
             rules match .gitignore globs. Precede a glob with a ! \
             to exclude it."
        );
        doc!(
            h,
            "ignore-case",
            "Case insensitive search.",
            "Case insensitive search. This is overridden by \
             --case-sensitive."
        );
        doc!(
            h,
            "line-number",
            "Show line numbers.",
            "Show line numbers (1-based). This is enabled by default when \
             searching in a tty."
        );
        doc!(
            h,
            "no-line-number",
            "Suppress line numbers.",
            "Suppress line numbers. This is enabled by default when NOT \
             searching in a tty."
        );
        doc!(
            h,
            "quiet",
            "Do not print anything to stdout.",
            "Do not print anything to stdout. If a match is found in a file, \
             stop searching. This is useful when ripgrep is used only for \
             its exit code."
        );
        doc!(
            h,
            "type",
            "Only search files matching TYPE.",
            "Only search files matching TYPE. Multiple type flags may be \
             provided. Use the --type-list flag to list all available \
             types."
        );
        doc!(
            h,
            "type-not",
            "Do not search files matching TYPE.",
            "Do not search files matching TYPE. Multiple type-not flags may \
             be provided. Use the --type-list flag to list all available \
             types."
        );
        doc!(
            h,
            "unrestricted",
            "Reduce the level of \"smart\" searching.",
            "Reduce the level of \"smart\" searching. A single -u \
             won't respect .gitignore (etc.) files. Two -u flags will \
             additionally search hidden files and directories. Three \
             -u flags will additionally search binary files. -uu is \
             roughly equivalent to grep -r and -uuu is roughly \
             equivalent to grep -a -r."
        );
        doc!(
            h,
            "invert-match",
            "Invert matching.",
            "Invert matching. Show lines that don't match given patterns."
        );
        doc!(
            h,
            "word-regexp",
            "Only show matches surrounded by word boundaries.",
            "Only show matches surrounded by word boundaries. This is \
             equivalent to putting \\b before and after all of the search \
             patterns."
        );
        doc!(h, "after-context", "Show NUM lines after each match.");
        doc!(h, "before-context", "Show NUM lines before each match.");
        doc!(h, "context", "Show NUM lines before and after each match.");
        doc!(
            h,
            "column",
            "Show column numbers",
            "Show column numbers (1-based). This only shows the column \
             numbers for the first match on each line. This does not try \
             to account for Unicode. One byte is equal to one column. This \
             implies --line-number."
        );
        doc!(
            h,
            "context-separator",
            "Set the context separator string. [default: --]",
            "The string used to separate non-contiguous context lines in the \
             output. Escape sequences like \\x7F or \\t may be used. The \
             default value is --."
        );
        doc!(
            h,
            "debug",
            "Show debug messages.",
            "Show debug messages. Please use this when filing a bug report."
        );
        doc!(
            h,
            "file",
            "Search for patterns from the given file.",
            "Search for patterns from the given file, with one pattern per \
             line. When this flag is used or multiple times or in \
             combination with the -e/--regexp flag, then all patterns \
             provided are searched. Empty pattern lines will match all input \
             lines, and the newline is not counted as part of the pattern."
        );
        doc!(
            h,
            "files-with-matches",
            "Only show the path of each file with at least one match."
        );
        doc!(
            h,
            "files-without-match",
            "Only show the path of each file that contains zero matches."
        );
        doc!(
            h,
            "with-filename",
            "Show file name for each match.",
            "Prefix each match with the file name that contains it. This is \
             the default when more than one file is searched."
        );
        doc!(
            h,
            "no-filename",
            "Never show the file name for a match.",
            "Never show the file name for a match. This is the default when \
             one file is searched."
        );
        doc!(
            h,
            "heading",
            "Show matches grouped by each file.",
            "This shows the file name above clusters of matches from each \
             file instead of showing the file name for every match. This is \
             the default mode at a tty."
        );
        doc!(
            h,
            "no-heading",
            "Don't group matches by each file.",
            "Don't group matches by each file. If -H/--with-filename is \
             enabled, then file names will be shown for every line matched. \
             This is the default mode when not at a tty."
        );
        doc!(
            h,
            "hidden",
            "Search hidden files and directories.",
            "Search hidden files and directories. By default, hidden files \
             and directories are skipped."
        );
        doc!(
            h,
            "ignore-file",
            "Specify additional ignore files.",
            "Specify additional ignore files for filtering file paths. \
             Ignore files should be in the gitignore format and are matched \
             relative to the current working directory. These ignore files \
             have lower precedence than all other ignore files. When \
             specifying multiple ignore files, earlier files have lower \
             precedence than later files."
        );
        doc!(h, "follow", "Follow symbolic links.");
        doc!(
            h,
            "max-count",
            "Limit the number of matches.",
            "Limit the number of matching lines per file searched to NUM."
        );
        doc!(
            h,
            "maxdepth",
            "Descend at most NUM directories.",
            "Limit the depth of directory traversal to NUM levels beyond \
             the paths given. A value of zero only searches the \
             starting-points themselves.\n\nFor example, \
             'rg --maxdepth 0 dir/' is a no-op because dir/ will not be \
             descended into. 'rg --maxdepth 1 dir/' will search only the \
             direct children of dir/."
        );
        doc!(
            h,
            "mmap",
            "Searching using memory maps when possible.",
            "Search using memory maps when possible. This is enabled by \
             default when ripgrep thinks it will be faster. Note that memory \
             map searching doesn't currently support all options, so if an \
             incompatible option (e.g., --context) is given with --mmap, \
             then memory maps will not be used."
        );
        doc!(
            h,
            "no-messages",
            "Suppress all error messages.",
            "Suppress all error messages. This is equivalent to redirecting \
             stderr to /dev/null."
        );
        doc!(
            h,
            "no-mmap",
            "Never use memory maps.",
            "Never use memory maps, even when they might be faster."
        );
        doc!(
            h,
            "no-ignore",
            "Don't respect ignore files.",
            "Don't respect ignore files (.gitignore, .ignore, etc.). This \
             implies --no-ignore-parent and --no-ignore-vcs."
        );
        doc!(
            h,
            "no-ignore-parent",
            "Don't respect ignore files in parent directories.",
            "Don't respect ignore files (.gitignore, .ignore, etc.) in \
             parent directories."
        );
        doc!(
            h,
            "no-ignore-vcs",
            "Don't respect VCS ignore files",
            "Don't respect version control ignore files (.gitignore, etc.). \
             This implies --no-ignore-parent. Note that .ignore files will \
             continue to be respected."
        );
        doc!(
            h,
            "null",
            "Print NUL byte after file names",
            "Whenever a file name is printed, follow it with a NUL byte. \
             This includes printing file names before matches, and when \
             printing a list of matching files such as with --count, \
             --files-with-matches and --files. This option is useful for use \
             with xargs."
        );
        doc!(
            h,
            "path-separator",
            "Path separator to use when printing file paths.",
            "The path separator to use when printing file paths. This \
             defaults to your platform's path separator, which is / on Unix \
             and \\ on Windows. This flag is intended for overriding the \
             default when the environment demands it (e.g., cygwin). A path \
             separator is limited to a single byte."
        );
        doc!(h, "pretty", "Alias for --color always --heading -n.");
        doc!(
            h,
            "replace",
            "Replace matches with string given.",
            "Replace every match with the string given when printing \
             results. Neither this flag nor any other flag will modify your \
             files.\n\nCapture group indices (e.g., $5) and names \
             (e.g., $foo) are supported in the replacement string.\n\n\
             Note that the replacement by default replaces each match, and \
             NOT the entire line. To replace the entire line, you should \
             match the entire line."
        );
        doc!(
            h,
            "case-sensitive",
            "Search case sensitively.",
            "Search case sensitively. This overrides -i/--ignore-case and \
             -S/--smart-case."
        );
        doc!(
            h,
            "smart-case",
            "Smart case search.",
            "Searches case insensitively if the pattern is all lowercase. \
             Search case sensitively otherwise. This is overridden by \
             either -s/--case-sensitive or -i/--ignore-case."
        );
        doc!(
            h,
            "sort-files",
            "Sort results by file path. Implies --threads=1.",
            "Sort results by file path. Note that this currently \
             disables all parallelism and runs search in a single thread."
        );
        doc!(
            h,
            "threads",
            "The approximate number of threads to use.",
            "The approximate number of threads to use. A value of 0 (which \
             is the default) causes ripgrep to choose the thread count \
             using heuristics."
        );
        doc!(
            h,
            "vimgrep",
            "Show results in vim compatible format.",
            "Show results with every match on its own line, including \
             line numbers and column numbers. With this option, a line with \
             more than one match will be printed more than once."
        );
        doc!(
            h,
            "type-add",
            "Add a new glob for a file type.",
            "Add a new glob for a particular file type. Only one glob can be \
             added at a time. Multiple --type-add flags can be provided. \
             Unless --type-clear is used, globs are added to any existing \
             globs defined inside of ripgrep.\n\nNote that this MUST be \
             passed to every invocation of ripgrep. Type settings are NOT \
             persisted.\n\nExample: \
             rg --type-add 'foo:*.foo' -tfoo PATTERN.\n\n\
             --type-add can also be used to include rules from other types \
             with the special include directive. The include directive \
             permits specifying one or more other type names (separated by a \
             comma) that have been defined and its rules will automatically \
             be imported into the type specified. For example, to create a \
             type called src that matches C++, Python and Markdown files, one \
             can use:\n\n\
             --type-add 'src:include:cpp,py,md'\n\n\
             Additional glob rules can still be added to the src type by \
             using the --type-add flag again:\n\n\
             --type-add 'src:include:cpp,py,md' --type-add 'src:*.foo'\n\n\
             Note that type names must consist only of Unicode letters or \
             numbers. Punctuation characters are not allowed."
        );
        doc!(
            h,
            "type-clear",
            "Clear globs for given file type.",
            "Clear the file type globs previously defined for TYPE. This \
             only clears the default type definitions that are found inside \
             of ripgrep.\n\nNote that this MUST be passed to every \
             invocation of ripgrep. Type settings are NOT persisted."
        );
        h
    };
}
/// Clap argument validator: accepts the value only when it parses as a `usize`.
fn validate_number(s: &str) -> Result<(), String> {
    match s.parse::<usize>() {
        Ok(_) => Ok(()),
        Err(err) => Err(err.to_string()),
    }
}
// Register every benchmark function defined above as one Criterion group.
criterion_group!(
    benches,
    build_rg_with_short_help,
    build_rg_with_long_help,
    write_rg_short_help,
    write_rg_long_help,
    parse_rg,
    parse_rg_with_complex,
    parse_rg_with_lots
);
// Expands into the `main` function that runs the group when the bench binary starts.
criterion_main!(benches);
| 46.937107 | 99 | 0.484479 |
0343fbf7cc54a41a503acaedcbb7cbb60234e542
| 6,358 |
// https://atcoder.jp/contests/abc244/tasks/abc244_b
#![allow(unused_macros, dead_code, unused_mut, unused_variables, non_snake_case, non_upper_case_globals)]
use std::io::{Read, Write};
use cp::{reader::Reader, writer::Writer};
#[macro_use]
pub mod cp {
#[macro_use]
pub mod reader {
use std::io::{BufRead, BufReader, Read};
use std::iter::Peekable;
use std::mem::transmute;
use std::str::{FromStr, SplitWhitespace};
#[derive(Debug)]
pub struct Reader<R: Read> {
pub reader: BufReader<R>,
tokens: Peekable<SplitWhitespace<'static>>,
line: Box<str>,
}
impl<R: Read> Reader<R> {
pub fn new(r: R) -> Reader<R> {
Reader {
tokens: "".split_whitespace().peekable(),
line: "".to_string().into_boxed_str(),
reader: BufReader::new(r),
}
}
/// read line if needed
fn prepare(&mut self) {
while self.tokens.peek().is_none() {
let mut line = String::new();
let n = self.reader.read_line(&mut line).unwrap();
if n == 0 { return; /* EOF */ }
self.line = line.into_boxed_str();
self.tokens = unsafe {
transmute::<_, &'static str>(&*self.line)
}.split_whitespace().peekable();
}
}
pub fn token<T: FromStr>(&mut self) -> T {
self.prepare();
self.tokens.next().unwrap().parse().ok().unwrap()
}
pub fn i(&mut self) -> i32 { self.token::<i32>() }
pub fn ii(&mut self) -> i64 { self.token::<i64>() }
pub fn f(&mut self) -> f32 { self.token::<f32>() }
pub fn us(&mut self) -> usize { self.token::<usize>() }
pub fn bytes(&mut self) -> Vec<u8> { self.token::<String>().into_bytes() }
}
macro_rules! r {
// read iter, e.g. re!(r, [i32;20]).collect::<HashSet<_>>()
($r:expr, [$type:ty; $len:expr]) => ((0..$len).map(|_| $r.token::<$type>()));
// read tuple, e.g. re!(r, usize, i32, String)
($r:expr, $($type:ty),+) => (($($r.token::<$type>()),+));
}
}
#[macro_use]
pub mod writer {
use std::fmt::Display;
use std::io::{BufWriter, Write};
//#region Writable
pub trait Writable<Mode> {
fn write_to<W: Write>(self, w: &mut W, sep: &str, end: &str);
}
#[non_exhaustive]
pub struct Slice;
#[non_exhaustive]
pub struct Many;
#[non_exhaustive]
pub struct One;
impl<T: Display> Writable<Slice> for &[T] {
fn write_to<W: Write>(self, w: &mut W, sep: &str, end: &str) {
self.iter().write_to(w, sep, end);
}
}
impl<I> Writable<Many> for I where I: Iterator, I::Item: Display {
fn write_to<W: Write>(mut self, w: &mut W, sep: &str, end: &str) {
if let Some(val) = self.next() {
write!(w, "{}", val).unwrap();
} else { return; }
self.for_each(|val| write!(w, "{}{}", sep, val).unwrap());
write!(w, "{}", end).unwrap();
}
}
impl<T: Display> Writable<One> for T {
fn write_to<W: Write>(self, w: &mut W, sep: &str, end: &str) {
write!(w, "{}{}", self, end).unwrap();
}
}
//#endregion Writable
//#region Writer
#[derive(Debug)]
pub struct Writer<W: Write> {
pub writer: BufWriter<W>,
}
impl<W: Write> Writer<W> {
pub fn new(w: W) -> Self {
Self {
writer: BufWriter::new(w),
}
}
pub fn y(&mut self, b: bool) {
self.writer.write_all((if b { "YES\n" } else { "NO\n" }).as_bytes()).unwrap();
}
pub fn w<M, T: Writable<M>>(&mut self, val: T) {
val.write_to(&mut self.writer, "", "");
}
pub fn n<M, T: Writable<M>>(&mut self, val: T) {
//! no sep, end with '\n'
val.write_to(&mut self.writer, "", "\n");
}
pub fn sn<M, T: Writable<M>>(&mut self, val: T) {
//! space sep, end with '\n'
val.write_to(&mut self.writer, " ", "\n");
}
pub fn wr<M, T: Writable<M>>(&mut self, val: T, sep: &str, end: &str) {
val.write_to(&mut self.writer, sep, end);
}
}
//#endregion Writer
macro_rules! wsn {
// write multiple vars, space sep, end with '\n'
($w:expr, $first:expr, $($val:expr),*) => {
$w.w($first);
($(write!($w.writer, " {}", $val).unwrap()),*);
$w.writer.write(&[b'\n']).unwrap();
};
}
macro_rules! wbn {
// write &[u8] consecutively (no sep) and end with '\n'
($w:expr, $($bytes:expr),*) => {
($($w.writer.write(&$bytes).unwrap()),*);
$w.writer.write(&[b'\n']).unwrap();
};
}
}
}
//#region constant
/// Offsets of the 8 neighbouring cells (orthogonal + diagonal) on a 2D grid.
const d8: [(i32, i32); 8] = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)];
//#endregion constant
/// ABC244 B: simulate a robot on the plane. `S` steps forward one unit in the
/// current heading, `R` rotates the heading clockwise; prints the final (x, y).
fn solve<R: Read, W: Write>(mut re: Reader<R>, mut wr: Writer<W>) {
    // Position (x, y) and heading index d: 0 => +x, 1 => -y, 2 => -x, 3 => +y.
    let (mut x, mut y, mut d) = (0, 0, 0);
    // The instruction count N is read but not needed (the string length suffices).
    re.us();
    re.bytes().iter().for_each(|&b| match b {
        b'S' => match d {
            0 => x += 1,
            1 => y -= 1,
            2 => x -= 1,
            3 => y += 1,
            _ => {}
        },
        b'R' => d = (d + 1) % 4,
        _ => {}
    });
    wsn!(wr,x,y);
}
/// Debug-build entry point: reads from `input.txt` for local testing.
#[cfg(debug_assertions)]
fn main() {
    use std::fs::File;
    solve(
        Reader::new(File::open("input.txt").unwrap()),
        Writer::new(std::io::stdout()),
        // Writer::new(File::create("output.txt").unwrap()),
    )
}
/// Release-build entry point: locks stdin/stdout once for fast judged I/O.
#[cfg(not(debug_assertions))]
fn main() {
    let (stdin, stdout) = (std::io::stdin(), std::io::stdout());
    solve(Reader::new(stdin.lock()), Writer::new(stdout.lock()));
}
| 32.111111 | 105 | 0.437244 |
feb63df62250194c303d563efcb47bf071b056ce
| 1,165 |
use std::fmt::{Debug, Display, Formatter};
/// Top-level error type of this crate.
#[derive(Debug, Clone)]
pub enum Error {
    // Constructed by `sc_table_corrupt`; `reason` describes the corruption.
    ScTableCorrupt { reason: ErrorStr },
    // Constructed by `sc_split_corrupt`; `reason` describes the corruption.
    ScSplitCorrupt { reason: ErrorStr },
    // An I/O failure: `reason` describes it, `file` names the affected path.
    IOError { reason: ErrorStr, file: String },
    // NOTE(review): presumably the operation first needs an "explode" step to
    // have been performed — confirm against the call sites.
    RequiresExplode
}
/// An error message that is either heap-allocated or a `'static` literal,
/// so constant messages avoid allocation.
#[derive(Debug, Clone)]
pub enum ErrorStr {
    Owned(String),
    StaticBorrow(&'static str)
}
impl From<String> for ErrorStr {
    fn from(s: String) -> Self {
        ErrorStr::Owned(s)
    }
}
impl From<&'static str> for ErrorStr {
    fn from(s: &'static str) -> Self {
        ErrorStr::StaticBorrow(s)
    }
}
impl Error {
    /// Builds an [`Error::ScTableCorrupt`] with the given reason.
    pub(crate) fn sc_table_corrupt(reason: ErrorStr) -> Self {
        Error::ScTableCorrupt { reason }
    }
    /// Builds an [`Error::ScSplitCorrupt`] with the given reason.
    pub(crate) fn sc_split_corrupt(reason: ErrorStr) -> Self {
        Error::ScSplitCorrupt { reason }
    }
    /// Builds an [`Error::IOError`] for the given file and reason.
    pub(crate) fn io_error(reason: ErrorStr, file: String) -> Self {
        Error::IOError { reason, file }
    }
    /// Builds an [`Error::RequiresExplode`].
    pub(crate) fn requires_explode() -> Self {
        Error::RequiresExplode
    }
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
unimplemented!()
}
}
impl std::error::Error for Error {}
| 21.574074 | 73 | 0.609442 |
09e9fbf41ed3c4228957add547afd0d1381498ff
| 2,279 |
use proc_macro_hack::proc_macro_hack;
mod connect;
mod impl_arguments_compatible;
mod impl_ptr_ops;
mod impl_q_byte_array;
mod impl_q_core_application;
mod impl_q_object;
mod impl_q_string;
mod q_box;
mod q_debug_shim;
mod q_flags;
mod q_message_logger_macros;
mod q_ptr;
pub use crate::connect::{ArgumentsCompatible, AsReceiver, Receiver, Signal};
pub use crate::impl_q_core_application::QCoreApplicationArgs;
pub use crate::impl_q_object::FindChildError;
pub use crate::impl_q_string::qs;
pub use crate::q_box::QBox;
pub use crate::q_debug_shim::{qdbg, QDebugShim};
pub use crate::q_flags::QFlags;
pub use crate::q_ptr::QPtr;
pub use qt_macros::slot;
/// Initializes Qt resources specified by the `.qrc` file with the specified base name.
///
/// This macro should be used in combination with `qt_ritual_build::add_resources`.
/// Call `add_resources` in the build script of your crate, then call the macro like this:
/// ```ignore
/// QGuiApplication::init(|_| unsafe {
/// q_init_resource!("resources");
/// //...
/// })
/// ```
/// The argument must be equal to the base name
/// ([file stem](https://doc.rust-lang.org/std/path/struct.Path.html#method.file_stem))
/// of the `.qrc` file. Special characters (such as '-')
/// have to be replaced by the underscore character (`'_'`).
///
/// This macro is semantically equivalent to the
/// [Q_INIT_RESOURCE](https://doc.qt.io/qt-5/qdir.html#Q_INIT_RESOURCE) C++ macro.
///
/// [C++ documentation](https://doc.qt.io/qt-5/qdir.html#Q_INIT_RESOURCE):
/// <div style='border: 1px solid #5CFF95; background: #D6FFE4; padding: 16px;'>
/// <p>Initializes the resources specified by the .qrc file with the specified base name.
/// Normally, when resources are built as part of the application, the resources are loaded
/// automatically at startup. The <code>Q_INIT_RESOURCE()</code> macro is necessary
/// on some platforms for resources stored in a static library.</p>
/// <p>For example, if your application's resources are listed in a file called
/// <code>myapp.qrc</code>, you can ensure that the resources are initialized at startup
/// by adding this line to your <code>main()</code> function:</p>
/// <pre>
/// Q_INIT_RESOURCE(myapp);
/// </pre>
/// </div>
#[proc_macro_hack]
pub use qt_macros::q_init_resource;
| 37.983333 | 91 | 0.730145 |
1c29b1fac1b946d43b873f44820d72664104ec16
| 316 |
#![allow(dead_code)]
mod day01;
mod day02;
mod day03;
mod day04;
mod day05;
mod day06;
mod day07;
mod day08;
mod day09;
mod day10;
mod day11;
mod day12;
mod day13;
mod day14;
mod day15;
mod day16;
mod day17;
mod day18;
mod day21;
mod day22;
mod day23;
mod day24;
mod day25;
mod utils;
/// Entry point: runs the solution for the selected day (currently day 25).
fn main() {
    day25::run();
}
| 10.193548 | 20 | 0.696203 |
f7fccd2c22b49b64e07f0bed5a7d5771585fcffc
| 13,181 |
/// Builds an ANSI escape sequence at compile time by prefixing the given
/// literal pieces with the CSI introducer (`ESC [`).
#[macro_export]
#[doc(hidden)]
macro_rules! csi {
    ($( $piece:expr ),*) => { concat!("\x1B[", $( $piece ),*) };
}
/// Writes an ansi code to the given writer.
#[doc(hidden)]
#[macro_export]
macro_rules! write_ansi_code {
    ($writer:expr, $ansi_code:expr) => {{
        use std::io::{self, ErrorKind};
        // Any formatting failure is surfaced as the crate's `IoError` variant.
        write!($writer, "{}", $ansi_code)
            .map_err(|e| io::Error::new(ErrorKind::Other, e))
            .map_err($crate::ErrorKind::IoError)
    }};
}
/// Writes/executes the given command.
#[doc(hidden)]
#[macro_export]
macro_rules! handle_command {
    ($writer:expr, $command:expr) => {{
        // Silences the unused-import warning when the macro is used inside the `command` module
        #[allow(unused_imports)]
        use $crate::{write_ansi_code, Command};
        #[cfg(windows)]
        {
            let command = $command;
            // Windows 10+ understands ANSI; older versions fall back to WinAPI.
            if command.is_ansi_code_supported() {
                write_ansi_code!($writer, command.ansi_code())
            } else {
                command.execute_winapi().map_err($crate::ErrorKind::from)
            }
        }
        #[cfg(unix)]
        {
            // Unix terminals always accept ANSI escape sequences.
            write_ansi_code!($writer, $command.ansi_code())
        }
    }};
}
/// Queues one or more command(s) for further execution.
///
/// Queued commands must be flushed to the underlying device to be executed.
/// This generally happens in the following cases:
///
/// * When `flush` is called manually on the given type implementing `io::Write`.
/// * The terminal will `flush` automatically if the buffer is full.
/// * Each line is flushed in case of `stdout`, because it is line buffered.
///
/// # Arguments
///
/// - [std::io::Writer](https://doc.rust-lang.org/std/io/trait.Write.html)
///
///     ANSI escape codes are written on the given 'writer', after which they are flushed.
///
/// - [Command](./trait.Command.html)
///
///     One or more commands
///
/// # Examples
///
/// ```rust
/// use std::io::{Write, stdout};
/// use crossterm::{queue, style::Print};
///
/// fn main() {
///     let mut stdout = stdout();
///
///     // `Print` will be executed when `flush` is called.
///     queue!(stdout, Print("foo".to_string()));
///
///     // some other code (no execution happening here) ...
///
///     // when calling `flush` on `stdout`, all commands will be written to the stdout and therefore executed.
///     stdout.flush();
///
///     // ==== Output ====
///     // foo
/// }
/// ```
///
/// Have a look over at the [Command API](./#command-api) for more details.
///
/// # Notes
///
/// In case of Windows versions lower than 10, a direct WinApi call will be made.
/// The reason for this is that Windows versions lower than 10 do not support ANSI codes,
/// and can therefore not be written to the given `writer`.
/// Therefore, there is no difference between [execute](macro.execute.html)
/// and [queue](macro.queue.html) for those old Windows versions.
///
#[macro_export]
macro_rules! queue {
    ($writer:expr $(, $command:expr)* $(,)?) => {
        // Chain the commands left to right, short-circuiting on the first error.
        Ok(()) $(
            .and_then(|()| $crate::handle_command!($writer, $command))
        )*
    }
}
/// Executes one or more command(s).
///
/// # Arguments
///
/// - [std::io::Writer](https://doc.rust-lang.org/std/io/trait.Write.html)
///
///     ANSI escape codes are written on the given 'writer', after which they are flushed.
///
/// - [Command](./trait.Command.html)
///
///     One or more commands
///
/// # Examples
///
/// ```rust
/// use std::io::{Write, stdout};
/// use crossterm::{execute, style::Print};
///
/// fn main() {
///     // will be executed directly
///     execute!(stdout(), Print("sum:\n".to_string()));
///
///     // will be executed directly
///     execute!(stdout(), Print("1 + 1= ".to_string()), Print((1+1).to_string()));
///
///     // ==== Output ====
///     // sum:
///     // 1 + 1 = 2
/// }
/// ```
///
/// Have a look over at the [Command API](./#command-api) for more details.
///
/// # Notes
///
/// * In the case of UNIX and Windows 10, ANSI codes are written to the given 'writer'.
/// * In case of Windows versions lower than 10, a direct WinApi call will be made.
/// The reason for this is that Windows versions lower than 10 do not support ANSI codes,
/// and can therefore not be written to the given `writer`.
/// Therefore, there is no difference between [execute](macro.execute.html)
/// and [queue](macro.queue.html) for those old Windows versions.
#[macro_export]
macro_rules! execute {
    ($writer:expr $(, $command:expr)* $(,)? ) => {
        // Queue each command, then flush the writer so they take effect immediately.
        $crate::queue!($writer $(, $command)*).and_then(|()| {
            $writer.flush().map_err($crate::ErrorKind::IoError)
        })
    }
}
// Implements `Display` for command types by routing their ANSI sequence
// through `queue!`, so `write!(f, "{}", cmd)` emits the escape code.
#[doc(hidden)]
#[macro_export]
macro_rules! impl_display {
    (for $($t:ty),+) => {
        $(impl ::std::fmt::Display for $t {
            fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::result::Result<(), ::std::fmt::Error> {
                $crate::queue!(f, self).map_err(|_| ::std::fmt::Error)
            }
        })*
    }
}
// Generates a `From<$from> for ErrorKind` impl that wraps the source error
// with the given variant constructor `$to`.
#[doc(hidden)]
#[macro_export]
macro_rules! impl_from {
    ($from:path, $to:expr) => {
        impl From<$from> for ErrorKind {
            fn from(e: $from) -> Self {
                $to(e)
            }
        }
    };
}
#[cfg(test)]
mod tests {
use std::io;
use std::str;
// Helper for execute tests to confirm flush
#[derive(Default, Debug, Clone)]
pub(self) struct FakeWrite {
buffer: String,
flushed: bool,
}
impl io::Write for FakeWrite {
fn write(&mut self, content: &[u8]) -> io::Result<usize> {
let content = str::from_utf8(content)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
self.buffer.push_str(content);
self.flushed = false;
Ok(content.len())
}
fn flush(&mut self) -> io::Result<()> {
self.flushed = true;
Ok(())
}
}
    // On unix every command is rendered as ANSI text into the writer, so these
    // tests assert on the captured buffer and on the flush flag.
    #[cfg(not(windows))]
    mod unix {
        use std::io::Write;
        use super::FakeWrite;
        use crate::command::Command;
        // Minimal command whose ANSI representation is the literal "cmd".
        pub struct FakeCommand;
        impl Command for FakeCommand {
            type AnsiType = &'static str;
            fn ansi_code(&self) -> Self::AnsiType {
                "cmd"
            }
        }
        #[test]
        fn test_queue_one() {
            let mut result = FakeWrite::default();
            queue!(&mut result, FakeCommand).unwrap();
            assert_eq!(&result.buffer, "cmd");
            assert!(!result.flushed);
        }
        #[test]
        fn test_queue_many() {
            let mut result = FakeWrite::default();
            queue!(&mut result, FakeCommand, FakeCommand).unwrap();
            assert_eq!(&result.buffer, "cmdcmd");
            assert!(!result.flushed);
        }
        #[test]
        fn test_queue_trailing_comma() {
            let mut result = FakeWrite::default();
            queue!(&mut result, FakeCommand, FakeCommand,).unwrap();
            assert_eq!(&result.buffer, "cmdcmd");
            assert!(!result.flushed);
        }
        #[test]
        fn test_execute_one() {
            let mut result = FakeWrite::default();
            execute!(&mut result, FakeCommand).unwrap();
            assert_eq!(&result.buffer, "cmd");
            assert!(result.flushed);
        }
        #[test]
        fn test_execute_many() {
            let mut result = FakeWrite::default();
            execute!(&mut result, FakeCommand, FakeCommand).unwrap();
            assert_eq!(&result.buffer, "cmdcmd");
            assert!(result.flushed);
        }
        #[test]
        fn test_execute_trailing_comma() {
            let mut result = FakeWrite::default();
            execute!(&mut result, FakeCommand, FakeCommand,).unwrap();
            assert_eq!(&result.buffer, "cmdcmd");
            assert!(result.flushed);
        }
    }
#[cfg(windows)]
mod windows {
use std::cell::RefCell;
use std::fmt::Debug;
use std::io::Write;
use super::FakeWrite;
use crate::command::Command;
use crate::error::Result as CrosstermResult;
// We need to test two different APIs: winapi and the write api. We
// don't know until runtime which we're supporting (via
// Command::is_ansi_code_supported), so we have to test them both. The
// CI environment hopefully includes both versions of windows.
// WindowsEventStream is a place for execute_winapi to push strings,
// when called.
type WindowsEventStream = Vec<&'static str>;
struct FakeCommand<'a> {
// Need to use a refcell because we want execute_winapi to be able
// push to the vector, but execute_winapi take &self.
stream: RefCell<&'a mut WindowsEventStream>,
value: &'static str,
}
impl<'a> FakeCommand<'a> {
fn new(stream: &'a mut WindowsEventStream, value: &'static str) -> Self {
Self {
value,
stream: RefCell::new(stream),
}
}
}
impl<'a> Command for FakeCommand<'a> {
type AnsiType = &'static str;
fn ansi_code(&self) -> Self::AnsiType {
self.value
}
fn execute_winapi(&self) -> CrosstermResult<()> {
self.stream.borrow_mut().push(self.value);
Ok(())
}
}
// Helper function for running tests against either winapi or an
// io::Write.
//
// This function will execute the `test` function, which should
// queue some commands against the given FakeWrite and
// WindowsEventStream. It will then test that the correct data sink
// was populated. It does not currently check is_ansi_code_supported;
// for now it simply checks that one of the two streams was correctly
// populated.
//
// If the stream was populated, it tests that the two arrays are equal.
// If the writer was populated, it tests that the contents of the
// write buffer are equal to the concatenation of `stream_result`.
fn test_harness<E: Debug>(
stream_result: &[&'static str],
test: impl FnOnce(&mut FakeWrite, &mut WindowsEventStream) -> Result<(), E>,
) {
let mut stream = WindowsEventStream::default();
let mut writer = FakeWrite::default();
if let Err(err) = test(&mut writer, &mut stream) {
panic!("Error returned from test function: {:?}", err);
}
// We need this for type inference, for whatever reason.
const EMPTY_RESULT: [&'static str; 0] = [];
// TODO: confirm that the correct sink was used, based on
// is_ansi_code_supported
match (writer.buffer.is_empty(), stream.is_empty()) {
(true, true) if stream_result == &EMPTY_RESULT => {}
(true, true) => panic!(
"Neither the event stream nor the writer were populated. Expected {:?}",
stream_result
),
// writer is populated
(false, true) => {
// Concat the stream result to find the string result
let result: String = stream_result.iter().copied().collect();
assert_eq!(result, writer.buffer);
assert_eq!(&stream, &EMPTY_RESULT);
}
// stream is populated
(true, false) => {
assert_eq!(stream, stream_result);
assert_eq!(writer.buffer, "");
}
// Both are populated
(false, false) => panic!(
"Both the writer and the event stream were written to.\n\
Only one should be used, based on is_ansi_code_supported.\n\
stream: {stream:?}\n\
writer: {writer:?}",
stream = stream,
writer = writer,
),
}
}
#[test]
fn test_queue_one() {
test_harness(&["cmd1"], |writer, stream| {
queue!(writer, FakeCommand::new(stream, "cmd1"))
})
}
#[test]
fn test_queue_some() {
test_harness(&["cmd1", "cmd2"], |writer, stream| {
queue!(
writer,
FakeCommand::new(stream, "cmd1"),
FakeCommand::new(stream, "cmd2"),
)
})
}
#[test]
fn test_many_queues() {
test_harness(&["cmd1", "cmd2", "cmd3"], |writer, stream| {
queue!(writer, FakeCommand::new(stream, "cmd1"))?;
queue!(writer, FakeCommand::new(stream, "cmd2"))?;
queue!(writer, FakeCommand::new(stream, "cmd3"))
})
}
}
}
| 32.07056 | 111 | 0.534254 |
fec9e3fadbbd9a1987297062625ee6499dd4be34
| 4,136 |
use crate::{Instrument, Instrumented, WithDispatch};
use core::future::Future;
use futures_core_preview::{
    future::{FutureObj, LocalFutureObj},
    task::{LocalSpawn, Spawn, SpawnError},
};
impl<T> Spawn for Instrumented<T>
where
    T: Spawn,
{
    /// Spawns a future that will be run to completion.
    ///
    /// The future is instrumented with this spawner's span before being
    /// handed to the inner executor, so the span is entered on every poll.
    ///
    /// # Errors
    ///
    /// The executor may be unable to spawn tasks. Spawn errors should
    /// represent relatively rare scenarios, such as the executor
    /// having been shut down so that it is no longer able to accept
    /// tasks.
    fn spawn_obj(&mut self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
        let future = future.instrument(self.span.clone());
        // `spawn_obj` takes a `FutureObj`, not a `Pin<Box<_>>`; convert the
        // boxed, instrumented future via `FutureObj: From<Pin<Box<F>>>`.
        self.inner.spawn_obj(Box::pin(future).into())
    }
    /// Determines whether the executor is able to spawn new tasks.
    ///
    /// This method will return `Ok` when the executor is *likely*
    /// (but not guaranteed) to accept a subsequent spawn attempt.
    /// Likewise, an `Err` return means that `spawn` is likely, but
    /// not guaranteed, to yield an error.
    #[inline]
    fn status(&self) -> Result<(), SpawnError> {
        self.inner.status()
    }
}
impl<T> Spawn for WithDispatch<T>
where
    T: Spawn,
{
    /// Spawns a future that will be run to completion.
    ///
    /// The future is wrapped with this spawner's dispatcher before being
    /// handed to the inner executor.
    ///
    /// # Errors
    ///
    /// The executor may be unable to spawn tasks. Spawn errors should
    /// represent relatively rare scenarios, such as the executor
    /// having been shut down so that it is no longer able to accept
    /// tasks.
    fn spawn_obj(&mut self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
        // Re-wrap the dispatched future as a `FutureObj` (the type the inner
        // spawner expects) via `FutureObj: From<Pin<Box<F>>>`.
        self.inner
            .spawn_obj(Box::pin(self.with_dispatch(future)).into())
    }
    /// Determines whether the executor is able to spawn new tasks.
    ///
    /// This method will return `Ok` when the executor is *likely*
    /// (but not guaranteed) to accept a subsequent spawn attempt.
    /// Likewise, an `Err` return means that `spawn` is likely, but
    /// not guaranteed, to yield an error.
    #[inline]
    fn status(&self) -> Result<(), SpawnError> {
        self.inner.status()
    }
}
impl<T> LocalSpawn for Instrumented<T>
where
    T: LocalSpawn,
{
    /// Spawns a future that will be run to completion.
    ///
    /// The future is instrumented with this spawner's span before being
    /// handed to the inner executor, so the span is entered on every poll.
    ///
    /// # Errors
    ///
    /// The executor may be unable to spawn tasks. Spawn errors should
    /// represent relatively rare scenarios, such as the executor
    /// having been shut down so that it is no longer able to accept
    /// tasks.
    fn spawn_local_obj(&mut self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
        let future = future.instrument(self.span.clone());
        // Convert the boxed, instrumented future back into the
        // `LocalFutureObj` the inner spawner expects.
        self.inner.spawn_local_obj(Box::pin(future).into())
    }
    /// Determines whether the executor is able to spawn new tasks.
    ///
    /// This method will return `Ok` when the executor is *likely*
    /// (but not guaranteed) to accept a subsequent spawn attempt.
    /// Likewise, an `Err` return means that `spawn` is likely, but
    /// not guaranteed, to yield an error.
    #[inline]
    fn status_local(&self) -> Result<(), SpawnError> {
        self.inner.status_local()
    }
}
impl<T> LocalSpawn for WithDispatch<T>
where
    // The body calls `spawn_local_obj` and `status_local`, which are
    // `LocalSpawn` methods; the previous `T: Spawn` bound could not supply them.
    T: LocalSpawn,
{
    /// Spawns a future that will be run to completion.
    ///
    /// The future is wrapped with this spawner's dispatcher before being
    /// handed to the inner executor.
    ///
    /// # Errors
    ///
    /// The executor may be unable to spawn tasks. Spawn errors should
    /// represent relatively rare scenarios, such as the executor
    /// having been shut down so that it is no longer able to accept
    /// tasks.
    fn spawn_local_obj(&mut self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
        // Re-wrap the dispatched future as the `LocalFutureObj` the inner
        // spawner expects, via `LocalFutureObj: From<Pin<Box<F>>>`.
        self.inner
            .spawn_local_obj(Box::pin(self.with_dispatch(future)).into())
    }
    /// Determines whether the executor is able to spawn new tasks.
    ///
    /// This method will return `Ok` when the executor is *likely*
    /// (but not guaranteed) to accept a subsequent spawn attempt.
    /// Likewise, an `Err` return means that `spawn` is likely, but
    /// not guaranteed, to yield an error.
    #[inline]
    fn status_local(&self) -> Result<(), SpawnError> {
        self.inner.status_local()
    }
}
| 33.901639 | 98 | 0.636122 |
b9ef071d7515434fb4c40cb262875fa7020b4858
| 405 |
/// Conversion between a value and an integer class label.
pub trait Classifiable {
    /// The class label this value belongs to.
    fn classify_as(&self) -> i32;
    /// A canonical value carrying the given class label.
    fn default_classifiable_node(class: i32) -> Self;
}

/// Plain integers act as their own class label.
impl Classifiable for i32 {
    fn classify_as(&self) -> i32 {
        *self
    }
    fn default_classifiable_node(class: i32) -> Self {
        class
    }
}

/// `(payload, label)` pairs classify by their second component; the default
/// node uses `'z'` as the placeholder payload.
impl Classifiable for (char, i32) {
    fn classify_as(&self) -> i32 {
        let (_, label) = *self;
        label
    }
    fn default_classifiable_node(class: i32) -> Self {
        ('z', class)
    }
}
| 16.875 | 59 | 0.659259 |
f5bf73b4300acc0955c78c1246c9194b91d2c648
| 17,219 |
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A pass that promotes borrows of constant rvalues.
//!
//! The rvalues considered constant are trees of temps,
//! each with exactly one initialization, and holding
//! a constant value with no interior mutability.
//! They are placed into a new MIR constant body in
//! `promoted` and the borrow rvalue is replaced with
//! a `Literal::Promoted` using the index into `promoted`
//! of that constant MIR.
//!
//! This pass assumes that every use is dominated by an
//! initialization and can otherwise silence errors, if
//! move analysis runs after promotion on broken MIR.
use rustc::mir::*;
use rustc::mir::visit::{PlaceContext, MutatingUseContext, MutVisitor, Visitor};
use rustc::mir::traversal::ReversePostorder;
use rustc::ty::TyCtxt;
use syntax_pos::Span;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use std::{iter, mem, usize};
/// State of a temporary during collection and promotion.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum TempState {
    /// No references to this temp.
    Undefined,
    /// One direct assignment and any number of direct uses.
    /// A borrow of this temp is promotable if the assigned
    /// value is qualified as constant.
    Defined {
        // MIR location of the single initializing statement or terminator.
        location: Location,
        // Number of allowed (borrow / non-mutating) uses seen after the definition.
        uses: usize
    },
    /// Any other combination of assignments/uses.
    Unpromotable,
    /// This temp was part of an rvalue which got extracted
    /// during promotion and needs cleanup.
    PromotedOut
}
impl TempState {
    /// A temp is promotable once it has its single definition and at least
    /// one recorded use.
    pub fn is_promotable(&self) -> bool {
        debug!("is_promotable: self={:?}", self);
        match *self {
            TempState::Defined { uses, .. } => uses > 0,
            _ => false,
        }
    }
}
/// A "root candidate" for promotion, which will become the
/// returned value in a promoted MIR, unless it's a subset
/// of a larger candidate.
#[derive(Debug)]
pub enum Candidate {
    /// Borrow of a constant temporary.
    // The `Location` is where the borrow rvalue occurs in the source MIR.
    Ref(Location),
    /// Currently applied to function calls where the callee has the unstable
    /// `#[rustc_args_required_const]` attribute as well as the SIMD shuffle
    /// intrinsic. The intrinsic requires the arguments are indeed constant and
    /// the attribute currently provides the semantic requirement that arguments
    /// must be constant.
    Argument { bb: BasicBlock, index: usize },
}
/// MIR visitor that classifies every temporary into a `TempState`.
struct TempCollector<'tcx> {
    // Per-local state, indexed by `Local`; starts at `Undefined`.
    temps: IndexVec<Local, TempState>,
    // Span of the source info most recently visited.
    span: Span,
    mir: &'tcx Mir<'tcx>,
}
impl<'tcx> Visitor<'tcx> for TempCollector<'tcx> {
    // State machine per temp: Undefined --(first assignment)--> Defined,
    // Defined --(allowed use)--> Defined with uses+1; anything else
    // (second assignment, disallowed use) makes the temp Unpromotable.
    fn visit_local(&mut self,
                   &index: &Local,
                   context: PlaceContext<'tcx>,
                   location: Location) {
        debug!("visit_local: index={:?} context={:?} location={:?}", index, context, location);
        // We're only interested in temporaries
        if self.mir.local_kind(index) != LocalKind::Temp {
            return;
        }
        // Ignore drops, if the temp gets promoted,
        // then it's constant and thus drop is noop.
        // Non-uses are also irrelevent.
        if context.is_drop() || !context.is_use() {
            debug!(
                "visit_local: context.is_drop={:?} context.is_use={:?}",
                context.is_drop(), context.is_use(),
            );
            return;
        }
        let temp = &mut self.temps[index];
        debug!("visit_local: temp={:?}", temp);
        if *temp == TempState::Undefined {
            match context {
                // Only these contexts count as the temp's single definition.
                PlaceContext::MutatingUse(MutatingUseContext::Store) |
                PlaceContext::MutatingUse(MutatingUseContext::AsmOutput) |
                PlaceContext::MutatingUse(MutatingUseContext::Call) => {
                    *temp = TempState::Defined {
                        location,
                        uses: 0
                    };
                    return;
                }
                _ => { /* mark as unpromotable below */ }
            }
        } else if let TempState::Defined { ref mut uses, .. } = *temp {
            // We always allow borrows, even mutable ones, as we need
            // to promote mutable borrows of some ZSTs e.g., `&mut []`.
            let allowed_use = context.is_borrow() || context.is_nonmutating_use();
            debug!("visit_local: allowed_use={:?}", allowed_use);
            if allowed_use {
                *uses += 1;
                return;
            }
            /* mark as unpromotable below */
        }
        *temp = TempState::Unpromotable;
    }
    fn visit_source_info(&mut self, source_info: &SourceInfo) {
        // Track the current span (presumably for diagnostics — the field is
        // consumed outside this impl).
        self.span = source_info.span;
    }
}
/// Walks the MIR in reverse postorder and classifies each temporary into a
/// `TempState`, which later drives promotion decisions.
pub fn collect_temps(mir: &Mir, rpo: &mut ReversePostorder) -> IndexVec<Local, TempState> {
    let mut collector = TempCollector {
        mir,
        span: mir.span,
        temps: IndexVec::from_elem(TempState::Undefined, &mir.local_decls),
    };
    for (block, data) in rpo {
        collector.visit_basic_block_data(block, data);
    }
    collector.temps
}
/// Moves (or copies) the defining statements of promotable temps out of the
/// source MIR and into a freshly built promoted MIR body.
struct Promoter<'a, 'tcx: 'a> {
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    // MIR being promoted *from*; statements may be replaced with no-ops here.
    source: &'a mut Mir<'tcx>,
    // MIR being promoted *into*.
    promoted: Mir<'tcx>,
    temps: &'a mut IndexVec<Local, TempState>,
    /// If true, all nested temps are also kept in the
    /// source MIR, not moved to the promoted MIR.
    keep_original: bool
}
impl<'a, 'tcx> Promoter<'a, 'tcx> {
    /// Appends a fresh, empty basic block (terminated by `Return`) to the
    /// promoted MIR and returns its index.
    fn new_block(&mut self) -> BasicBlock {
        let span = self.promoted.span;
        self.promoted.basic_blocks_mut().push(BasicBlockData {
            statements: vec![],
            terminator: Some(Terminator {
                source_info: SourceInfo {
                    span,
                    scope: OUTERMOST_SOURCE_SCOPE
                },
                kind: TerminatorKind::Return
            }),
            is_cleanup: false
        })
    }
    /// Pushes `dest = rvalue` onto the last basic block of the promoted MIR.
    fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) {
        let last = self.promoted.basic_blocks().last().unwrap();
        let data = &mut self.promoted[last];
        data.statements.push(Statement {
            source_info: SourceInfo {
                span,
                scope: OUTERMOST_SOURCE_SCOPE
            },
            kind: StatementKind::Assign(Place::Local(dest), box rvalue)
        });
    }
    /// Copy the initialization of this temp to the
    /// promoted MIR, recursing through temps.
    fn promote_temp(&mut self, temp: Local) -> Local {
        let old_keep_original = self.keep_original;
        let loc = match self.temps[temp] {
            TempState::Defined { location, uses } if uses > 0 => {
                // More than one use: the original definition must stay in the
                // source MIR, since other uses still refer to it.
                if uses > 1 {
                    self.keep_original = true;
                }
                location
            }
            state => {
                span_bug!(self.promoted.span, "{:?} not promotable: {:?}",
                          temp, state);
            }
        };
        if !self.keep_original {
            self.temps[temp] = TempState::PromotedOut;
        }
        let no_stmts = self.source[loc.block].statements.len();
        // Fresh local in the promoted MIR mirroring the source temp's type/span.
        let new_temp = self.promoted.local_decls.push(
            LocalDecl::new_temp(self.source.local_decls[temp].ty,
                                self.source.local_decls[temp].source_info.span));
        debug!("promote({:?} @ {:?}/{:?}, {:?})",
               temp, loc, no_stmts, self.keep_original);
        // First, take the Rvalue or Call out of the source MIR,
        // or duplicate it, depending on keep_original.
        if loc.statement_index < no_stmts {
            // The temp is defined by a statement (an assignment).
            let (rvalue, source_info) = {
                let statement = &mut self.source[loc.block].statements[loc.statement_index];
                let rhs = match statement.kind {
                    StatementKind::Assign(_, ref mut rhs) => rhs,
                    _ => {
                        span_bug!(statement.source_info.span, "{:?} is not an assignment",
                                  statement);
                    }
                };
                (if self.keep_original {
                    rhs.clone()
                } else {
                    // Replace the moved-out rvalue with a unit aggregate no-op.
                    let unit = box Rvalue::Aggregate(box AggregateKind::Tuple, vec![]);
                    mem::replace(rhs, unit)
                }, statement.source_info)
            };
            let mut rvalue = *rvalue;
            // Recursively promote any temps the rvalue itself refers to.
            self.visit_rvalue(&mut rvalue, loc);
            self.assign(new_temp, rvalue, source_info.span);
        } else {
            // The temp is defined by the block's terminator (a call).
            let terminator = if self.keep_original {
                self.source[loc.block].terminator().clone()
            } else {
                let terminator = self.source[loc.block].terminator_mut();
                let target = match terminator.kind {
                    TerminatorKind::Call { destination: Some((_, target)), .. } => target,
                    ref kind => {
                        span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
                    }
                };
                // Steal the call and leave a plain `Goto` to its old target.
                Terminator {
                    source_info: terminator.source_info,
                    kind: mem::replace(&mut terminator.kind, TerminatorKind::Goto {
                        target,
                    })
                }
            };
            match terminator.kind {
                TerminatorKind::Call { mut func, mut args, from_hir_call, .. } => {
                    // Recursively promote temps referenced by callee/arguments.
                    self.visit_operand(&mut func, loc);
                    for arg in &mut args {
                        self.visit_operand(arg, loc);
                    }
                    let last = self.promoted.basic_blocks().last().unwrap();
                    let new_target = self.new_block();
                    // Re-emit the call in the promoted MIR, writing its result
                    // into `new_temp` and continuing at the fresh block.
                    *self.promoted[last].terminator_mut() = Terminator {
                        kind: TerminatorKind::Call {
                            func,
                            args,
                            cleanup: None,
                            destination: Some((Place::Local(new_temp), new_target)),
                            from_hir_call,
                        },
                        ..terminator
                    };
                }
                ref kind => {
                    span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
                }
            };
        };
        self.keep_original = old_keep_original;
        new_temp
    }
fn promote_candidate(mut self, candidate: Candidate) {
let mut operand = {
let promoted = &mut self.promoted;
let promoted_id = Promoted::new(self.source.promoted.len());
let mut promoted_place = |ty, span| {
promoted.span = span;
promoted.local_decls[RETURN_PLACE] =
LocalDecl::new_return_place(ty, span);
Place::Promoted(box (promoted_id, ty))
};
let (blocks, local_decls) = self.source.basic_blocks_and_local_decls_mut();
match candidate {
Candidate::Ref(loc) => {
let ref mut statement = blocks[loc.block].statements[loc.statement_index];
match statement.kind {
StatementKind::Assign(_, box Rvalue::Ref(_, _, ref mut place)) => {
// Find the underlying local for this (necessarily interior) borrow.
let mut place = place;
while let Place::Projection(ref mut proj) = *place {
assert_ne!(proj.elem, ProjectionElem::Deref);
place = &mut proj.base;
};
let ty = place.ty(local_decls, self.tcx).to_ty(self.tcx);
let span = statement.source_info.span;
Operand::Move(mem::replace(place, promoted_place(ty, span)))
}
_ => bug!()
}
}
Candidate::Argument { bb, index } => {
let terminator = blocks[bb].terminator_mut();
match terminator.kind {
TerminatorKind::Call { ref mut args, .. } => {
let ty = args[index].ty(local_decls, self.tcx);
let span = terminator.source_info.span;
let operand = Operand::Copy(promoted_place(ty, span));
mem::replace(&mut args[index], operand)
}
// We expected a `TerminatorKind::Call` for which we'd like to promote an
// argument. `qualify_consts` saw a `TerminatorKind::Call` here, but
// we are seeing a `Goto`. That means that the `promote_temps` method
// already promoted this call away entirely. This case occurs when calling
// a function requiring a constant argument and as that constant value
// providing a value whose computation contains another call to a function
// requiring a constant argument.
TerminatorKind::Goto { .. } => return,
_ => bug!()
}
}
}
};
assert_eq!(self.new_block(), START_BLOCK);
self.visit_operand(&mut operand, Location {
block: BasicBlock::new(0),
statement_index: usize::MAX
});
let span = self.promoted.span;
self.assign(RETURN_PLACE, Rvalue::Use(operand), span);
self.source.promoted.push(self.promoted);
}
}
/// Replaces all temporaries with their promoted counterparts.
impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
    fn visit_local(&mut self,
                   local: &mut Local,
                   _: PlaceContext<'tcx>,
                   _: Location) {
        // Only temps get promoted; arguments, user variables and the return
        // place are left untouched.
        match self.source.local_kind(*local) {
            LocalKind::Temp => *local = self.promote_temp(*local),
            _ => {}
        }
    }
}
/// Promotes every candidate in `candidates` into its own promoted MIR body
/// appended to `mir.promoted`, then cleans up the source MIR by removing
/// assignments to, and drops of, the promoted-out temps.
pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>,
                                    tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                    mut temps: IndexVec<Local, TempState>,
                                    candidates: Vec<Candidate>) {
    // Visit candidates in reverse, in case they're nested.
    debug!("promote_candidates({:?})", candidates);
    for candidate in candidates.into_iter().rev() {
        match candidate {
            Candidate::Ref(Location { block, statement_index }) => {
                match mir[block].statements[statement_index].kind {
                    StatementKind::Assign(Place::Local(local), _) => {
                        if temps[local] == TempState::PromotedOut {
                            // Already promoted.
                            continue;
                        }
                    }
                    _ => {}
                }
            }
            Candidate::Argument { .. } => {}
        }
        // Declare return place local so that `Mir::new` doesn't complain.
        let initial_locals = iter::once(
            LocalDecl::new_return_place(tcx.types.never, mir.span)
        ).collect();
        // Each candidate gets a fresh, empty promoted body; `promote_candidate`
        // fills it in and pushes it onto `mir.promoted`.
        let promoter = Promoter {
            promoted: Mir::new(
                IndexVec::new(),
                // FIXME: maybe try to filter this to avoid blowing up
                // memory usage?
                mir.source_scopes.clone(),
                mir.source_scope_local_data.clone(),
                IndexVec::new(),
                None,
                initial_locals,
                0,
                vec![],
                mir.span,
                vec![],
            ),
            tcx,
            source: mir,
            temps: &mut temps,
            keep_original: false
        };
        promoter.promote_candidate(candidate);
    }
    // Eliminate assignments to, and drops of promoted temps.
    let promoted = |index: Local| temps[index] == TempState::PromotedOut;
    for block in mir.basic_blocks_mut() {
        block.statements.retain(|statement| {
            match statement.kind {
                StatementKind::Assign(Place::Local(index), _) |
                StatementKind::StorageLive(index) |
                StatementKind::StorageDead(index) => {
                    !promoted(index)
                }
                _ => true
            }
        });
        let terminator = block.terminator_mut();
        match terminator.kind {
            TerminatorKind::Drop { location: Place::Local(index), target, .. } => {
                if promoted(index) {
                    // A promoted temp no longer needs dropping here; fall
                    // through to the drop's target instead.
                    terminator.kind = TerminatorKind::Goto {
                        target,
                    };
                }
            }
            _ => {}
        }
    }
}
| 38.095133 | 98 | 0.515361 |
dd0a1e2ac0c874387f6b9c0d263670b3cfcda79b
| 7,347 |
use super::types::*;
use crate::blocking::StudioClient;
use crate::shared::{FetchResponse, GraphRef, Sdl, SdlType};
use crate::RoverClientError;
use graphql_client::*;
#[derive(GraphQLQuery)]
// The paths are relative to the directory where your `Cargo.toml` is located.
// Both json and the GraphQL schema language are supported as sources for the schema
#[graphql(
    query_path = "src/operations/subgraph/fetch/fetch_query.graphql",
    schema_path = ".schema/schema.graphql",
    response_derives = "PartialEq, Debug, Serialize, Deserialize",
    deprecated = "warn"
)]
/// This struct is used to generate the module containing `Variables` and
/// `ResponseData` structs.
/// Snake case of this name is the mod name. i.e. subgraph_fetch_query
/// The generated module holds the typed request/response for the subgraph
/// fetch GraphQL operation defined in `fetch_query.graphql`.
pub(crate) struct SubgraphFetchQuery;
/// Fetches a schema from apollo studio and returns its SDL (String)
pub fn run(
    input: SubgraphFetchInput,
    client: &StudioClient,
) -> Result<FetchResponse, RoverClientError> {
    // The post consumes a converted copy of the input, while the response
    // post-processing still needs the graph ref and subgraph name.
    let response_data = client.post::<SubgraphFetchQuery>(input.clone().into())?;
    get_sdl_from_response_data(input, response_data)
}
/// Turns the raw GraphQL response into a `FetchResponse` holding the
/// requested subgraph's SDL.
fn get_sdl_from_response_data(
    input: SubgraphFetchInput,
    response_data: SubgraphFetchResponseData,
) -> Result<FetchResponse, RoverClientError> {
    let services = get_services_from_response_data(input.graph_ref.clone(), response_data)?;
    let contents = get_sdl_for_service(&input.subgraph, services)?;
    Ok(FetchResponse {
        sdl: Sdl {
            contents,
            r#type: SdlType::Subgraph,
        },
    })
}
/// Extracts the list of implementing services from the response, erroring if
/// the graph does not exist or is not federated.
fn get_services_from_response_data(
    graph_ref: GraphRef,
    response_data: SubgraphFetchResponseData,
) -> Result<ServiceList, RoverClientError> {
    let service_data = match response_data.service {
        Some(data) => data,
        // No service at all: the graph ref does not resolve to a graph.
        None => return Err(RoverClientError::GraphNotFound { graph_ref }),
    };
    // A missing or non-federated service list both mean the graph cannot
    // answer subgraph queries.
    match service_data.implementing_services {
        Some(Services::FederatedImplementingServices(services)) => Ok(services.services),
        Some(Services::NonFederatedImplementingService) | None => {
            Err(RoverClientError::ExpectedFederatedGraph {
                graph_ref,
                can_operation_convert: false,
            })
        }
    }
}
/// Returns the active partial-schema SDL for `subgraph_name`, or an error
/// listing the valid subgraph names when no service matches.
fn get_sdl_for_service(
    subgraph_name: &str,
    services: ServiceList,
) -> Result<String, RoverClientError> {
    services
        .iter()
        .find(|svc| svc.name == subgraph_name)
        .map(|svc| svc.active_partial_schema.sdl.clone())
        .ok_or_else(|| RoverClientError::NoSubgraphInGraph {
            invalid_subgraph: subgraph_name.to_string(),
            valid_subgraphs: services.iter().map(|svc| svc.name.clone()).collect(),
        })
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;
    // Happy path: a federated graph's response yields its service list.
    #[test]
    fn get_services_from_response_data_works() {
        let json_response = json!({
            "service": {
                "implementingServices": {
                    "__typename": "FederatedImplementingServices",
                    "services": [
                        {
                            "name": "accounts",
                            "activePartialSchema": {
                                "sdl": "type Query {\n me: User\n}\n\ntype User @key(fields: \"id\") {\n id: ID!\n}\n"
                            }
                        },
                        {
                            "name": "accounts2",
                            "activePartialSchema": {
                                "sdl": "extend type User @key(fields: \"id\") {\n id: ID! @external\n age: Int\n}\n"
                            }
                        }
                    ]
                }
            }
        });
        let data: SubgraphFetchResponseData = serde_json::from_value(json_response).unwrap();
        let output = get_services_from_response_data(mock_graph_ref(), data);
        let expected_json = json!([
          {
            "name": "accounts",
            "activePartialSchema": {
              "sdl": "type Query {\n me: User\n}\n\ntype User @key(fields: \"id\") {\n id: ID!\n}\n"
            }
          },
          {
            "name": "accounts2",
            "activePartialSchema": {
              "sdl": "extend type User @key(fields: \"id\") {\n id: ID! @external\n age: Int\n}\n"
            }
          }
        ]);
        let expected_service_list: ServiceList = serde_json::from_value(expected_json).unwrap();
        assert!(output.is_ok());
        assert_eq!(output.unwrap(), expected_service_list);
    }
    // A null implementingServices means the graph is not federated.
    #[test]
    fn get_services_from_response_data_errs_with_no_services() {
        let json_response = json!({
            "service": {
                "implementingServices": null
            }
        });
        let data: SubgraphFetchResponseData = serde_json::from_value(json_response).unwrap();
        let output = get_services_from_response_data(mock_graph_ref(), data);
        assert!(output.is_err());
    }
    // Looking up an existing subgraph returns exactly its SDL.
    #[test]
    fn get_sdl_for_service_returns_correct_sdl() {
        let json_service_list = json!([
          {
            "name": "accounts",
            "activePartialSchema": {
              "sdl": "type Query {\n me: User\n}\n\ntype User @key(fields: \"id\") {\n id: ID!\n}\n"
            }
          },
          {
            "name": "accounts2",
            "activePartialSchema": {
              "sdl": "extend type User @key(fields: \"id\") {\n id: ID! @external\n age: Int\n}\n"
            }
          }
        ]);
        let service_list: ServiceList = serde_json::from_value(json_service_list).unwrap();
        let output = get_sdl_for_service("accounts2", service_list);
        assert_eq!(
            output.unwrap(),
            "extend type User @key(fields: \"id\") {\n id: ID! @external\n age: Int\n}\n"
                .to_string()
        );
    }
    // Looking up an unknown subgraph name must error.
    #[test]
    fn get_sdl_for_service_errs_on_invalid_name() {
        let json_service_list = json!([
          {
            "name": "accounts",
            "activePartialSchema": {
              "sdl": "type Query {\n me: User\n}\n\ntype User @key(fields: \"id\") {\n id: ID!\n}\n"
            }
          },
          {
            "name": "accounts2",
            "activePartialSchema": {
              "sdl": "extend type User @key(fields: \"id\") {\n id: ID! @external\n age: Int\n}\n"
            }
          }
        ]);
        let service_list: ServiceList = serde_json::from_value(json_service_list).unwrap();
        let output = get_sdl_for_service("harambe-was-an-inside-job", service_list);
        assert!(output.is_err());
    }
    // Shared fixture: an arbitrary graph ref for error-path tests.
    fn mock_graph_ref() -> GraphRef {
        GraphRef {
            name: "mygraph".to_string(),
            variant: "current".to_string(),
        }
    }
}
| 34.331776 | 120 | 0.564993 |
29ba3f4d8fa6818c4ead05d00efec00dd52714aa
| 11,238 |
use std::ffi::{CStr, CString, OsString};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Once;
use super::*;
use crate::exception::*;
use crate::fs::HostStdioFds;
use crate::process::ProcessFilter;
use crate::signal::SigNum;
use crate::util::log::LevelFilter;
use crate::util::mem_util::from_untrusted::*;
use crate::util::sgx::allow_debug as sgx_allow_debug;
use sgx_tse::*;
// Host-side path of the Occlum instance directory; written exactly once
// inside `occlum_ecall_init` (guarded by INIT_ONCE).
pub static mut INSTANCE_DIR: String = String::new();
// Path of the signed LibOS enclave image, derived from INSTANCE_DIR.
static mut ENCLAVE_PATH: String = String::new();
lazy_static! {
    /// Guards the one-time global initialization in `occlum_ecall_init`.
    static ref INIT_ONCE: Once = Once::new();
    /// Whether `occlum_ecall_init` has completed; other ECALLs check this.
    static ref HAS_INIT: AtomicBool = AtomicBool::new(false);
}
/// Converts an `Errno` into the negative i32 error code returned across the
/// ECALL boundary (0 means success; negative values are `-errno`).
macro_rules! ecall_errno {
    ($errno:expr) => {{
        let errno: Errno = $errno;
        -(errno as i32)
    }};
}
#[no_mangle]
pub extern "C" fn occlum_ecall_init(log_level: *const c_char, instance_dir: *const c_char) -> i32 {
if HAS_INIT.load(Ordering::SeqCst) == true {
return ecall_errno!(EEXIST);
}
assert!(!instance_dir.is_null());
let log_level = {
let input_log_level = match parse_log_level(log_level) {
Err(e) => {
eprintln!("invalid log level: {}", e.backtrace());
return ecall_errno!(EINVAL);
}
Ok(log_level) => log_level,
};
// Use the input log level if and only if the enclave allows debug
if sgx_allow_debug() {
input_log_level
} else {
LevelFilter::Off
}
};
INIT_ONCE.call_once(|| {
// Init the log infrastructure first so that log messages will be printed afterwards
util::log::init(log_level);
// Init MPX for SFI if MPX is available
let report = rsgx_self_report();
if (report.body.attributes.xfrm & SGX_XFRM_MPX != 0) {
util::mpx_util::mpx_enable();
}
// Register exception handlers (support cpuid & rdtsc for now)
register_exception_handlers();
unsafe {
let dir_str: &str = CStr::from_ptr(instance_dir).to_str().unwrap();
INSTANCE_DIR.push_str(dir_str);
ENCLAVE_PATH.push_str(&INSTANCE_DIR);
ENCLAVE_PATH.push_str("/build/lib/libocclum-libos.signed.so");
}
HAS_INIT.store(true, Ordering::SeqCst);
});
0
}
/// Creates a new LibOS process from the given executable path, arguments,
/// environment and host stdio file descriptors.
///
/// Returns the new process's tid (>= 0) or a negative errno value
/// (EAGAIN if the LibOS has not been initialized yet).
#[no_mangle]
pub extern "C" fn occlum_ecall_new_process(
    path_buf: *const c_char,
    argv: *const *const c_char,
    env: *const *const c_char,
    host_stdio_fds: *const HostStdioFds,
) -> i32 {
    if HAS_INIT.load(Ordering::SeqCst) == false {
        return ecall_errno!(EAGAIN);
    }
    // Copy and validate all untrusted inputs before touching them.
    let (path, args, env, host_stdio_fds) =
        match parse_arguments(path_buf, argv, env, host_stdio_fds) {
            Ok(all_parsed_args) => all_parsed_args,
            Err(e) => {
                eprintln!("invalid arguments for LibOS: {}", e.backtrace());
                return ecall_errno!(e.errno());
            }
        };
    let _ = unsafe { backtrace::enable_backtrace(&ENCLAVE_PATH, PrintFormat::Short) };
    // Catch panics so unwinding never crosses the FFI boundary.
    panic::catch_unwind(|| {
        backtrace::__rust_begin_short_backtrace(|| {
            match do_new_process(&path, &args, env, &host_stdio_fds) {
                Ok(pid_t) => pid_t as i32,
                Err(e) => {
                    eprintln!("failed to boot up LibOS: {}", e.backtrace());
                    ecall_errno!(e.errno())
                }
            }
        })
    })
    .unwrap_or(ecall_errno!(EFAULT))
}
/// Runs the LibOS thread `libos_pid` on the calling host thread `host_tid`
/// until it exits.
///
/// Returns the thread's exit status, or a negative errno value.
#[no_mangle]
pub extern "C" fn occlum_ecall_exec_thread(libos_pid: i32, host_tid: i32) -> i32 {
    if HAS_INIT.load(Ordering::SeqCst) == false {
        return ecall_errno!(EAGAIN);
    }
    let _ = unsafe { backtrace::enable_backtrace(&ENCLAVE_PATH, PrintFormat::Short) };
    // Catch panics so unwinding never crosses the FFI boundary.
    panic::catch_unwind(|| {
        backtrace::__rust_begin_short_backtrace(|| {
            match do_exec_thread(libos_pid as pid_t, host_tid as pid_t) {
                Ok(exit_status) => exit_status,
                Err(e) => {
                    eprintln!("failed to execute a process: {}", e.backtrace());
                    ecall_errno!(e.errno())
                }
            }
        })
    })
    .unwrap_or(ecall_errno!(EFAULT))
}
/// Sends signal `sig` to the LibOS process(es) selected by `pid` from
/// outside the enclave (kill(2)-like semantics; see `do_kill`).
///
/// Returns 0 on success or a negative errno value.
#[no_mangle]
pub extern "C" fn occlum_ecall_kill(pid: i32, sig: i32) -> i32 {
    if HAS_INIT.load(Ordering::SeqCst) == false {
        return ecall_errno!(EAGAIN);
    }
    let _ = unsafe { backtrace::enable_backtrace(&ENCLAVE_PATH, PrintFormat::Short) };
    // Catch panics so unwinding never crosses the FFI boundary.
    panic::catch_unwind(|| {
        backtrace::__rust_begin_short_backtrace(|| match do_kill(pid, sig) {
            Ok(()) => 0,
            Err(e) => {
                eprintln!("failed to kill: {}", e.backtrace());
                ecall_errno!(e.errno())
            }
        })
    })
    .unwrap_or(ecall_errno!(EFAULT))
}
/// Parses the untrusted log-level string into a `LevelFilter`.
///
/// A null pointer or an unrecognized level falls back to `LevelFilter::Off`;
/// non-UTF-8 input is rejected with EINVAL.
fn parse_log_level(level_chars: *const c_char) -> Result<LevelFilter> {
    const DEFAULT_LEVEL: LevelFilter = LevelFilter::Off;
    if level_chars.is_null() {
        return Ok(DEFAULT_LEVEL);
    }
    let level_string = {
        // level_chars has been guaranteed to be inside enclave
        // and null terminated by ECall
        let level_cstring = CString::from(unsafe { CStr::from_ptr(level_chars) });
        level_cstring
            .into_string()
            // Fixed message: the error means the input was NOT valid UTF-8.
            .map_err(|_| errno!(EINVAL, "log_level must be valid utf-8 data"))?
            .to_lowercase()
    };
    Ok(match level_string.as_str() {
        "off" => LevelFilter::Off,
        "panic" | "fatal" | "error" => LevelFilter::Error,
        "warning" | "warn" => LevelFilter::Warn, // Panic, fatal and warning are log levels defined in OCI (Open Container Initiative)
        "info" => LevelFilter::Info,
        "debug" => LevelFilter::Debug,
        "trace" => LevelFilter::Trace,
        _ => DEFAULT_LEVEL, // Default
    })
}
/// Copies and validates all untrusted ECALL inputs for process creation.
///
/// Returns the executable path, the argv list (with the program name
/// prepended as argv[0]), the merged environment (see `merge_env`), and the
/// validated host stdio fds. Errors with EINVAL on an empty or malformed path.
fn parse_arguments(
    path_ptr: *const c_char,
    argv: *const *const c_char,
    env: *const *const c_char,
    host_stdio_fds: *const HostStdioFds,
) -> Result<(PathBuf, Vec<CString>, Vec<CString>, HostStdioFds)> {
    let path_buf = {
        if path_ptr.is_null() {
            return_errno!(EINVAL, "empty path");
        }
        // path_ptr has been guaranteed to be inside enclave
        // and null terminated by ECall
        let path_cstring = CString::from(unsafe { CStr::from_ptr(path_ptr) });
        let path_string = path_cstring
            .into_string()
            // Fixed message: the error means the path was NOT valid UTF-8.
            .map_err(|_| errno!(EINVAL, "path must be valid utf-8 data"))?;
        Path::new(&path_string).to_path_buf()
    };
    // Derive argv[0] from the final path component.
    let program_cstring = {
        let program_osstr = path_buf
            .file_name()
            .ok_or_else(|| errno!(EINVAL, "invalid path"))?;
        let program_str = program_osstr
            .to_str()
            .ok_or_else(|| errno!(EINVAL, "invalid path"))?;
        CString::new(program_str).map_err(|e| errno!(e))?
    };
    let mut args = clone_cstrings_safely(argv)?;
    args.insert(0, program_cstring);
    let env_merged = merge_env(env)?;
    trace!(
        "env_merged = {:?} (default env and untrusted env)",
        env_merged
    );
    let host_stdio_fds = HostStdioFds::from_user(host_stdio_fds)?;
    Ok((path_buf, args, env_merged, host_stdio_fds))
}
/// Spawns a new LibOS process (without executing it yet) and returns the tid
/// of its main thread.
fn do_new_process(
    program_path: &PathBuf,
    argv: &Vec<CString>,
    env_concat: Vec<CString>,
    host_stdio_fds: &HostStdioFds,
) -> Result<pid_t> {
    validate_program_path(program_path)?;
    // The new process starts with no file actions; its parent is the idle
    // process.
    let file_actions = Vec::new();
    let path_str = program_path.to_str().unwrap();
    process::do_spawn_without_exec(
        &path_str,
        argv,
        &env_concat,
        &file_actions,
        host_stdio_fds,
        &process::IDLE,
    )
}
/// Runs the LibOS thread to completion and returns its exit status.
fn do_exec_thread(libos_tid: pid_t, host_tid: pid_t) -> Result<i32> {
    let status = process::task::exec(libos_tid, host_tid)?;
    // sync file system
    // TODO: only sync when all processes exit
    use rcore_fs::vfs::FileSystem;
    crate::fs::ROOT_INODE.fs().sync()?;
    // Not to be confused with the return value of a main function.
    // The exact meaning of status is described in wait(2) man page.
    Ok(status)
}
/// Validates that `target_path` is absolute, contains no `..` components,
/// and falls under one of the entry points allowed by the LibOS config.
fn validate_program_path(target_path: &PathBuf) -> Result<()> {
    if !target_path.is_absolute() {
        return_errno!(EINVAL, "program path must be absolute");
    }
    // Forbid paths like /bin/../root, which may circumvent our prefix-based path matching
    let has_parent_component = {
        target_path
            .components()
            .any(|component| component == std::path::Component::ParentDir)
    };
    if has_parent_component {
        return_errno!(
            EINVAL,
            "program path cannot contain any parent component (i.e., \"..\")"
        );
    }
    // Check whether the prefix of the program path matches one of the entry points
    // (dropped the needless `&` borrow of the temporary bool here).
    let is_valid_entry_point = config::LIBOS_CONFIG
        .entry_points
        .iter()
        .any(|valid_path_prefix| target_path.starts_with(valid_path_prefix));
    if !is_valid_entry_point {
        return_errno!(EACCES, "program path is NOT a valid entry point");
    }
    Ok(())
}
/// Sends signal `sig` to the process(es) selected by `pid`, mirroring
/// kill(2): pid > 0 targets that process, pid == -1 targets all processes,
/// pid == 0 is forbidden, and any other negative pid is invalid.
fn do_kill(pid: i32, sig: i32) -> Result<()> {
    let filter = match pid {
        p if p > 0 => ProcessFilter::WithPid(p as pid_t),
        -1 => ProcessFilter::WithAnyPid,
        0 => return_errno!(EPERM, "Process 0 cannot be killed"),
        _ => return_errno!(EINVAL, "Invalid pid"),
    };
    if sig < 0 {
        return_errno!(EINVAL, "invalid arguments");
    }
    let signum = SigNum::from_u8(sig as u8)?;
    crate::signal::do_kill_from_outside_enclave(filter, signum)
}
fn merge_env(env: *const *const c_char) -> Result<Vec<CString>> {
#[derive(Debug)]
struct EnvDefaultInner {
content: Vec<CString>,
helper: HashMap<String, usize>, // Env key: index of content
}
let env_listed = &config::LIBOS_CONFIG.env.untrusted;
let mut env_checked: Vec<CString> = Vec::new();
let mut env_default = EnvDefaultInner {
content: Vec::new(),
helper: HashMap::new(),
};
// Use inner struct to parse env default
for (idx, val) in config::LIBOS_CONFIG.env.default.iter().enumerate() {
env_default.content.push(CString::new(val.clone())?);
let kv: Vec<&str> = val.to_str().unwrap().splitn(2, '=').collect(); // only split the first "="
env_default.helper.insert(kv[0].to_string(), idx);
}
// Filter out env which are not listed in Occlum.json env untrusted section
// and remove env default element if it is overrided
if (!env.is_null()) {
let env_untrusted = clone_cstrings_safely(env)?;
for iter in env_untrusted.iter() {
let env_kv: Vec<&str> = iter.to_str().unwrap().splitn(2, '=').collect();
if env_listed.contains(env_kv[0]) {
env_checked.push(iter.clone());
if let Some(idx) = env_default.helper.get(env_kv[0]) {
env_default.content.remove(*idx);
}
}
}
}
trace!("env_checked from env untrusted: {:?}", env_checked);
Ok([env_default.content, env_checked].concat())
}
| 32.386167 | 134 | 0.595747 |
fca074e35a06ca4b986c0bbd682d8aa82902a267
| 2,119 |
use crate::prelude::*;
use crate::vk;
use crate::{EntryCustom, Instance};
use std::ffi::CStr;
use std::mem;
/// Wrapper for the `VK_KHR_timeline_semaphore` extension functions.
#[derive(Clone)]
pub struct TimelineSemaphore {
    handle: vk::Instance,                              // instance the fn pointers were loaded from
    timeline_semaphore_fn: vk::KhrTimelineSemaphoreFn, // loaded extension entry points
}
impl TimelineSemaphore {
    /// Loads the extension's function pointers via
    /// `vkGetInstanceProcAddr` on the given instance.
    pub fn new<L>(entry: &EntryCustom<L>, instance: &Instance) -> Self {
        let timeline_semaphore_fn = vk::KhrTimelineSemaphoreFn::load(|name| unsafe {
            mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr()))
        });
        Self {
            handle: instance.handle(),
            timeline_semaphore_fn,
        }
    }
    /// The extension's name string (`VK_KHR_timeline_semaphore`).
    pub fn name() -> &'static CStr {
        vk::KhrTimelineSemaphoreFn::name()
    }
    #[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkGetSemaphoreCounterValue.html>"]
    pub unsafe fn get_semaphore_counter_value(
        &self,
        device: vk::Device,
        semaphore: vk::Semaphore,
    ) -> VkResult<u64> {
        let mut value = 0;
        self.timeline_semaphore_fn
            .get_semaphore_counter_value_khr(device, semaphore, &mut value)
            .result_with_success(value)
    }
    #[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkWaitSemaphores.html>"]
    pub unsafe fn wait_semaphores(
        &self,
        device: vk::Device,
        wait_info: &vk::SemaphoreWaitInfo,
        timeout: u64,
    ) -> VkResult<()> {
        self.timeline_semaphore_fn
            .wait_semaphores_khr(device, wait_info, timeout)
            .into()
    }
    #[doc = "<https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkSignalSemaphore.html>"]
    pub unsafe fn signal_semaphore(
        &self,
        device: vk::Device,
        signal_info: &vk::SemaphoreSignalInfo,
    ) -> VkResult<()> {
        self.timeline_semaphore_fn
            .signal_semaphore_khr(device, signal_info)
            .into()
    }
    /// Raw access to the loaded function pointer table.
    pub fn fp(&self) -> &vk::KhrTimelineSemaphoreFn {
        &self.timeline_semaphore_fn
    }
    /// The instance handle the function pointers were loaded from.
    pub fn instance(&self) -> vk::Instance {
        self.handle
    }
}
| 29.84507 | 118 | 0.620104 |
26580c393b14cc476b876280785965572dca5415
| 12,611 |
//! It's assumed that the importer is run with the current directory as the project repository; aka
//! `./data/` and `./importer/config` must exist.
// Disable some noisy clippy lints
#![allow(clippy::type_complexity)]
#[macro_use]
extern crate anyhow;
#[macro_use]
extern crate log;
use abstio::{CityName, MapName};
use abstutil::{basename, Timer};
use geom::Distance;
use map_model::RawToMapOptions;
use configuration::{load_configuration, ImporterConfiguration};
mod berlin;
mod configuration;
mod generic;
mod seattle;
mod soundcast;
mod uk;
mod utils;
// TODO Might be cleaner to express as a dependency graph?
/// CLI entry point: dispatches to one of three modes — `--oneshot` (import a
/// single .osm file), `--regen_all` (regenerate every configured city,
/// optionally sharded), or single-city mode driven by `--raw`/`--map`/
/// `--scenario`/`--city_overview`.
#[tokio::main]
async fn main() {
    let config: ImporterConfiguration = load_configuration();
    let mut args = abstutil::CmdArgs::new();
    let opts = RawToMapOptions {
        build_ch: !args.enabled("--skip_ch"),
        consolidate_all_intersections: args.enabled("--consolidate_all_intersections"),
        keep_bldg_tags: args.enabled("--keep_bldg_tags"),
    };
    if let Some(path) = args.optional("--oneshot") {
        let clip = args.optional("--oneshot_clip");
        let drive_on_left = args.enabled("--oneshot_drive_on_left");
        args.done();
        oneshot(path, clip, !drive_on_left, opts);
        return;
    }
    if args.enabled("--regen_all") {
        // Regeneration always builds CHs and never keeps building tags.
        assert!(opts.build_ch);
        assert!(!opts.keep_bldg_tags);
        let shard_num = args
            .optional_parse("--shard_num", |s| s.parse::<usize>())
            .unwrap_or(0);
        let num_shards = args
            .optional_parse("--num_shards", |s| s.parse::<usize>())
            .unwrap_or(1);
        regenerate_everything(config, shard_num, num_shards).await;
        return;
    }
    // Otherwise, we're just operating on a single city.
    let job = Job {
        city: match args.optional("--city") {
            Some(x) => CityName::parse(&x).unwrap(),
            None => CityName::seattle(),
        },
        // Download all raw input files, then convert OSM to the intermediate RawMap.
        osm_to_raw: args.enabled("--raw"),
        // Convert the RawMap to the final Map format.
        raw_to_map: args.enabled("--map"),
        // Download trip demand data, then produce the typical weekday scenario.
        scenario: args.enabled("--scenario"),
        // Produce a city overview from all of the individual maps in a city.
        city_overview: args.enabled("--city_overview"),
        // Only process one map. If not specified, process all maps defined by clipping polygons in
        // importer/config/$city/.
        only_map: args.optional_free(),
    };
    args.done();
    if !job.osm_to_raw && !job.raw_to_map && !job.scenario && !job.city_overview {
        println!(
            "Nothing to do! Pass some combination of --raw, --map, --scenario, --city_overview, \
             or --oneshot"
        );
        std::process::exit(1);
    }
    let mut timer = Timer::new("import map data");
    job.run(&config, opts, &mut timer).await;
}
/// Regenerates every configured city, processing only the cities whose index
/// satisfies `cnt % num_shards == shard_num` (for splitting work across
/// machines).
async fn regenerate_everything(config: ImporterConfiguration, shard_num: usize, num_shards: usize) {
    // Discover all cities by looking at config. But always operate on Seattle first. Special
    // treatment ;)
    let mut all_cities = CityName::list_all_cities_from_importer_config();
    all_cities.retain(|x| x != &CityName::seattle());
    all_cities.insert(0, CityName::seattle());
    let mut timer = Timer::new("regenerate all maps");
    for (cnt, city) in all_cities.into_iter().enumerate() {
        // Every city gets raw + map conversion; extra stages below.
        let mut job = Job {
            city: city.clone(),
            osm_to_raw: true,
            raw_to_map: true,
            scenario: false,
            city_overview: false,
            only_map: None,
        };
        // Only some maps run extra tasks
        if city == CityName::seattle() || city.country == "gb" {
            job.scenario = true;
        }
        // TODO Autodetect this based on number of maps per city?
        if city == CityName::new("gb", "leeds")
            || city == CityName::new("us", "nyc")
            || city == CityName::new("fr", "charleville_mezieres")
            || city == CityName::new("fr", "paris")
            || city == CityName::new("at", "salzburg")
            || city == CityName::new("ir", "tehran")
        {
            job.city_overview = true;
        }
        // Skip cities belonging to other shards.
        if cnt % num_shards == shard_num {
            job.run(&config, RawToMapOptions::default(), &mut timer)
                .await;
        }
    }
}
/// One unit of import work: a city plus the set of pipeline stages to run.
struct Job {
    city: CityName,
    /// Download raw inputs and convert OSM to the intermediate RawMap.
    osm_to_raw: bool,
    /// Convert the RawMap to the final Map format.
    raw_to_map: bool,
    /// Produce the typical weekday traffic scenario.
    scenario: bool,
    /// Produce a city overview from all of the city's individual maps.
    city_overview: bool,
    /// If set, only process this one map instead of every map in the city.
    only_map: Option<String>,
}
impl Job {
    /// Runs the selected pipeline stages for every map of the job's city
    /// (or just `only_map`), in order: osm_to_raw -> raw_to_map -> scenario,
    /// then a city-wide overview if requested.
    async fn run(
        self,
        config: &ImporterConfiguration,
        opts: RawToMapOptions,
        timer: &mut Timer<'_>,
    ) {
        timer.start(format!("import {}", self.city.describe()));
        // Determine the list of map names: either the single requested one,
        // or every clipping polygon found in the city's config directory.
        let names = if let Some(n) = self.only_map {
            println!("- Just working on {}", n);
            vec![n]
        } else {
            println!("- Working on all {} maps", self.city.describe());
            abstio::list_dir(format!(
                "importer/config/{}/{}",
                self.city.country, self.city.city
            ))
            .into_iter()
            .filter(|path| path.ends_with(".poly"))
            .map(basename)
            .collect()
        };
        // Seattle scenarios need extra shared inputs, loaded once up front.
        let (maybe_popdat, maybe_huge_map, maybe_zoning_parcels) = if self.scenario
            && self.city == CityName::seattle()
        {
            let (popdat, huge_map) = seattle::ensure_popdat_exists(timer, config).await;
            // Just assume --raw has been called...
            let shapes: kml::ExtraShapes =
                abstio::read_binary(CityName::seattle().input_path("zoning_parcels.bin"), timer);
            (Some(popdat), Some(huge_map), Some(shapes))
        } else {
            (None, None, None)
        };
        for name in names {
            if self.osm_to_raw {
                // Still special-cased
                if self.city == CityName::seattle() {
                    seattle::osm_to_raw(&name, timer, config).await;
                } else {
                    let raw = match abstio::maybe_read_json::<generic::GenericCityImporter>(
                        format!(
                            "importer/config/{}/{}/cfg.json",
                            self.city.country, self.city.city
                        ),
                        timer,
                    ) {
                        Ok(city_cfg) => {
                            city_cfg
                                .osm_to_raw(MapName::from_city(&self.city, &name), timer, config)
                                .await
                        }
                        Err(err) => {
                            panic!("Can't import city {}: {}", self.city.describe(), err);
                        }
                    };
                    // A few cities have extra per-city input data.
                    if self.city == CityName::new("de", "berlin") {
                        berlin::import_extra_data(&raw, config, timer).await;
                    } else if self.city == CityName::new("gb", "leeds") && name == "huge" {
                        uk::import_collision_data(&raw, config, timer).await;
                    } else if self.city == CityName::new("gb", "london") {
                        uk::import_collision_data(&raw, config, timer).await;
                    }
                }
            }
            let name = MapName::from_city(&self.city, &name);
            // Build (or load) the Map if a later stage needs it.
            let mut maybe_map = if self.raw_to_map {
                let mut map = utils::raw_to_map(&name, opts.clone(), timer);
                // Another strange step in the pipeline.
                if name == MapName::new("de", "berlin", "center") {
                    timer.start(format!(
                        "distribute residents from planning areas for {}",
                        name.describe()
                    ));
                    berlin::distribute_residents(&mut map, timer);
                    timer.stop(format!(
                        "distribute residents from planning areas for {}",
                        name.describe()
                    ));
                } else if name.city == CityName::seattle() {
                    timer.start(format!("add GTFS schedules for {}", name.describe()));
                    seattle::add_gtfs_schedules(&mut map);
                    timer.stop(format!("add GTFS schedules for {}", name.describe()));
                }
                Some(map)
            } else if self.scenario {
                Some(map_model::Map::load_synchronously(name.path(), timer))
            } else {
                None
            };
            if self.scenario {
                if self.city == CityName::seattle() {
                    timer.start(format!("scenario for {}", name.describe()));
                    let scenario = soundcast::make_weekday_scenario(
                        maybe_map.as_ref().unwrap(),
                        maybe_popdat.as_ref().unwrap(),
                        maybe_huge_map.as_ref().unwrap(),
                        timer,
                    );
                    scenario.save();
                    timer.stop(format!("scenario for {}", name.describe()));
                    // This is a strange ordering.
                    if name.map == "downtown"
                        || name.map == "qa"
                        || name.map == "south_seattle"
                        || name.map == "wallingford"
                    {
                        timer.start(format!("adjust parking for {}", name.describe()));
                        seattle::adjust_private_parking(maybe_map.as_mut().unwrap(), &scenario);
                        timer.stop(format!("adjust parking for {}", name.describe()));
                    }
                    timer.start("match parcels to buildings");
                    seattle::match_parcels_to_buildings(
                        maybe_map.as_mut().unwrap(),
                        maybe_zoning_parcels.as_ref().unwrap(),
                        timer,
                    );
                    timer.stop("match parcels to buildings");
                }
                if self.city.country == "gb" {
                    uk::generate_scenario(maybe_map.as_ref().unwrap(), config, timer)
                        .await
                        .unwrap();
                }
            }
        }
        if self.city_overview {
            timer.start(format!(
                "generate city overview for {}",
                self.city.describe()
            ));
            abstio::write_binary(
                abstio::path(format!(
                    "system/{}/{}/city.bin",
                    self.city.country, self.city.city
                )),
                &map_model::City::from_individual_maps(&self.city, timer),
            );
            timer.stop(format!(
                "generate city overview for {}",
                self.city.describe()
            ));
        }
        timer.stop(format!("import {}", self.city.describe()));
    }
}
/// Imports a single .osm file (no city config involved), producing and
/// saving both the intermediate RawMap and the final Map.
fn oneshot(osm_path: String, clip: Option<String>, drive_on_right: bool, opts: RawToMapOptions) {
    let mut timer = abstutil::Timer::new("oneshot");
    println!("- Running convert_osm on {}", osm_path);
    let name = abstutil::basename(&osm_path);
    let raw = convert_osm::convert(
        convert_osm::Options {
            osm_input: osm_path,
            // Oneshot maps live under a synthetic "zz/oneshot" city.
            name: MapName::new("zz", "oneshot", &name),
            clip,
            map_config: map_model::MapConfig {
                driving_side: if drive_on_right {
                    map_model::DrivingSide::Right
                } else {
                    map_model::DrivingSide::Left
                },
                bikes_can_use_bus_lanes: true,
                inferred_sidewalks: true,
                street_parking_spot_length: Distance::meters(8.0),
            },
            onstreet_parking: convert_osm::OnstreetParking::JustOSM,
            public_offstreet_parking: convert_osm::PublicOffstreetParking::None,
            private_offstreet_parking: convert_osm::PrivateOffstreetParking::FixedPerBldg(1),
            include_railroads: true,
            extra_buildings: None,
        },
        &mut timer,
    );
    // Often helpful to save intermediate representation in case user wants to load into map_editor
    raw.save();
    let map = map_model::Map::create_from_raw(raw, opts, &mut timer);
    timer.start("save map");
    map.save();
    timer.stop("save map");
    println!("{} has been created", map.get_name().path());
}
| 37.091176 | 100 | 0.515185 |
0e4e6010aa3d6572d182818c6bf4330901e74ea0
| 25,116 |
// Copyright 2022 Matthew Ingwersen.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//! Handling of DNS QUERY messages.
use arrayvec::ArrayVec;
use super::{
generate_error, handle_processing_errors, set_up_response_header, ProcessingError,
ProcessingResult, Response, Server,
};
use crate::class::Class;
use crate::message::{writer, Qclass, Qtype, Question, Rcode, Reader, Writer};
use crate::name::Name;
use crate::rr::{Rdata, Rrset, RrsetList, Ttl, Type};
use crate::zone::{LookupAllResult, LookupResult, Zone};
impl Server {
    /// Handles a DNS message with opcode QUERY.
    pub(super) fn handle_query(
        &self,
        mut query: Reader,
        response_buf: &mut [u8],
        over_tcp: bool,
    ) -> Response {
        // Parse and validate the query; a malformed message earns a
        // FORMERR (with the question echoed back if it was readable).
        let question = match read_query(&mut query) {
            Ok(q) => q,
            Err(partial) => {
                return generate_error(&query, partial.as_ref(), Rcode::FormErr, response_buf)
            }
        };
        // Special QTYPEs other than * (ANY) are not implemented, and
        // neither is QCLASS * (ANY); both cases get NOTIMP.
        let unsupported_qtype = matches!(
            question.qtype,
            Qtype::IXFR | Qtype::AXFR | Qtype::MAILB | Qtype::MAILA
        );
        if unsupported_qtype || question.qclass == Qclass::ANY {
            return generate_error(&query, Some(&question), Rcode::NotImp, response_buf);
        }
        // When we support multiple zones, here is where we will find
        // the appropriate zone. For now, just check that the QCLASS
        // matches the single zone we serve; the answering logic takes
        // care of responding REFUSED if the QNAME turns out (after the
        // Zone lookup methods run) not to be part of that zone.
        if Class::from(question.qclass) != self.zone.class() {
            return generate_error(&query, Some(&question), Rcode::Refused, response_buf);
        }
        handle_non_axfr_query(&self.zone, query, question, response_buf, over_tcp)
            .map_or(Response::None, Response::Single)
    }
}
/// Handles a non-AXFR DNS query. Returns the length of the DNS response
/// written into `response_buf`, or `None` if no response should be sent
/// (e.g. when even the header or question cannot be written).
fn handle_non_axfr_query<'a>(
    zone: &Zone,
    query: Reader,
    question: Question,
    response_buf: &'a mut [u8],
    over_tcp: bool,
) -> Option<usize> {
    let mut response = Writer::try_from(response_buf).ok()?;
    set_up_response_header(&query, &mut response);
    response.add_question(&question).ok()?;
    // * (ANY) takes a separate path; everything else resolves a single
    // concrete RR type.
    let result = match question.qtype {
        Qtype::ANY => answer_any(zone, &question.qname, &mut response),
        other => answer(zone, &question.qname, Type::from(other), &mut response),
    };
    handle_processing_errors(result, &mut response, over_tcp);
    Some(response.finish())
}
////////////////////////////////////////////////////////////////////////
// QUERY VALIDATION //
////////////////////////////////////////////////////////////////////////
/// Reads a query and returns its question. On error, the question is
/// returned only if it could be read.
fn read_query(query: &mut Reader) -> Result<Question, Option<Question>> {
    // A query must carry exactly one question.
    if query.qdcount() != 1 {
        return Err(None);
    }
    let question = query.read_question().map_err(|_| None)?;
    // A well-formed query is not itself a response, is not truncated,
    // and carries no records outside the question section.
    let malformed = query.qr()
        || query.tc()
        || query.ancount() > 0
        || query.nscount() > 0
        || query.arcount() > 0;
    if malformed {
        Err(Some(question))
    } else {
        Ok(question)
    }
}
////////////////////////////////////////////////////////////////////////
// ANSWERING LOGIC //
////////////////////////////////////////////////////////////////////////
/// Answers a query for a specific RR type once the appropriate zone to
/// search has been determined.
fn answer(zone: &Zone, qname: &Name, rr_type: Type, response: &mut Writer) -> ProcessingResult<()> {
    match zone.lookup(qname, rr_type) {
        // Records of the requested type exist at QNAME.
        LookupResult::Found(rrset) => {
            response.set_aa(true);
            response.add_answer_rrset(qname, rrset)?;
            do_additional_section_processing(zone, rrset, response)
        }
        // QNAME is an alias; follow the CNAME chain.
        LookupResult::Cname(cname) => do_cname(zone, qname, cname, rr_type, response),
        // QNAME falls under a delegation: refer the client to the
        // child zone's nameservers.
        LookupResult::Referral(child, ns) => do_referral(zone, child, ns, response),
        // QNAME exists but holds no records of this type: NOERROR plus
        // the negative-caching SOA.
        LookupResult::NoRecords => {
            response.set_aa(true);
            add_negative_caching_soa(zone, response)
        }
        // QNAME does not exist at all.
        LookupResult::NxDomain => {
            response.set_aa(true);
            response.set_rcode(Rcode::NxDomain);
            add_negative_caching_soa(zone, response)
        }
        // QNAME is not within the zone we serve.
        LookupResult::WrongZone => {
            response.set_rcode(Rcode::Refused);
            Ok(())
        }
    }
}
/// Answers a query with QTYPE * (ANY) once the appropriate zone to
/// search has been determined.
fn answer_any(zone: &Zone, qname: &Name, response: &mut Writer) -> ProcessingResult<()> {
    match zone.lookup_all(qname) {
        LookupAllResult::Found(rrsets) => {
            response.set_aa(true);
            // Copy every RRset at the node into the answer section.
            let mut wrote_any = false;
            for rrset in rrsets.iter() {
                response.add_answer_rrset(qname, rrset)?;
                wrote_any = true;
            }
            // A node with no RRsets at all is answered like the
            // no-records case: NOERROR plus the negative-caching SOA.
            if !wrote_any {
                add_negative_caching_soa(zone, response)?;
            }
            Ok(())
        }
        LookupAllResult::Referral(child, ns) => do_referral(zone, child, ns, response),
        LookupAllResult::NxDomain => {
            response.set_aa(true);
            response.set_rcode(Rcode::NxDomain);
            add_negative_caching_soa(zone, response)
        }
        LookupAllResult::WrongZone => {
            response.set_rcode(Rcode::Refused);
            Ok(())
        }
    }
}
////////////////////////////////////////////////////////////////////////
// ANSWERING LOGIC - CNAME HANDLING //
////////////////////////////////////////////////////////////////////////
/// The maximum number of links in a CNAME chain that we will follow
/// before giving up and returning SERVFAIL.
///
/// By "links" we mean the number of CNAME records seen before reaching
/// the actual canonical name. For instance, the following answer shows
/// three links by our reckoning:
///
/// ```text
/// a.quandary.test.    3600    IN    CNAME    b.quandary.test.
/// b.quandary.test.    3600    IN    CNAME    c.quandary.test.
/// c.quandary.test.    3600    IN    CNAME    d.quandary.test.
/// d.quandary.test.    3600    IN    A        127.0.0.1
/// ```
const MAX_CNAME_CHAIN_LEN: usize = 8;
/// A fixed-capacity vector that can contain up to
/// [`MAX_CNAME_CHAIN_LEN`]−1 domain names.
///
/// As we follow a CNAME chain, we conceptually add the current owner
/// to a list of previously seen owners before we re-run the query with
/// the CNAME as the new owner. Each time we begin the lookup process
/// with a new owner, we check whether it was previously seen, to ensure
/// that there are no loops. Furthermore, in order not to follow more
/// than [`MAX_CNAME_CHAIN_LEN`] links in the chain, this list must not
/// exceed [`MAX_CNAME_CHAIN_LEN`] previous owners.
///
/// The [`PreviousOwners`] type is an [`ArrayVec`] that fulfills this
/// need, with one important difference from the conceptual list in the
/// last paragraph. Since [`do_cname`] is passed the original QNAME as
/// a `&Name`, not a `Box<Name>`, the QNAME (which would be the first
/// element added to the previous owners list) is considered separately.
/// To compensate, [`PreviousOwners`] actually has a capacity of
/// [`MAX_CNAME_CHAIN_LEN`]−1. (A full list therefore means that
/// `MAX_CNAME_CHAIN_LEN` owners — QNAME plus the list — have been seen.)
type PreviousOwners = ArrayVec<Box<Name>, { MAX_CNAME_CHAIN_LEN - 1 }>;
/// Follows a CNAME chain to produce an answer when there is CNAME RRset
/// present at QNAME.
///
/// At most [`MAX_CNAME_CHAIN_LEN`] links in a CNAME chain will be
/// processed before this gives up and signals to respond with SERVFAIL.
/// Additionally, loops in the chain will be detected and will trigger a
/// SERVFAIL.
fn do_cname(
zone: &Zone,
qname: &Name,
cname_rrset: &Rrset,
rr_type: Type,
response: &mut Writer,
) -> ProcessingResult<()> {
// RFC 6604 § 2.1 reiterates RFC 1035: the AA bit is set based on
// the first owner name in the answer section. Thus, the AA bit
// should be set here.
response.set_aa(true);
response.add_answer_rrset(qname, cname_rrset)?;
follow_cname_1(zone, qname, cname_rrset, rr_type, response, ArrayVec::new())
}
/// Step 1 of the CNAME-following process: parse a `Box<Name>` out of
/// the CNAME RRset and verify that this owner has not already been
/// visited while processing the current chain.
fn follow_cname_1(
    zone: &Zone,
    qname: &Name,
    cname_rrset: &Rrset,
    rr_type: Type,
    response: &mut Writer,
    owners_seen: PreviousOwners,
) -> ProcessingResult<()> {
    // The target is the first RDATA of the CNAME RRset; a missing or
    // unparsable RDATA is a server-side data problem, hence SERVFAIL.
    let cname = cname_rrset
        .rdatas()
        .next()
        .and_then(|rdata| Name::try_from_uncompressed_all(rdata).ok())
        .ok_or(ProcessingError::ServFail)?;
    // Loop detection: the new owner must be neither the original QNAME
    // nor any owner already followed in this chain.
    if cname.as_ref() == qname || owners_seen.contains(&cname) {
        return Err(ProcessingError::ServFail);
    }
    follow_cname_2(zone, qname, cname, rr_type, response, owners_seen)
}
/// Step 2 of the CNAME-following process. This is the point where we
/// actually re-run the query with the CNAME as the new QNAME.
fn follow_cname_2(
    zone: &Zone,
    qname: &Name,
    cname: Box<Name>,
    rr_type: Type,
    response: &mut Writer,
    mut owners_seen: PreviousOwners,
) -> ProcessingResult<()> {
    // NOTE: RFC 1034 § 3.4.2 indicates that we should restart the query
    // from the very beginning, even going into other available zones.
    // (A possible motivation behind this instruction is the possibility
    // that we might provide recursive service.) This is *not* the
    // procedure that we follow. Rather, we re-run the query within the
    // original QNAME's zone. This appears to be the behavior of some
    // other authoritative servers, such as Knot.
    //
    // There is a good reason for this decision. Even if we did follow
    // a CNAME chain into another zone for which we are authoritative,
    // resolvers likely don't know that we are also authoritative for
    // that other zone. A smart resolver, therefore, won't trust any
    // records from the other zone that we might include. (See e.g. the
    // scrub_sanitize subroutine in Unbound.)
    match zone.lookup(&cname, rr_type) {
        // The chain ends here with records of the requested type.
        LookupResult::Found(rrset) => {
            response.add_answer_rrset(&cname, rrset)?;
            do_additional_section_processing(zone, rrset, response)
        }
        LookupResult::Cname(next_cname_rrset) => {
            // The CNAME chain continues. If the CNAME chain is getting
            // too long, we refuse to go any further; otherwise we
            // restart the CNAME-following process with the next CNAME
            // in the chain. (The CNAME record is written to the answer
            // section *before* its owner is pushed onto `owners_seen`,
            // since the push consumes `cname`.)
            if owners_seen.is_full() {
                Err(ProcessingError::ServFail)
            } else {
                response.add_answer_rrset(&cname, next_cname_rrset)?;
                owners_seen.push(cname);
                follow_cname_1(
                    zone,
                    qname,
                    next_cname_rrset,
                    rr_type,
                    response,
                    owners_seen,
                )
            }
        }
        // The chain crosses a delegation: answer with a referral.
        LookupResult::Referral(child_zone, ns_rrset) => {
            do_referral(zone, child_zone, ns_rrset, response)
        }
        // Per RFC 6604 § 3, the RCODE is set based on the last query
        // cycle. Therefore, the no-records case should be NOERROR and
        // the nonexistent-name case should be NXDOMAIN. Note that this
        // seems to be a change from RFC 1034 § 3.4.2, whose step 3(c)
        // calls for an authoritative name error (NXDOMAIN) only when
        // the failed lookup is for the original QNAME.
        LookupResult::NoRecords => add_negative_caching_soa(zone, response),
        LookupResult::NxDomain => {
            response.set_rcode(Rcode::NxDomain);
            add_negative_caching_soa(zone, response)
        }
        // The chain leads out of this zone; there is nothing more we
        // can add authoritatively, so the partial chain already written
        // is returned as-is (RCODE remains unchanged).
        LookupResult::WrongZone => Ok(()),
    }
}
////////////////////////////////////////////////////////////////////////
// ANSWERING LOGIC - REFERRAL HANDLING //
////////////////////////////////////////////////////////////////////////
/// Creates a referral response.
///
/// When a lookup would take us out of authoritative data (that is,
/// when we reach a non-apex node with an NS RRset), [RFC 1034 § 4.3.2]
/// instructs us to create a referral response. This involves copying
/// the NS RRset into the authority section and available addresses for
/// the nameservers specified by the NS records into the additional
/// section.
///
/// [RFC 1034 § 4.3.2]: https://datatracker.ietf.org/doc/html/rfc1034#section-4.3.2
fn do_referral(
    parent_zone: &Zone,
    child_zone: &Name,
    ns_rrset: &Rrset,
    response: &mut Writer,
) -> ProcessingResult<()> {
    response.add_authority_rrset(child_zone, ns_rrset)?;
    // Glue — addresses for nameservers inside the delegated zone — is
    // mandatory: without it the delegation cannot be followed. If glue
    // does not fit, fail so that upstream error-handling code can send
    // a response with the TC bit set.
    //
    // Other nameserver addresses available within the parent zone's
    // hierarchy (including "sibling glue": glue records in *another*
    // zone delegated from the same parent) are merely best-effort; if
    // they don't fit, we just don't include them.
    //
    // TODO: the "DNS Referral Glue Requirements" Internet Draft is
    // relevant here, and depending on how it turns out our policies may
    // need to change. The latest draft as of this writing is draft 4,
    // whose § 3.2 states that sibling glue is NOT REQUIRED to be
    // included if it does not fit — an allowance used here.
    let mut required_glue = Vec::new();
    let mut best_effort = Vec::new();
    for rdata in ns_rrset.rdatas() {
        let nsdname = read_name_from_rdata(rdata, 0)?;
        if nsdname.eq_or_subdomain_of(child_zone) {
            required_glue.push(nsdname);
        } else {
            best_effort.push(nsdname);
        }
    }
    for nsdname in required_glue {
        add_referral_additional_addresses(parent_zone, &nsdname, response)?;
    }
    for nsdname in best_effort {
        execute_allowing_truncation(|| {
            add_referral_additional_addresses(parent_zone, &nsdname, response)
        })?;
    }
    Ok(())
}
/// Looks up `name` in the parent zone, *including in non-authoritative
/// data*, and adds any address (A or AAAA) RRsets found to the
/// additional section of `response`. Note that, on error, some of the
/// addresses may have been successfully written.
fn add_referral_additional_addresses(
    parent_zone: &Zone,
    name: &Name,
    response: &mut Writer,
) -> writer::Result<()> {
    match parent_zone.lookup_all_raw(name, false) {
        LookupAllResult::Found(rrsets) => add_additional_address_rrsets(name, rrsets, response),
        _ => Ok(()),
    }
}
////////////////////////////////////////////////////////////////////////
// HELPERS - ADDITIONAL SECTION PROCESSING //
////////////////////////////////////////////////////////////////////////
/// Performs "additional section processing," if any, for the given
/// RRset.
///
/// For certain RR types, RFCs 1034 and 1035 call for "additional
/// section processing," in which any available A records for domain
/// names embedded in the RDATA are included in the additional section
/// of the response. The goal is to reduce the number of queries a
/// resolver must make by preemptively including address information
/// that will very likely be needed next. Some later RFCs defining new
/// RR types (e.g. [RFC 2782] for SRV) also ask for this behavior. With
/// the advent of IPv6, [RFC 3596] includes AAAA records for IPv6
/// addresses in additional section processing as well.
///
/// Any address records are considered extra information, and should be
/// omitted if there is insufficient room (see [RFC 2181 § 9]). In
/// practice, some servers have "minimal responses" configuration
/// options that disable additional section processing altogether.
///
/// [RFC 2782]: https://datatracker.ietf.org/doc/html/rfc2782
/// [RFC 3596]: https://datatracker.ietf.org/doc/html/rfc3596
/// [RFC 2181 § 9]: https://datatracker.ietf.org/doc/html/rfc2181#section-9
fn do_additional_section_processing(
    zone: &Zone,
    rrset: &Rrset,
    response: &mut Writer,
) -> ProcessingResult<()> {
    // The embedded name starts at a type-specific offset within the
    // RDATA: immediately for MB/MD/MF/NS, after the 16-bit PREFERENCE
    // field for MX, and after the three 16-bit fields that precede
    // TARGET for SRV. Other types get no additional processing.
    let name_offset = match rrset.rr_type {
        Type::MB | Type::MD | Type::MF | Type::NS => 0,
        Type::MX => 2,
        Type::SRV => 6,
        _ => return Ok(()),
    };
    for rdata in rrset.rdatas() {
        let name = read_name_from_rdata(rdata, name_offset)?;
        // Addresses are optional extras: tolerate truncation here.
        execute_allowing_truncation(|| add_additional_addresses(zone, &name, response))?;
    }
    Ok(())
}
/// Looks up `name` in `zone` and adds any address (A or AAAA) RRsets
/// found to the additional section of `response`. Note that, on error,
/// some of the addresses may have been successfully written.
fn add_additional_addresses(zone: &Zone, name: &Name, response: &mut Writer) -> writer::Result<()> {
    match zone.lookup_all(name) {
        LookupAllResult::Found(rrsets) => add_additional_address_rrsets(name, rrsets, response),
        _ => Ok(()),
    }
}
////////////////////////////////////////////////////////////////////////
// HELPERS - NEGATIVE CACHING SOA (RFC 2308 § 3) //
////////////////////////////////////////////////////////////////////////
/// Adds the SOA record from `zone` to the authority section of
/// `response` for negative caching [RFC 2308 § 3].
///
/// [RFC 2308 § 3]: https://datatracker.ietf.org/doc/html/rfc2308#section-3
fn add_negative_caching_soa(zone: &Zone, response: &mut Writer) -> ProcessingResult<()> {
    // Per RFC 2308 § 3, the record is written with the SOA MINIMUM
    // field as its TTL — not with the SOA record's own TTL. A zone
    // without a (well-formed) SOA is a data problem: SERVFAIL.
    let soa = zone.soa().ok_or(ProcessingError::ServFail)?;
    let rdata = soa.rdatas().next().ok_or(ProcessingError::ServFail)?;
    let minimum = read_soa_minimum(rdata)?;
    response
        .add_authority_rr(zone.name(), soa.rr_type, soa.class, Ttl::from(minimum), rdata)
        .map_err(Into::into)
}
/// Reads the MINIMUM field from the provided SOA RDATA.
fn read_soa_minimum(rdata: &Rdata) -> ProcessingResult<u32> {
    // Skip the two variable-length names (MNAME, RNAME) at the front.
    let mname_len =
        Name::validate_uncompressed(rdata).map_err(|_| ProcessingError::ServFail)?;
    let rname_len =
        Name::validate_uncompressed(&rdata[mname_len..]).map_err(|_| ProcessingError::ServFail)?;
    // MINIMUM is the last of the 32-bit fields following the names,
    // 16 octets past them; `try_into` enforces exactly 4 octets remain.
    let octets = rdata
        .get(mname_len + rname_len + 16..)
        .ok_or(ProcessingError::ServFail)?;
    let array: [u8; 4] = octets.try_into().map_err(|_| ProcessingError::ServFail)?;
    Ok(u32::from_be_bytes(array))
}
////////////////////////////////////////////////////////////////////////
// HELPERS - MISCELLEANEOUS //
////////////////////////////////////////////////////////////////////////
/// A helper that adds the A and AAAA RRsets in `rrsets` to the
/// additional section of `response`. On error (including truncation),
/// note that some addresses may have been successfully written.
fn add_additional_address_rrsets(
    name: &Name,
    rrsets: &RrsetList,
    response: &mut Writer,
) -> writer::Result<()> {
    let address_rrsets = rrsets
        .iter()
        .filter(|rrset| matches!(rrset.rr_type, Type::A | Type::AAAA));
    for rrset in address_rrsets {
        response.add_additional_rrset(name, rrset)?;
    }
    Ok(())
}
/// Executes `f`, swallowing a [`writer::Error::Truncation`] failure.
/// On success, returns `Ok(true)` if truncation occurred and
/// `Ok(false)` if not; any other error is propagated.
fn execute_allowing_truncation(f: impl FnOnce() -> writer::Result<()>) -> writer::Result<bool> {
    match f() {
        Ok(()) => Ok(false),
        Err(writer::Error::Truncation) => Ok(true),
        Err(other) => Err(other),
    }
}
/// Reads a serialized domain name from `rdata`, starting at `start` and
/// running to the end of `rdata`. Out-of-range offsets and unparsable
/// names both map to SERVFAIL.
fn read_name_from_rdata(rdata: &Rdata, start: usize) -> ProcessingResult<Box<Name>> {
    match rdata.get(start..) {
        Some(tail) => {
            Name::try_from_uncompressed_all(tail).map_err(|_| ProcessingError::ServFail)
        }
        None => Err(ProcessingError::ServFail),
    }
}
////////////////////////////////////////////////////////////////////////
// TESTS //
////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
    use super::*;
    use crate::zone::GluePolicy;
    ////////////////////////////////////////////////////////////////////
    //                     CNAME-FOLLOWING TESTS                      //
    ////////////////////////////////////////////////////////////////////
    // A two-element loop (a -> b -> a) must be detected and rejected.
    #[test]
    fn cname_handling_rejects_loops() {
        let mut zone = new_zone();
        add_cname_to_zone(&mut zone, 'a', 'b');
        add_cname_to_zone(&mut zone, 'b', 'a');
        test_cname(zone, new_name('a'), Err(ProcessingError::ServFail));
    }
    // A chain of exactly MAX_CNAME_CHAIN_LEN links is still followed.
    #[test]
    fn cname_handling_allows_almost_too_long_chains() {
        let mut zone = new_zone();
        make_chain(&mut zone, MAX_CNAME_CHAIN_LEN);
        test_cname(zone, new_name('a'), Ok(()));
    }
    // One link past the limit must produce SERVFAIL.
    #[test]
    fn cname_handling_rejects_long_chains() {
        let mut zone = new_zone();
        make_chain(&mut zone, MAX_CNAME_CHAIN_LEN + 1);
        test_cname(zone, new_name('a'), Err(ProcessingError::ServFail));
    }
    // Looks up the CNAME RRset at `owner` in `zone` and asserts that
    // do_cname yields `expected_result`.
    fn test_cname(zone: Zone, owner: Box<Name>, expected_result: ProcessingResult<()>) {
        let mut buf = [0; 512];
        let mut writer = Writer::try_from(&mut buf[..]).unwrap();
        let cname_rrset = match zone.lookup(&owner, Type::CNAME) {
            LookupResult::Found(cname_rrset) => cname_rrset,
            _ => panic!(),
        };
        assert_eq!(
            do_cname(&zone, &owner, cname_rrset, Type::A, &mut writer),
            expected_result
        );
    }
    // Builds a chain a -> b -> c -> ... with `len` CNAME links.
    fn make_chain(zone: &mut Zone, len: usize) {
        let owners = ('a'..'z').collect::<Vec<char>>();
        for i in 0..len {
            add_cname_to_zone(zone, owners[i], owners[i + 1]);
        }
    }
    // Creates an otherwise-empty quandary.test. zone with a zeroed SOA
    // record (22 octets: two root names plus the five 32-bit fields).
    fn new_zone() -> Zone {
        let apex: Box<Name> = "quandary.test.".parse().unwrap();
        let rdata = <&Rdata>::try_from(&[0; 22]).unwrap();
        let mut zone = Zone::new(apex.clone(), Class::IN, GluePolicy::Narrow);
        zone.add(&apex, Type::SOA, Class::IN, Ttl::from(0), rdata)
            .unwrap();
        zone
    }
    // Produces the name <owner>.quandary.test.
    fn new_name(owner: char) -> Box<Name> {
        (owner.to_string() + ".quandary.test.").parse().unwrap()
    }
    // Adds <owner>.quandary.test. CNAME <target>.quandary.test.
    fn add_cname_to_zone(zone: &mut Zone, owner: char, target: char) {
        let owner = new_name(owner);
        let target = new_name(target);
        let rdata = <&Rdata>::try_from(target.wire_repr()).unwrap();
        zone.add(&owner, Type::CNAME, Class::IN, Ttl::from(0), rdata)
            .unwrap();
    }
}
| 38.939535 | 100 | 0.58405 |
e8ad58c616877e348202a57180c13bec68b60f99
| 11,055 |
use chrono::{DateTime, Local};
use regex::Regex;
use siphasher::sip::SipHasher13;
use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use crate::error::Kind;
use crate::{AsyncMigrate, Error, Migrate};
// Regex used to match migration file names, e.g. `V1__initial`.
pub fn file_match_re() -> Regex {
    // Capture groups: (1) the "V" prefix, (2) the version, (3) the name.
    const PATTERN: &str = r"^(V)(\d+(?:\.\d+)?)__(\w+)";
    Regex::new(PATTERN).unwrap()
}
// Compiled once, lazily, and reused by every `Migration::unapplied` call.
lazy_static::lazy_static! {
    static ref RE: regex::Regex = file_match_re();
}
/// The type of a Migration; at the moment only versioned migrations are
/// supported.
#[derive(Clone, Debug)]
enum Type {
    Versioned,
}
/// The target version up to which refinery should migrate; used by [Runner].
#[derive(Clone, Copy)]
pub enum Target {
    /// Migrate up to the latest version available.
    Latest,
    /// Migrate up to the given version; a version higher than the latest
    /// available is ignored (see [`Runner::set_target`]).
    Version(u32),
}
// The state of a migration: Applied (already recorded on the database)
// or Unapplied (yet to be applied on the database).
#[derive(Clone, Debug)]
enum State {
    Applied,
    Unapplied,
}
/// Represents a schema migration to be run on the database,
/// this struct is used by the [`embed_migrations!`] and [`include_migration_mods!`] macros to gather migration files
/// and shouldn't be needed by the user
///
/// [`embed_migrations!`]: macro.embed_migrations.html
/// [`include_migration_mods!`]: macro.include_migration_mods.html
#[derive(Clone, Debug)]
pub struct Migration {
    // Whether this instance reflects an applied or a yet-unapplied migration.
    state: State,
    // Name parsed from the file name (the part after `__`).
    name: String,
    // Hash over name, version and SQL; used to detect divergent migrations.
    checksum: u64,
    version: i32,
    prefix: Type,
    // Raw SQL; only present for unapplied migrations (see `applied`).
    sql: Option<String>,
    // When the migration was applied; `None` while unapplied.
    applied_on: Option<DateTime<Local>>,
}
impl Migration {
    /// Create an unapplied migration, name and version are parsed from the input_name,
    /// which must be named in the format V{1}__{2}.rs where {1} represents the migration version and {2} the name.
    pub fn unapplied(input_name: &str, sql: &str) -> Result<Migration, Error> {
        // A successful match yields exactly 4 captures: the whole match,
        // the "V" prefix, the version, and the name.
        let captures = RE
            .captures(input_name)
            .filter(|caps| caps.len() == 4)
            .ok_or_else(|| Error::new(Kind::InvalidName, None))?;
        let version: i32 = captures[2]
            .parse()
            .map_err(|_| Error::new(Kind::InvalidVersion, None))?;
        let name: String = (&captures[3]).into();
        // The regex only admits a "V" prefix today, hence unreachable!.
        let prefix = match &captures[1] {
            "V" => Type::Versioned,
            _ => unreachable!(),
        };
        // Previously, `std::collections::hash_map::DefaultHasher` was used
        // to calculate the checksum and the implementation at that time
        // was SipHasher13. However, that implementation is not guaranteed:
        // > The internal algorithm is not specified, and so it and its
        // > hashes should not be relied upon over releases.
        // We now explicitly use SipHasher13 to both remain compatible with
        // existing migrations and prevent breaking from possible future
        // changes to `DefaultHasher`.
        //
        // NOTE: the hash input order (name, version, sql) is part of that
        // compatibility contract and must not be changed.
        let mut hasher = SipHasher13::new();
        name.hash(&mut hasher);
        version.hash(&mut hasher);
        sql.hash(&mut hasher);
        let checksum = hasher.finish();
        Ok(Migration {
            state: State::Unapplied,
            name,
            version,
            prefix,
            sql: Some(sql.into()),
            applied_on: None,
            checksum,
        })
    }
    // Create a migration from an applied migration on the database
    pub(crate) fn applied(
        version: i32,
        name: String,
        applied_on: DateTime<Local>,
        checksum: u64,
    ) -> Migration {
        Migration {
            state: State::Applied,
            name,
            checksum,
            version,
            // applied migrations are always versioned
            prefix: Type::Versioned,
            // the SQL of an already-applied migration is not reconstructed
            sql: None,
            applied_on: Some(applied_on),
        }
    }
    // convert the Unapplied into an Applied Migration, stamping it with
    // the current local time
    pub(crate) fn set_applied(&mut self) {
        self.applied_on = Some(Local::now());
        self.state = State::Applied;
    }
    // Get migration sql content
    pub(crate) fn sql(&self) -> Option<&str> {
        self.sql.as_deref()
    }
    /// Get the Migration version
    pub fn version(&self) -> u32 {
        // `version` is parsed from `\d+`, so it is non-negative and the
        // cast to u32 is lossless.
        self.version as u32
    }
    /// Get the Migration Name
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Get the time the Migration was applied, if it has been applied
    pub fn applied_on(&self) -> Option<&DateTime<Local>> {
        self.applied_on.as_ref()
    }
    /// Get the Migration checksum. Checksum is formed from the name version and sql of the Migration
    pub fn checksum(&self) -> u64 {
        self.checksum
    }
}
impl fmt::Display for Migration {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "V{}__{}", self.version, self.name)
}
}
// `eq` below is a proper equivalence relation, so `Eq` can be asserted.
impl Eq for Migration {}
impl PartialEq for Migration {
    /// Two migrations are equal when their version, name and checksum
    /// all match; `applied_on`, `state` and the raw SQL are not compared.
    fn eq(&self, other: &Migration) -> bool {
        (self.version, &self.name, self.checksum())
            == (other.version, &other.name, other.checksum())
    }
}
// Migrations are ordered by version alone.
//
// NOTE(review): this disagrees with `PartialEq`, which also compares
// name and checksum — `cmp` can return `Equal` for migrations that are
// not `==`, technically violating `Ord`'s contract. Callers appear to
// rely on version-only ordering; confirm before tightening.
impl Ord for Migration {
    fn cmp(&self, other: &Migration) -> Ordering {
        self.version.cmp(&other.version)
    }
}
impl PartialOrd for Migration {
    fn partial_cmp(&self, other: &Migration) -> Option<Ordering> {
        // Delegate to the total ordering defined by `Ord`.
        Some(self.cmp(other))
    }
}
/// Struct that represents the report of the migration cycle,
/// a `Report` instance is returned by the [`Runner::run`] and [`Runner::run_async`] methods
/// via [`Result`]`<Report, Error>`, on case of an [`Error`] during a migration, you can access the `Report` with [`Error.report`]
///
/// [`Error`]: struct.Error.html
/// [`Runner::run`]: struct.Runner.html#method.run
/// [`Runner::run_async`]: struct.Runner.html#method.run_async
/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
/// [`Error.report`]: struct.Error.html#method.report
#[derive(Clone, Debug)]
pub struct Report {
    // Migrations successfully applied during the cycle, in order.
    applied_migrations: Vec<Migration>,
}
impl Report {
/// Instantiate a new Report
pub(crate) fn new(applied_migrations: Vec<Migration>) -> Report {
Report { applied_migrations }
}
/// Retrieves the list of applied `Migration` of the migration cycle
pub fn applied_migrations(&self) -> &Vec<Migration> {
&self.applied_migrations
}
}
/// Struct that represents the entrypoint to run the migrations,
/// an instance of this struct is returned by the [`embed_migrations!`] and [`include_migration_mods!`] macros.
/// `Runner` should not need to be instantiated manually
///
/// [`embed_migrations!`]: macro.embed_migrations.html
/// [`include_migration_mods!`]: macro.include_migration_mods.html
pub struct Runner {
    // Run all migrations inside a single transaction when true.
    grouped: bool,
    // Abort when an applied migration has the same version but a
    // different name or checksum than its filesystem counterpart.
    abort_divergent: bool,
    // Abort when applied migrations are missing from the filesystem.
    abort_missing: bool,
    migrations: Vec<Migration>,
    // Version up to which the migration cycle should proceed.
    target: Target,
}
impl Runner {
    /// Instantiate a new Runner with the given gathered migrations and
    /// default settings (ungrouped, target Latest, abort on divergent
    /// and on missing migrations).
    pub fn new(migrations: &[Migration]) -> Runner {
        Runner {
            grouped: false,
            target: Target::Latest,
            abort_divergent: true,
            abort_missing: true,
            migrations: migrations.to_vec(),
        }
    }
    /// Get the gathered migrations.
    pub fn get_migrations(&self) -> &Vec<Migration> {
        &self.migrations
    }
    /// Set the target version up to which refinery should migrate, Latest migrates to the latest version available
    /// Version migrates to a user provided version, a Version with a higher version than the latest will be ignored.
    /// by default this is set to Latest
    pub fn set_target(self, target: Target) -> Runner {
        Runner { target, ..self }
    }
    /// Set true if all migrations should be grouped and run in a single transaction.
    /// by default this is set to false, each migration runs on their own transaction
    ///
    /// # Note
    ///
    /// set_grouped won't probably work on MySQL Databases as MySQL lacks support for transactions around schema alteration operations,
    /// meaning that if a migration fails to apply you will have to manually unpick the changes in order to try again (it’s impossible to roll back to an earlier point).
    pub fn set_grouped(self, grouped: bool) -> Runner {
        Runner { grouped, ..self }
    }
    /// Set true if migration process should abort if divergent migrations are found
    /// i.e. applied migrations with the same version but different name or checksum from the ones on the filesystem.
    /// by default this is set to true
    pub fn set_abort_divergent(self, abort_divergent: bool) -> Runner {
        Runner {
            abort_divergent,
            ..self
        }
    }
    /// Set true if migration process should abort if missing migrations are found
    /// i.e. applied migrations that are not found on the filesystem,
    /// or migrations found on filesystem with a version inferior to the last one applied but not applied.
    /// by default this is set to true
    pub fn set_abort_missing(self, abort_missing: bool) -> Runner {
        // Fixed: this previously bound its argument as `abort_divergent`
        // and updated that field instead, so `abort_missing` could never
        // be changed and `abort_divergent` was silently overwritten.
        Runner {
            abort_missing,
            ..self
        }
    }
    /// Queries the database for the last applied migration, returns None if there aren't applied Migrations
    pub fn get_last_applied_migration<'a, C>(
        &self,
        conn: &'a mut C,
    ) -> Result<Option<Migration>, Error>
    where
        C: Migrate,
    {
        Migrate::get_last_applied_migration(conn)
    }
    /// Queries the database asynchronously for the last applied migration, returns None if there aren't applied Migrations
    pub async fn get_last_applied_migration_async<C>(
        &self,
        conn: &mut C,
    ) -> Result<Option<Migration>, Error>
    where
        C: AsyncMigrate + Send,
    {
        AsyncMigrate::get_last_applied_migration(conn).await
    }
    /// Queries the database for all previous applied migrations
    pub fn get_applied_migrations<'a, C>(&self, conn: &'a mut C) -> Result<Vec<Migration>, Error>
    where
        C: Migrate,
    {
        Migrate::get_applied_migrations(conn)
    }
    /// Queries the database asynchronously for all previous applied migrations
    pub async fn get_applied_migrations_async<C>(
        &self,
        conn: &mut C,
    ) -> Result<Vec<Migration>, Error>
    where
        C: AsyncMigrate + Send,
    {
        AsyncMigrate::get_applied_migrations(conn).await
    }
    /// Runs the Migrations in the supplied database connection
    pub fn run<'a, C>(&self, conn: &'a mut C) -> Result<Report, Error>
    where
        C: Migrate,
    {
        Migrate::migrate(
            conn,
            &self.migrations,
            self.abort_divergent,
            self.abort_missing,
            self.grouped,
            self.target,
        )
    }
    /// Runs the Migrations asynchronously in the supplied database connection
    pub async fn run_async<C>(&self, conn: &mut C) -> Result<Report, Error>
    where
        C: AsyncMigrate + Send,
    {
        AsyncMigrate::migrate(
            conn,
            &self.migrations,
            self.abort_divergent,
            self.abort_missing,
            self.grouped,
            self.target,
        )
        .await
    }
}
| 31.676218 | 169 | 0.623971 |
cc4233510fa3ef35abea9aa09c55c337fbf24a45
| 238 |
use std::path::PathBuf;
use thiserror::Error;
// Errors raised when locating/loading UT1 blocklists.
#[derive(Error, Debug)]
pub enum Ut1Error {
    // The provided path exists but is not a directory.
    #[error("{0} is not a directory.")]
    NotADirectory(PathBuf),
    // No blocklist with the requested name was found.
    #[error("No blocklist named {0} found")]
    BlocklistNotFound(PathBuf),
}
| 21.636364 | 44 | 0.659664 |
50a36c38b4bac88bea011f7865f6aed615ce678a
| 1,267 |
use crate::sys;
/// Specifies the type of recurrent neural network used.
///
/// Mirrors cuDNN's `cudnnRNNMode_t`; the `From` conversions in this
/// module translate between the two representations.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum RnnMode {
    /// A single-gate recurrent neural network with a ReLU activation function.
    RnnReLu,
    /// A single-gate recurrent neural network with a tanh activation function.
    RnnTanh,
    /// A four-gate Long Short-Term Memory (LSTM) network with no peephole connections.
    Lstm,
    /// A three-gate network consisting of Gated Recurrent Units.
    Gru,
}
impl From<sys::cudnnRNNMode_t> for RnnMode {
    /// Converts the raw FFI enum into the safe wrapper.
    ///
    /// The match is exhaustive, so a new cuDNN variant becomes a compile error
    /// rather than a silent fall-through.
    fn from(raw: sys::cudnnRNNMode_t) -> Self {
        match raw {
            sys::cudnnRNNMode_t::CUDNN_RNN_RELU => Self::RnnReLu,
            sys::cudnnRNNMode_t::CUDNN_RNN_TANH => Self::RnnTanh,
            sys::cudnnRNNMode_t::CUDNN_LSTM => Self::Lstm,
            sys::cudnnRNNMode_t::CUDNN_GRU => Self::Gru,
        }
    }
}
impl From<RnnMode> for sys::cudnnRNNMode_t {
    /// Converts the safe wrapper back into the raw FFI enum (inverse of the
    /// conversion above).
    fn from(mode: RnnMode) -> Self {
        match mode {
            RnnMode::RnnReLu => sys::cudnnRNNMode_t::CUDNN_RNN_RELU,
            RnnMode::RnnTanh => sys::cudnnRNNMode_t::CUDNN_RNN_TANH,
            RnnMode::Lstm => sys::cudnnRNNMode_t::CUDNN_LSTM,
            RnnMode::Gru => sys::cudnnRNNMode_t::CUDNN_GRU,
        }
    }
}
| 34.243243 | 87 | 0.640095 |
1dbdc5e4a5004d8bf49744ebf2c218eebe9b90a5
| 731 |
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(alloc_jemalloc, alloc_system)]
#[cfg(not(any(target_env = "msvc", target_os = "bitrig")))]
extern crate alloc_jemalloc;
#[cfg(any(target_env = "msvc", target_os = "bitrig"))]
extern crate alloc_system;
/// Smoke test: allocating and debug-printing a `Box` exercises whichever
/// allocator crate was selected by the cfg attributes above.
fn main() {
    let boxed = Box::new(3);
    println!("{:?}", boxed);
}
| 34.809524 | 68 | 0.71409 |
031dd41ebae7320ef11a15397d071233fb3c5449
| 8,507 |
use ::nes_rom::ines;
use super::*;
use super::ppu;
/// PRG-ROM banking modes selected via MMC1 control-register bits 2-3.
pub enum PrgBankMode {
    /// Switch the full 32 KiB window at $8000 at once.
    Switch32K,
    /// Fix the first bank at $8000; switch the bank at $C000.
    FixFirst,
    /// Switch the bank at $8000; fix the last bank at $C000.
    FixLast,
}
/// CHR banking modes selected via MMC1 control-register bit 4.
pub enum ChrBankMode {
    /// Switch a single 8 KiB CHR bank.
    Switch8K,
    /// Switch two independent 4 KiB CHR banks.
    Switch4K,
}
/// iNES mapper 1 (MMC1): serial shift-register controlled PRG/CHR banking.
pub struct Mapper1 {
    // Shared cartridge state: ROM/RAM buffers and address mappers.
    pub context: Context,
    pub prg_bank_mode: PrgBankMode,
    pub chr_bank_mode: ChrBankMode,
    // 5-bit serial register, loaded one bit per CPU write to $8000-$FFFF.
    pub shift_register: u8,
    // Number of bits shifted in so far (0..=5).
    pub shift_count: u8,
    // Running CPU cycle counter; used to ignore back-to-back writes.
    pub cpu_cycle: u64,
    pub last_write_cpu_cycle: u64,
    // Whether PRG RAM at $6000-$7FFF is currently accessible.
    pub ram_enable: bool,
    // True when the cartridge carries no CHR ROM and uses CHR RAM instead.
    pub uses_chr_ram: bool,
}
impl Mapper1 {
    /// Creates an MMC1 mapper in its power-on state: last PRG bank fixed,
    /// single 8 KiB CHR bank, shift register cleared, PRG RAM disabled.
    pub fn new() -> Mapper1 {
        Mapper1 {
            context: Context::new(),
            prg_bank_mode: PrgBankMode::FixLast,
            chr_bank_mode: ChrBankMode::Switch8K,
            shift_register: 0,
            shift_count: 0,
            cpu_cycle: 0,
            last_write_cpu_cycle: 0,
            ram_enable: false,
            uses_chr_ram: false,
        }
    }
    /// Builds the mapper from a parsed iNES image: copies PRG/CHR data,
    /// allocates 8 KiB of PRG RAM, falls back to CHR RAM when the image
    /// carries no CHR ROM, and initialises banking windows and mirroring.
    pub fn from_ines(rom: &ines::Ines) -> Mapper1 {
        let mut mapper1 = Mapper1::new();
        mapper1.context.prg_rom = rom.prg_data.clone();
        mapper1.context.chr = rom.chr_data.clone();
        mapper1.context.prg_ram = vec![0; SIZE_8K];
        if mapper1.context.chr.is_empty() {
            // No CHR ROM present: provide 8 KiB of writable CHR RAM instead.
            mapper1.context.chr = vec![0; SIZE_8K];
            mapper1.uses_chr_ram = true;
        }
        // $8000-$BFFF starts at the first 16 KiB bank (region 0)...
        mapper1.context.prg_addr_mapper.set_banking_region(0, 0, SIZE_16K);
        // ...and $C000-$FFFF is fixed to the last bank (region 1), matching the
        // FixLast arm of `prg_bank_handler`. The previous code passed region 0
        // here, which overwrote the first window and left region 1 (holding the
        // reset vector) uninitialised.
        mapper1
            .context
            .prg_addr_mapper
            .set_banking_region_to_last_bank(1, SIZE_16K, mapper1.context.prg_rom.len());
        mapper1.context.wram_addr_mapper.set_banking_region(0, 0, SIZE_8K);
        mapper1.context.chr_addr_mapper.set_banking_region(0, 0, SIZE_8K);
        set_nametable_from_mirroring_type(&mut mapper1.context, rom.nametable_mirroring);
        mapper1
    }
    /// Resets the serial load state (does not touch the banking registers).
    pub fn clear_shift(&mut self) {
        self.shift_register = 0;
        self.shift_count = 0;
    }
    /// Control register ($8000-$9FFF): mirroring, PRG mode and CHR mode.
    pub fn ctrl_handler(&mut self, data: u8) {
        // mirroring (bits 0-1)
        match data & 0x03 {
            0 => set_nametable_single_screen_lower(&mut self.context),
            1 => set_nametable_single_screen_upper(&mut self.context),
            2 => set_nametable_vertical(&mut self.context),
            3 => set_nametable_horizontal(&mut self.context),
            _ => panic!("mapper 1 mirroring out of bounds")
        }
        // prg bank mode (bits 2-3)
        match (data & 0x0C) >> 2 {
            0 | 1 => { self.prg_bank_mode = PrgBankMode::Switch32K; }
            2 => { self.prg_bank_mode = PrgBankMode::FixFirst; }
            3 => { self.prg_bank_mode = PrgBankMode::FixLast; }
            _ => panic!("mapper 1 prg bank mode out of bounds")
        }
        // chr bank mode (bit 4)
        match (data & 0x10) >> 4 {
            0 => { self.chr_bank_mode = ChrBankMode::Switch8K; }
            1 => { self.chr_bank_mode = ChrBankMode::Switch4K; }
            _ => panic!("mapper 1 chr bank mode out of bounds")
        }
    }
    /// CHR bank 0 register ($A000-$BFFF): selects the bank at PPU $0000.
    pub fn chr_bank0_handler(&mut self, data: u8) {
        match self.chr_bank_mode {
            ChrBankMode::Switch8K => {
                // In 8 KiB mode the low bit of the bank number is ignored.
                self.context.chr_addr_mapper.set_banking_region(0, (data >> 1) as usize, SIZE_8K);
            }
            ChrBankMode::Switch4K => {
                self.context.chr_addr_mapper.set_banking_region(0, data as usize, SIZE_4K);
            }
        }
    }
    /// CHR bank 1 register ($C000-$DFFF): selects the bank at PPU $1000.
    pub fn chr_bank1_handler(&mut self, data: u8) {
        match self.chr_bank_mode {
            ChrBankMode::Switch8K => {
                // ignored in 8kb mode
            }
            ChrBankMode::Switch4K => {
                self.context.chr_addr_mapper.set_banking_region(1, data as usize, SIZE_4K);
            }
        }
    }
    /// PRG bank register ($E000-$FFFF): bank select (bits 0-3) and PRG RAM
    /// chip enable (bit 4).
    pub fn prg_bank_handler(&mut self, data: u8) {
        let bank = (data & 0x0F) as usize;
        match self.prg_bank_mode {
            PrgBankMode::Switch32K => {
                self.context.prg_addr_mapper.set_banking_region(0, bank >> 1, SIZE_32K);
            }
            PrgBankMode::FixFirst => {
                self.context.prg_addr_mapper.set_banking_region(0, 0, SIZE_16K);
                self.context.prg_addr_mapper.set_banking_region(1, bank, SIZE_16K);
            }
            PrgBankMode::FixLast => {
                self.context.prg_addr_mapper.set_banking_region(0, bank, SIZE_16K);
                self.context.prg_addr_mapper.set_banking_region_to_last_bank(1, SIZE_16K, self.context.prg_rom.len());
            }
        }
        // Bit 4 is the PRG RAM chip-enable line: 0 = enabled, 1 = disabled
        // (MMC1B and later, per the NESdev MMC1 reference). The previous code
        // inverted the polarity and could never disable RAM once enabled.
        self.ram_enable = data & 0x10 == 0;
    }
    /// CPU write handler for $8000-$FFFF. Shifts one bit into the serial
    /// register; the fifth write commits the 5-bit value to the internal
    /// register selected by address bits 13-14. A write with bit 7 set resets
    /// the shifter instead.
    pub fn write_handler(&mut self, pinout: mos::Pinout) {
        if (pinout.data & 0x80) > 0 {
            // NOTE(review): hardware additionally forces the PRG mode to
            // FixLast (control |= 0x0C) on a reset write; not emulated here —
            // confirm whether that omission is intentional.
            self.clear_shift();
        }
        else {
            // MMC1 loads LSB-first: the first written bit becomes bit 0 of the
            // committed value. The previous `<< 1` variant reversed the bit
            // order of every register value relative to hardware.
            self.shift_register = (self.shift_register >> 1) | ((pinout.data & 0x01) << 4);
            self.shift_count += 1;
        }
        // every fifth write
        if self.shift_count == 5 {
            let reg_index = ((pinout.address & 0x6000) >> 13) as u8 & 0x0F;
            match reg_index {
                0 => { self.ctrl_handler(self.shift_register); }
                1 => { self.chr_bank0_handler(self.shift_register); }
                2 => { self.chr_bank1_handler(self.shift_register); }
                3 => { self.prg_bank_handler(self.shift_register); }
                _ => panic!("mmc1 register out of bounds")
            }
            self.clear_shift();
        }
    }
}
/// Bus glue: maps CPU/PPU reads and writes onto the mapper's banked memories.
impl Mapper for Mapper1 {
    // cpu
    fn read_cpu_internal_ram(&mut self, mut pinout: mos::Pinout) -> mos::Pinout {
        // 2 KiB internal RAM is mirrored through the address range (mask 0x7FF).
        pinout.data = self.context.sys_ram[(pinout.address & 0x7FF) as usize];
        pinout
    }
    fn read_cpu_exp(&mut self, pinout: mos::Pinout) -> mos::Pinout {
        // open bus
        pinout
    }
    fn read_cpu_wram(&mut self, mut pinout: mos::Pinout) -> mos::Pinout {
        // PRG RAM reads only succeed while RAM is enabled; otherwise the bus
        // value is left untouched (open bus).
        if self.ram_enable {
            let internal_address = self.context.wram_addr_mapper.translate_address(pinout.address);
            pinout.data = self.context.prg_ram[internal_address as usize];
        }
        pinout
    }
    fn read_cpu_prg(&mut self, mut pinout: mos::Pinout) -> mos::Pinout {
        let internal_address = self.context.prg_addr_mapper.translate_address(pinout.address);
        pinout.data = self.context.prg_rom[internal_address as usize];
        pinout
    }
    fn write_cpu_internal_ram(&mut self, pinout: mos::Pinout) -> mos::Pinout {
        self.context.sys_ram[(pinout.address & 0x7FF) as usize] = pinout.data;
        pinout
    }
    fn write_cpu_exp(&mut self, pinout: mos::Pinout) -> mos::Pinout {
        // open bus
        pinout
    }
    fn write_cpu_wram(&mut self, pinout: mos::Pinout) -> mos::Pinout {
        // Writes to disabled PRG RAM are silently ignored.
        if self.ram_enable {
            let internal_address = self.context.wram_addr_mapper.translate_address(pinout.address);
            self.context.prg_ram[internal_address as usize] = pinout.data;
        }
        pinout
    }
    fn write_cpu_prg(&mut self, pinout: mos::Pinout) -> mos::Pinout {
        // MMC1 ignores writes on consecutive CPU cycles (e.g. from RMW
        // instructions); only forward when the previous cycle was not a write.
        if self.cpu_cycle > self.last_write_cpu_cycle {
            self.write_handler(pinout);
        }
        pinout
    }
    // ppu
    fn read_ppu_chr(&mut self, mut pinout: ppu::Pinout) -> ppu::Pinout {
        let internal_address = self.context.chr_addr_mapper.translate_address(pinout.address);
        pinout.data = self.context.chr[internal_address as usize];
        pinout
    }
    fn read_ppu_nt(&mut self, mut pinout: ppu::Pinout) -> ppu::Pinout {
        let internal_address = self.context.nt_addr_mapper.translate_address(pinout.address & 0x2fff);
        pinout.data = self.context.vram[internal_address as usize];
        pinout
    }
    fn write_ppu_chr(&mut self, pinout: ppu::Pinout) -> ppu::Pinout {
        // CHR writes only land when the cartridge uses CHR RAM; CHR ROM is read-only.
        if self.uses_chr_ram {
            let internal_address = self.context.chr_addr_mapper.translate_address(pinout.address);
            self.context.chr[internal_address as usize] = pinout.data;
        }
        pinout
    }
    fn write_ppu_nt(&mut self, pinout: ppu::Pinout) -> ppu::Pinout {
        let internal_address = self.context.nt_addr_mapper.translate_address(pinout.address & 0x2fff);
        self.context.vram[internal_address as usize] = pinout.data;
        pinout
    }
    fn cpu_tick(&mut self, pinout: mos::Pinout) -> mos::Pinout {
        self.cpu_cycle += 1;
        if !pinout.ctrl.contains(mos::Ctrl::RW) {
            //check if this is a write cycle
            self.last_write_cpu_cycle = self.cpu_cycle;
        }
        pinout
    }
    fn ppu_tick(&mut self, pinout: ppu::Pinout) -> ppu::Pinout {
        pinout
    }
}
| 31.981203 | 118 | 0.589867 |
08979fe7d745e77979f0b4d237ade6d3568cd40a
| 52,914 |
use super::*;
use crate::{
actors::{Actor, ActorPath, Dispatcher, DynActorRef, SystemPath, Transport},
component::{Component, ComponentContext, ExecuteResult},
};
use std::{pin::Pin, sync::Arc};
use crate::{
actors::{NamedPath, Transport::Tcp},
messaging::{
ActorRegistration,
DispatchData,
DispatchEnvelope,
EventEnvelope,
MsgEnvelope,
NetMessage,
PathResolvable,
PolicyRegistration,
RegistrationEnvelope,
RegistrationError,
RegistrationEvent,
RegistrationPromise,
},
net::{
buffers::*,
events::NetworkEvent,
ConnectionState,
NetworkBridgeErr,
Protocol,
SocketAddr,
},
timer::timer_manager::Timer,
};
use arc_swap::ArcSwap;
use futures::{
self,
task::{Context, Poll},
};
use lookup::{ActorLookup, ActorStore, InsertResult, LookupResult};
use queue_manager::QueueManager;
use rustc_hash::FxHashMap;
use std::{collections::VecDeque, net::IpAddr, time::Duration};
pub mod lookup;
pub mod queue_manager;
// Default values for network config.
const RETRY_CONNECTIONS_INTERVAL: u64 = 5000; // ms between connection-retry sweeps
const BOOT_TIMEOUT: u64 = 5000; // ms to wait for the network layer to come up
const MAX_RETRY_ATTEMPTS: u8 = 10; // retries before a connection is dropped
type NetHashMap<K, V> = FxHashMap<K, V>; // FxHash: fast, non-DoS-resistant hashing for internal maps
/// Configuration builder for the network dispatcher
///
/// # Example
///
/// This example binds to local host on a free port chosen by the operating system.
///
/// ```
/// use kompact::prelude::*;
///
/// let mut conf = KompactConfig::default();
/// conf.system_components(DeadletterBox::new, NetworkConfig::default().build());
/// let system = conf.build().expect("system");
/// # system.shutdown().expect("shutdown");
/// ```
#[derive(Clone, Debug)]
pub struct NetworkConfig {
    // Socket address the dispatcher binds to.
    addr: SocketAddr,
    // Transport protocol; only TCP is set by the current constructors.
    transport: Transport,
    // Buffer configuration used by the network thread and dispatcher (not actors).
    buffer_config: BufferConfig,
    // Optional custom allocator for network buffer chunks.
    custom_allocator: Option<Arc<dyn ChunkAllocator>>,
    // When true, Nagle's algorithm is disabled on TCP channels.
    tcp_nodelay: bool,
    // Retries before giving up on a connection and dropping its queue.
    max_connection_retry_attempts: u8,
    // Milliseconds between connection retry attempts.
    connection_retry_interval: u64,
    // Milliseconds to wait for the network layer to start.
    boot_timeout: u64,
}
impl NetworkConfig {
    /// Create a new config with `addr` and protocol [TCP](Transport::Tcp)
    /// NetworkDispatcher and NetworkThread will use the default `BufferConfig`
    pub fn new(addr: SocketAddr) -> Self {
        NetworkConfig {
            addr,
            transport: Transport::Tcp,
            buffer_config: BufferConfig::default(),
            custom_allocator: None,
            tcp_nodelay: true,
            max_connection_retry_attempts: MAX_RETRY_ATTEMPTS,
            connection_retry_interval: RETRY_CONNECTIONS_INTERVAL,
            boot_timeout: BOOT_TIMEOUT,
        }
    }
    /// Create a new config with `addr` and protocol [TCP](Transport::Tcp)
    /// Note: Only the NetworkThread and NetworkDispatcher will use the `BufferConfig`, not Actors
    pub fn with_buffer_config(addr: SocketAddr, buffer_config: BufferConfig) -> Self {
        // Validate up-front so a bad config fails at construction time.
        buffer_config.validate();
        let mut cfg = NetworkConfig::new(addr);
        cfg.set_buffer_config(buffer_config);
        cfg
    }
    /// Create a new config with `addr` and protocol [TCP](Transport::Tcp)
    /// Note: Only the NetworkThread and NetworkDispatcher will use the `BufferConfig`, not Actors
    pub fn with_custom_allocator(
        addr: SocketAddr,
        buffer_config: BufferConfig,
        custom_allocator: Arc<dyn ChunkAllocator>,
    ) -> Self {
        buffer_config.validate();
        NetworkConfig {
            addr,
            transport: Transport::Tcp,
            buffer_config,
            custom_allocator: Some(custom_allocator),
            tcp_nodelay: true,
            max_connection_retry_attempts: MAX_RETRY_ATTEMPTS,
            connection_retry_interval: RETRY_CONNECTIONS_INTERVAL,
            boot_timeout: BOOT_TIMEOUT,
        }
    }
    /// Replace the current socket address with `addr`.
    pub fn with_socket(mut self, addr: SocketAddr) -> Self {
        self.addr = addr;
        self
    }
    /// Complete the configuration and provide a function that produces a network dispatcher
    ///
    /// Returns the appropriate function type for use
    /// with [system_components](KompactConfig::system_components).
    pub fn build(self) -> impl Fn(KPromise<()>) -> NetworkDispatcher {
        move |notify_ready| NetworkDispatcher::with_config(self.clone(), notify_ready)
    }
    /// Returns a pointer to the configurations [BufferConfig](net::buffers::BufferConfig).
    pub fn get_buffer_config(&self) -> &BufferConfig {
        &self.buffer_config
    }
    /// Sets the configurations [BufferConfig](net::buffers::BufferConfig) to `buffer_config`
    // NOTE(review): unlike `with_buffer_config`, this setter does not call
    // `validate()` — confirm whether unvalidated configs are acceptable here.
    pub fn set_buffer_config(&mut self, buffer_config: BufferConfig) -> () {
        self.buffer_config = buffer_config;
    }
    /// Returns a pointer to the `CustomAllocator` option so that it can be cloned by the caller.
    pub fn get_custom_allocator(&self) -> &Option<Arc<dyn ChunkAllocator>> {
        &self.custom_allocator
    }
    /// Reads the `tcp_nodelay` parameter of the [NetworkConfig](NetworkConfig).
    pub fn get_tcp_nodelay(&self) -> bool {
        self.tcp_nodelay
    }
    /// If set to `true` the Nagle algorithm will be turned off for all Tcp Network-channels.
    ///
    /// Decreases network-latency at the cost of reduced throughput and increased congestion.
    ///
    /// Default value is `false`, i.e. the Nagle algorithm is turned on by default.
    pub fn set_tcp_nodelay(&mut self, nodelay: bool) {
        self.tcp_nodelay = nodelay;
    }
    /// Configures how many attempts at re-establishing a connection will be made before giving up
    /// and discarding the enqueued outgoing messages.
    ///
    /// Default value is 10 times.
    pub fn set_max_connection_retry_attempts(&mut self, count: u8) {
        self.max_connection_retry_attempts = count;
    }
    /// Returns the number of times the system will retry before giving up on a connection.
    pub fn get_max_connection_retry_attempts(&self) -> u8 {
        self.max_connection_retry_attempts
    }
    /// Configures how long to wait (in ms) between attempts at establishing a connection.
    ///
    /// Default value is 5000 ms.
    pub fn set_connection_retry_interval(&mut self, milliseconds: u64) {
        self.connection_retry_interval = milliseconds;
    }
    /// How long (in ms) the system will wait between attempts at re-establishing connection.
    pub fn get_connection_retry_interval(&self) -> u64 {
        self.connection_retry_interval
    }
    /// Configures how long the system will wait (in ms) for the network layer to set-up
    ///
    /// Default value is 5000 ms.
    pub fn set_boot_timeout(&mut self, milliseconds: u64) {
        self.boot_timeout = milliseconds;
    }
    /// How long (in ms) the system will wait for the network layer to set-up
    pub fn get_boot_timeout(&self) -> u64 {
        self.boot_timeout
    }
}
/// Socket defaults to `127.0.0.1:0` (i.e. a random local port) and protocol is [TCP](Transport::Tcp)
impl Default for NetworkConfig {
fn default() -> Self {
NetworkConfig {
addr: "127.0.0.1:0".parse().unwrap(),
transport: Transport::Tcp,
buffer_config: BufferConfig::default(),
custom_allocator: None,
tcp_nodelay: true,
max_connection_retry_attempts: MAX_RETRY_ATTEMPTS,
connection_retry_interval: RETRY_CONNECTIONS_INTERVAL,
boot_timeout: BOOT_TIMEOUT,
}
}
}
/// A port providing `NetworkStatusUpdates` to listeners.
pub struct NetworkStatusPort;
impl Port for NetworkStatusPort {
    /// Status updates pushed from the dispatcher to subscribed components.
    type Indication = NetworkStatus;
    /// Commands components may send to the dispatcher.
    type Request = NetworkStatusRequest;
}
/// Information regarding changes to the systems connections to remote systems
///
/// Emitted as indications on the [`NetworkStatusPort`].
#[derive(Clone, Debug)]
pub enum NetworkStatus {
    /// Indicates that a connection has been established to the remote system
    ConnectionEstablished(SystemPath),
    /// Indicates that a connection has been lost to the remote system.
    /// The system will automatically try to recover the connection for a configurable amount of
    /// retries. The end of the automatic retries is signalled by a `ConnectionDropped` message.
    ConnectionLost(SystemPath),
    /// Indicates that a connection has been dropped and no more automatic retries to re-establish
    /// the connection will be attempted and all queued messages have been dropped.
    ConnectionDropped(SystemPath),
    /// Indicates that a connection has been gracefully closed.
    ConnectionClosed(SystemPath),
    /// Indicates that a system has been blocked
    BlockedSystem(SystemPath),
    /// Indicates that an IpAddr has been blocked
    BlockedIp(IpAddr),
    /// Indicates that a system has been allowed after previously being blocked
    UnblockedSystem(SystemPath),
    /// Indicates that an IpAddr has been allowed after previously being blocked
    UnblockedIp(IpAddr),
}
/// Sent by Actors and Components to request information about the Network
///
/// Sent as requests on the [`NetworkStatusPort`]; outcomes are observed via
/// [`NetworkStatus`] indications.
#[derive(Clone, Debug)]
pub enum NetworkStatusRequest {
    /// Request that the connection to the given address is gracefully closed.
    DisconnectSystem(SystemPath),
    /// Request that a connection is established to the given System.
    ConnectSystem(SystemPath),
    /// Request that a SystemPath to be blocked from this system. An established connection
    /// will be dropped and future attempts to establish a connection by that given SystemPath
    /// will be denied.
    BlockSystem(SystemPath),
    /// Request an IpAddr to be blocked.
    BlockIp(IpAddr),
    /// Request a System to be allowed after previously being blocked
    UnblockSystem(SystemPath),
    /// Request an IpAddr to be allowed after previously being blocked
    UnblockIp(IpAddr),
}
/// A network-capable dispatcher for sending messages to remote actors
///
/// Construct this using [NetworkConfig](NetworkConfig::build).
///
/// This dispatcher automatically creates channels to requested target
/// systems on demand and maintains them while in use.
///
/// The current implementation only supports [TCP](Transport::Tcp) as
/// a transport protocol.
///
/// If possible, this implementation will "reflect" messages
/// to local actors directly back up, instead of serialising them first.
#[derive(ComponentDefinition)]
pub struct NetworkDispatcher {
    /// Kompact component context for this dispatcher
    ctx: ComponentContext<NetworkDispatcher>,
    /// Local map of connection statuses
    connections: NetHashMap<SocketAddr, ConnectionState>,
    /// Network configuration for this dispatcher
    cfg: NetworkConfig,
    /// Shared lookup structure for mapping [actor paths](ActorPath) and [actor refs](ActorRef)
    lookup: Arc<ArcSwap<ActorStore>>,
    // Fields initialized at [Start](ControlEvent::Start) – they require ComponentContextual awareness
    /// Bridge into asynchronous networking layer
    net_bridge: Option<net::Bridge>,
    /// A cached version of the bound system path
    system_path: Option<SystemPath>,
    /// Management for queuing Frames during network unavailability (conn. init. and MPSC unreadiness)
    queue_manager: QueueManager,
    /// Reaper which cleans up deregistered actor references in the actor lookup table
    reaper: lookup::gc::ActorRefReaper,
    /// Promise fulfilled once the network layer is up; consumed on start
    notify_ready: Option<KPromise<()>>,
    /// Stores the number of retry-attempts for connections. Checked and incremented periodically by the reaper.
    retry_map: FxHashMap<SocketAddr, u8>,
    /// Buffer chunks returned from the network layer awaiting deallocation
    garbage_buffers: VecDeque<BufferChunk>,
    /// The dispatcher emits NetworkStatusUpdates to the `NetworkStatusPort`.
    network_status_port: ProvidedPort<NetworkStatusPort>,
}
impl NetworkDispatcher {
/// Create a new dispatcher with the default configuration
///
/// See also [NetworkConfig](NetworkConfig).
///
/// # Example
///
/// This example binds to local host on a free port chosen by the operating system.
///
/// ```
/// use kompact::prelude::*;
///
/// let mut conf = KompactConfig::default();
/// conf.system_components(DeadletterBox::new, NetworkDispatcher::new);
/// let system = conf.build().expect("system");
/// # system.shutdown().expect("shutdown");
/// ```
pub fn new(notify_ready: KPromise<()>) -> Self {
let config = NetworkConfig::default();
NetworkDispatcher::with_config(config, notify_ready)
}
    /// Create a new dispatcher with the given configuration
    ///
    /// For better readability in combination with [system_components](KompactConfig::system_components),
    /// use [NetworkConfig::build](NetworkConfig::build) instead.
    pub fn with_config(cfg: NetworkConfig, notify_ready: KPromise<()>) -> Self {
        let lookup = Arc::new(ArcSwap::from_pointee(ActorStore::new()))
;
        // Just a temporary assignment...will be replaced from config on start
        let reaper = lookup::gc::ActorRefReaper::default();
        // Lifecycle-dependent fields (bridge, system path) stay unset until `start()`.
        NetworkDispatcher {
            ctx: ComponentContext::uninitialised(),
            connections: Default::default(),
            cfg,
            lookup,
            net_bridge: None,
            system_path: None,
            queue_manager: QueueManager::new(),
            reaper,
            notify_ready: Some(notify_ready),
            garbage_buffers: VecDeque::new(),
            retry_map: Default::default(),
            network_status_port: ProvidedPort::uninitialised(),
        }
    }
/// Return a reference to the cached system path
///
/// Mutable, since it will update the cached value, if necessary.
pub fn system_path_ref(&mut self) -> &SystemPath {
match self.system_path {
Some(ref path) => path,
None => {
let _ = self.system_path(); // just to fill the cache
if let Some(ref path) = self.system_path {
path
} else {
unreachable!(
"Cached value should have been filled by calling self.system_path()!"
);
}
}
}
}
    /// Boots the network bridge, registers the deadletter box in the lookup
    /// table, and schedules the connection-retry timer.
    fn start(&mut self) -> () {
        debug!(self.ctx.log(), "Starting self and network bridge");
        self.reaper = lookup::gc::ActorRefReaper::from_config(self.ctx.config());
        let dispatcher = self
            .actor_ref()
            .hold()
            .expect("Self can hardly be deallocated!");
        let bridge_logger = self.ctx.log().new(o!("owner" => "Bridge"));
        let network_thread_logger = self.ctx.log().new(o!("owner" => "NetworkThread"));
        let (mut bridge, _addr) = net::Bridge::new(
            self.lookup.clone(),
            network_thread_logger,
            bridge_logger,
            self.cfg.addr,
            dispatcher.clone(),
            &self.cfg,
        );
        let deadletter: DynActorRef = self.ctx.system().deadletter_ref().dyn_ref();
        // Publish the deadletter box via copy-on-write update of the shared store.
        self.lookup.rcu(|current| {
            let mut next = ActorStore::clone(&current);
            next.insert(PathResolvable::System, deadletter.clone())
                .expect("Deadletter shouldn't error");
            next
        });
        bridge.set_dispatcher(dispatcher);
        self.schedule_retries();
        self.net_bridge = Some(bridge);
    }
fn stop(&mut self) -> () {
if let Some(bridge) = self.net_bridge.take() {
if let Err(e) = bridge.stop() {
error!(
self.ctx().log(),
"NetworkBridge did not shut down as expected! Error was:\n {:?}\n", e
);
}
}
}
fn kill(&mut self) -> () {
if let Some(bridge) = self.net_bridge.take() {
if let Err(e) = bridge.kill() {
error!(
self.ctx().log(),
"NetworkBridge did not shut down as expected! Error was:\n {:?}\n", e
);
}
}
}
    /// Periodically prunes deallocated actor refs from the lookup table and
    /// retries freeing returned buffer chunks; reschedules itself with an
    /// adaptive interval (slower when nothing was reaped).
    fn schedule_reaper(&mut self) {
        if !self.reaper.is_scheduled() {
            // First time running; mark as scheduled and jump straight to scheduling
            self.reaper.schedule();
        } else {
            // Repeated schedule; prune deallocated ActorRefs and update strategy accordingly
            let num_reaped = self.reaper.run(&self.lookup);
            if num_reaped == 0 {
                // No work done; slow down interval
                self.reaper.strategy_mut().incr();
            } else {
                self.reaper.strategy_mut().decr();
            }
        }
        let next_wakeup = self.reaper.strategy().curr();
        debug!(
            self.ctx().log(),
            "Scheduling reaping at {:?}ms", next_wakeup
        );
        // Attempt to free returned buffer chunks; the ones still referenced
        // elsewhere go back into the garbage queue for the next pass.
        let mut retry_queue = VecDeque::new();
        for mut trash in self.garbage_buffers.drain(..) {
            if !trash.free() {
                retry_queue.push_back(trash);
            }
        }
        // info!(self.ctx().log(), "tried to clean {} buffer(s)", retry_queue.len()); // manual verification in testing
        self.garbage_buffers.append(&mut retry_queue);
        self.schedule_once(Duration::from_millis(next_wakeup), move |target, _id| {
            target.schedule_reaper();
            Handled::Ok
        });
    }
fn schedule_retries(&mut self) {
// First check the retry_map if we should re-request connections
let drain = self.retry_map.clone();
self.retry_map.clear();
for (addr, retry) in drain {
if retry < self.cfg.max_connection_retry_attempts {
// Make sure we will re-request connection later
self.retry_map.insert(addr, retry + 1);
if let Some(bridge) = &self.net_bridge {
// Do connection attempt
debug!(
self.ctx().log(),
"Dispatcher retrying connection to host {}, attempt {}/{}",
addr,
retry,
self.cfg.max_connection_retry_attempts
);
bridge.connect(Transport::Tcp, addr).unwrap();
}
} else {
// Too many retries, give up on the connection.
info!(
self.ctx().log(),
"Dispatcher giving up on remote host {}, dropping queues", addr
);
self.queue_manager.drop_queue(&addr);
self.connections.remove(&addr);
self.network_status_port
.trigger(NetworkStatus::ConnectionDropped(SystemPath::with_socket(
Transport::Tcp,
addr,
)));
}
}
self.schedule_once(
Duration::from_millis(self.cfg.connection_retry_interval),
move |target, _id| {
target.schedule_retries();
Handled::Ok
},
);
}
    /// Dispatches network events received from the bridge: connection state
    /// changes, rejected data to be re-queued, and block/unblock notifications.
    fn on_event(&mut self, ev: EventEnvelope) {
        match ev {
            EventEnvelope::Network(ev) => match ev {
                NetworkEvent::Connection(addr, conn_state) => {
                    if let Err(e) = self.on_conn_state(addr, conn_state) {
                        error!(
                            self.ctx().log(),
                            "Error while connecting to {}, \n{:?}", addr, e
                        )
                    }
                }
                NetworkEvent::Data(_) => {
                    // TODO shouldn't be receiving these here, as they should be routed directly to the ActorRef
                    debug!(self.ctx().log(), "Received important data!");
                }
                NetworkEvent::RejectedData(addr, data) => {
                    // These are messages which we routed to a network-thread before they lost the connection.
                    // Priority-enqueue so they go out before newer messages once reconnected.
                    self.queue_manager.enqueue_priority_data(data, addr);
                }
                NetworkEvent::BlockedSocket(socket_addr, trigger_status_port) => {
                    let sys_path = SystemPath::new(Tcp, socket_addr.ip(), socket_addr.port());
                    self.connections
                        .insert(socket_addr, ConnectionState::Blocked);
                    if trigger_status_port {
                        self.network_status_port
                            .trigger(NetworkStatus::BlockedSystem(sys_path));
                    }
                }
                NetworkEvent::BlockedIp(ip_addr) => {
                    self.network_status_port
                        .trigger(NetworkStatus::BlockedIp(ip_addr));
                }
                NetworkEvent::UnblockedSocket(socket_addr, trigger_status_port) => {
                    let sys_path = SystemPath::new(Tcp, socket_addr.ip(), socket_addr.port());
                    // Unblocked connections start over from the Closed state.
                    self.connections
                        .insert(socket_addr, ConnectionState::Closed);
                    if trigger_status_port {
                        self.network_status_port
                            .trigger(NetworkStatus::UnblockedSystem(sys_path));
                    }
                }
                NetworkEvent::UnblockedIp(ip_addr) => {
                    self.network_status_port
                        .trigger(NetworkStatus::UnblockedIp(ip_addr));
                }
            },
        }
    }
    /// Reacts to a connection state change for `addr`: updates the local state
    /// map, notifies `NetworkStatusPort` listeners, drains queued messages on
    /// `Connected`, and acks `Closed`/`Lost` back to the bridge.
    fn on_conn_state(
        &mut self,
        addr: SocketAddr,
        state: ConnectionState,
    ) -> Result<(), NetworkBridgeErr> {
        use self::ConnectionState::*;
        match state {
            Connected => {
                info!(
                    self.ctx().log(),
                    "registering newly connected conn at {:?}", addr
                );
                self.network_status_port
                    .trigger(NetworkStatus::ConnectionEstablished(
                        SystemPath::with_socket(Transport::Tcp, addr),
                    ));
                // A successful connection resets the retry bookkeeping.
                let _ = self.retry_map.remove(&addr);
                if self.queue_manager.has_data(&addr) {
                    // Drain as much as possible
                    while let Some(frame) = self.queue_manager.pop_data(&addr) {
                        if let Some(bridge) = &self.net_bridge {
                            //println!("Sending queued frame to newly established connection");
                            bridge.route(addr, frame, net::Protocol::Tcp)?;
                        }
                    }
                }
            }
            Closed => {
                self.network_status_port
                    .trigger(NetworkStatus::ConnectionClosed(SystemPath::with_socket(
                        Transport::Tcp,
                        addr,
                    )));
                // Ack the closing
                if let Some(bridge) = &self.net_bridge {
                    bridge.ack_closed(addr)?;
                }
            }
            Lost => {
                if self.retry_map.get(&addr).is_none() {
                    warn!(self.ctx().log(), "connection lost to {:?}", addr);
                    self.retry_map.insert(addr, 0); // Make sure we try to re-establish the connection
                }
                self.network_status_port
                    .trigger(NetworkStatus::ConnectionLost(SystemPath::with_socket(
                        Transport::Tcp,
                        addr,
                    )));
                if let Some(bridge) = &self.net_bridge {
                    bridge.ack_closed(addr)?;
                }
            }
            ref _other => (), // Don't care
        }
        // Record the new state regardless of which arm ran.
        self.connections.insert(addr, state);
        Ok(())
    }
    /// Forwards `msg` up to a local `dst` actor, if it exists.
    ///
    /// Messages for unknown paths or failed lookups go to the deadletter box;
    /// deserialisation failures are logged and the message is dropped.
    fn route_local(&mut self, dst: ActorPath, msg: DispatchData) -> () {
        let lookup = self.lookup.load();
        let lookup_result = lookup.get_by_actor_path(&dst);
        match msg.into_local() {
            Ok(netmsg) => match lookup_result {
                LookupResult::Ref(actor) => {
                    actor.enqueue(netmsg);
                }
                LookupResult::Group(group) => {
                    // Routing groups decide internally which member(s) receive the message.
                    group.route(netmsg, self.log());
                }
                LookupResult::None => {
                    error!(
                        self.ctx.log(),
                        "No local actor found at {:?}. Forwarding to DeadletterBox",
                        netmsg.receiver,
                    );
                    self.ctx.deadletter_ref().enqueue(MsgEnvelope::Net(netmsg));
                }
                LookupResult::Err(e) => {
                    error!(
                        self.ctx.log(),
                        "An error occurred during local actor lookup at {:?}. Forwarding to DeadletterBox. The error was: {}",
                        netmsg.receiver,
                        e
                    );
                    self.ctx.deadletter_ref().enqueue(MsgEnvelope::Net(netmsg));
                }
            },
            Err(e) => {
                error!(self.log(), "Could not serialise msg: {:?}. Dropping...", e);
            }
        }
    }
fn route_remote_udp(
&mut self,
addr: SocketAddr,
data: DispatchData,
) -> Result<(), NetworkBridgeErr> {
if let Some(bridge) = &self.net_bridge {
bridge.route(addr, data, net::Protocol::Udp)?;
} else {
warn!(
self.ctx.log(),
"Dropping UDP message to {}, as bridge is not connected.", addr
);
}
Ok(())
}
fn route_remote_tcp(
&mut self,
addr: SocketAddr,
data: DispatchData,
) -> Result<(), NetworkBridgeErr> {
let state: &mut ConnectionState =
self.connections.entry(addr).or_insert(ConnectionState::New);
let next: Option<ConnectionState> = match *state {
ConnectionState::New => {
debug!(
self.ctx.log(),
"No connection found; establishing and queuing frame"
);
self.queue_manager.enqueue_data(data, addr);
if let Some(ref mut bridge) = self.net_bridge {
debug!(self.ctx.log(), "Establishing new connection to {:?}", addr);
self.retry_map.insert(addr, 0); // Make sure we will re-request connection later
bridge.connect(Transport::Tcp, addr).unwrap();
Some(ConnectionState::Initializing)
} else {
error!(self.ctx.log(), "No network bridge found; dropping message");
Some(ConnectionState::Closed)
}
}
ConnectionState::Connected => {
if self.queue_manager.has_data(&addr) {
self.queue_manager.enqueue_data(data, addr);
if let Some(bridge) = &self.net_bridge {
while let Some(queued_data) = self.queue_manager.pop_data(&addr) {
bridge.route(addr, queued_data, net::Protocol::Tcp)?;
}
}
None
} else {
// Send frame
if let Some(bridge) = &self.net_bridge {
bridge.route(addr, data, net::Protocol::Tcp)?;
}
None
}
}
ConnectionState::Initializing => {
self.queue_manager.enqueue_data(data, addr);
None
}
ConnectionState::Closed => {
self.queue_manager.enqueue_data(data, addr);
if let Some(bridge) = &self.net_bridge {
bridge.connect(Tcp, addr)?;
}
Some(ConnectionState::Initializing)
}
ConnectionState::Lost => {
// May be recovered...
self.queue_manager.enqueue_data(data, addr);
None
}
ConnectionState::Blocked => {
warn!(
self.ctx.log(),
"Tried sending a message to a blocked connection: {:?}. Dropping message.",
addr
);
None
}
};
if let Some(next) = next {
*state = next;
}
Ok(())
}
    /// Resolves a `PathResolvable` into a concrete `ActorPath` rooted at this
    /// system's path; `System` resolves to the deadletter path.
    fn resolve_path(&mut self, resolvable: &PathResolvable) -> Result<ActorPath, PathParseError> {
        match resolvable {
            PathResolvable::Path(actor_path) => Ok(actor_path.clone()),
            PathResolvable::Alias(alias) => self
                .system_path()
                .into_named_with_string(alias)
                .map(|p| p.into()),
            PathResolvable::Segments(segments) => self
                .system_path()
                .into_named_with_vec(segments.to_vec())
                .map(|p| p.into()),
            PathResolvable::ActorId(id) => Ok(self.system_path().into_unique(*id).into()),
            PathResolvable::System => Ok(self.deadletter_path()),
        }
    }
    /// Forwards `msg` to destination described by `dst`, routing it across the network
    /// if needed.
    fn route(&mut self, dst: ActorPath, msg: DispatchData) -> Result<(), NetworkBridgeErr> {
        // A destination on our own system path never touches the network.
        if self.system_path_ref() == dst.system() {
            self.route_local(dst, msg);
            Ok(())
        } else {
            let proto = dst.system().protocol();
            match proto {
                Transport::Local => {
                    self.route_local(dst, msg);
                    Ok(())
                }
                Transport::Tcp => {
                    let addr = SocketAddr::new(*dst.address(), dst.port());
                    self.route_remote_tcp(addr, msg)
                }
                Transport::Udp => {
                    let addr = SocketAddr::new(*dst.address(), dst.port());
                    self.route_remote_udp(addr, msg)
                }
            }
        }
    }
fn deadletter_path(&mut self) -> ActorPath {
ActorPath::Named(NamedPath::with_system(self.system_path(), Vec::new()))
}
    /// Registers `actor` under `path` in the shared lookup store (copy-on-write
    /// RCU update), fulfilling `promise` with the resolved path or an error.
    /// Duplicate paths are rejected unless `update` is set. Schedules the
    /// reaper on the first successful registration.
    fn register_actor(
        &mut self,
        registration: ActorRegistration,
        update: bool,
        promise: RegistrationPromise,
    ) {
        let ActorRegistration { actor, path } = registration;
        let res = self
            .resolve_path(&path)
            .map_err(RegistrationError::InvalidPath)
            .and_then(|ap| {
                let lease = self.lookup.load();
                if lease.contains(&path) && !update {
                    warn!(
                        self.ctx.log(),
                        "Detected duplicate path during registration. The path will not be re-registered"
                    );
                    // Release the lease before returning to avoid holding the guard.
                    drop(lease);
                    Err(RegistrationError::DuplicateEntry)
                } else {
                    drop(lease);
                    // The RCU closure may run multiple times under contention;
                    // `result` captures the outcome of the final (winning) pass.
                    let mut result: Result<InsertResult, PathParseError> = Ok(InsertResult::None);
                    self.lookup.rcu(|current| {
                        let mut next = ActorStore::clone(&current);
                        result = next.insert(path.clone(), actor.clone());
                        next
                    });
                    if let Ok(ref res) = result {
                        if !res.is_empty() {
                            info!(self.ctx.log(), "Replaced entry for path={:?}", path);
                        }
                    }
                    result.map(|_| ap)
                        .map_err(RegistrationError::InvalidPath)
                }
            });
        if res.is_ok() && !self.reaper.is_scheduled() {
            self.schedule_reaper();
        }
        debug!(self.log(), "Completed actor registration with {:?}", res);
        match promise {
            RegistrationPromise::Fulfil(promise) => {
                promise.fulfil(res).unwrap_or_else(|e| {
                    error!(self.ctx.log(), "Could not notify listeners: {:?}", e)
                });
            }
            RegistrationPromise::None => (), // ignore
        }
    }
    /// Registers a routing `policy` under the path segments carried in
    /// `registration`.
    ///
    /// Mirrors `register_actor`, but stores a routing policy instead of a
    /// concrete actor reference.
    fn register_policy(
        &mut self,
        registration: PolicyRegistration,
        update: bool,
        promise: RegistrationPromise,
    ) {
        let PolicyRegistration { policy, path } = registration;
        let lease = self.lookup.load();
        let path_res = PathResolvable::Segments(path);
        let res = self
            .resolve_path(&path_res)
            .map_err(RegistrationError::InvalidPath)
            .and_then(|ap| {
                // Refuse to overwrite an existing registration unless an
                // update was explicitly requested.
                if lease.contains(&path_res) && !update {
                    warn!(
                        self.ctx.log(),
                        "Detected duplicate path during registration. The path will not be re-registered",
                    );
                    drop(lease);
                    Err(RegistrationError::DuplicateEntry)
                } else {
                    drop(lease);
                    //let PathResolvable::Segments(path) = path_res;
                    // This should work, we just assigned it above,
                    // but the Rust compiler can't figure that out
                    let_irrefutable!(path, PathResolvable::Segments(path) = path_res);
                    let mut result: Result<InsertResult, PathParseError> = Ok(InsertResult::None);
                    // Copy-on-write swap of the shared lookup table.
                    self.lookup.rcu(|current| {
                        let mut next = ActorStore::clone(&current);
                        result = next.set_routing_policy(&path, policy.clone());
                        next
                    });
                    if let Ok(ref res) = result {
                        if !res.is_empty() {
                            info!(self.ctx.log(), "Replaced entry for path={:?}", path);
                        }
                    }
                    result.map(|_| ap).map_err(RegistrationError::InvalidPath)
                }
            });
        debug!(self.log(), "Completed policy registration with {:?}", res);
        match promise {
            RegistrationPromise::Fulfil(promise) => {
                promise.fulfil(res).unwrap_or_else(|e| {
                    error!(self.ctx.log(), "Could not notify listeners: {:?}", e)
                });
            }
            RegistrationPromise::None => (), // ignore
        }
    }
    /// Closes the network channel to `addr`, first flushing any messages
    /// still queued for that destination through the bridge.
    ///
    /// Only channels in the `Connected` state are closed; requests for
    /// unknown or not-connected systems are logged and ignored.
    fn close_channel(&mut self, addr: SocketAddr) -> () {
        if let Some(state) = self.connections.get_mut(&addr) {
            match state {
                ConnectionState::Connected => {
                    trace!(
                        self.ctx.log(),
                        "Closing channel to connected system {}",
                        addr
                    );
                    if let Some(bridge) = &self.net_bridge {
                        // Drain the pending queue before closing so buffered
                        // messages are not lost.
                        while self.queue_manager.has_data(&addr) {
                            if let Some(data) = self.queue_manager.pop_data(&addr) {
                                if let Err(e) = bridge.route(addr, data, Protocol::Tcp) {
                                    error!(self.ctx.log(), "Bridge error while routing {:?}", e);
                                }
                            }
                        }
                        if let Err(e) = bridge.close_channel(addr) {
                            error!(self.ctx.log(), "Bridge error closing channel {:?}", e);
                        }
                    }
                }
                _ => {
                    warn!(
                        self.ctx.log(),
                        "Trying to close channel to a system which is not connected {}", addr
                    );
                }
            }
        } else {
            warn!(self.ctx.log(), "Closing channel to unknown system {}", addr);
        }
    }
}
impl Actor for NetworkDispatcher {
    type Message = DispatchEnvelope;

    /// Dispatches control messages from local components: outbound routing,
    /// registrations, network events, and buffer-chunk returns.
    fn receive_local(&mut self, envelope: Self::Message) -> Handled {
        match envelope {
            DispatchEnvelope::Msg { src: _, dst, msg } => {
                if let Err(e) = self.route(dst, msg) {
                    error!(self.ctx.log(), "Failed to route message: {:?}", e);
                }
            }
            DispatchEnvelope::ForwardedMsg { msg } => {
                // Re-route based on the message's own receiver path.
                let destination = msg.receiver.clone();
                if let Err(e) = self.route(destination, DispatchData::NetMessage(msg)) {
                    error!(self.ctx.log(), "Failed to route message: {:?}", e);
                }
            }
            DispatchEnvelope::Registration(reg) => {
                trace!(self.log(), "Got registration request: {:?}", reg);
                let RegistrationEnvelope {
                    event,
                    update,
                    promise,
                } = reg;
                match event {
                    RegistrationEvent::Actor(rea) => self.register_actor(rea, update, promise),
                    RegistrationEvent::Policy(rep) => self.register_policy(rep, update, promise),
                }
            }
            DispatchEnvelope::Event(ev) => self.on_event(ev),
            DispatchEnvelope::LockedChunk(trash) => self.garbage_buffers.push_back(trash),
        }
        Handled::Ok
    }

    /// The dispatcher is not itself a network-addressable actor; anything
    /// arriving here is unexpected and only logged.
    fn receive_network(&mut self, msg: NetMessage) -> Handled {
        warn!(self.ctx.log(), "Received network message: {:?}", msg);
        Handled::Ok
    }
}
impl Dispatcher for NetworkDispatcher {
    /// Generates a [SystemPath](SystemPath) from this dispatcher's configuration
    ///
    /// This is only possible after the socket is bound and will panic if attempted earlier!
    fn system_path(&mut self) -> SystemPath {
        match self.system_path {
            Some(ref path) => path.clone(),
            None => {
                // First call: derive the path from the bridge's bound address
                // and cache it for subsequent calls.
                let bound_addr = match self.net_bridge {
                    Some(ref net_bridge) => net_bridge.local_addr().clone().expect("If net bridge is ready, port should be as well!"),
                    None => panic!("You must wait until the socket is bound before attempting to create a system path!"),
                };
                let sp = SystemPath::new(self.cfg.transport, bound_addr.ip(), bound_addr.port());
                self.system_path = Some(sp.clone());
                sp
            }
        }
    }

    /// Mutable access to the provided [NetworkStatusPort] of this dispatcher.
    fn network_status_port(&mut self) -> &mut ProvidedPort<NetworkStatusPort> {
        &mut self.network_status_port
    }
}
impl ComponentLifecycle for NetworkDispatcher {
    /// Brings the network up and, if anyone is waiting, fulfils the
    /// readiness promise exactly once (`take()` empties it).
    fn on_start(&mut self) -> Handled {
        info!(self.ctx.log(), "Starting network...");
        self.start();
        info!(self.ctx.log(), "Started network just fine.");
        if let Some(promise) = self.notify_ready.take() {
            promise
                .complete()
                .unwrap_or_else(|e| error!(self.ctx.log(), "Could not start network! {:?}", e))
        }
        Handled::Ok
    }

    /// Tears the network down on component stop.
    fn on_stop(&mut self) -> Handled {
        info!(self.ctx.log(), "Stopping network...")
;
        self.stop();
        info!(self.ctx.log(), "Stopped network.");
        Handled::Ok
    }

    /// Tears the network down on component kill.
    fn on_kill(&mut self) -> Handled {
        info!(self.ctx.log(), "Killing network...");
        self.kill();
        info!(self.ctx.log(), "Killed network.");
        Handled::Ok
    }
}
impl Provide<NetworkStatusPort> for NetworkDispatcher {
    /// Handles connection-management requests from the [NetworkStatusPort]:
    /// connecting/disconnecting remote systems and (un)blocking addresses.
    ///
    /// NOTE(review): the `unwrap()`s below turn bridge errors into panics —
    /// presumably acceptable here; confirm against the bridge's error contract.
    fn handle(&mut self, event: <NetworkStatusPort as Port>::Request) -> Handled {
        debug!(
            self.ctx.log(),
            "Received NetworkStatusPort Request {:?}", event
        );
        match event {
            NetworkStatusRequest::DisconnectSystem(system_path) => {
                self.close_channel(system_path.socket_address());
            }
            NetworkStatusRequest::ConnectSystem(system_path) => {
                if let Some(bridge) = &self.net_bridge {
                    bridge
                        .connect(system_path.protocol(), system_path.socket_address())
                        .unwrap();
                }
            }
            NetworkStatusRequest::BlockIp(ip_addr) => {
                debug!(self.ctx.log(), "Got BlockIp: {:?}", ip_addr);
                if let Some(bridge) = &self.net_bridge {
                    bridge.block_ip(ip_addr).unwrap();
                }
            }
            NetworkStatusRequest::BlockSystem(system_path) => {
                debug!(self.ctx.log(), "Got BlockSystem: {:?}", system_path);
                if let Some(bridge) = &self.net_bridge {
                    bridge.block_socket(system_path.socket_address()).unwrap();
                }
            }
            NetworkStatusRequest::UnblockIp(ip_addr) => {
                debug!(self.ctx.log(), "Got UnblockIp: {:?}", ip_addr);
                if let Some(bridge) = &self.net_bridge {
                    bridge.unblock_ip(ip_addr).unwrap();
                }
            }
            NetworkStatusRequest::UnblockSystem(system_path) => {
                debug!(self.ctx.log(), "Got UnblockSystem: {:?}", system_path);
                if let Some(bridge) = &self.net_bridge {
                    bridge.unblock_socket(system_path.socket_address()).unwrap();
                }
            }
        }
        Handled::Ok
    }
}
/// Helper for forwarding [MsgEnvelope]s to actor references
///
/// The sink is always ready and never fails: `tell` enqueues synchronously,
/// so flushing and closing are no-ops.
impl futures::sink::Sink<DispatchEnvelope> for ActorRefStrong<DispatchEnvelope> {
    type Error = ();

    fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn start_send(self: Pin<&mut Self>, item: DispatchEnvelope) -> Result<(), Self::Error> {
        self.tell(item);
        Ok(())
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn poll_close(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }
}
/// Helper for forwarding [MsgEnvelope]s to actor references
///
/// Like the `ActorRefStrong` sink above, but for dynamically-typed
/// references: `enqueue` delivers synchronously, so every poll is ready.
impl futures::sink::Sink<NetMessage> for DynActorRef {
    type Error = ();

    fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn start_send(self: Pin<&mut Self>, item: NetMessage) -> Result<(), Self::Error> {
        DynActorRef::enqueue(&self.as_ref(), item);
        Ok(())
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn poll_close(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }
}
#[cfg(test)]
mod tests {
use super::{super::*, *};
use crate::prelude_test::net_test_helpers::{PingerAct, PongerAct};
use std::{thread, time::Duration};
/*
// replace ignore with panic cfg gate when https://github.com/rust-lang/rust/pull/74754 is merged
#[test]
#[ignore]
#[should_panic(expected = "KompactSystem: Poisoned")]
fn failed_network() {
let mut cfg = KompactConfig::new();
println!("Configuring network");
cfg.system_components(DeadletterBox::new, {
// shouldn't be able to bind on port 80 without root rights
let net_config =
NetworkConfig::new("127.0.0.1:80".parse().expect("Address should work"));
net_config.build()
});
println!("Starting KompactSystem");
let system = cfg.build().expect("KompactSystem");
thread::sleep(Duration::from_secs(1));
//unreachable!("System should not start correctly! {:?}", system.label());
println!("KompactSystem started just fine.");
let named_path = ActorPath::Named(NamedPath::with_system(
system.system_path(),
vec!["test".into()],
));
println!("Got path: {}", named_path);
}
*/
#[test]
fn network_cleanup() {
let mut cfg = KompactConfig::default();
println!("Configuring network");
cfg.system_components(DeadletterBox::new, {
let net_config =
NetworkConfig::new("127.0.0.1:0".parse().expect("Address should work"));
net_config.build()
});
println!("Starting KompactSystem");
let system = cfg.build().expect("KompactSystem");
println!("KompactSystem started just fine.");
let named_path = ActorPath::Named(NamedPath::with_system(
system.system_path(),
vec!["test".into()],
));
println!("Got path: {}", named_path);
let port = system.system_path().port();
println!("Got port: {}", port);
println!("Shutting down first system...");
system
.shutdown()
.expect("KompactSystem failed to shut down!");
println!("System shut down.");
let mut cfg2 = KompactConfig::default();
println!("Configuring network");
cfg2.system_components(DeadletterBox::new, {
let net_config =
NetworkConfig::new(SocketAddr::new("127.0.0.1".parse().unwrap(), port));
net_config.build()
});
println!("Starting 2nd KompactSystem");
let system2 = cfg2.build().expect("KompactSystem");
thread::sleep(Duration::from_millis(100));
println!("2nd KompactSystem started just fine.");
let named_path2 = ActorPath::Named(NamedPath::with_system(
system2.system_path(),
vec!["test".into()],
));
println!("Got path: {}", named_path);
assert_eq!(named_path, named_path2);
system2
.shutdown()
.expect("2nd KompactSystem failed to shut down!");
}
/// This is similar to network_cleanup test that will trigger a failed binding.
/// The retry should occur when system2 is building and should succeed after system1 is killed.
#[test]
fn network_cleanup_with_timeout() {
let mut cfg = KompactConfig::default();
println!("Configuring network");
cfg.system_components(DeadletterBox::new, {
let net_config =
NetworkConfig::new("127.0.0.1:0".parse().expect("Address should work"));
net_config.build()
});
println!("Starting KompactSystem");
let system = cfg.build().expect("KompactSystem");
println!("KompactSystem started just fine.");
let named_path = ActorPath::Named(NamedPath::with_system(
system.system_path(),
vec!["test".into()],
));
println!("Got path: {}", named_path);
let port = system.system_path().port();
println!("Got port: {}", port);
thread::Builder::new()
.name("System1 Killer".to_string())
.spawn(move || {
thread::sleep(Duration::from_millis(100));
println!("Shutting down first system...");
system
.shutdown()
.expect("KompactSystem failed to shut down!");
println!("System shut down.");
})
.ok();
let mut cfg2 = KompactConfig::default();
println!("Configuring network");
cfg2.system_components(DeadletterBox::new, {
let net_config =
NetworkConfig::new(SocketAddr::new("127.0.0.1".parse().unwrap(), port));
net_config.build()
});
println!("Starting 2nd KompactSystem");
let system2 = cfg2.build().expect("KompactSystem");
thread::sleep(Duration::from_millis(100));
println!("2nd KompactSystem started just fine.");
let named_path2 = ActorPath::Named(NamedPath::with_system(
system2.system_path(),
vec!["test".into()],
));
println!("Got path: {}", named_path);
assert_eq!(named_path, named_path2);
system2
.shutdown()
.expect("2nd KompactSystem failed to shut down!");
}
#[test]
fn test_system_path_timing() {
let mut cfg = KompactConfig::default();
println!("Configuring network");
cfg.system_components(DeadletterBox::new, NetworkConfig::default().build());
println!("Starting KompactSystem");
let system = cfg.build().expect("KompactSystem");
println!("KompactSystem started just fine.");
let named_path = ActorPath::Named(NamedPath::with_system(
system.system_path(),
vec!["test".into()],
));
println!("Got path: {}", named_path);
// if nothing panics the test succeeds
}
#[test]
// Identical with `remote_lost_and_continued_connection` up to the final sleep time and assertion
// system1 times out in its reconnection attempts and drops the enqueued buffers.
// After indirectly asserting that the queue was dropped we start up a new pinger, and assert that it succeeds.
fn cleanup_bufferchunks_from_dead_actors() {
let system1 = || {
let mut cfg = KompactConfig::default();
cfg.system_components(
DeadletterBox::new,
NetworkConfig::new("127.0.0.1:0".parse().expect("Address should work")).build(),
);
cfg.build().expect("KompactSystem")
};
let system2 = |port| {
let mut cfg = KompactConfig::default();
cfg.system_components(
DeadletterBox::new,
NetworkConfig::new(SocketAddr::new("127.0.0.1".parse().unwrap(), port)).build(),
);
cfg.build().expect("KompactSystem")
};
// Set-up system2a
let system2a = system2(0);
let port = system2a.system_path().port();
//let (ponger_unique, pouf) = remote.create_and_register(PongerAct::new);
let (ponger_named, ponf) = system2a.create_and_register(PongerAct::new_lazy);
let poaf = system2a.register_by_alias(&ponger_named, "custom_name");
ponf.wait_expect(Duration::from_millis(1000), "Ponger failed to register!");
poaf.wait_expect(Duration::from_millis(1000), "Ponger failed to register!");
let named_path = ActorPath::Named(NamedPath::with_system(
system2a.system_path(),
vec!["custom_name".into()],
));
let named_path_clone = named_path;
// Set-up system1
let system1: KompactSystem = system1();
let (pinger_named, pinf) =
system1.create_and_register(move || PingerAct::new_eager(named_path_clone));
pinf.wait_expect(Duration::from_millis(1000), "Pinger failed to register!");
// Kill system2a
system2a.shutdown().ok();
// Start system1
system1.start(&pinger_named);
// Wait for the pings to be sent from the actor to the NetworkDispatch and onto the Thread
thread::sleep(Duration::from_millis(100));
// Kill the actor and wait for its BufferChunk to reach the NetworkDispatch and let the reaping try at least once
system1.kill(pinger_named);
// TODO no sleeps!
thread::sleep(Duration::from_millis(5000));
// Assertion 1: The Network_Dispatcher on system1 has >0 buffers to cleanup
let mut garbage_len = 0;
let sc: &dyn SystemComponents = system1.get_system_components();
if let Some(cc) = sc.downcast::<CustomComponents<DeadletterBox, NetworkDispatcher>>() {
garbage_len = cc.dispatcher.on_definition(|nd| nd.garbage_buffers.len());
}
assert_ne!(0, garbage_len);
// Start up system2b
println!("Setting up system2b");
let system2b = system2(port);
let (ponger_named, ponf) = system2b.create_and_register(PongerAct::new_lazy);
let poaf = system2b.register_by_alias(&ponger_named, "custom_name");
ponf.wait_expect(Duration::from_millis(1000), "Ponger failed to register!");
poaf.wait_expect(Duration::from_millis(1000), "Ponger failed to register!");
println!("Starting actor on system2b");
system2b.start(&ponger_named);
// We give the connection plenty of time to re-establish and transfer it's old queue and cleanup the BufferChunk
// TODO no sleeps!
thread::sleep(Duration::from_millis(10000));
// Assertion 2: The Network_Dispatcher on system1 now has 0 buffers to cleanup.
if let Some(cc) = sc.downcast::<CustomComponents<DeadletterBox, NetworkDispatcher>>() {
garbage_len = cc.dispatcher.on_definition(|nd| nd.garbage_buffers.len());
}
assert_eq!(0, garbage_len);
system1
.shutdown()
.expect("Kompact didn't shut down properly");
system2b
.shutdown()
.expect("Kompact didn't shut down properly");
}
}
| 39.341264 | 134 | 0.551291 |
e2bccef03241b58748175c2bbaddfa6d77dd32f5
| 378 |
/// Example enum whose variants carry differently-shaped payloads:
/// a coordinate pair or a free-form description string.
#[allow(dead_code)]
enum SpecialPoint {
    Point(i32, i32),
    Special(String),
}
/// Demonstrates matching on `SpecialPoint` variants and binding their
/// payloads. (Fixed "I'am" typo in both output strings.)
fn main() {
    let sp = SpecialPoint::Point(0, 0);
    match sp {
        SpecialPoint::Point(x, y) => {
            println!("I am SpecialPoint(x={}, y={})", x, y);
        }
        SpecialPoint::Special(why) => {
            println!("I am Special because I am {}", why);
        }
    }
}
| 22.235294 | 60 | 0.497354 |
5dd6bca8ec8079bac73a8e88de808a0bb5d65311
| 10,125 |
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use futures::{stream, Future, Stream};
use tempfile::Builder;
use uuid::Uuid;
use grpcio::{ChannelBuilder, Environment, Result, WriteFlags};
use kvproto::import_sstpb::*;
use kvproto::kvrpcpb::*;
use kvproto::tikvpb::*;
use pd_client::PdClient;
use test_raftstore::*;
use test_sst_importer::*;
use tikv_util::HandyRwLock;
const CLEANUP_SST_MILLIS: u64 = 10;
/// Spins up a single-node test cluster with a very short import-SST cleanup
/// interval, and returns it together with a request `Context` targeting the
/// leader of region 1.
fn new_cluster() -> (Cluster<ServerCluster>, Context) {
    let count = 1;
    let mut cluster = new_server_cluster(0, count);
    // Short interval so cleanup-related assertions don't have to wait long.
    let cleanup_interval = Duration::from_millis(CLEANUP_SST_MILLIS);
    cluster.cfg.raft_store.cleanup_import_sst_interval.0 = cleanup_interval;
    cluster.run();
    let region_id = 1;
    let leader = cluster.leader_of_region(region_id).unwrap();
    let epoch = cluster.get_region_epoch(region_id);
    let mut ctx = Context::default();
    ctx.set_region_id(region_id);
    ctx.set_peer(leader);
    ctx.set_region_epoch(epoch);
    (cluster, ctx)
}
/// Builds a test cluster plus gRPC clients (TiKV and ImportSst) connected to
/// the store hosting the leader of region 1.
fn new_cluster_and_tikv_import_client(
) -> (Cluster<ServerCluster>, Context, TikvClient, ImportSstClient) {
    let (cluster, ctx) = new_cluster();
    let env = Arc::new(Environment::new(1));
    let store_id = ctx.get_peer().get_store_id();
    let channel = ChannelBuilder::new(env).connect(cluster.sim.rl().get_addr(store_id));
    let tikv = TikvClient::new(channel.clone());
    let import = ImportSstClient::new(channel);
    (cluster, ctx, tikv, import)
}
/// Upload validation: mismatching crc32 or length must be rejected, a
/// correct upload must succeed, and re-uploading the same UUID must fail.
#[test]
fn test_upload_sst() {
    let (_cluster, ctx, _, import) = new_cluster_and_tikv_import_client();
    let data = vec![1; 1024];
    let crc32 = calc_data_crc32(&data);
    let length = data.len() as u64;
    // Mismatch crc32
    let meta = new_sst_meta(0, length);
    assert!(send_upload_sst(&import, &meta, &data).is_err());
    // Mismatch length
    let meta = new_sst_meta(crc32, 0);
    assert!(send_upload_sst(&import, &meta, &data).is_err());
    let mut meta = new_sst_meta(crc32, length);
    meta.set_region_id(ctx.get_region_id());
    meta.set_region_epoch(ctx.get_region_epoch().clone());
    send_upload_sst(&import, &meta, &data).unwrap();
    // Can't upload the same uuid file again.
    assert!(send_upload_sst(&import, &meta, &data).is_err());
}
/// Ingest validation: ingesting an SST whose meta lacks region id/epoch must
/// fail; after setting them and uploading, ingest must succeed.
#[test]
fn test_ingest_sst() {
    let (_cluster, ctx, _tikv, import) = new_cluster_and_tikv_import_client();
    let temp_dir = Builder::new().prefix("test_ingest_sst").tempdir().unwrap();
    let sst_path = temp_dir.path().join("test.sst");
    let sst_range = (0, 100);
    let (mut meta, data) = gen_sst_file(sst_path, sst_range);
    // No region id and epoch.
    send_upload_sst(&import, &meta, &data).unwrap();
    let mut ingest = IngestRequest::default();
    ingest.set_context(ctx.clone());
    ingest.set_sst(meta.clone());
    let resp = import.ingest(&ingest).unwrap();
    assert!(resp.has_error());
    // Set region id and epoch.
    meta.set_region_id(ctx.get_region_id());
    meta.set_region_epoch(ctx.get_region_epoch().clone());
    send_upload_sst(&import, &meta, &data).unwrap();
    // Can't upload the same file again.
    assert!(send_upload_sst(&import, &meta, &data).is_err());
    ingest.set_sst(meta.clone());
    let resp = import.ingest(&ingest).unwrap();
    assert!(!resp.has_error());
}
/// A meta with crc32 == 0 but a non-zero length must still ingest
/// successfully, and the ingested key/value pairs must be readable.
#[test]
fn test_ingest_sst_without_crc32() {
    let (_cluster, ctx, tikv, import) = new_cluster_and_tikv_import_client();
    let temp_dir = Builder::new()
        .prefix("test_ingest_sst_without_crc32")
        .tempdir()
        .unwrap();
    let sst_path = temp_dir.path().join("test.sst");
    let sst_range = (0, 100);
    let (mut meta, data) = gen_sst_file(sst_path, sst_range);
    meta.set_region_id(ctx.get_region_id());
    meta.set_region_epoch(ctx.get_region_epoch().clone());
    // Set crc32 == 0 and length != 0 still ingest success
    send_upload_sst(&import, &meta, &data).unwrap();
    meta.set_crc32(0);
    let mut ingest = IngestRequest::default();
    ingest.set_context(ctx.clone());
    ingest.set_sst(meta.clone());
    let resp = import.ingest(&ingest).unwrap();
    assert!(!resp.has_error(), "{:?}", resp.get_error());
    // Check ingested kvs
    check_ingested_kvs(&tikv, &ctx, sst_range);
}
/// Download validation: a missing file errors, an empty range downloads as
/// empty, a full-range download succeeds and can then be ingested and read.
#[test]
fn test_download_sst() {
    let (_cluster, ctx, tikv, import) = new_cluster_and_tikv_import_client();
    let temp_dir = Builder::new()
        .prefix("test_download_sst")
        .tempdir()
        .unwrap();
    let sst_path = temp_dir.path().join("test.sst");
    let sst_range = (0, 100);
    let (mut meta, _) = gen_sst_file(sst_path, sst_range);
    meta.set_region_id(ctx.get_region_id());
    meta.set_region_epoch(ctx.get_region_epoch().clone());
    // Checks that downloading a non-existing storage returns error.
    let mut download = DownloadRequest::default();
    download.set_sst(meta.clone());
    download.set_storage_backend(external_storage::make_local_backend(temp_dir.path()));
    download.set_name("missing.sst".to_owned());
    let result = import.download(&download).unwrap();
    assert!(
        result.has_error(),
        "unexpected download reply: {:?}",
        result
    );
    // Checks that downloading an empty SST returns OK (but cannot be ingested)
    download.set_name("test.sst".to_owned());
    download.mut_sst().mut_range().set_start(vec![sst_range.1]);
    download
        .mut_sst()
        .mut_range()
        .set_end(vec![sst_range.1 + 1]);
    let result = import.download(&download).unwrap();
    assert!(result.get_is_empty());
    // Now perform a proper download.
    download.mut_sst().mut_range().set_start(Vec::new());
    download.mut_sst().mut_range().set_end(Vec::new());
    let result = import.download(&download).unwrap();
    assert!(!result.get_is_empty());
    assert_eq!(result.get_range().get_start(), &[sst_range.0]);
    assert_eq!(result.get_range().get_end(), &[sst_range.1 - 1]);
    // Do an ingest and verify the result is correct.
    let mut ingest = IngestRequest::default();
    ingest.set_context(ctx.clone());
    ingest.set_sst(meta);
    let resp = import.ingest(&ingest).unwrap();
    assert!(!resp.has_error());
    check_ingested_kvs(&tikv, &ctx, sst_range);
}
/// Verifies that uploaded-but-not-ingested SSTs are garbage-collected when
/// their target region changes: once after a split, once after a merge.
#[test]
fn test_cleanup_sst() {
    let (mut cluster, ctx, _, import) = new_cluster_and_tikv_import_client();
    let temp_dir = Builder::new().prefix("test_cleanup_sst").tempdir().unwrap();
    let sst_path = temp_dir.path().join("test_split.sst");
    let sst_range = (0, 100);
    let (mut meta, data) = gen_sst_file(sst_path, sst_range);
    meta.set_region_id(ctx.get_region_id());
    meta.set_region_epoch(ctx.get_region_epoch().clone());
    send_upload_sst(&import, &meta, &data).unwrap();
    // Can not upload the same file when it exists.
    assert!(send_upload_sst(&import, &meta, &data).is_err());
    // The uploaded SST should be deleted if the region split.
    let region = cluster.get_region(&[]);
    cluster.must_split(&region, &[100]);
    check_sst_deleted(&import, &meta, &data);
    let left = cluster.get_region(&[]);
    let right = cluster.get_region(&[100]);
    let sst_path = temp_dir.path().join("test_merge.sst");
    let sst_range = (0, 100);
    let (mut meta, data) = gen_sst_file(sst_path, sst_range);
    meta.set_region_id(left.get_id());
    meta.set_region_epoch(left.get_region_epoch().clone());
    send_upload_sst(&import, &meta, &data).unwrap();
    // The uploaded SST should be deleted if the region merged.
    cluster.pd_client.must_merge(left.get_id(), right.get_id());
    let res = cluster.pd_client.get_region_by_id(left.get_id());
    assert!(res.wait().unwrap().is_none());
    check_sst_deleted(&import, &meta, &data);
}
/// Ingesting with a context pointing at a non-existent region must return a
/// `region_not_found` error rather than succeeding or panicking.
#[test]
fn test_ingest_sst_region_not_found() {
    let (_cluster, mut ctx_not_found, _, import) = new_cluster_and_tikv_import_client();
    let temp_dir = Builder::new()
        .prefix("test_ingest_sst_errors")
        .tempdir()
        .unwrap();
    ctx_not_found.set_region_id(1 << 31); // A large region id that must no exists.
    let sst_path = temp_dir.path().join("test_split.sst");
    let sst_range = (0, 100);
    let (mut meta, _data) = gen_sst_file(sst_path, sst_range);
    meta.set_region_id(ctx_not_found.get_region_id());
    meta.set_region_epoch(ctx_not_found.get_region_epoch().clone());
    let mut ingest = IngestRequest::default();
    ingest.set_context(ctx_not_found);
    ingest.set_sst(meta);
    let resp = import.ingest(&ingest).unwrap();
    assert!(resp.get_error().has_region_not_found());
}
/// Builds an `SstMeta` with a fresh random UUID and the given checksum and
/// byte length.
fn new_sst_meta(crc32: u32, length: u64) -> SstMeta {
    let mut meta = SstMeta::default();
    meta.set_uuid(Uuid::new_v4().as_bytes().to_vec());
    meta.set_crc32(crc32);
    meta.set_length(length);
    meta
}
/// Uploads an SST through the client-streaming `upload` RPC: the first chunk
/// carries the metadata, the second carries the raw file bytes, and the call
/// blocks until the server's response arrives.
fn send_upload_sst(
    client: &ImportSstClient,
    meta: &SstMeta,
    data: &[u8],
) -> Result<UploadResponse> {
    let mut r1 = UploadRequest::default();
    r1.set_meta(meta.clone());
    let mut r2 = UploadRequest::default();
    r2.set_data(data.to_vec());
    let reqs: Vec<_> = vec![r1, r2]
        .into_iter()
        .map(|r| (r, WriteFlags::default()))
        .collect();
    let (tx, rx) = client.upload().unwrap();
    let stream = stream::iter_ok(reqs);
    // Drive the request stream to completion, then wait for the response.
    stream.forward(tx).and_then(|_| rx).wait()
}
/// Asserts that every key in `sst_range` is readable via raw_get and maps to
/// a single-byte value equal to the key itself.
fn check_ingested_kvs(tikv: &TikvClient, ctx: &Context, sst_range: (u8, u8)) {
    let (start, end) = sst_range;
    for key in start..end {
        let mut req = RawGetRequest::default();
        req.set_context(ctx.clone());
        req.set_key(vec![key]);
        let resp = tikv.raw_get(&req).unwrap();
        assert!(resp.get_error().is_empty());
        assert!(!resp.has_region_error());
        assert_eq!(resp.get_value(), &[key]);
    }
}
/// Polls until the previously-uploaded SST has been cleaned up server-side:
/// re-uploading the same UUID only succeeds once the old file is gone. Gives
/// the cleanup loop up to ten sleep intervals, then fails hard via unwrap.
fn check_sst_deleted(client: &ImportSstClient, meta: &SstMeta, data: &[u8]) {
    let mut attempts_left = 10;
    while attempts_left > 0 {
        if send_upload_sst(client, meta, data).is_ok() {
            // Upload succeeded, so the previous file must have been deleted.
            return;
        }
        thread::sleep(Duration::from_millis(CLEANUP_SST_MILLIS));
        attempts_left -= 1;
    }
    send_upload_sst(client, meta, data).unwrap();
}
| 32.66129 | 88 | 0.659951 |
fe3790e01191f4af7bb79820edfa1915c686988e
| 8,218 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Builds the static AWS endpoint resolver for SQS.
///
/// Generated code (see the DO-NOT-EDIT header): the primary `aws` partition
/// (with FIPS endpoint overrides for four US regions) plus the China, ISO,
/// ISO-B and GovCloud partitions.
pub fn endpoint_resolver() -> impl aws_endpoint::ResolveAwsEndpoint {
    aws_endpoint::PartitionResolver::new(
        // Primary "aws" partition.
        aws_endpoint::Partition::builder()
            .id("aws")
            .region_regex(r#"^(us|eu|ap|sa|ca|me|af)\-\w+\-\d+$"#)
            .default_endpoint(aws_endpoint::partition::endpoint::Metadata {
                uri_template: "sqs.{region}.amazonaws.com",
                protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                credential_scope: aws_endpoint::CredentialScope::builder().build(),
            })
            .regionalized(aws_endpoint::partition::Regionalized::Regionalized)
            .endpoint(
                "fips-us-east-1",
                aws_endpoint::partition::endpoint::Metadata {
                    uri_template: "sqs-fips.us-east-1.amazonaws.com",
                    protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                    signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                    credential_scope: aws_endpoint::CredentialScope::builder()
                        .region("us-east-1")
                        .build(),
                },
            )
            .endpoint(
                "fips-us-east-2",
                aws_endpoint::partition::endpoint::Metadata {
                    uri_template: "sqs-fips.us-east-2.amazonaws.com",
                    protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                    signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                    credential_scope: aws_endpoint::CredentialScope::builder()
                        .region("us-east-2")
                        .build(),
                },
            )
            .endpoint(
                "fips-us-west-1",
                aws_endpoint::partition::endpoint::Metadata {
                    uri_template: "sqs-fips.us-west-1.amazonaws.com",
                    protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                    signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                    credential_scope: aws_endpoint::CredentialScope::builder()
                        .region("us-west-1")
                        .build(),
                },
            )
            .endpoint(
                "fips-us-west-2",
                aws_endpoint::partition::endpoint::Metadata {
                    uri_template: "sqs-fips.us-west-2.amazonaws.com",
                    protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                    signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                    credential_scope: aws_endpoint::CredentialScope::builder()
                        .region("us-west-2")
                        .build(),
                },
            )
            .endpoint(
                "us-east-1",
                aws_endpoint::partition::endpoint::Metadata {
                    uri_template: "sqs.{region}.amazonaws.com",
                    protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                    signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                    credential_scope: aws_endpoint::CredentialScope::builder().build(),
                },
            )
            .build()
            .expect("invalid partition"),
        // Additional partitions: China, ISO, ISO-B, GovCloud.
        vec![
            aws_endpoint::Partition::builder()
                .id("aws-cn")
                .region_regex(r#"^cn\-\w+\-\d+$"#)
                .default_endpoint(aws_endpoint::partition::endpoint::Metadata {
                    uri_template: "sqs.{region}.amazonaws.com.cn",
                    protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                    signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                    credential_scope: aws_endpoint::CredentialScope::builder().build(),
                })
                .regionalized(aws_endpoint::partition::Regionalized::Regionalized)
                .build()
                .expect("invalid partition"),
            aws_endpoint::Partition::builder()
                .id("aws-iso")
                .region_regex(r#"^us\-iso\-\w+\-\d+$"#)
                .default_endpoint(aws_endpoint::partition::endpoint::Metadata {
                    uri_template: "sqs.{region}.c2s.ic.gov",
                    protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                    signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                    credential_scope: aws_endpoint::CredentialScope::builder().build(),
                })
                .regionalized(aws_endpoint::partition::Regionalized::Regionalized)
                .endpoint(
                    "us-iso-east-1",
                    aws_endpoint::partition::endpoint::Metadata {
                        uri_template: "sqs.{region}.c2s.ic.gov",
                        protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                        signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                        credential_scope: aws_endpoint::CredentialScope::builder().build(),
                    },
                )
                .build()
                .expect("invalid partition"),
            aws_endpoint::Partition::builder()
                .id("aws-iso-b")
                .region_regex(r#"^us\-isob\-\w+\-\d+$"#)
                .default_endpoint(aws_endpoint::partition::endpoint::Metadata {
                    uri_template: "sqs.{region}.sc2s.sgov.gov",
                    protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                    signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                    credential_scope: aws_endpoint::CredentialScope::builder().build(),
                })
                .regionalized(aws_endpoint::partition::Regionalized::Regionalized)
                .build()
                .expect("invalid partition"),
            aws_endpoint::Partition::builder()
                .id("aws-us-gov")
                .region_regex(r#"^us\-gov\-\w+\-\d+$"#)
                .default_endpoint(aws_endpoint::partition::endpoint::Metadata {
                    uri_template: "sqs.{region}.amazonaws.com",
                    protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                    signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                    credential_scope: aws_endpoint::CredentialScope::builder().build(),
                })
                .regionalized(aws_endpoint::partition::Regionalized::Regionalized)
                .endpoint(
                    "us-gov-east-1",
                    aws_endpoint::partition::endpoint::Metadata {
                        uri_template: "sqs.us-gov-east-1.amazonaws.com",
                        protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                        signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                        credential_scope: aws_endpoint::CredentialScope::builder()
                            .region("us-gov-east-1")
                            .build(),
                    },
                )
                .endpoint(
                    "us-gov-west-1",
                    aws_endpoint::partition::endpoint::Metadata {
                        uri_template: "sqs.us-gov-west-1.amazonaws.com",
                        protocol: aws_endpoint::partition::endpoint::Protocol::Https,
                        signature_versions: aws_endpoint::partition::endpoint::SignatureVersion::V4,
                        credential_scope: aws_endpoint::CredentialScope::builder()
                            .region("us-gov-west-1")
                            .build(),
                    },
                )
                .build()
                .expect("invalid partition"),
        ],
    )
}
| 54.065789 | 100 | 0.524459 |
bb5f0d420db34ab565ca858909968ecaded94342
| 12,032 |
//! Simple entity-component system. Macro-free stable Rust using compile-time reflection!
//!
//! # Example
//! ```
//! extern crate recs;
//! use recs::{Ecs, EntityId};
//!
//! #[derive(Clone, PartialEq, Debug)]
//! struct Age{years: u32}
//!
//! #[derive(Clone, PartialEq, Debug)]
//! struct Iq{points: i32}
//!
//! fn main() {
//!
//! // Create an ECS instance
//! let mut system: Ecs = Ecs::new();
//!
//! // Add entity to the system
//! let forrest: EntityId = system.create_entity();
//!
//! // Attach components to the entity
//! // The Ecs.set method returns an EcsResult that will be set to Err if
//! // the specified entity does not exist. If you're sure that the entity exists, suppress
//! // Rust's "unused result" warning by prefixing your calls to set(..) with "let _ = ..."
//! let _ = system.set(forrest, Age{years: 22});
//! let _ = system.set(forrest, Iq{points: 75}); // "I may not be a smart man..."
//!
//! // Get clone of attached component data from entity
//! let age = system.get::<Age>(forrest).unwrap();
//! assert_eq!(age.years, 22);
//!
//! // Annotating the variable's type may let you skip type parameters
//! let iq: Iq = system.get(forrest).unwrap();
//! assert_eq!(iq.points, 75);
//!
//! // Modify an entity's component
//! let older = Age{years: age.years + 1};
//! let _ = system.set(forrest, older);
//!
//! // Modify a component in-place with a mutable borrow
//! system.borrow_mut::<Iq>(forrest).map(|iq| iq.points += 5);
//!
//! // Inspect a component in-place without cloning
//! assert_eq!(system.borrow::<Age>(forrest), Ok(&Age{years: 23}));
//!
//! // Inspect a component via cloning
//! assert_eq!(system.get::<Iq>(forrest), Ok(Iq{points: 80}));
//!
//! }
//! ```
#![allow(unknown_lints)] // for rust-clippy
#![warn(missing_docs)]
use std::any::{TypeId, Any};
use std::collections::{HashMap, HashSet};
// Internal integer type backing `EntityId`. A `u64` counter is effectively
// inexhaustible, so IDs never need to be recycled.
type IdNumber = u64;
/// Value type representing an entity in the entity-component system.
///
/// To avoid duplicate entity IDs, these can only be created by calling `Ecs.create_entity()`.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct EntityId(IdNumber);
/// Error type for ECS results that require a specific entity or component.
#[derive(Debug, PartialEq, Eq)]
pub enum NotFound {
    /// A requested entity ID was not present in the system.
    Entity(EntityId),
    /// A requested component was not present on an entity.
    Component(TypeId),
}
/// Result type for ECS operations that may fail because a specific entity or
/// component was not found.
pub type EcsResult<T> = Result<T, NotFound>;
/// Marker trait for types which can be used as components.
///
/// `Component` is automatically implemented for all eligible types by the
/// provided `impl`, so you don't have to worry about this. Hooray!
pub trait Component: Any+Send {}
impl<T: Any+Send> Component for T {}
/// List of component types.
///
/// The `Ecs` methods `has_all` and `collect_with` each take a `ComponentFilter` instance. The
/// recommended way to actually create a `ComponentFilter` is with the
/// [`component_filter!` macro](macro.component_filter!.html).
#[derive(Default, PartialEq, Eq, Debug, Clone)]
pub struct ComponentFilter {
    set: HashSet<TypeId>,
}
impl ComponentFilter {
    /// Create a new, empty component filter.
    pub fn new() -> Self {
        Default::default()
    }
    /// Add component type `C` to the filter.
    pub fn add<C: Component>(&mut self) {
        self.set.insert(TypeId::of::<C>());
    }
    /// Remove component type `C` from the filter.
    pub fn remove<C: Component>(&mut self) {
        self.set.remove(&TypeId::of::<C>());
    }
    /// Return `true` if the filter already contains component type `C`; otherwise `false`.
    ///
    /// This is a read-only query, so it now takes `&self` instead of `&mut self`;
    /// existing callers with a mutable binding continue to work unchanged.
    pub fn contains<C: Component>(&self) -> bool {
        self.set.contains(&TypeId::of::<C>())
    }
    /// Create a component filter from a vector/slice of `TypeId` instances. (Not recommended;
    /// used by the `component_filter!` macro.)
    pub fn from_slice(slice: &[TypeId]) -> Self {
        // Collect straight into the set instead of inserting one element at a time.
        ComponentFilter { set: slice.iter().cloned().collect() }
    }
    /// Return an iterator over all the contained component types.
    #[allow(needless_lifetimes)] // https://github.com/Manishearth/rust-clippy/issues/740
    pub fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = TypeId> + 'a> {
        // `dyn` spelled out for consistency with the other boxed trait object in this file.
        Box::new(self.set.iter().cloned())
    }
}
/// Create a `ComponentFilter` by type name.
///
/// If you want all entities with components `Foo` and `Bar`:
///
/// ```
/// #[macro_use] // The macro won't be imported without this flag!
/// extern crate recs;
/// use recs::{Ecs, EntityId};
///
/// struct Foo;
/// struct Bar;
///
/// fn main() {
///     let sys = Ecs::new();
///     // ... add some entities and components ...
///     let mut ids: Vec<EntityId> = Vec::new();
///     let filter = component_filter!(Foo, Bar);
///     sys.collect_with(&filter, &mut ids);
///     for id in ids {
///         // Will only iterate over entities that have been assigned both `Foo` and `Bar`
///         // components
///     }
/// }
/// ```
#[macro_export]
macro_rules! component_filter {
    // Main arm: a comma-separated list of types becomes a `from_slice` call
    // over their `TypeId`s.
    ($($x:ty),*) => (
        $crate::ComponentFilter::from_slice(
            &vec![$(std::any::TypeId::of::<$x>()),*]
        )
    );
    // Accept an optional trailing comma by delegating to the main arm.
    ($($x:ty,)*) => (component_filter![$($x),*])
}
/// Primary data structure containing entity and component data.
///
/// Notice that `Ecs` itself has no type parameters. Its methods to interact
/// with components do, but runtime reflection (via `std::any::TypeId`) is
/// used to retrieve components from an internal `HashMap`. Therefore, you
/// can create and use any data structure you want for components.
///
/// Tip: using `#[derive(Clone)]` on your component types will make your life a little easier by
/// enabling the `get` method, which avoids locking down the `Ecs` with a mutable or immutable
/// borrow.
#[derive(Default)]
pub struct Ecs {
    // Next entity ID to hand out; incremented by every `create_entity` call.
    ids: IdNumber,
    // Per-entity component storage.
    data: HashMap<EntityId, ComponentMap>,
}
// Type-indexed storage for a single entity's components: at most one boxed
// value per concrete component type, keyed by that type's `TypeId`.
#[derive(Default)]
struct ComponentMap {
    map: HashMap<TypeId, Box<dyn Any+Send>>,
}
impl ComponentMap {
    // Insert `component`, returning the previously stored value of the same
    // concrete type (if any), downcast back from its type-erased box.
    fn set<C: Component>(&mut self, component: C) -> Option<C> {
        self.map
            .insert(TypeId::of::<C>(), Box::new(component))
            .map(|previous| {
                *previous
                    .downcast::<C>()
                    .expect("ComponentMap.set: internal downcast error")
            })
    }
    // Shared borrow of the stored component of type `C`, or
    // `NotFound::Component` when none is present.
    fn borrow<C: Component>(&self) -> EcsResult<&C> {
        match self.map.get(&TypeId::of::<C>()) {
            Some(boxed) => Ok(boxed
                .downcast_ref()
                .expect("ComponentMap.borrow: internal downcast error")),
            None => Err(NotFound::Component(TypeId::of::<C>())),
        }
    }
    // Clone the stored component of type `C` out of the map.
    #[allow(map_clone)]
    fn get<C: Component + Clone>(&self) -> EcsResult<C> {
        self.borrow::<C>().map(|component| component.clone())
    }
    // True when a component with the given `TypeId` is present.
    fn contains_type_id(&self, id: &TypeId) -> bool {
        self.map.contains_key(id)
    }
    // Typed convenience wrapper around `contains_type_id`.
    fn contains<C: Component>(&self) -> bool {
        self.contains_type_id(&TypeId::of::<C>())
    }
    // Exclusive borrow of the stored component of type `C`.
    fn borrow_mut<C: Component>(&mut self) -> EcsResult<&mut C> {
        self.map
            .get_mut(&TypeId::of::<C>())
            .map(|boxed| {
                boxed
                    .downcast_mut()
                    .expect("ComponentMap.borrow_mut: internal downcast error")
            })
            .ok_or_else(|| NotFound::Component(TypeId::of::<C>()))
    }
}
impl Ecs {
    /// Create a new and empty entity-component system (ECS).
    pub fn new() -> Self {
        Default::default()
    }
    /// Create a new entity in the ECS without components and return its ID.
    pub fn create_entity(&mut self) -> EntityId {
        let new_id = EntityId(self.ids);
        // IDs are handed out sequentially and never reused, so a destroyed
        // entity's ID cannot alias a later entity.
        self.ids += 1;
        self.data.insert(new_id, Default::default());
        new_id
    }
    /// Return `true` if the provided entity exists in the system.
    pub fn exists(&self, id: EntityId) -> bool {
        self.data.contains_key(&id)
    }
    /// Destroy the provided entity, automatically removing any of its components.
    ///
    /// Return `NotFound::Entity` if the entity does not exist or was already deleted.
    pub fn destroy_entity(&mut self, id: EntityId) -> EcsResult<()> {
        self.data.remove(&id).map(|_| ()).ok_or_else(|| NotFound::Entity(id))
    }
    /// For the specified entity, add a component of type `C` to the system.
    ///
    /// If the entity already has a component `prev` of type `C`, return `Some(prev)`. If not,
    /// return `None`. If the entity does not exist, return `NotFound::Entity`.
    ///
    /// To modify an existing component in place, see `borrow_mut`.
    pub fn set<C: Component>(&mut self, id: EntityId, comp: C) -> EcsResult<Option<C>> {
        self.data
            .get_mut(&id)
            .ok_or_else(|| NotFound::Entity(id))
            .map(|map| map.set(comp))
    }
    /// Return a clone of the requested entity's component of type `C`, or a `NotFound` variant
    /// if the entity does not exist or does not have that component.
    ///
    /// To examine or modify a component without making a clone, see `borrow` and `borrow_mut`.
    pub fn get<C: Component + Clone>(&self, id: EntityId) -> EcsResult<C> {
        self.data
            .get(&id)
            .ok_or_else(|| NotFound::Entity(id))
            .and_then(|map| map.get())
    }
    /// Return `true` if the specified entity has a component of type `C` in the system, or
    /// `NotFound::Entity` if the entity does not exist.
    pub fn has<C: Component>(&self, id: EntityId) -> EcsResult<bool> {
        self.data
            .get(&id)
            .ok_or_else(|| NotFound::Entity(id))
            .map(|map| map.contains::<C>())
    }
    /// Return `true` if each component type in the filter is present on the entity `id`.
    pub fn has_all(&self, id: EntityId, set: &ComponentFilter) -> EcsResult<bool> {
        // `?` replaces the deprecated `try!` macro; the behavior is identical.
        let map = self.data.get(&id).ok_or_else(|| NotFound::Entity(id))?;
        Ok(set.iter().all(|type_id| map.contains_type_id(&type_id)))
    }
    /// Return a shared reference to the requested entity's component of type `C`, or a
    /// `NotFound` variant if the entity does not exist or does not have that component.
    pub fn borrow<C: Component>(&self, id: EntityId) -> EcsResult<&C> {
        self.data
            .get(&id)
            .ok_or_else(|| NotFound::Entity(id))
            .and_then(|map| map.borrow())
    }
    /// Return a mutable reference to the requested entity's component of type `C`, or a
    /// `NotFound` variant if the entity does not exist or does not have that component.
    pub fn borrow_mut<C: Component>(&mut self, id: EntityId) -> EcsResult<&mut C> {
        self.data
            .get_mut(&id)
            .ok_or_else(|| NotFound::Entity(id))
            .and_then(|map| map.borrow_mut())
    }
    /// Return an iterator over every ID in the system.
    #[allow(needless_lifetimes)] // https://github.com/Manishearth/rust-clippy/issues/740
    pub fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = EntityId> + 'a> {
        // `dyn` spelled out for consistency with `ComponentMap`'s boxed trait object.
        Box::new(self.data.keys().cloned())
    }
    /// Collect all entity IDs into a vector (after emptying the vector).
    ///
    /// Useful for accessing entity IDs without borrowing the ECS.
    pub fn collect(&self, dest: &mut Vec<EntityId>) {
        dest.clear();
        dest.extend(self.iter());
    }
    /// Collect the IDs of all entities containing a certain set of component types into a vector.
    ///
    /// After calling this method, the vector `dest` will contain *only* those entities who have
    /// at least each type of component specified in the filter.
    pub fn collect_with<'a>(&'a self, components: &'a ComponentFilter, dest: &mut Vec<EntityId>) {
        let ids = self.data.keys().cloned();
        dest.clear();
        dest.extend(ids.filter(|e| {
            // The ID comes straight from `self.data`, so `has_all` cannot
            // legitimately report a missing entity here.
            self.has_all(*e, components)
                .expect("Ecs.collect_with: internal id filter error")
        }))
    }
}
| 37.836478 | 98 | 0.608295 |
1411efceed32c6a2d552d138704280d042fd5ef5
| 6,671 |
use clock::Clock;
//
// Clock Creation
//
// `Clock::new(hours, minutes)` must normalize any integer inputs — including
// negative and out-of-range values — onto a 24-hour wall clock, as shown by
// the rendered `to_string()` form.
#[test]
fn test_on_the_hour() {
    assert_eq!(Clock::new(8, 0).to_string(), "08:00");
}
#[test]
fn test_past_the_hour() {
    assert_eq!(Clock::new(11, 9).to_string(), "11:09");
}
#[test]
fn test_midnight_is_zero_hours() {
    assert_eq!(Clock::new(24, 0).to_string(), "00:00");
}
#[test]
fn test_hour_rolls_over() {
    assert_eq!(Clock::new(25, 0).to_string(), "01:00");
}
#[test]
fn test_hour_rolls_over_continuously() {
    assert_eq!(Clock::new(100, 0).to_string(), "04:00");
}
#[test]
fn test_sixty_minutes_is_next_hour() {
    assert_eq!(Clock::new(1, 60).to_string(), "02:00");
}
#[test]
fn test_minutes_roll_over() {
    assert_eq!(Clock::new(0, 160).to_string(), "02:40");
}
#[test]
fn test_minutes_roll_over_continuously() {
    assert_eq!(Clock::new(0, 1723).to_string(), "04:43");
}
#[test]
fn test_hours_and_minutes_roll_over() {
    assert_eq!(Clock::new(25, 160).to_string(), "03:40");
}
#[test]
fn test_hours_and_minutes_roll_over_continuously() {
    assert_eq!(Clock::new(201, 3001).to_string(), "11:01");
}
#[test]
fn test_hours_and_minutes_roll_over_to_exactly_midnight() {
    assert_eq!(Clock::new(72, 8640).to_string(), "00:00");
}
// Negative inputs wrap backwards from midnight.
#[test]
fn test_negative_hour() {
    assert_eq!(Clock::new(-1, 15).to_string(), "23:15");
}
#[test]
fn test_negative_hour_roll_over() {
    assert_eq!(Clock::new(-25, 00).to_string(), "23:00");
}
#[test]
fn test_negative_hour_roll_over_continuously() {
    assert_eq!(Clock::new(-91, 00).to_string(), "05:00");
}
#[test]
fn test_negative_minutes() {
    assert_eq!(Clock::new(1, -40).to_string(), "00:20");
}
#[test]
fn test_negative_minutes_roll_over() {
    assert_eq!(Clock::new(1, -160).to_string(), "22:20");
}
#[test]
fn test_negative_minutes_roll_over_continuously() {
    assert_eq!(Clock::new(1, -4820).to_string(), "16:40");
}
#[test]
fn test_negative_sixty_minutes_is_prev_hour() {
    assert_eq!(Clock::new(2, -60).to_string(), "01:00");
}
#[test]
fn test_negative_hour_and_minutes_both_roll_over() {
    assert_eq!(Clock::new(-25, -160).to_string(), "20:20");
}
#[test]
fn test_negative_hour_and_minutes_both_roll_over_continuously() {
    assert_eq!(Clock::new(-121, -5810).to_string(), "22:10");
}
//
// Clock Math
//
// `add_minutes` accepts any signed offset and must wrap around midnight in
// both directions.
#[test]
fn test_add_minutes() {
    let clock = Clock::new(10, 0).add_minutes(3);
    assert_eq!(clock.to_string(), "10:03");
}
#[test]
fn test_add_no_minutes() {
    let clock = Clock::new(6, 41).add_minutes(0);
    assert_eq!(clock.to_string(), "06:41");
}
#[test]
fn test_add_to_next_hour() {
    let clock = Clock::new(0, 45).add_minutes(40);
    assert_eq!(clock.to_string(), "01:25");
}
#[test]
fn test_add_more_than_one_hour() {
    let clock = Clock::new(10, 0).add_minutes(61);
    assert_eq!(clock.to_string(), "11:01");
}
#[test]
fn test_add_more_than_two_hours_with_carry() {
    let clock = Clock::new(0, 45).add_minutes(160);
    assert_eq!(clock.to_string(), "03:25");
}
#[test]
fn test_add_across_midnight() {
    let clock = Clock::new(23, 59).add_minutes(2);
    assert_eq!(clock.to_string(), "00:01");
}
#[test]
fn test_add_more_than_one_day() {
    let clock = Clock::new(5, 32).add_minutes(1500);
    assert_eq!(clock.to_string(), "06:32");
}
#[test]
fn test_add_more_than_two_days() {
    let clock = Clock::new(1, 1).add_minutes(3500);
    assert_eq!(clock.to_string(), "11:21");
}
// Subtraction is expressed as adding a negative number of minutes.
#[test]
fn test_subtract_minutes() {
    let clock = Clock::new(10, 3).add_minutes(-3);
    assert_eq!(clock.to_string(), "10:00");
}
#[test]
fn test_subtract_to_previous_hour() {
    let clock = Clock::new(10, 3).add_minutes(-30);
    assert_eq!(clock.to_string(), "09:33");
}
#[test]
fn test_subtract_more_than_an_hour() {
    let clock = Clock::new(10, 3).add_minutes(-70);
    assert_eq!(clock.to_string(), "08:53");
}
#[test]
fn test_subtract_across_midnight() {
    let clock = Clock::new(0, 3).add_minutes(-4);
    assert_eq!(clock.to_string(), "23:59");
}
#[test]
fn test_subtract_more_than_two_hours() {
    let clock = Clock::new(0, 0).add_minutes(-160);
    assert_eq!(clock.to_string(), "21:20");
}
#[test]
fn test_subtract_more_than_two_hours_with_borrow() {
    let clock = Clock::new(6, 15).add_minutes(-160);
    assert_eq!(clock.to_string(), "03:35");
}
#[test]
fn test_subtract_more_than_one_day() {
    let clock = Clock::new(5, 32).add_minutes(-1500);
    assert_eq!(clock.to_string(), "04:32");
}
#[test]
fn test_subtract_mores_than_two_days() {
    let clock = Clock::new(2, 20).add_minutes(-3000);
    assert_eq!(clock.to_string(), "00:20");
}
//
// Test Equality
//
// Two clocks compare equal when they denote the same wall time, regardless of
// how un-normalized the constructor arguments were.
#[test]
fn test_compare_clocks_for_equality() {
    assert_eq!(Clock::new(15, 37), Clock::new(15, 37));
}
#[test]
fn test_compare_clocks_a_minute_apart() {
    assert_ne!(Clock::new(15, 36), Clock::new(15, 37));
}
#[test]
fn test_compare_clocks_an_hour_apart() {
    assert_ne!(Clock::new(14, 37), Clock::new(15, 37));
}
#[test]
fn test_compare_clocks_with_hour_overflow() {
    assert_eq!(Clock::new(10, 37), Clock::new(34, 37));
}
#[test]
fn test_compare_clocks_with_hour_overflow_by_several_days() {
    assert_eq!(Clock::new(3, 11), Clock::new(99, 11));
}
#[test]
fn test_compare_clocks_with_negative_hour() {
    assert_eq!(Clock::new(22, 40), Clock::new(-2, 40));
}
#[test]
fn test_compare_clocks_with_negative_hour_that_wraps() {
    assert_eq!(Clock::new(17, 3), Clock::new(-31, 3));
}
#[test]
fn test_compare_clocks_with_negative_hour_that_wraps_multiple_times() {
    assert_eq!(Clock::new(13, 49), Clock::new(-83, 49));
}
#[test]
fn test_compare_clocks_with_minutes_overflow() {
    assert_eq!(Clock::new(0, 1), Clock::new(0, 1441));
}
#[test]
fn test_compare_clocks_with_minutes_overflow_by_several_days() {
    assert_eq!(Clock::new(2, 2), Clock::new(2, 4322));
}
#[test]
fn test_compare_clocks_with_negative_minute() {
    assert_eq!(Clock::new(2, 40), Clock::new(3, -20))
}
#[test]
fn test_compare_clocks_with_negative_minute_that_wraps() {
    assert_eq!(Clock::new(4, 10), Clock::new(5, -1490))
}
#[test]
fn test_compare_clocks_with_negative_minute_that_wraps_multiple() {
    assert_eq!(Clock::new(6, 15), Clock::new(6, -4305))
}
#[test]
fn test_compare_clocks_with_negative_hours_and_minutes() {
    assert_eq!(Clock::new(7, 32), Clock::new(-12, -268))
}
#[test]
fn test_compare_clocks_with_negative_hours_and_minutes_that_wrap() {
    assert_eq!(Clock::new(18, 7), Clock::new(-54, -11513))
}
#[test]
fn test_compare_full_clock_and_zeroed_clock() {
    assert_eq!(Clock::new(24, 0), Clock::new(0, 0))
}
// The `From<Clock> for String` conversion must agree with `to_string()`.
#[test]
fn test_string_from_clock() {
    assert_eq!(String::from(Clock::new(50, -23)), "01:37")
}
| 19.44898 | 71 | 0.663169 |
2876f5ec687395c0e9276aaf379080e6fdc67b41
| 459 |
#![no_std]
#![feature(asm, global_asm, linkage)]
#![feature(vec_leak)]
#![deny(warnings)]
extern crate alloc;
// Exactly one of the `cfg`-gated declarations below matches the build target;
// each points the `arch` module at its architecture-specific source directory.
#[cfg(target_arch = "x86_64")]
#[path = "arch/x86_64/mod.rs"]
mod arch;
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
#[path = "arch/riscv/mod.rs"]
mod arch;
#[cfg(target_arch = "mips")]
#[path = "arch/mipsel/mod.rs"]
pub mod arch;
// NOTE(review): mips/aarch64 declare `pub mod arch` while x86_64/riscv keep the
// module private; `pub use arch::*` re-exports the contents either way, so
// confirm whether the visibility asymmetry is intentional.
#[cfg(target_arch = "aarch64")]
#[path = "arch/aarch64/mod.rs"]
pub mod arch;
// Re-export the selected architecture's API at the crate root.
pub use arch::*;
bb7647dd2dbdace96e00eedb6eea9389faad9a07
| 3,563 |
//! Structs and utilities for handling API response data.
/// Response returned by the `get_access_token()` method.
#[derive(Debug, Deserialize, Serialize)]
pub struct AccessTokenResponse {
    // Bearer token used to authenticate subsequent requests.
    pub access_token: String,
    // Scope string as returned by the server; format not established here —
    // confirm against the API documentation.
    pub scope: String,
    // Token lifetime; presumably in seconds — TODO confirm against the API docs.
    pub expires_in: i64,
}
/// Response returned by the `get_price_history()` method.
#[derive(Debug, Deserialize)]
pub struct GetPriceHistoryResponse {
    pub candles: Vec<Candle>,
    // Presumably `true` when the server returned no candle data — confirm.
    pub empty: bool,
    pub symbol: String,
}
/// Individual candle item in [`GetPriceHistoryResponse`](struct.GetPriceHistoryResponse.html).
#[derive(Clone, Copy, Debug, Deserialize)]
pub struct Candle {
    pub close: f64,
    // Candle timestamp; epoch unit (seconds vs. milliseconds) is not
    // established here — confirm against the API docs.
    pub datetime: usize,
    pub high: f64,
    pub low: f64,
    pub open: f64,
    pub volume: i64,
}
/// Individual response item returned by the `get_movers()` method.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Mover {
    pub change: f64,
    pub description: String,
    // Direction of the move; presumably "up"/"down" — confirm against the API docs.
    pub direction: String,
    pub last: f64,
    pub symbol: String,
    pub total_volume: i64,
}
/// Individual response item returned by the `get_account()` and
/// `get_accounts()` methods.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Account {
    pub securities_account: SecuritiesAccount,
}
/// Securities Account item in [`Account`](struct.Account.html)
// `untagged`: serde deserializes into the first variant whose field set
// matches the JSON shape, so additional variants must be added with care.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum SecuritiesAccount {
    #[serde(rename_all = "camelCase")]
    MarginAccount {
        // `type` is a Rust keyword, hence the raw identifier.
        r#type: String,
        account_id: String,
        round_trips: usize,
        is_day_trader: bool,
        is_closing_only_restricted: bool,
        initial_balances: InitialBalances,
        current_balances: CurrentBalances,
        projected_balances: ProjectedBalances,
    },
}
/// Initial Balances item in [`SecuritiesAccount`](enum.SecuritiesAccount.html)
///
/// Monetary fields are plain `f64` dollar amounts as delivered by the API;
/// the exact snapshot semantics of "initial" are not established here —
/// confirm against the API documentation.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InitialBalances {
    pub account_value: f64,
    pub accrued_interest: f64,
    pub bond_value: f64,
    pub cash_available_for_trading: f64,
    pub cash_available_for_withdrawal: f64,
    pub cash_balance: f64,
    pub cash_debit_call_value: f64,
    pub cash_receipts: f64,
    pub is_in_call: bool,
    pub liquidation_value: f64,
    pub long_option_market_value: f64,
    pub money_market_fund: f64,
    pub mutual_fund_value: f64,
    pub pending_deposits: f64,
    pub short_option_market_value: f64,
    pub short_stock_value: f64,
    pub unsettled_cash: f64,
}
/// Current Balances item in [`SecuritiesAccount`](enum.SecuritiesAccount.html)
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CurrentBalances {
    pub accrued_interest: f64,
    pub bond_value: f64,
    pub cash_available_for_trading: f64,
    pub cash_available_for_withdrawal: f64,
    pub cash_balance: f64,
    pub cash_call: f64,
    pub cash_debit_call_value: f64,
    pub cash_receipts: f64,
    pub liquidation_value: f64,
    pub long_market_value: f64,
    pub long_option_market_value: f64,
    pub money_market_fund: f64,
    pub mutual_fund_value: f64,
    pub pending_deposits: f64,
    pub savings: f64,
    pub short_market_value: f64,
    pub short_option_market_value: f64,
    pub total_cash: f64,
    pub unsettled_cash: f64,
}
/// Projected Balances item in [`SecuritiesAccount`](enum.SecuritiesAccount.html)
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectedBalances {
    pub cash_available_for_trading: f64,
    pub cash_available_for_withdrawal: f64,
}
5d1ae9a622a8e367137134a750656761d1bf01a8
| 10,788 |
// EndBASIC
// Copyright 2021 Julio Merino
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
//! Console representation and manipulation.
use crate::console::{ClearType, Console, Key, Position};
use async_trait::async_trait;
use crossterm::{cursor, event, execute, style, terminal, tty::IsTty, QueueableCommand};
use std::cmp::Ordering;
use std::collections::VecDeque;
use std::env;
use std::io::{self, Write};
/// Converts a `crossterm::ErrorKind` to an `io::Error`.
fn crossterm_error_to_io_error(e: crossterm::ErrorKind) -> io::Error {
    match e {
        // An underlying I/O error passes through unchanged.
        crossterm::ErrorKind::IoError(io_err) => io_err,
        // Bad UTF-8 maps naturally onto `InvalidData`.
        crossterm::ErrorKind::Utf8Error(utf8_err) => {
            io::Error::new(io::ErrorKind::InvalidData, utf8_err.to_string())
        }
        // Anything else becomes a generic error carrying its display text.
        other => io::Error::new(io::ErrorKind::Other, format!("{}", other)),
    }
}
/// Gets the value of the environment variable `name` and interprets it as a `usize`. Returns
/// `None` if the variable is not set or if its contents are invalid.
fn get_env_var_as_usize(name: &str) -> Option<usize> {
    // `and_then` + `.ok()` replace the manual `match` and the
    // `map(Some).unwrap_or(None)` plumbing; behavior is unchanged — an unset
    // variable or an unparsable value both yield `None`.
    env::var_os(name).and_then(|value| value.to_string_lossy().parse::<usize>().ok())
}
/// Implementation of the EndBASIC console to interact with stdin and stdout.
///
/// When attached to a TTY the terminal is switched to raw mode for the
/// lifetime of this object; `Drop` restores the previous mode.
pub struct TerminalConsole {
    /// Whether stdin and stdout are attached to a TTY. When this is true, the console is put in
    /// raw mode for finer-grained control.
    is_tty: bool,
    /// Line-oriented buffer to hold input when not operating in raw mode.
    buffer: VecDeque<Key>,
    /// Whether a background color is active. If so, we need to flush the contents of every line
    /// we print so that the color applies to the whole line.
    need_line_flush: bool,
}
impl Drop for TerminalConsole {
    fn drop(&mut self) {
        if self.is_tty {
            // Best-effort restoration of the terminal mode. The previous code
            // called `.unwrap()` here, but panicking inside `drop` while the
            // thread is already unwinding aborts the whole process, so a
            // failure to restore raw mode is deliberately ignored.
            let _ = terminal::disable_raw_mode();
        }
    }
}
impl TerminalConsole {
    /// Creates a new console based on the properties of stdin/stdout.
    ///
    /// When both streams are TTYs the terminal is switched to raw mode so
    /// individual keypresses can be observed; `Drop` restores the mode.
    pub fn from_stdio() -> io::Result<Self> {
        let is_tty = io::stdin().is_tty() && io::stdout().is_tty();
        if is_tty {
            terminal::enable_raw_mode().map_err(crossterm_error_to_io_error)?;
        }
        Ok(Self { is_tty, buffer: VecDeque::default(), need_line_flush: false })
    }
    /// Converts a line of text read from stdin into a sequence of key presses.
    fn line_to_keys(s: String) -> VecDeque<Key> {
        let mut keys = VecDeque::default();
        for ch in s.chars() {
            if ch == '\x1b' {
                keys.push_back(Key::Escape);
            } else if ch == '\n' {
                keys.push_back(Key::NewLine);
            } else if ch == '\r' {
                // Ignore. When we run under Windows and use golden test input files, we end up
                // seeing two separate characters to terminate a newline (CRLF) and these confuse
                // our tests. I am not sure why this doesn't seem to be a problem for interactive
                // usage though, but it might just be that crossterm hides this from us.
            } else if !ch.is_control() {
                keys.push_back(Key::Char(ch));
            } else {
                // Other control characters are preserved as opaque "unknown"
                // keys so callers can at least report them.
                keys.push_back(Key::Unknown(format!("{}", ch)));
            }
        }
        keys
    }
    /// Reads a single key from stdin when not attached to a TTY. Because characters are not
    /// visible to us until a newline is received, this reads complete lines and buffers them in
    /// memory.
    fn read_key_from_stdin(&mut self) -> io::Result<Key> {
        if self.buffer.is_empty() {
            let mut line = String::new();
            // A zero-byte read means stdin reached end of file.
            if io::stdin().read_line(&mut line)? == 0 {
                return Ok(Key::Eof);
            }
            self.buffer = TerminalConsole::line_to_keys(line);
        }
        match self.buffer.pop_front() {
            Some(key) => Ok(key),
            None => Ok(Key::Eof),
        }
    }
    /// Reads a single key from the connected TTY. This assumes the TTY is in raw mode.
    ///
    /// Loops until a key event arrives, silently discarding any non-key
    /// events (e.g. resizes) reported by crossterm.
    fn read_key_from_tty(&mut self) -> io::Result<Key> {
        loop {
            if let event::Event::Key(ev) = event::read().map_err(crossterm_error_to_io_error)? {
                match ev.code {
                    event::KeyCode::Backspace => return Ok(Key::Backspace),
                    event::KeyCode::Esc => return Ok(Key::Escape),
                    event::KeyCode::Up => return Ok(Key::ArrowUp),
                    event::KeyCode::Down => return Ok(Key::ArrowDown),
                    event::KeyCode::Left => return Ok(Key::ArrowLeft),
                    event::KeyCode::Right => return Ok(Key::ArrowRight),
                    // Ctrl+C / Ctrl+D: conventional interrupt and end-of-input chords.
                    event::KeyCode::Char('c') if ev.modifiers == event::KeyModifiers::CONTROL => {
                        return Ok(Key::Interrupt)
                    }
                    event::KeyCode::Char('d') if ev.modifiers == event::KeyModifiers::CONTROL => {
                        return Ok(Key::Eof)
                    }
                    // Ctrl+J / Ctrl+M are the raw-mode control codes for LF and CR.
                    event::KeyCode::Char('j') if ev.modifiers == event::KeyModifiers::CONTROL => {
                        return Ok(Key::NewLine)
                    }
                    event::KeyCode::Char('m') if ev.modifiers == event::KeyModifiers::CONTROL => {
                        return Ok(Key::NewLine)
                    }
                    event::KeyCode::Char(ch) => return Ok(Key::Char(ch)),
                    event::KeyCode::Enter => return Ok(Key::NewLine),
                    _ => return Ok(Key::Unknown(format!("{:?}", ev))),
                }
            }
        }
    }
}
#[async_trait(?Send)]
impl Console for TerminalConsole {
    fn clear(&mut self, how: ClearType) -> io::Result<()> {
        // Translate the backend-agnostic clear request into crossterm's enum.
        let how = match how {
            ClearType::All => terminal::ClearType::All,
            ClearType::CurrentLine => terminal::ClearType::CurrentLine,
            ClearType::UntilNewLine => terminal::ClearType::UntilNewLine,
        };
        let mut output = io::stdout();
        output.queue(terminal::Clear(how)).map_err(crossterm_error_to_io_error)?;
        if how == terminal::ClearType::All {
            // After a full clear the cursor is also reset to the origin.
            output.queue(cursor::MoveTo(0, 0)).map_err(crossterm_error_to_io_error)?;
        }
        output.flush()
    }
    fn color(&mut self, fg: Option<u8>, bg: Option<u8>) -> io::Result<()> {
        let mut output = io::stdout();
        // `None` means "reset to the terminal default" for either channel.
        let fg = match fg {
            None => style::Color::Reset,
            Some(color) => style::Color::AnsiValue(color),
        };
        let bg = match bg {
            None => style::Color::Reset,
            Some(color) => style::Color::AnsiValue(color),
        };
        output.queue(style::SetForegroundColor(fg)).map_err(crossterm_error_to_io_error)?;
        output.queue(style::SetBackgroundColor(bg)).map_err(crossterm_error_to_io_error)?;
        output.flush()?;
        // With a non-default background, `print` must clear to end of line so
        // the color covers the full line width.
        self.need_line_flush = bg != style::Color::Reset;
        Ok(())
    }
    fn enter_alt(&mut self) -> io::Result<()> {
        execute!(io::stdout(), terminal::EnterAlternateScreen).map_err(crossterm_error_to_io_error)
    }
    fn hide_cursor(&mut self) -> io::Result<()> {
        execute!(io::stdout(), cursor::Hide).map_err(crossterm_error_to_io_error)
    }
    fn is_interactive(&self) -> bool {
        self.is_tty
    }
    fn leave_alt(&mut self) -> io::Result<()> {
        execute!(io::stdout(), terminal::LeaveAlternateScreen).map_err(crossterm_error_to_io_error)
    }
    fn locate(&mut self, pos: Position) -> io::Result<()> {
        // crossterm addresses cells with u16 coordinates; reject positions
        // that cannot be represented instead of silently truncating.
        if pos.row > std::u16::MAX as usize {
            return Err(io::Error::new(io::ErrorKind::Other, "Row out of range"));
        }
        let row = pos.row as u16;
        if pos.column > std::u16::MAX as usize {
            return Err(io::Error::new(io::ErrorKind::Other, "Column out of range"));
        }
        let column = pos.column as u16;
        execute!(io::stdout(), cursor::MoveTo(column, row)).map_err(crossterm_error_to_io_error)
    }
    fn move_within_line(&mut self, off: i16) -> io::Result<()> {
        // Negative offsets move left, positive offsets move right, zero is a no-op.
        match off.cmp(&0) {
            Ordering::Less => execute!(io::stdout(), cursor::MoveLeft(-off as u16)),
            Ordering::Equal => Ok(()),
            Ordering::Greater => execute!(io::stdout(), cursor::MoveRight(off as u16)),
        }
        .map_err(crossterm_error_to_io_error)
    }
    fn print(&mut self, text: &str) -> io::Result<()> {
        let stdout = io::stdout();
        let mut stdout = stdout.lock();
        stdout.write_all(text.as_bytes())?;
        if self.need_line_flush {
            // Extend the active background color to the end of the line.
            execute!(stdout, terminal::Clear(terminal::ClearType::UntilNewLine))
                .map_err(crossterm_error_to_io_error)?;
        }
        if self.is_tty {
            // Raw mode disables output post-processing, so CRLF must be
            // emitted explicitly.
            stdout.write_all(b"\r\n")?;
        } else {
            stdout.write_all(b"\n")?;
        }
        Ok(())
    }
    async fn read_key(&mut self) -> io::Result<Key> {
        if self.is_tty {
            self.read_key_from_tty()
        } else {
            self.read_key_from_stdin()
        }
    }
    fn show_cursor(&mut self) -> io::Result<()> {
        execute!(io::stdout(), cursor::Show).map_err(crossterm_error_to_io_error)
    }
    fn size(&self) -> io::Result<Position> {
        // Must be careful to not query the terminal size if both LINES and COLUMNS are set, because
        // the query fails when we don't have a PTY and we still need to run under these conditions
        // for testing purposes.
        let lines = get_env_var_as_usize("LINES");
        let columns = get_env_var_as_usize("COLUMNS");
        let size = match (lines, columns) {
            (Some(l), Some(c)) => Position { row: l, column: c },
            (l, c) => {
                let (actual_columns, actual_lines) =
                    terminal::size().map_err(crossterm_error_to_io_error)?;
                // Environment overrides win over the queried dimensions.
                Position {
                    row: l.unwrap_or(actual_lines as usize),
                    column: c.unwrap_or(actual_columns as usize),
                }
            }
        };
        Ok(size)
    }
    fn write(&mut self, bytes: &[u8]) -> io::Result<()> {
        let stdout = io::stdout();
        let mut stdout = stdout.lock();
        stdout.write_all(bytes)?;
        stdout.flush()
    }
}
| 38.805755 | 100 | 0.569707 |
75c7d87e3666fcc63cb61df3dfee6a8bb3c7cc4d
| 6,139 |
use std::fmt;
/// A location in a source file: file name plus line and column numbers.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Position {
    pub file: crate::P<String>,
    pub line: u32,
    pub column: u32,
}
impl Position {
    /// Builds a position from a file name and its line/column coordinates.
    ///
    /// The parameters were previously named `x`/`y`, which obscured which was
    /// the line and which the column; call sites are positional and unaffected.
    pub fn new(file: crate::P<String>, line: u32, column: u32) -> Self {
        Self {
            file,
            line,
            column,
        }
    }
}
impl fmt::Display for Position {
    // Renders as `file:line:column`, the conventional diagnostic-location shape.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}:{}:{}", self.file, self.line, self.column)
    }
}
/// Every distinct kind of token the lexer can produce: literals, identifiers,
/// keywords, and operators/punctuation.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum TokenKind {
    String(String),
    LitChar(char),
    // Integer literals keep their raw digit text plus the detected base and suffix.
    LitInt(String, IntBase, IntSuffix),
    LitFloat(String),
    Identifier(String),
    Builtin(String),
    // End of input.
    End,
    // `<` / `>` used as quote-like delimiters (distinct from the `Lt`/`Gt`
    // comparison operators below) — presumably for generics; confirm in the parser.
    LQuote,
    RQuote,
    // Keywords
    Include,
    This,
    Match,
    Fun,
    Let,
    Var,
    While,
    If,
    Else,
    Loop,
    For,
    In,
    Break,
    Continue,
    Return,
    True,
    False,
    Nil,
    Throw,
    Try,
    Catch,
    Yield,
    Do,
    ForEach,
    Import,
    Type,
    Const,
    Goto,
    Underscore,
    // Operators
    Add,
    Sub,
    Mul,
    Div,
    Mod,
    Not,
    LParen,
    RParen,
    LBracket,
    RBracket,
    LBrace,
    RBrace,
    Comma,
    Semicolon,
    Dot,
    Colon,
    Sep, // ::
    Arrow,
    Tilde,
    BitOr,
    BitAnd,
    Caret,
    And,
    Or,
    Internal,
    Eq,
    EqEq,
    Ne,
    Lt,
    Le,
    Gt,
    Ge,
    GtGt,
    GtGtGt,
    LtLt,
}
impl TokenKind {
    /// Returns a short human-readable name for this token kind, suitable for
    /// diagnostics; for keywords and punctuation it is the literal source
    /// spelling, for literal kinds a descriptive label.
    pub fn name(&self) -> &str {
        match *self {
            TokenKind::Yield => "yield",
            TokenKind::ForEach => "foreach",
            TokenKind::String(_) => "string",
            // The label depends only on the suffix; the digits are ignored here.
            TokenKind::LitInt(_, _, suffix) => match suffix {
                IntSuffix::Byte => "byte number",
                IntSuffix::Int => "int number",
                IntSuffix::Long => "long number",
            },
            TokenKind::LitChar(_) => "char",
            TokenKind::LitFloat(_) => "float number",
            TokenKind::Identifier(_) => "identifier",
            TokenKind::Builtin(_) => "builtin",
            TokenKind::End => "<<EOF>>",
            TokenKind::LQuote => "<",
            TokenKind::RQuote => ">",
            // Keywords
            TokenKind::Try => "try",
            TokenKind::Catch => "catch",
            TokenKind::This => "self",
            TokenKind::Fun => "function",
            TokenKind::Let => "let",
            TokenKind::Var => "var",
            TokenKind::Goto => "goto",
            TokenKind::While => "while",
            TokenKind::If => "if",
            TokenKind::Else => "else",
            TokenKind::Loop => "loop",
            TokenKind::For => "for",
            TokenKind::In => "in",
            TokenKind::Break => "break",
            TokenKind::Continue => "continue",
            TokenKind::Return => "return",
            TokenKind::True => "true",
            TokenKind::False => "false",
            TokenKind::Nil => "nil",
            TokenKind::Throw => "throw",
            TokenKind::Match => "match",
            TokenKind::Do => "do",
            TokenKind::Type => "type",
            TokenKind::Const => "const",
            TokenKind::Underscore => "_",
            TokenKind::Import => "import",
            TokenKind::Include => "include",
            // Operators
            TokenKind::Add => "+",
            TokenKind::Sub => "-",
            TokenKind::Mul => "*",
            TokenKind::Div => "/",
            TokenKind::Mod => "%",
            TokenKind::Not => "!",
            TokenKind::LParen => "(",
            TokenKind::RParen => ")",
            TokenKind::LBracket => "[",
            TokenKind::RBracket => "]",
            TokenKind::LBrace => "{",
            TokenKind::RBrace => "}",
            TokenKind::Comma => ",",
            TokenKind::Semicolon => ";",
            TokenKind::Dot => ".",
            TokenKind::Colon => ":",
            TokenKind::Sep => "::",
            TokenKind::Arrow => "->",
            TokenKind::Tilde => "~",
            TokenKind::BitOr => "|",
            TokenKind::BitAnd => "&",
            TokenKind::Caret => "^",
            TokenKind::And => "&&",
            TokenKind::Or => "||",
            TokenKind::Internal => "internal",
            TokenKind::Eq => "=",
            TokenKind::EqEq => "==",
            TokenKind::Ne => "!=",
            TokenKind::Lt => "<",
            TokenKind::Le => "<=",
            TokenKind::Gt => ">",
            TokenKind::Ge => ">=",
            TokenKind::GtGtGt => ">>>",
            TokenKind::GtGt => ">>",
            TokenKind::LtLt => "<<",
        }
    }
}
/// Optional width/type suffix attached to an integer literal.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum IntSuffix {
    Int,
    Long,
    Byte,
}
/// A single lexed token: its kind plus the source position where it starts.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Token {
    pub kind: TokenKind,
    pub position: Position,
}
impl Token {
    /// Creates a token of the given kind at the given source position.
    pub fn new(tok: TokenKind, pos: Position) -> Token {
        Token { kind: tok, position: pos }
    }

    /// Returns `true` if this token marks the end of the input stream.
    pub fn is_eof(&self) -> bool {
        self.kind == TokenKind::End
    }

    /// Returns `true` if this token has exactly the given kind.
    pub fn is(&self, kind: TokenKind) -> bool {
        self.kind == kind
    }

    /// Renders the token the way it would appear in source code,
    /// including literal payloads and integer suffixes.
    pub fn name(&self) -> String {
        match self.kind {
            TokenKind::LitInt(ref val, _, suffix) => {
                // Integer literals carry an optional type suffix.
                let suffix = match suffix {
                    IntSuffix::Byte => "B",
                    IntSuffix::Int => "",
                    IntSuffix::Long => "L",
                };
                format!("{}{}", val, suffix)
            }
            TokenKind::String(ref val) => format!("\"{}\"", val),
            TokenKind::Identifier(ref val) => val.clone(),
            _ => self.kind.name().into(),
        }
    }
}
impl fmt::Display for Token {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Delegate to `name`, which already renders the token as source text.
        f.write_str(&self.name())
    }
}
/// Numeric base (radix) of an integer literal.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum IntBase {
    /// Binary literal.
    Bin,
    /// Decimal literal.
    Dec,
    /// Hexadecimal literal.
    Hex,
}

impl IntBase {
    /// Returns the radix as a number usable with e.g. `from_str_radix`.
    pub fn num(self) -> u32 {
        match self {
            Self::Bin => 2,
            Self::Dec => 10,
            Self::Hex => 16,
        }
    }
}
| 22.405109 | 69 | 0.443883 |
5bce13487a08a734422da50be02a18f4ba748c03
| 2,182 |
use crate::dag::IpldDag;
use crate::error::Error;
use crate::path::IpfsPath;
use crate::repo::RepoTypes;
use async_std::fs;
use async_std::io::ReadExt;
use async_std::path::PathBuf;
use libipld::cid::{Cid, Codec};
use libipld::ipld::Ipld;
use libipld::pb::PbNode;
use std::collections::BTreeMap;
use std::convert::TryInto;
/// A file's contents held fully in memory as raw bytes.
pub struct File {
    // Raw byte contents of the file.
    data: Vec<u8>,
}
impl File {
    /// Reads the file at `path` fully into memory.
    pub async fn new(path: PathBuf) -> Result<Self, Error> {
        // `fs::read` opens the file and reads it to the end in one call,
        // replacing the manual open + `read_to_end` dance.
        let data = fs::read(path).await?;
        Ok(File { data })
    }

    /// Loads a UnixFS v1 file from the DAG at the given IPFS path,
    /// extracting the raw payload out of the dag-pb node.
    pub async fn get_unixfs_v1<T: RepoTypes>(
        dag: &IpldDag<T>,
        path: IpfsPath,
    ) -> Result<Self, Error> {
        let ipld = dag.get(path).await?;
        let pb_node: PbNode = (&ipld).try_into()?;
        Ok(File { data: pb_node.data })
    }

    /// Stores this file's bytes as a UnixFS v1 (dag-pb) node with no links,
    /// returning the resulting CID.
    pub async fn put_unixfs_v1<T: RepoTypes>(&self, dag: &IpldDag<T>) -> Result<Cid, Error> {
        let links: Vec<Ipld> = vec![];
        let mut pb_node = BTreeMap::<String, Ipld>::new();
        pb_node.insert("Data".to_string(), self.data.clone().into());
        pb_node.insert("Links".to_string(), links.into());
        dag.put(pb_node.into(), Codec::DagProtobuf).await
    }
}
impl From<Vec<u8>> for File {
fn from(data: Vec<u8>) -> Self {
File { data }
}
}
impl From<&str> for File {
fn from(string: &str) -> Self {
File {
data: string.as_bytes().to_vec(),
}
}
}
impl Into<String> for File {
fn into(self) -> String {
String::from_utf8_lossy(&self.data).to_string()
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::repo::tests::create_mock_repo;
    use core::convert::TryFrom;
    // Stores a known UnixFS v1 payload and checks the resulting CID
    // against a precomputed value.
    #[async_std::test]
    async fn test_file_cid() {
        let (repo, _) = create_mock_repo();
        let dag = IpldDag::new(repo);
        let file = File::from("\u{8}\u{2}\u{12}\u{12}Here is some data\n\u{18}\u{12}");
        let cid = Cid::try_from("QmSy5pnHk1EnvE5dmJSyFKG5unXLGjPpBuJJCBQkBTvBaW").unwrap();
        let cid2 = file.put_unixfs_v1(&dag).await.unwrap();
        assert_eq!(cid.to_string(), cid2.to_string());
    }
}
| 26.938272 | 93 | 0.590742 |
902762f687ed9a46d3275f74b3c89626c6683462
| 869 |
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
use std::ops::Add;
// Newtype wrapping a boxed `usize`; `Clone` lets the test clone a value
// before (erroneously) also moving it.
#[derive(Clone)]
struct foo(Box<usize>);
impl Add for foo {
    type Output = foo;
    // Consumes both operands, moving the boxed values out via (unstable)
    // `box` patterns and re-boxing the sum.
    fn add(self, f: foo) -> foo {
        let foo(box i) = self;
        let foo(box j) = f;
        foo(box() (i + j))
    }
}
// Compile-fail test: `{x}` moves `x` into the addition's left operand, so the
// subsequent `x.clone()` uses a moved value. The `//~^ ERROR` annotation is
// checked by the test harness -- it is intentional, not a leftover.
fn main() {
    let x = foo(box 3);
    let _y = {x} + x.clone(); // the `{x}` forces a move to occur
    //~^ ERROR use of moved value: `x`
}
| 26.333333 | 68 | 0.635213 |
d7864b4c9f5f2442336b1e2888692b04dbc389b6
| 13,641 |
//! A demonstration of designing a custom, third-party widget.
//!
//! In this case, we'll design a simple circular button.
//!
//! All of the custom widget design will occur within the `circular_button` module.
//!
//! We'll *use* our fancy circular button in the `main` function (below the circular_button module).
//!
//! Note that in this case, we use `backend::src` to draw our widget, however in practice you may
//! use any backend you wish.
//!
//! For more information, please see the `Widget` trait documentation.
#[macro_use] extern crate conrod_core;
extern crate conrod_glium;
extern crate conrod_winit;
extern crate find_folder;
extern crate glium;
mod support;
/// The module in which we'll implement our own custom circular button.
mod circular_button {
use conrod_core::{self, widget_ids, widget, Colorable, Labelable, Point, Positionable, Widget};
    /// The type upon which we'll implement the `Widget` trait.
    #[derive(WidgetCommon)]
    pub struct CircularButton<'a> {
        /// An object that handles some of the dirty work of rendering a GUI. We don't
        /// really have to worry about it.
        #[conrod(common_builder)]
        common: widget::CommonBuilder,
        /// Optional label string for the button.
        maybe_label: Option<&'a str>,
        /// Unique styling parameters for this widget; see the `Style` struct below.
        style: Style,
        /// Whether the button is currently enabled, i.e. whether it responds to
        /// user input.
        enabled: bool
    }
    // We use `#[derive(WidgetStyle)] to vastly simplify the definition and implementation of the
    // widget's associated `Style` type. This generates an implementation that automatically
    // retrieves defaults from the provided theme in the following order:
    //
    // 1. If the field is `None`, falls back to the style stored within the `Theme`.
    // 2. If there are no style defaults for the widget in the `Theme`, or if the
    //    default field is also `None`, falls back to the expression specified within
    //    the field's `#[conrod(default = "expr")]` attribute.
    /// Represents the unique styling for our CircularButton widget.
    ///
    /// Every field is optional: `None` means "resolve from the theme, then
    /// from the `#[conrod(default = ...)]` expression".
    #[derive(Copy, Clone, Debug, Default, PartialEq, WidgetStyle)]
    pub struct Style {
        /// Color of the button.
        #[conrod(default = "theme.shape_color")]
        pub color: Option<conrod_core::Color>,
        /// Color of the button's label.
        #[conrod(default = "theme.label_color")]
        pub label_color: Option<conrod_core::Color>,
        /// Font size of the button's label.
        #[conrod(default = "theme.font_size_medium")]
        pub label_font_size: Option<conrod_core::FontSize>,
        /// Specify a unique font for the label.
        #[conrod(default = "theme.font_id")]
        pub label_font_id: Option<Option<conrod_core::text::font::Id>>,
    }
    // We'll create the widget using a `Circle` widget and a `Text` widget for its label.
    //
    // Here is where we generate the type that will produce these identifiers.
    // The `widget_ids!` macro expands to an `Ids` type holding one identifier
    // per name listed (constructed via `Ids::new` in `init_state` below).
    widget_ids! {
        struct Ids {
            circle,
            text,
        }
    }
    /// Represents the unique, cached state for our CircularButton widget.
    pub struct State {
        // Identifiers of the child widgets (circle + label text) instantiated in `update`.
        ids: Ids,
    }
impl<'a> CircularButton<'a> {
/// Create a button context to be built upon.
pub fn new() -> Self {
CircularButton {
common: widget::CommonBuilder::default(),
style: Style::default(),
maybe_label: None,
enabled: true,
}
}
/// Specify the font used for displaying the label.
pub fn label_font_id(mut self, font_id: conrod_core::text::font::Id) -> Self {
self.style.label_font_id = Some(Some(font_id));
self
}
/// If true, will allow user inputs. If false, will disallow user inputs. Like
/// other Conrod configs, this returns self for chainability. Allow dead code
/// because we never call this in the example.
#[allow(dead_code)]
pub fn enabled(mut self, flag: bool) -> Self {
self.enabled = flag;
self
}
}
    /// A custom Conrod widget must implement the Widget trait. See the **Widget** trait
    /// documentation for more details.
    impl<'a> Widget for CircularButton<'a> {
        /// The State struct that we defined above.
        type State = State;
        /// The Style struct that we defined using the `widget_style!` macro.
        type Style = Style;
        /// The event produced by instantiating the widget.
        ///
        /// `Some` when clicked, otherwise `None`.
        type Event = Option<()>;
        fn init_state(&self, id_gen: widget::id::Generator) -> Self::State {
            State { ids: Ids::new(id_gen) }
        }
        fn style(&self) -> Self::Style {
            self.style.clone()
        }
        /// Optionally specify a function to use for determining whether or not a point is over a
        /// widget, or if some other widget's function should be used to represent this widget.
        ///
        /// This method is optional to implement. By default, the bounding rectangle of the widget
        /// is used.
        fn is_over(&self) -> widget::IsOverFn {
            use conrod_core::graph::Container;
            use conrod_core::Theme;
            // Hit-testing delegates to the circle child, so points inside the
            // bounding box but outside the circle do not count as "over".
            fn is_over_widget(widget: &Container, _: Point, _: &Theme) -> widget::IsOver {
                let unique = widget.state_and_style::<State, Style>().unwrap();
                unique.state.ids.circle.into()
            }
            is_over_widget
        }
        /// Update the state of the button by handling any input that has occurred since the last
        /// update.
        fn update(self, args: widget::UpdateArgs<Self>) -> Self::Event {
            let widget::UpdateArgs { id, state, rect, ui, style, .. } = args;
            let (color, event) = {
                let input = ui.widget_input(id);
                // If the button was clicked, produce `Some` event.
                let event = input.clicks().left().next().map(|_| ());
                // Start from the themed color, then highlight/darken based on
                // the current mouse interaction state.
                let color = style.color(&ui.theme);
                let color = input.mouse().map_or(color, |mouse| {
                    if mouse.buttons.left().is_down() {
                        color.clicked()
                    } else {
                        color.highlighted()
                    }
                });
                (color, event)
            };
            // Finally, we'll describe how we want our widget drawn by simply instantiating the
            // necessary primitive graphics widgets.
            //
            // Conrod will automatically determine whether or not any changes have occurred and
            // whether or not any widgets need to be re-drawn.
            //
            // The primitive graphics widgets are special in that their unique state is used within
            // conrod's backend to do the actual drawing. This allows us to build up more complex
            // widgets by using these simple primitives with our familiar layout, coloring, etc
            // methods.
            //
            // If you notice that conrod is missing some sort of primitive graphics that you
            // require, please file an issue or open a PR so we can add it! :)
            // First, we'll draw the **Circle** with a radius that is half our given width.
            let radius = rect.w() / 2.0;
            widget::Circle::fill(radius)
                .middle_of(id)
                .graphics_for(id)
                .color(color)
                .set(state.ids.circle, ui);
            // Now we'll instantiate our label using the **Text** widget.
            if let Some(ref label) = self.maybe_label {
                let label_color = style.label_color(&ui.theme);
                let font_size = style.label_font_size(&ui.theme);
                // Fall back to the first loaded font if none was specified.
                let font_id = style.label_font_id(&ui.theme).or(ui.fonts.ids().next());
                widget::Text::new(label)
                    .and_then(font_id, widget::Text::font_id)
                    .middle_of(id)
                    .font_size(font_size)
                    .graphics_for(id)
                    .color(label_color)
                    .set(state.ids.text, ui);
            }
            event
        }
    }
/// Provide the chainable color() configuration method.
impl<'a> Colorable for CircularButton<'a> {
fn color(mut self, color: conrod_core::Color) -> Self {
self.style.color = Some(color);
self
}
}
/// Provide the chainable label(), label_color(), and label_font_size()
/// configuration methods.
impl<'a> Labelable<'a> for CircularButton<'a> {
fn label(mut self, text: &'a str) -> Self {
self.maybe_label = Some(text);
self
}
fn label_color(mut self, color: conrod_core::Color) -> Self {
self.style.label_color = Some(color);
self
}
fn label_font_size(mut self, size: conrod_core::FontSize) -> Self {
self.style.label_font_size = Some(size);
self
}
}
}
fn main() {
    use conrod_core::{self, widget, widget_ids, Colorable, Labelable, Positionable, Sizeable, Widget};
    use glium::{self, Surface};
    use support;
    use self::circular_button::CircularButton;
    // Initial window dimensions.
    const WIDTH: u32 = 1200;
    const HEIGHT: u32 = 800;
    // Build the window.
    let mut events_loop = glium::glutin::EventsLoop::new();
    let window = glium::glutin::WindowBuilder::new()
        .with_title("Control Panel")
        .with_dimensions((WIDTH, HEIGHT).into());
    let context = glium::glutin::ContextBuilder::new()
        .with_vsync(true)
        .with_multisampling(4);
    let display = glium::Display::new(window, context, &events_loop).unwrap();
    let display = support::GliumDisplayWinitWrapper(display);
    // construct our `Ui`.
    let mut ui = conrod_core::UiBuilder::new([WIDTH as f64, HEIGHT as f64]).build();
    // The `widget_ids` macro is an easy, safe way of generating a type for producing `widget::Id`s.
    widget_ids! {
        struct Ids {
            // An ID for the background widget, upon which we'll place our custom button.
            background,
            // The WidgetId we'll use to plug our widget into the `Ui`.
            circle_button,
        }
    }
    let ids = Ids::new(ui.widget_id_generator());
    // Add a `Font` to the `Ui`'s `font::Map` from file.
    let assets = find_folder::Search::KidsThenParents(3, 5).for_folder("assets").unwrap();
    let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
    let regular = ui.fonts.insert_from_file(font_path).unwrap();
    // A type used for converting `conrod_core::render::Primitives` into `Command`s that can be used
    // for drawing to the glium `Surface`.
    let mut renderer = conrod_glium::Renderer::new(&display.0).unwrap();
    // The image map describing each of our widget->image mappings (in our case, none).
    let image_map = conrod_core::image::Map::<glium::texture::Texture2d>::new();
    // Poll events from the window.
    let mut event_loop = support::EventLoop::new();
    'main: loop {
        // Handle all events.
        for event in event_loop.next(&mut events_loop) {
            // Use the `winit` backend feature to convert the winit event to a conrod one.
            if let Some(event) = conrod_winit::convert_event(event.clone(), &display) {
                ui.handle_event(event);
                event_loop.needs_update();
            }
            match event {
                glium::glutin::Event::WindowEvent { event, .. } => match event {
                    // Break from the loop upon `Escape` or window close.
                    glium::glutin::WindowEvent::CloseRequested |
                    glium::glutin::WindowEvent::KeyboardInput {
                        input: glium::glutin::KeyboardInput {
                            virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
                            ..
                        },
                        ..
                    } => break 'main,
                    _ => (),
                },
                _ => (),
            }
        }
        // Instantiate the widgets.
        {
            let ui = &mut ui.set_widgets();
            // Sets a color to clear the background with before the Ui draws our widget.
            widget::Canvas::new().color(conrod_core::color::DARK_RED).set(ids.background, ui);
            // Instantiate our custom widget. The builder produces `Some(())`
            // per click, hence the `for` loop over the event.
            for _click in CircularButton::new()
                .color(conrod_core::color::rgb(0.0, 0.3, 0.1))
                .middle_of(ids.background)
                .w_h(256.0, 256.0)
                .label_font_id(regular)
                .label_color(conrod_core::color::WHITE)
                .label("Circular Button")
                // Add the widget to the conrod_core::Ui. This schedules the widget to be
                // drawn when we call Ui::draw.
                .set(ids.circle_button, ui)
            {
                println!("Click!");
            }
        }
        // Render the `Ui` and then display it on the screen.
        if let Some(primitives) = ui.draw_if_changed() {
            renderer.fill(&display.0, primitives, &image_map);
            let mut target = display.0.draw();
            target.clear_color(0.0, 0.0, 0.0, 1.0);
            renderer.draw(&display.0, &mut target, &image_map).unwrap();
            target.finish().unwrap();
        }
    }
}
3acb6d0943dc2727ef53b4534b2371d3d9f0331a
| 1,488 |
//! The cleaner is responsible for removing rejected transactions from the database
//! that were stored 2 or more weeks ago (this value is configurable as well as the actor's sleep time).
//!
//! The purpose is not to store the information about the failed transaction execution
//! which is useful only for a short period of time. Since such transactions are not actually
//! included in the block and don't affect the state hash, there is not much sense to keep
//! them forever.
// External uses
use tokio::{task::JoinHandle, time};
// Workspace deps
use zksync_config::DBConfig;
use zksync_storage::ConnectionPool;
#[must_use]
pub fn run_rejected_tx_cleaner(config: &DBConfig, db_pool: ConnectionPool) -> JoinHandle<()> {
let max_age = chrono::Duration::from_std(config.rejected_transactions_max_age()).unwrap();
let interval = config.rejected_transactions_cleaner_interval();
let mut timer = time::interval(interval);
tokio::spawn(async move {
loop {
let mut storage = db_pool
.access_storage()
.await
.expect("transactions cleaner couldn't access the database");
if let Err(e) = storage
.chain()
.operations_schema()
.remove_rejected_transactions(max_age)
.await
{
vlog::error!("Can't delete rejected transactions {:?}", e);
}
timer.tick().await;
}
})
}
| 37.2 | 104 | 0.643145 |
f999b200e98fcb74a4aed362ba86622424e2cf96
| 4,033 |
//! This program is used to generate a bunch of code, including block state ID mappings
//! and corresponding Rust code. It reads from vanilla block.json
//! files. See `block_format.md` for more information.
#![forbid(unsafe_code, warnings)]
#[macro_use]
extern crate serde;
#[macro_use]
extern crate derive_deref;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate log;
#[macro_use]
extern crate quote;
mod biome;
mod block_data;
mod item;
mod item_to_block;
mod rust;
mod util;
pub use block_data::{
Block, BlockProperties, BlockReport, State, StateProperties, DEFAULT_STATE_ID,
};
use byteorder::{LittleEndian, WriteBytesExt};
use clap::App;
use failure::Error;
use heck::CamelCase;
use proc_macro2::TokenStream;
use quote::quote;
use std::fs::File;
use std::io::{BufReader, Write};
use std::process::exit;
use std::str::FromStr;
use syn::export::Span;
use syn::Ident;
/// Entry point: sets up logging, delegates to `run`, and exits with a
/// non-zero status if generation failed.
fn main() {
    simple_logger::init_with_level(log::Level::Info).unwrap();
    match run() {
        Ok(()) => {}
        Err(e) => {
            error!("An error occurred: {}", e);
            exit(1);
        }
    }
}
/// Parses the CLI arguments (defined in `cli.yml`) and dispatches to the
/// requested generator.
fn run() -> Result<(), Error> {
    let yaml = load_yaml!("cli.yml");
    let matches = App::from_yaml(yaml).get_matches();
    // Each arm unwraps its own subcommand matches; the `unwrap`s are safe
    // because the subcommand name was just matched.
    match matches.subcommand_name() {
        Some("block-mappings") => {
            let args = matches.subcommand_matches("block-mappings").unwrap();
            block_data::generate_mappings_file(
                args.value_of("input").unwrap(),
                args.value_of("output").unwrap(),
                args.value_of("native").unwrap(),
                u32::from_str(args.value_of("proto").unwrap())?,
                args.value_of("ver").unwrap(),
            )?;
        }
        Some("native-block-mappings") => {
            let args = matches.subcommand_matches("native-block-mappings").unwrap();
            block_data::generate_native_mappings_file(
                args.value_of("input").unwrap(),
                args.value_of("output").unwrap(),
                u32::from_str(args.value_of("proto").unwrap())?,
                args.value_of("ver").unwrap(),
            )?;
        }
        Some("block-rust") => {
            let args = matches.subcommand_matches("block-rust").unwrap();
            rust::generate_rust_code(
                args.value_of("input").unwrap(),
                args.value_of("output").unwrap(),
            )?;
        }
        Some("item-mappings") => {
            let args = matches.subcommand_matches("item-mappings").unwrap();
            item::generate_mappings_file(
                args.value_of("input").unwrap(),
                args.value_of("output").unwrap(),
            )?;
        }
        Some("item-rust") => {
            let args = matches.subcommand_matches("item-rust").unwrap();
            item::generate_rust(
                args.value_of("input").unwrap(),
                args.value_of("output").unwrap(),
            )?;
        }
        Some("items-to-blocks") => {
            let args = matches.subcommand_matches("items-to-blocks").unwrap();
            item_to_block::generate_mappings(
                args.value_of("blocks").unwrap(),
                args.value_of("items").unwrap(),
                args.value_of("output").unwrap(),
            )?;
        }
        Some("biomes") => {
            let args = matches.subcommand_matches("biomes").unwrap();
            biome::generate_rust(
                args.value_of("input").unwrap(),
                args.value_of("output").unwrap(),
            )?;
        }
        // Unknown or missing subcommands are reported but not treated as
        // hard errors here.
        Some(s) => {
            error!("Invalid subcommand {}", s);
            return Ok(());
        }
        None => {
            error!("No subcommand specified");
            return Ok(());
        }
    }
    Ok(())
}
/// Extension trait adding length-prefixed string encoding to any writer.
pub trait WriteExt {
    /// Writes `x` as a little-endian `u32` byte length followed by the raw
    /// UTF-8 bytes of the string.
    fn write_string(&mut self, x: &str) -> std::io::Result<()>;
}

impl<W: Write> WriteExt for W {
    fn write_string(&mut self, x: &str) -> std::io::Result<()> {
        let bytes = x.as_bytes();
        self.write_u32::<LittleEndian>(bytes.len() as u32)?;
        self.write_all(bytes)?;
        Ok(())
    }
}
| 29.874074 | 87 | 0.551698 |
e67c0d6487c3e82e89f9d6ef1af6f4988c4420c2
| 5,674 |
//! Ways to create a `str` from bytes slice.
use crate::mem;
use super::validations::run_utf8_validation;
use super::Utf8Error;
/// Converts a slice of bytes to a string slice.
///
/// A string slice ([`&str`]) is made of bytes ([`u8`]), and a byte slice
/// ([`&[u8]`][byteslice]) is made of bytes, so this function converts between
/// the two. Not all byte slices are valid string slices, however: [`&str`] requires
/// that it is valid UTF-8. `from_utf8()` checks to ensure that the bytes are valid
/// UTF-8, and then does the conversion.
///
/// [`&str`]: str
/// [byteslice]: slice
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want to
/// incur the overhead of the validity check, there is an unsafe version of
/// this function, [`from_utf8_unchecked`], which has the same
/// behavior but skips the check.
///
/// If you need a `String` instead of a `&str`, consider
/// [`String::from_utf8`][string].
///
/// [string]: ../../std/string/struct.String.html#method.from_utf8
///
/// Because you can stack-allocate a `[u8; N]`, and you can take a
/// [`&[u8]`][byteslice] of it, this function is one way to have a
/// stack-allocated string. There is an example of this in the
/// examples section below.
///
/// [byteslice]: slice
///
/// # Errors
///
/// Returns `Err` if the slice is not UTF-8 with a description as to why the
/// provided slice is not UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::str;
///
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// // We know these bytes are valid, so just use `unwrap()`.
/// let sparkle_heart = str::from_utf8(&sparkle_heart).unwrap();
///
/// assert_eq!("💖", sparkle_heart);
/// ```
///
/// Incorrect bytes:
///
/// ```
/// use std::str;
///
/// // some invalid bytes, in a vector
/// let sparkle_heart = vec![0, 159, 146, 150];
///
/// assert!(str::from_utf8(&sparkle_heart).is_err());
/// ```
///
/// See the docs for [`Utf8Error`] for more details on the kinds of
/// errors that can be returned.
///
/// A "stack allocated string":
///
/// ```
/// use std::str;
///
/// // some bytes, in a stack-allocated array
/// let sparkle_heart = [240, 159, 146, 150];
///
/// // We know these bytes are valid, so just use `unwrap()`.
/// let sparkle_heart = str::from_utf8(&sparkle_heart).unwrap();
///
/// assert_eq!("💖", sparkle_heart);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
    // Reject invalid UTF-8 up front; on success the unchecked cast is sound.
    run_utf8_validation(v)?;
    // SAFETY: Just ran validation.
    Ok(unsafe { from_utf8_unchecked(v) })
}
/// Converts a mutable slice of bytes to a mutable string slice.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::str;
///
/// // "Hello, Rust!" as a mutable vector
/// let mut hellorust = vec![72, 101, 108, 108, 111, 44, 32, 82, 117, 115, 116, 33];
///
/// // As we know these bytes are valid, we can use `unwrap()`
/// let outstr = str::from_utf8_mut(&mut hellorust).unwrap();
///
/// assert_eq!("Hello, Rust!", outstr);
/// ```
///
/// Incorrect bytes:
///
/// ```
/// use std::str;
///
/// // Some invalid bytes in a mutable vector
/// let mut invalid = vec![128, 223];
///
/// assert!(str::from_utf8_mut(&mut invalid).is_err());
/// ```
/// See the docs for [`Utf8Error`] for more details on the kinds of
/// errors that can be returned.
#[stable(feature = "str_mut_extras", since = "1.20.0")]
pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
    // Reject invalid UTF-8 up front; on success the unchecked cast is sound.
    run_utf8_validation(v)?;
    // SAFETY: Just ran validation.
    Ok(unsafe { from_utf8_unchecked_mut(v) })
}
/// Converts a slice of bytes to a string slice without checking
/// that the string contains valid UTF-8.
///
/// See the safe version, [`from_utf8`], for more information.
///
/// # Safety
///
/// This function is unsafe because it does not check that the bytes passed to
/// it are valid UTF-8. If this constraint is violated, undefined behavior
/// results, as the rest of Rust assumes that [`&str`]s are valid UTF-8.
///
/// [`&str`]: str
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::str;
///
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// let sparkle_heart = unsafe {
/// str::from_utf8_unchecked(&sparkle_heart)
/// };
///
/// assert_eq!("💖", sparkle_heart);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_str_from_utf8_unchecked", since = "1.55.0")]
#[cfg_attr(bootstrap, rustc_allow_const_fn_unstable(const_fn_transmute))]
pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
    // No runtime work happens here; the conversion is purely a type change.
    // SAFETY: the caller must guarantee that the bytes `v` are valid UTF-8.
    // Also relies on `&str` and `&[u8]` having the same layout.
    unsafe { mem::transmute(v) }
}
/// Converts a slice of bytes to a string slice without checking
/// that the string contains valid UTF-8; mutable version.
///
/// See the immutable version, [`from_utf8_unchecked()`] for more information.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::str;
///
/// let mut heart = vec![240, 159, 146, 150];
/// let heart = unsafe { str::from_utf8_unchecked_mut(&mut heart) };
///
/// assert_eq!("💖", heart);
/// ```
#[inline]
#[stable(feature = "str_mut_extras", since = "1.20.0")]
pub unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
    // Mutable counterpart of `from_utf8_unchecked`, done via a raw pointer
    // cast rather than `transmute`.
    // SAFETY: the caller must guarantee that the bytes `v`
    // are valid UTF-8, thus the cast to `*mut str` is safe.
    // Also, the pointer dereference is safe because that pointer
    // comes from a reference which is guaranteed to be valid for writes.
    unsafe { &mut *(v as *mut [u8] as *mut str) }
}
| 29.398964 | 84 | 0.627423 |
1ef4728faf076206f0dc3923f030fd8fb061d897
| 16,250 |
//! Implement thread-local storage.
use std::collections::BTreeMap;
use std::collections::btree_map::Entry as BTreeEntry;
use std::collections::hash_map::Entry as HashMapEntry;
use log::trace;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty;
use rustc_target::abi::{Size, HasDataLayout};
use crate::{
HelpersEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag, ThreadId,
ThreadsEvalContextExt,
};
/// Identifier of a single TLS key, as handed out by `TlsData::create_tls_key`.
pub type TlsKey = u128;
/// Per-key TLS state: the per-thread values plus the key's optional destructor.
#[derive(Clone, Debug)]
pub struct TlsEntry<'tcx> {
    /// The data for this key. None is used to represent NULL.
    /// (We normalize this early to avoid having to do a NULL-ptr-test each time we access the data.)
    data: BTreeMap<ThreadId, Scalar<Tag>>,
    /// Destructor to run for each thread's value, if one was registered.
    dtor: Option<ty::Instance<'tcx>>,
}
/// Progress marker for a thread that is currently running its TLS destructors.
#[derive(Clone, Debug)]
struct RunningDtorsState {
    /// The last TlsKey used to retrieve a TLS destructor. `None` means that we
    /// have not tried to retrieve a TLS destructor yet or that we already tried
    /// all keys.
    last_dtor_key: Option<TlsKey>,
}
/// All thread-local-storage state of the interpreted program: pthread-style
/// keyed storage, macOS per-thread destructors, and destructor-run bookkeeping.
#[derive(Debug)]
pub struct TlsData<'tcx> {
    /// The Key to use for the next thread-local allocation.
    next_key: TlsKey,
    /// pthreads-style thread-local storage.
    keys: BTreeMap<TlsKey, TlsEntry<'tcx>>,
    /// A single per thread destructor of the thread local storage (that's how
    /// things work on macOS) with a data argument.
    macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Tag>)>,
    /// State for currently running TLS dtors. If this map contains a key for a
    /// specific thread, it means that we are in the "destruct" phase, during
    /// which some operations are UB.
    dtors_running: FxHashMap<ThreadId, RunningDtorsState>,
}
impl<'tcx> Default for TlsData<'tcx> {
fn default() -> Self {
TlsData {
next_key: 1, // start with 1 as we must not use 0 on Windows
keys: Default::default(),
macos_thread_dtors: Default::default(),
dtors_running: Default::default(),
}
}
}
impl<'tcx> TlsData<'tcx> {
/// Generate a new TLS key with the given destructor.
/// `max_size` determines the integer size the key has to fit in.
pub fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>, max_size: Size) -> InterpResult<'tcx, TlsKey> {
let new_key = self.next_key;
self.next_key += 1;
self.keys.insert(new_key, TlsEntry { data: Default::default(), dtor }).unwrap_none();
trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);
if max_size.bits() < 128 && new_key >= (1u128 << max_size.bits() as u128) {
throw_unsup_format!("we ran out of TLS key space");
}
Ok(new_key)
}
pub fn delete_tls_key(&mut self, key: TlsKey) -> InterpResult<'tcx> {
match self.keys.remove(&key) {
Some(_) => {
trace!("TLS key {} removed", key);
Ok(())
}
None => throw_ub_format!("removing a non-existig TLS key: {}", key),
}
}
pub fn load_tls(
&self,
key: TlsKey,
thread_id: ThreadId,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Scalar<Tag>> {
match self.keys.get(&key) {
Some(TlsEntry { data, .. }) => {
let value = data.get(&thread_id).copied();
trace!("TLS key {} for thread {:?} loaded: {:?}", key, thread_id, value);
Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx).into()))
}
None => throw_ub_format!("loading from a non-existing TLS key: {}", key),
}
}
pub fn store_tls(
&mut self,
key: TlsKey,
thread_id: ThreadId,
new_data: Option<Scalar<Tag>>
) -> InterpResult<'tcx> {
match self.keys.get_mut(&key) {
Some(TlsEntry { data, .. }) => {
match new_data {
Some(scalar) => {
trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, scalar);
data.insert(thread_id, scalar);
}
None => {
trace!("TLS key {} for thread {:?} removed", key, thread_id);
data.remove(&thread_id);
}
}
Ok(())
}
None => throw_ub_format!("storing to a non-existing TLS key: {}", key),
}
}
/// Set the thread wide destructor of the thread local storage for the given
/// thread. This function is used to implement `_tlv_atexit` shim on MacOS.
///
/// Thread wide dtors are available only on MacOS. There is one destructor
/// per thread as can be guessed from the following comment in the
/// [`_tlv_atexit`
/// implementation](https://github.com/opensource-apple/dyld/blob/195030646877261f0c8c7ad8b001f52d6a26f514/src/threadLocalVariables.c#L389):
///
/// // NOTE: this does not need locks because it only operates on current thread data
pub fn set_macos_thread_dtor(
&mut self,
thread: ThreadId,
dtor: ty::Instance<'tcx>,
data: Scalar<Tag>
) -> InterpResult<'tcx> {
if self.dtors_running.contains_key(&thread) {
// UB, according to libstd docs.
throw_ub_format!("setting thread's local storage destructor while destructors are already running");
}
if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
throw_unsup_format!("setting more than one thread local storage destructor for the same thread is not supported");
}
Ok(())
}
/// Returns a dtor, its argument and its index, if one is supposed to run.
/// `key` is the last dtors that was run; we return the *next* one after that.
///
/// An optional destructor function may be associated with each key value.
/// At thread exit, if a key value has a non-NULL destructor pointer,
/// and the thread has a non-NULL value associated with that key,
/// the value of the key is set to NULL, and then the function pointed
/// to is called with the previously associated value as its sole argument.
/// The order of destructor calls is unspecified if more than one destructor
/// exists for a thread when it exits.
///
/// If, after all the destructors have been called for all non-NULL values
/// with associated destructors, there are still some non-NULL values with
/// associated destructors, then the process is repeated.
/// If, after at least {PTHREAD_DESTRUCTOR_ITERATIONS} iterations of destructor
/// calls for outstanding non-NULL values, there are still some non-NULL values
/// with associated destructors, implementations may stop calling destructors,
/// or they may continue calling destructors until no non-NULL values with
/// associated destructors exist, even though this might result in an infinite loop.
fn fetch_tls_dtor(
&mut self,
key: Option<TlsKey>,
thread_id: ThreadId,
) -> Option<(ty::Instance<'tcx>, Scalar<Tag>, TlsKey)> {
use std::collections::Bound::*;
let thread_local = &mut self.keys;
let start = match key {
Some(key) => Excluded(key),
None => Unbounded,
};
for (&key, TlsEntry { data, dtor }) in
thread_local.range_mut((start, Unbounded))
{
match data.entry(thread_id) {
BTreeEntry::Occupied(entry) => {
if let Some(dtor) = dtor {
// Set TLS data to NULL, and call dtor with old value.
let data_scalar = entry.remove();
let ret = Some((*dtor, data_scalar, key));
return ret;
}
}
BTreeEntry::Vacant(_) => {}
}
}
None
}
/// Mark that destructors are running for `thread`. Existing state stored in
/// `dtors_running` for this thread is left untouched. Returns `true` when
/// dtors for `thread` were already marked as running.
fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool {
    if let HashMapEntry::Vacant(entry) = self.dtors_running.entry(thread) {
        // First call for this thread: initialize its dtor-iteration state.
        // We go through the entry API (instead of a plain `insert`) so that
        // an already-present `last_dtor_key` can never be clobbered.
        entry.insert(RunningDtorsState { last_dtor_key: None });
        false
    } else {
        // Destructors for this thread were already scheduled earlier.
        true
    }
}
/// Delete all TLS entries for the given thread. This function should be
/// called after all TLS destructors have already finished.
fn delete_all_thread_tls(&mut self, thread_id: ThreadId) {
for TlsEntry { data, .. } in self.keys.values_mut() {
data.remove(&thread_id);
}
}
}
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
/// Private helpers for scheduling the platform-specific flavors of TLS destructors.
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Schedule TLS destructors for the main thread on Windows. The
    /// implementation assumes that we do not support concurrency on Windows
    /// yet.
    fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let active_thread = this.get_active_thread()?;
        assert_eq!(this.get_total_thread_count()?, 1, "concurrency on Windows not supported");
        // Windows has a special magic linker section that is run on certain events.
        // Instead of searching for that section and supporting arbitrary hooks in there
        // (that would be basically https://github.com/rust-lang/miri/issues/450),
        // we specifically look up the static in libstd that we know is placed
        // in that section.
        let thread_callback = this.eval_path_scalar(&["std", "sys", "windows", "thread_local", "p_thread_callback"])?;
        let thread_callback = this.memory.get_fn(thread_callback.not_undef()?)?.as_instance()?;
        // The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`.
        let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_THREAD_DETACH"])?;
        // The callback returns nothing, so the return place is a dangling unit place.
        let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
        this.call_function(
            thread_callback,
            &[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()],
            Some(ret_place),
            StackPopCleanup::None { cleanup: true },
        )?;
        // Re-enable the thread so the scheduler steps through the callback we just pushed.
        this.enable_thread(active_thread)?;
        Ok(())
    }
    /// Schedule the MacOS thread destructor of the thread local storage to be
    /// executed. Returns `true` if scheduled.
    ///
    /// Note: It is safe to call this function also on other Unixes.
    fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread_id = this.get_active_thread()?;
        // `remove` both fetches the dtor and ensures it is only ever scheduled once.
        if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
            trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id);
            let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
            this.call_function(
                instance,
                &[data.into()],
                Some(ret_place),
                StackPopCleanup::None { cleanup: true },
            )?;
            // Enable the thread so that it steps through the destructor which
            // we just scheduled. Since we deleted the destructor, it is
            // guaranteed that we will schedule it again. The `dtors_running`
            // flag will prevent the code from adding the destructor again.
            this.enable_thread(thread_id)?;
            Ok(true)
        } else {
            Ok(false)
        }
    }
    /// Schedule a pthread TLS destructor. Returns `true` if found
    /// a destructor to schedule, and `false` otherwise.
    fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let active_thread = this.get_active_thread()?;
        assert!(this.has_terminated(active_thread)?, "running TLS dtors for non-terminated thread");
        // Fetch next dtor after `key`.
        let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key.clone();
        let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
            dtor @ Some(_) => dtor,
            // We ran each dtor once, start over from the beginning.
            // (POSIX allows repeating the whole pass while non-NULL values remain.)
            None => {
                this.machine.tls.fetch_tls_dtor(None, active_thread)
            }
        };
        if let Some((instance, ptr, key)) = dtor {
            // Remember where we stopped so the next call resumes after this key.
            this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = Some(key);
            trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread);
            assert!(!this.is_null(ptr).unwrap(), "data can't be NULL when dtor is called!");
            let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
            this.call_function(
                instance,
                &[ptr.into()],
                Some(ret_place),
                StackPopCleanup::None { cleanup: true },
            )?;
            this.enable_thread(active_thread)?;
            return Ok(true);
        }
        // No dtor left at all; reset the iteration cursor.
        this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None;
        Ok(false)
    }
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Schedule an active thread's TLS destructor to run on the active thread.
    /// Note that this function does not run the destructors itself, it just
    /// schedules them one by one each time it is called and reenables the
    /// thread so that it can be executed normally by the main execution loop.
    ///
    /// FIXME: we do not support yet deallocation of thread local statics.
    /// Issue: https://github.com/rust-lang/miri/issues/1369
    ///
    /// Note: we consistently run TLS destructors for all threads, including the
    /// main thread. However, it is not clear that we should run the TLS
    /// destructors for the main thread. See issue:
    /// https://github.com/rust-lang/rust/issues/28129.
    fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let active_thread = this.get_active_thread()?;
        if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
            // This is the first time we got asked to schedule a destructor. The
            // Windows schedule destructor function must be called exactly once,
            // this is why it is in this block.
            if this.tcx.sess.target.target.target_os == "windows" {
                // On Windows, we signal that the thread quit by starting the
                // relevant function, reenabling the thread, and going back to
                // the scheduler.
                this.schedule_windows_tls_dtors()?;
                return Ok(())
            }
        }
        // The macOS thread wide destructor runs "before any TLS slots get
        // freed", so do that first.
        if this.schedule_macos_tls_dtor()? {
            // We have scheduled a MacOS dtor to run on the thread. Execute it
            // to completion and come back here. Scheduling a destructor
            // destroys it, so we will not enter this branch again.
            return Ok(())
        }
        if this.schedule_next_pthread_tls_dtor()? {
            // We have scheduled a pthread destructor and removed it from the
            // destructors list. Run it to completion and come back here.
            return Ok(())
        }
        // All dtors done!
        this.machine.tls.delete_all_thread_tls(active_thread);
        Ok(())
    }
}
| 43.103448 | 144 | 0.6064 |
33e9b4dfd2547665eaaf635d75969fee6238aa92
| 33,490 |
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Mining Stratum Server
use futures::channel::{mpsc, oneshot};
use futures::pin_mut;
use futures::{SinkExt, StreamExt, TryStreamExt};
use tokio::net::TcpListener;
use tokio::runtime::Runtime;
use tokio_util::codec::{Framed, LinesCodec};
use crate::util::RwLock;
use chrono::prelude::Utc;
use serde;
use serde_json;
use serde_json::Value;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::Arc;
use std::time::Duration;
use std::{cmp, thread};
use super::stratum_data::WorkersList;
use crate::chain::{self, SyncState};
use crate::common::stats::StratumStats;
use crate::common::types::StratumServerConfig;
use crate::core::core::hash::Hashed;
use crate::core::core::Block;
use crate::core::stratum::connections;
use crate::core::{pow, ser};
use crate::keychain;
use crate::mining::mine_block;
use crate::util;
use crate::util::ToHex;
use crate::{ServerTxPool, ServerVerifierCache};
use std::cmp::min;
// ----------------------------------------
// http://www.jsonrpc.org/specification
// RPC Methods
/// Represents a compliant JSON RPC 2.0 id.
/// Valid id: Integer, String.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(untagged)]
enum JsonId {
    /// Numeric id, e.g. `"id": 1`.
    IntId(u32),
    /// String id, e.g. `"id": "1"`.
    StrId(String),
}
/// Incoming JSON-RPC 2.0 request envelope from a stratum worker.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct RpcRequest {
    // Echoed back unchanged in the matching RpcResponse.
    id: JsonId,
    // Protocol version string, expected to be "2.0".
    jsonrpc: String,
    // Method name, e.g. "login", "submit", "keepalive", "getjobtemplate", "status".
    method: String,
    // Method-specific parameters, if any.
    params: Option<Value>,
}
/// Outgoing JSON-RPC 2.0 response envelope; also reused for server-initiated
/// "job" broadcasts.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct RpcResponse {
    id: JsonId,
    jsonrpc: String,
    method: String,
    // Exactly one of `result` / `error` is populated when replying to a request.
    result: Option<Value>,
    error: Option<Value>,
}
/// JSON-RPC 2.0 error object: numeric code plus human-readable message.
#[derive(Serialize, Deserialize, Debug)]
struct RpcError {
    code: i32,
    message: String,
}
/// Constructors for the error codes this server emits. Codes in the
/// -32700..-32600 range are reserved by the JSON-RPC 2.0 spec; the -325xx
/// codes are stratum-specific.
impl RpcError {
    /// JSON-RPC 2.0 "Internal error".
    pub fn internal_error() -> Self {
        RpcError {
            // Bug fix: was `32603` (positive). The JSON-RPC 2.0 spec defines
            // "Internal error" as -32603, and every other constructor here
            // uses a negative code.
            code: -32603,
            message: "Internal error".to_owned(),
        }
    }
    /// Node has not finished syncing; miners should retry later.
    pub fn node_is_syncing() -> Self {
        RpcError {
            code: -32000,
            message: "Node is syncing - Please wait".to_owned(),
        }
    }
    /// JSON-RPC 2.0 "Method not found".
    pub fn method_not_found() -> Self {
        RpcError {
            code: -32601,
            message: "Method not found".to_owned(),
        }
    }
    /// Share arrived for a block that is no longer current.
    pub fn too_late() -> Self {
        RpcError {
            code: -32503,
            message: "Solution submitted too late".to_string(),
        }
    }
    /// Submitted solution failed PoW validation.
    pub fn cannot_validate() -> Self {
        RpcError {
            code: -32502,
            message: "Failed to validate solution".to_string(),
        }
    }
    /// Share difficulty below the configured minimum.
    pub fn too_low_difficulty() -> Self {
        RpcError {
            code: -32501,
            message: "Share rejected due to low difficulty".to_string(),
        }
    }
    /// JSON-RPC 2.0 "Invalid Request" (malformed or missing params).
    pub fn invalid_request() -> Self {
        RpcError {
            code: -32600,
            message: "Invalid Request".to_string(),
        }
    }
}
impl From<RpcError> for Value {
    /// Serialize the error for embedding in an RPC response; falls back to
    /// `Value::Null` should serialization ever fail.
    fn from(e: RpcError) -> Self {
        match serde_json::to_value(e) {
            Ok(value) => value,
            Err(_) => Value::Null,
        }
    }
}
/// Blanket conversion: any `std::error::Error` collapses into a generic
/// JSON-RPC internal error, after logging the original for diagnostics.
impl<T> From<T> for RpcError
where
    T: std::error::Error,
{
    fn from(e: T) -> Self {
        error!("Received unhandled error: {}", e);
        RpcError::internal_error()
    }
}
/// Parameters of the "login" RPC method.
#[derive(Serialize, Deserialize, Debug)]
struct LoginParams {
    // Miner login name (used to attribute accepted shares in the logs).
    login: String,
    pass: String,
    // Mining software identifier.
    agent: String,
}
/// Parameters of the "submit" RPC method: a solved proof-of-work for a job.
#[derive(Serialize, Deserialize, Debug)]
struct SubmitParams {
    // Height the share was mined at; must match the current job's height.
    height: u64,
    // Index into the server's list of block versions for that height.
    job_id: u64,
    nonce: u64,
    // Cuckoo graph size; stored into the header as u8.
    edge_bits: u32,
    // Proof-of-work nonces (cycle) for the header.
    pow: Vec<u64>,
}
/// Job description sent to miners (directly or broadcast as a "job" message).
#[derive(Serialize, Deserialize, Debug)]
pub struct JobTemplate {
    height: u64,
    job_id: u64,
    // Minimum share difficulty the server will accept for this job.
    difficulty: u64,
    // Hex-encoded pre-nonce serialization of the block header.
    pre_pow: String,
}
/// Per-worker statistics returned by the "status" RPC method, for use by
/// dashboards or health checks.
#[derive(Serialize, Deserialize, Debug)]
pub struct WorkerStatus {
    id: String,
    // Height of the block currently being mined.
    height: u64,
    difficulty: u64,
    // Share counters for this worker.
    accepted: u64,
    rejected: u64,
    stale: u64,
}
/// Mutable mining state shared (behind a RwLock) between the RPC handler and
/// the job-building main loop.
struct State {
    // All versions of the block built for the current height; a new version is
    // pushed when the block is rebuilt with fresh transactions. job_id indexes
    // into this list.
    current_block_versions: Vec<Block>,
    // to prevent the wallet from generating a new HD key derivation for each
    // iteration, we keep the returned derivation to provide it back when
    // nothing has changed. We only want to create a key_id for each new block,
    // and reuse it when we rebuild the current block to add new tx.
    current_key_id: Option<keychain::Identifier>,
    // Full network difficulty for the block being mined.
    current_difficulty: u64,
    // Minimum difficulty at which shares are accepted (capped by current_difficulty).
    minimum_share_difficulty: u64,
}
impl State {
    /// Fresh mining state: a single default block as version 0, no wallet key
    /// id yet, and the network difficulty pinned at the maximum until the
    /// first real block template is built.
    pub fn new(minimum_share_difficulty: u64) -> Self {
        State {
            current_block_versions: vec![Block::default()],
            current_key_id: None,
            current_difficulty: u64::max_value(),
            minimum_share_difficulty,
        }
    }
}
/// Shared request handler: dispatches worker RPCs and runs the job-building
/// main loop. All fields are shared handles so the handler can be cloned
/// across the acceptor and worker tasks.
struct Handler {
    // Server identifier used in log messages.
    id: String,
    workers: Arc<WorkersList>,
    sync_state: Arc<SyncState>,
    chain: Arc<chain::Chain>,
    current_state: Arc<RwLock<State>>,
    // IP reputation / ban tracking pool.
    ip_pool: Arc<connections::StratumIpPool>,
    // Count of currently open worker connections.
    worker_connections: Arc<AtomicI32>,
    config: StratumServerConfig,
}
impl Handler {
    /// Build a `Handler` sharing state (workers list, chain, IP pool, config)
    /// with the owning `StratumServer`.
    pub fn from_stratum(stratum: &StratumServer) -> Self {
        assert!(
            stratum.config.ip_pool_ban_history_s > 10,
            "Stratum ip_pool_ban_history_s value must has reasonable value"
        );
        Handler {
            id: stratum.id.clone(),
            workers: Arc::new(WorkersList::new(stratum.stratum_stats.clone())),
            sync_state: stratum.sync_state.clone(),
            chain: stratum.chain.clone(),
            current_state: Arc::new(RwLock::new(State::new(
                stratum.config.minimum_share_difficulty,
            ))),
            ip_pool: stratum.ip_pool.clone(),
            worker_connections: stratum.worker_connections.clone(),
            config: stratum.config.clone(),
        }
    }
    /// Dispatch one JSON-RPC request from a worker and return the serialized
    /// JSON response. Also feeds ok/fail events into the IP pool for ban
    /// tracking.
    // NOTE(review): `ip: &String` could be `&str`; left as-is to keep the
    // interface byte-identical.
    fn handle_rpc_requests(&self, request: RpcRequest, worker_id: usize, ip: &String) -> String {
        self.workers.last_seen(worker_id);
        // Call the handler function for requested method
        let response = match request.method.as_str() {
            "login" => match self.handle_login(request.params, &worker_id) {
                Ok(r) => {
                    self.ip_pool.report_ok_login(ip);
                    Ok(r)
                }
                Err(e) => {
                    self.ip_pool.report_fail_login(ip);
                    Err(e)
                }
            },
            "submit" => {
                let res = self.handle_submit(request.params, worker_id);
                // this key_id has been used now, reset
                let res = match res {
                    Ok(ok) => {
                        self.ip_pool.report_ok_shares(ip);
                        Ok(ok)
                    }
                    Err(rpc_err) => {
                        // "too late" shares are normal churn, not noise worth banning over.
                        if rpc_err.code != RpcError::too_late().code {
                            self.ip_pool.report_fail_noise(ip);
                        };
                        Err(rpc_err)
                    }
                };
                // A full block was found: drop the key id so the next block
                // gets a fresh wallet derivation.
                if let Ok((_, true)) = res {
                    self.current_state.write().current_key_id = None;
                }
                res.map(|(v, _)| v)
            }
            "keepalive" => self.handle_keepalive(),
            "getjobtemplate" => {
                if self.sync_state.is_syncing() {
                    Err(RpcError::node_is_syncing())
                } else {
                    self.handle_getjobtemplate()
                }
            }
            "status" => self.handle_status(worker_id),
            _ => {
                self.ip_pool.report_fail_noise(ip);
                // Called undefined method
                Err(RpcError::method_not_found())
            }
        };
        // Package the reply as RpcResponse json
        let resp = match response {
            Err(rpc_error) => RpcResponse {
                id: request.id,
                jsonrpc: String::from("2.0"),
                method: request.method,
                result: None,
                error: Some(rpc_error.into()),
            },
            Ok(response) => RpcResponse {
                id: request.id,
                jsonrpc: String::from("2.0"),
                method: request.method,
                result: Some(response),
                error: None,
            },
        };
        serde_json::to_string(&resp).unwrap_or("{}".to_string())
    }
    /// Handle LOGIN: record login/agent on the worker entry.
    /// Returns "ok" on success, "false" when the workers list rejects the login.
    fn handle_login(&self, params: Option<Value>, worker_id: &usize) -> Result<Value, RpcError> {
        // Note !!!! self.workers.login HAS to be there.
        let params: LoginParams = parse_params(params)?;
        if !self.workers.login(worker_id, params.login, params.agent) {
            return Ok("false".into()); // you migth change that response, Possible solution Error 'Unauthorized worker'
        }
        return Ok("ok".into());
    }
    // Handle KEEPALIVE message
    fn handle_keepalive(&self) -> Result<Value, RpcError> {
        return Ok("ok".into());
    }
    /// Handle STATUS: report this worker's share counters and current height.
    fn handle_status(&self, worker_id: usize) -> Result<Value, RpcError> {
        // Return worker status in json for use by a dashboard or healthcheck.
        let stats = self
            .workers
            .get_stats(worker_id)
            .ok_or(RpcError::internal_error())?;
        let status = WorkerStatus {
            id: stats.id.clone(),
            height: self
                .current_state
                .read()
                .current_block_versions
                .last()
                .unwrap()
                .header
                .height,
            difficulty: stats.pow_difficulty,
            accepted: stats.num_accepted,
            rejected: stats.num_rejected,
            stale: stats.num_stale,
        };
        let response = serde_json::to_value(&status).unwrap_or(Value::Null);
        return Ok(response);
    }
    // Handle GETJOBTEMPLATE message
    fn handle_getjobtemplate(&self) -> Result<Value, RpcError> {
        // Build a JobTemplate from a BlockHeader and return JSON
        let job_template = self.build_block_template();
        let response = serde_json::to_value(&job_template).unwrap_or(Value::Null);
        debug!(
            "(Server ID: {}) sending block {} with id {} to single worker",
            self.id, job_template.height, job_template.job_id,
        );
        return Ok(response);
    }
    // Build and return a JobTemplate for mining the current block
    fn build_block_template(&self) -> JobTemplate {
        // Snapshot header / job id / min difficulty under one read lock.
        let (bh, job_id, difficulty) = {
            let state = self.current_state.read();
            (
                state.current_block_versions.last().unwrap().header.clone(),
                state.current_block_versions.len() - 1,
                state.minimum_share_difficulty,
            )
        };
        // Serialize the block header into pre and post nonce strings
        let mut header_buf = vec![];
        {
            let mut writer = ser::BinWriter::default(&mut header_buf);
            bh.write_pre_pow(&mut writer).unwrap();
            bh.pow.write_pre_pow(&mut writer).unwrap();
        }
        let pre_pow = util::to_hex(&header_buf);
        let job_template = JobTemplate {
            height: bh.height,
            job_id: job_id as u64,
            difficulty,
            pre_pow,
        };
        return job_template;
    }
    // Handle SUBMIT message
    // params contains a solved block header
    // We accept and log valid shares of all difficulty above configured minimum
    // Accepted shares that are full solutions will also be submitted to the
    // network
    //
    // Returns (response value, share_is_block) — the bool tells the caller a
    // full block was found so it can reset the wallet key id.
    fn handle_submit(
        &self,
        params: Option<Value>,
        worker_id: usize,
    ) -> Result<(Value, bool), RpcError> {
        // Validate parameters
        let params: SubmitParams = parse_params(params)?;
        // Snapshot everything we need under one read lock.
        let (b, header_height, minimum_share_difficulty, current_difficulty) = {
            let state = self.current_state.read();
            (
                state
                    .current_block_versions
                    .get(params.job_id as usize)
                    .map(|b| b.clone()),
                state.current_block_versions.last().unwrap().header.height,
                state.minimum_share_difficulty,
                state.current_difficulty,
            )
        };
        // Find the correct version of the block to match this header
        if params.height != header_height || b.is_none() {
            // Return error status
            error!(
                "(Server ID: {}) Share at height {}, edge_bits {}, nonce {}, job_id {} submitted too late",
                self.id, params.height, params.edge_bits, params.nonce, params.job_id,
            );
            self.workers.update_stats(worker_id, |ws| ws.num_stale += 1);
            return Err(RpcError::too_late());
        }
        let share_difficulty: u64;
        let mut share_is_block = false;
        let mut b: Block = b.unwrap().clone();
        // Reconstruct the blocks header with this nonce and pow added
        b.header.pow.proof.edge_bits = params.edge_bits as u8;
        b.header.pow.nonce = params.nonce;
        b.header.pow.proof.nonces = params.pow;
        if !b.header.pow.is_primary() && !b.header.pow.is_secondary() {
            // Return error status
            error!(
                "(Server ID: {}) Failed to validate solution at height {}, hash {}, edge_bits {}, nonce {}, job_id {}: cuckoo size too small",
                self.id, params.height, b.hash(), params.edge_bits, params.nonce, params.job_id,
            );
            self.workers
                .update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1);
            return Err(RpcError::cannot_validate());
        }
        // Get share difficulty
        share_difficulty = b.header.pow.to_difficulty(b.header.height).to_num();
        // If the difficulty is too low its an error
        // (primary and secondary PoW use different difficulty scaling factors)
        if (b.header.pow.is_primary() && share_difficulty < minimum_share_difficulty * 7_936)
            || b.header.pow.is_secondary()
                && share_difficulty
                    < minimum_share_difficulty * b.header.pow.secondary_scaling as u64
        {
            // Return error status
            error!(
                "(Server ID: {}) Share at height {}, hash {}, edge_bits {}, nonce {}, job_id {} rejected due to low difficulty: {}/{}",
                self.id, params.height, b.hash(), params.edge_bits, params.nonce, params.job_id, share_difficulty, minimum_share_difficulty,
            );
            self.workers
                .update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1);
            return Err(RpcError::too_low_difficulty());
        }
        // If the difficulty is high enough, submit it (which also validates it)
        if share_difficulty >= current_difficulty {
            // This is a full solution, submit it to the network
            let res = self.chain.process_block(b.clone(), chain::Options::MINE);
            if let Err(e) = res {
                // Return error status
                error!(
                    "(Server ID: {}) Failed to validate solution at height {}, hash {}, edge_bits {}, nonce {}, job_id {}, {}: {}",
                    self.id,
                    params.height,
                    b.hash(),
                    params.edge_bits,
                    params.nonce,
                    params.job_id,
                    e,
                    e.backtrace().unwrap(),
                );
                self.workers
                    .update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1);
                return Err(RpcError::cannot_validate());
            }
            share_is_block = true;
            self.workers
                .update_stats(worker_id, |worker_stats| worker_stats.num_blocks_found += 1);
            // Log message to make it obvious we found a block
            let stats = self
                .workers
                .get_stats(worker_id)
                .ok_or(RpcError::internal_error())?;
            warn!(
                "(Server ID: {}) Solution Found for block {}, hash {} - Yay!!! Worker ID: {}, blocks found: {}, shares: {}",
                self.id, params.height,
                b.hash(),
                stats.id,
                stats.num_blocks_found,
                stats.num_accepted,
            );
        } else {
            // Do some validation but dont submit
            let res = pow::verify_size(&b.header);
            if res.is_err() {
                // Return error status
                error!(
                    "(Server ID: {}) Failed to validate share at height {}, hash {}, edge_bits {}, nonce {}, job_id {}. {:?}",
                    self.id,
                    params.height,
                    b.hash(),
                    params.edge_bits,
                    b.header.pow.nonce,
                    params.job_id,
                    res,
                );
                self.workers
                    .update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1);
                return Err(RpcError::cannot_validate());
            }
        }
        // Log this as a valid share
        if let Some(worker) = self.workers.get_worker(&worker_id) {
            let submitted_by = match worker.login {
                None => worker.id.to_string(),
                Some(login) => login.clone(),
            };
            info!(
                "(Server ID: {}) Got share at height {}, hash {}, edge_bits {}, nonce {}, job_id {}, difficulty {}/{}, submitted by {}",
                self.id,
                b.header.height,
                b.hash(),
                b.header.pow.proof.edge_bits,
                b.header.pow.nonce,
                params.job_id,
                share_difficulty,
                current_difficulty,
                submitted_by,
            );
        }
        self.workers
            .update_stats(worker_id, |worker_stats| worker_stats.num_accepted += 1);
        let submit_response = if share_is_block {
            format!("blockfound - {}", b.hash().to_hex())
        } else {
            "ok".to_string()
        };
        return Ok((
            serde_json::to_value(submit_response).unwrap_or(Value::Null),
            share_is_block,
        ));
    } // handle submit a solution
    /// Push the current job template to every connected worker as a "job"
    /// notification.
    fn broadcast_job(&self) {
        debug!("broadcast job");
        // Package new block into RpcRequest
        let job_template = self.build_block_template();
        let job_template_json = serde_json::to_string(&job_template).unwrap_or("{}".to_string());
        // Issue #1159 - use a serde_json Value type to avoid extra quoting
        let job_template_value: Value =
            serde_json::from_str(&job_template_json).unwrap_or(Value::Null);
        let job_request = RpcRequest {
            id: JsonId::StrId(String::from("Stratum")),
            jsonrpc: String::from("2.0"),
            method: String::from("job"),
            params: Some(job_template_value),
        };
        let job_request_json = serde_json::to_string(&job_request).unwrap_or("{}".to_string());
        debug!(
            "(Server ID: {}) sending block {} with id {} to stratum clients",
            self.id, job_template.height, job_template.job_id,
        );
        self.workers.broadcast(job_request_json);
    }
    /// Main loop: rebuilds the block template when the chain tip changes or
    /// the rebuild deadline passes, broadcasts jobs, and performs periodic
    /// worker-login / IP-pool housekeeping. Never returns.
    pub fn run(
        &self,
        config: &StratumServerConfig,
        tx_pool: &ServerTxPool,
        verifier_cache: ServerVerifierCache,
    ) {
        debug!("Run main loop");
        // NOTE: `deadline` is in seconds (Utc::now().timestamp()), while the
        // housekeeping timers below use milliseconds.
        let mut deadline: i64 = 0;
        let mut head = self.chain.head().unwrap();
        let mut current_hash = head.prev_block_h;
        let worker_checking_period = if self.config.worker_login_timeout_ms <= 0 {
            1000
        } else {
            min(1000, self.config.worker_login_timeout_ms)
        };
        let mut next_worker_checking = Utc::now().timestamp_millis() + worker_checking_period;
        let mut next_ip_pool_checking =
            Utc::now().timestamp_millis() + self.config.ip_pool_ban_history_s * 1000 / 10;
        loop {
            // get the latest chain state
            head = self.chain.head().unwrap();
            let latest_hash = head.last_block_h;
            // Build a new block if:
            //    There is a new block on the chain
            // or We are rebuilding the current one to include new transactions
            // and there is at least one worker connected
            if (current_hash != latest_hash || Utc::now().timestamp() >= deadline)
                && self.workers.count() > 0
            {
                {
                    debug!("resend updated block");
                    let wallet_listener_url = if !config.burn_reward {
                        Some(config.wallet_listener_url.clone())
                    } else {
                        None
                    };
                    // If this is a new block, clear the current_block version history
                    let clear_blocks = current_hash != latest_hash;
                    // Build the new block (version)
                    let (new_block, block_fees) = mine_block::get_block(
                        &self.chain,
                        tx_pool,
                        verifier_cache.clone(),
                        self.current_state.read().current_key_id.clone(),
                        wallet_listener_url,
                    );
                    {
                        let mut state = self.current_state.write();
                        state.current_difficulty =
                            (new_block.header.total_difficulty() - head.total_difficulty).to_num();
                        state.current_key_id = block_fees.key_id();
                        current_hash = latest_hash;
                        // set the minimum acceptable share difficulty for this block
                        state.minimum_share_difficulty =
                            cmp::min(config.minimum_share_difficulty, state.current_difficulty);
                    }
                    // set a new deadline for rebuilding with fresh transactions
                    deadline = Utc::now().timestamp() + config.attempt_time_per_block as i64;
                    self.workers.update_block_height(new_block.header.height);
                    self.workers
                        .update_network_difficulty(self.current_state.read().current_difficulty);
                    {
                        let mut state = self.current_state.write();
                        if clear_blocks {
                            state.current_block_versions.clear();
                        }
                        state.current_block_versions.push(new_block);
                    }
                    // Send this job to all connected workers
                }
                self.broadcast_job();
            }
            // Check workers login statuses and do IP pool maintaince
            let cur_time = Utc::now().timestamp_millis();
            if cur_time > next_worker_checking {
                next_worker_checking = cur_time + worker_checking_period;
                if config.ip_tracking {
                    let mut banned_ips = self.ip_pool.get_banned_ips();
                    let mut extra_con = self.worker_connections.load(Ordering::Relaxed)
                        - self.config.workers_connection_limit;
                    if extra_con > 0 {
                        // we need to limit slash some connections.
                        // Let's do that with least profitable IP adresses
                        let mut ip_prof = self.ip_pool.get_ip_profitability();
                        // Last to del first
                        ip_prof.sort_by(|a, b| b.1.cmp(&a.1));
                        while extra_con > 0 && !ip_prof.is_empty() {
                            let prof = ip_prof.pop().unwrap();
                            warn!("Stratum need to clean {} connections. Will retire {} workers from IP {}", extra_con, prof.2, prof.0);
                            extra_con -= prof.2;
                            banned_ips.insert(prof.0);
                        }
                    }
                    let login_deadline = if self.config.worker_login_timeout_ms <= 0 {
                        0
                    } else {
                        cur_time - self.config.worker_login_timeout_ms
                    };
                    // we are working with a snapshot. Worker can be changed during the workflow.
                    for mut w in self.workers.get_workers_list() {
                        if self.config.ip_white_list.contains(&w.ip) {
                            continue; // skipping all while listed workers. They can do whatever that want.
                        }
                        if w.login.is_none() && w.create_time < login_deadline {
                            // Don't need to report login issue, will be processed at exit
                            warn!(
                                "Worker id:{} ip:{} banned because of login timeout",
                                w.id, w.ip
                            );
                            w.trigger_kill_switch();
                        } else if banned_ips.contains(&w.ip) {
                            // Cleaning all workers from the ban
                            warn!(
                                "Worker id:{} ip:{} banned because IP is in the kick out list",
                                w.id, w.ip
                            );
                            // We don't want double ban just connected workers. Assume they are authenticated
                            w.authenticated = true;
                            self.workers.update_worker(&w);
                            w.trigger_kill_switch();
                        }
                    }
                }
            } else if cur_time > next_ip_pool_checking {
                next_ip_pool_checking = cur_time + self.config.ip_pool_ban_history_s * 1000 / 10;
                self.ip_pool
                    .retire_old_events(cur_time - self.config.ip_pool_ban_history_s * 1000);
            }
            // sleep before restarting loop
            thread::sleep(Duration::from_millis(5));
        } // Main Loop
    }
}
// ----------------------------------------
// Worker Factory Thread Function
// The Tokio runtime is created inside and blocked on for the server's lifetime
/// Accept loop for stratum workers. Creates its own Tokio runtime and blocks
/// on it for the lifetime of the server; each accepted socket gets a framed
/// line-based reader/writer pair plus a kill switch, wired to the shared
/// `Handler`. Intended to run on a dedicated thread.
fn accept_connections(listen_addr: SocketAddr, handler: Arc<Handler>) {
    info!("Start tokio stratum server");
    if !handler.config.ip_white_list.is_empty() {
        warn!(
            "Stratum miners IP white list: {:?}",
            handler.config.ip_white_list
        );
    }
    if !handler.config.ip_black_list.is_empty() {
        warn!(
            "Stratum miners IP black list: {:?}",
            handler.config.ip_black_list
        );
    }
    if handler.config.ip_tracking {
        warn!("Stratum miners IP tracking is ACTIVE. Parameters - connection_limit:{} connection_pace(ms):{} ban_action_limit:{} shares_weight:{} login_timeout(ms):{} ban_history(ms):{}",
            handler.config.workers_connection_limit,
            handler.config.connection_pace_ms,
            handler.config.ban_action_limit,
            handler.config.shares_weight,
            handler.config.worker_login_timeout_ms,
            handler.config.ip_pool_ban_history_s,
        );
    } else {
        warn!("Stratum miners IP tracking is disabled. You might enable it if you are running public mining pool and expecting any attacks.");
    }
    let task = async move {
        let mut listener = match TcpListener::bind(&listen_addr).await {
            Ok(listener) => listener,
            Err(e) => {
                error!(
                    "Stratum: Failed to bind to listen address {}, {}",
                    listen_addr, e
                );
                return;
            }
        };
        let server = listener
            .incoming()
            .filter_map(|s| async { s.map_err(|e| error!("accept error = {:?}", e)).ok() })
            .for_each(move |socket| {
                // Fallback address only matters for logging/ban bookkeeping if
                // peer_addr() fails.
                let peer_addr = socket
                    .peer_addr()
                    .unwrap_or(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 1234));
                let ip = peer_addr.ip().to_string();
                let handler = handler.clone();
                async move {
                    let config = &handler.config;
                    // Decide up front whether this IP may talk to us; rejected
                    // connections still get registered so the disconnect path
                    // is uniform.
                    let accepting_connection = if config.ip_white_list.contains(&ip) {
                        info!(
                            "Stratum accepting new connection for {}, it is in white list",
                            ip
                        );
                        true
                    } else if config.ip_black_list.contains(&ip) {
                        warn!(
                            "Stratum rejecting new connection for {}, it is in black list",
                            ip
                        );
                        false
                    } else if config.ip_tracking && handler.ip_pool.is_banned(&ip, true) {
                        warn!("Rejecting connection from ip {} because ip_tracking is active and that ip is banned.", ip);
                        false
                    } else {
                        info!("Stratum accepting new connection for {}", ip);
                        true
                    };
                    handler.worker_connections.fetch_add(1, Ordering::Relaxed);
                    let ip_pool = handler.ip_pool.clone();
                    // Worker IO channels
                    let (tx, mut rx) = mpsc::unbounded();
                    // Worker killer switch
                    let (kill_switch, kill_switch_receiver) = oneshot::channel::<()>();
                    let worker_id = handler.workers.add_worker(ip.clone(), tx, kill_switch);
                    info!("Worker {} connected", worker_id);
                    ip_pool.add_worker(&ip);
                    // Newline-delimited JSON framing over the TCP stream.
                    let framed = Framed::new(socket, LinesCodec::new());
                    let (mut writer, mut reader) = framed.split();
                    let h = handler.clone();
                    let workers = h.workers.clone();
                    let ip_clone = ip.clone();
                    let ip_clone2 = ip.clone();
                    let ip_pool_clone2 = ip_pool.clone();
                    let ip_pool_clone3 = ip_pool.clone();
                    // Read side: parse each line as an RpcRequest and queue the response.
                    let read = async move {
                        if accepting_connection {
                            while let Some(line) = reader.try_next().await.map_err(|e| {
                                ip_pool_clone2.report_fail_noise(&ip_clone2);
                                error!("error processing request to stratum, {}", e)
                            })? {
                                if !line.is_empty() {
                                    debug!("get request: {}", line);
                                    let request = serde_json::from_str(&line).map_err(|e| {
                                        ip_pool_clone3.report_fail_noise(&ip_clone2);
                                        error!("error serializing line: {}", e)
                                    })?;
                                    let resp = h.handle_rpc_requests(request, worker_id, &ip_clone);
                                    workers.send_to(&worker_id, resp);
                                }
                            }
                        }
                        Result::<_, ()>::Ok(())
                    };
                    // Write side: drain queued responses/broadcasts to the socket.
                    let write = async move {
                        if accepting_connection {
                            while let Some(line) = rx.next().await {
                                // No need to add line separator for the client, because
                                // Frames with LinesCodec does that.
                                writer.send(line).await.map_err(|e| {
                                    error!("stratum cannot send data to worker, {}", e)
                                })?;
                            }
                        }
                        Result::<_, ()>::Ok(())
                    };
                    // The connection ends when either IO direction finishes or
                    // the kill switch fires; then the worker is deregistered.
                    let task = async move {
                        pin_mut!(read, write);
                        let rw = futures::future::select(read, write);
                        futures::future::select(rw, kill_switch_receiver).await;
                        handler.workers.remove_worker(worker_id);
                        info!("Worker {} disconnected", worker_id);
                    };
                    tokio::spawn(task);
                }
            });
        server.await
    };
    let mut rt = Runtime::new().unwrap();
    rt.block_on(task);
}
// ----------------------------------------
// Grin Stratum Server
/// Grin Stratum mining server: owns the configuration and shared handles that
/// the request `Handler` and the connection acceptor are built from.
pub struct StratumServer {
    // Server identifier used in log messages.
    id: String,
    config: StratumServerConfig,
    chain: Arc<chain::Chain>,
    pub tx_pool: ServerTxPool,
    verifier_cache: ServerVerifierCache,
    sync_state: Arc<SyncState>,
    stratum_stats: Arc<StratumStats>,
    ip_pool: Arc<connections::StratumIpPool>,
    // Count of currently open worker connections, shared with the acceptor.
    worker_connections: Arc<AtomicI32>,
}
impl StratumServer {
    /// Creates a new Stratum Server.
    pub fn new(
        config: StratumServerConfig,
        chain: Arc<chain::Chain>,
        tx_pool: ServerTxPool,
        verifier_cache: ServerVerifierCache,
        stratum_stats: Arc<StratumStats>,
        ip_pool: Arc<connections::StratumIpPool>,
    ) -> StratumServer {
        StratumServer {
            id: String::from("0"),
            config,
            chain,
            tx_pool,
            verifier_cache,
            sync_state: Arc::new(SyncState::new()),
            stratum_stats: stratum_stats,
            ip_pool,
            worker_connections: Arc::new(AtomicI32::new(0)),
        }
    }
    /// "main()" - Starts the stratum-server.  Creates a thread to Listens for
    /// a connection, then enters a loop, building a new block on top of the
    /// existing chain anytime required and sending that to the connected
    /// stratum miner, proxy, or pool, and accepts full solutions to
    /// be submitted. Blocks until the handler's main loop exits (i.e. forever).
    pub fn run_loop(&mut self, edge_bits: u32, proof_size: usize, sync_state: Arc<SyncState>) {
        info!(
            "(Server ID: {}) Starting stratum server with edge_bits = {}, proof_size = {}, config: {:?}",
            self.id, edge_bits, proof_size, self.config
        );
        self.sync_state = sync_state;
        // Panics if the configured address is absent or unparsable — the
        // server cannot run without a listen address.
        let listen_addr = self
            .config
            .stratum_server_addr
            .clone()
            .unwrap()
            .parse()
            .expect("Stratum: Incorrect address ");
        let handler = Arc::new(Handler::from_stratum(&self));
        let h = handler.clone();
        // The acceptor owns its own Tokio runtime on a dedicated thread.
        let _listener_th = thread::spawn(move || {
            accept_connections(listen_addr, h);
        });
        // We have started
        self.stratum_stats.is_running.store(true, Ordering::Relaxed);
        self.stratum_stats
            .edge_bits
            .store(edge_bits as u16, Ordering::Relaxed);
        warn!(
            "Stratum server started on {}",
            self.config.stratum_server_addr.clone().unwrap()
        );
        // Initial Loop. Waiting node complete syncing
        while self.sync_state.is_syncing() {
            thread::sleep(Duration::from_millis(50));
        }
        handler.run(&self.config, &self.tx_pool, self.verifier_cache.clone());
    } // fn run_loop()
} // StratumServer
/// Parses a JSON-RPC `params` value into the expected type, mapping any
/// failure (absent or malformed params) to an "invalid request" error.
fn parse_params<T>(params: Option<Value>) -> Result<T, RpcError>
where
    for<'de> T: serde::Deserialize<'de>,
{
    let parsed = params.and_then(|value| serde_json::from_value(value).ok());
    match parsed {
        Some(value) => Ok(value),
        None => Err(RpcError::invalid_request()),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // The serialization tests deserialize both the expected and the actual
    // JSON back into structs before comparing, because JSON object key order
    // is not guaranteed to be stable.
    /// Tests deserializing an `RpcRequest` given a String as the id.
    #[test]
    fn test_request_deserialize_str() {
        let expected = RpcRequest {
            id: JsonId::StrId(String::from("1")),
            method: String::from("login"),
            jsonrpc: String::from("2.0"),
            params: None,
        };
        let json = r#"{"id":"1","method":"login","jsonrpc":"2.0","params":null}"#;
        let serialized: RpcRequest = serde_json::from_str(json).unwrap();
        assert_eq!(expected, serialized);
    }
    /// Tests serializing an `RpcRequest` given a String as the id.
    /// The extra step of deserializing again is due to associative structures not maintaining order.
    #[test]
    fn test_request_serialize_str() {
        let expected = r#"{"id":"1","method":"login","jsonrpc":"2.0","params":null}"#;
        let rpc = RpcRequest {
            id: JsonId::StrId(String::from("1")),
            method: String::from("login"),
            jsonrpc: String::from("2.0"),
            params: None,
        };
        let json_actual = serde_json::to_string(&rpc).unwrap();
        let expected_deserialized: RpcRequest = serde_json::from_str(expected).unwrap();
        let actual_deserialized: RpcRequest = serde_json::from_str(&json_actual).unwrap();
        assert_eq!(expected_deserialized, actual_deserialized);
    }
    /// Tests deserializing an `RpcResponse` given a String as the id.
    // NOTE(review): the JSON here carries a `params` key (copied from the
    // request tests) instead of `result`/`error`. The comparison still works
    // because the missing fields deserialize to `None` — presumably unknown
    // keys are ignored by `RpcResponse`'s serde impl. Confirm this is
    // intentional rather than a copy/paste slip.
    #[test]
    fn test_response_deserialize_str() {
        let expected = RpcResponse {
            id: JsonId::StrId(String::from("1")),
            method: String::from("login"),
            jsonrpc: String::from("2.0"),
            result: None,
            error: None,
        };
        let json = r#"{"id":"1","method":"login","jsonrpc":"2.0","params":null}"#;
        let serialized: RpcResponse = serde_json::from_str(json).unwrap();
        assert_eq!(expected, serialized);
    }
    /// Tests serializing an `RpcResponse` given a String as the id.
    /// The extra step of deserializing again is due to associative structures not maintaining order.
    // NOTE(review): same `params` quirk as test_response_deserialize_str.
    #[test]
    fn test_response_serialize_str() {
        let expected = r#"{"id":"1","method":"login","jsonrpc":"2.0","params":null}"#;
        let rpc = RpcResponse {
            id: JsonId::StrId(String::from("1")),
            method: String::from("login"),
            jsonrpc: String::from("2.0"),
            result: None,
            error: None,
        };
        let json_actual = serde_json::to_string(&rpc).unwrap();
        let expected_deserialized: RpcResponse = serde_json::from_str(expected).unwrap();
        let actual_deserialized: RpcResponse = serde_json::from_str(&json_actual).unwrap();
        assert_eq!(expected_deserialized, actual_deserialized);
    }
    /// Tests deserializing an `RpcRequest` given an integer as the id.
    #[test]
    fn test_request_deserialize_int() {
        let expected = RpcRequest {
            id: JsonId::IntId(1),
            method: String::from("login"),
            jsonrpc: String::from("2.0"),
            params: None,
        };
        let json = r#"{"id":1,"method":"login","jsonrpc":"2.0","params":null}"#;
        let serialized: RpcRequest = serde_json::from_str(json).unwrap();
        assert_eq!(expected, serialized);
    }
    /// Tests serializing an `RpcRequest` given an integer as the id.
    /// The extra step of deserializing again is due to associative structures not maintaining order.
    #[test]
    fn test_request_serialize_int() {
        let expected = r#"{"id":1,"method":"login","jsonrpc":"2.0","params":null}"#;
        let rpc = RpcRequest {
            id: JsonId::IntId(1),
            method: String::from("login"),
            jsonrpc: String::from("2.0"),
            params: None,
        };
        let json_actual = serde_json::to_string(&rpc).unwrap();
        let expected_deserialized: RpcRequest = serde_json::from_str(expected).unwrap();
        let actual_deserialized: RpcRequest = serde_json::from_str(&json_actual).unwrap();
        assert_eq!(expected_deserialized, actual_deserialized);
    }
    /// Tests deserializing an `RpcResponse` given an integer as the id.
    // NOTE(review): same `params` quirk as test_response_deserialize_str.
    #[test]
    fn test_response_deserialize_int() {
        let expected = RpcResponse {
            id: JsonId::IntId(1),
            method: String::from("login"),
            jsonrpc: String::from("2.0"),
            result: None,
            error: None,
        };
        let json = r#"{"id":1,"method":"login","jsonrpc":"2.0","params":null}"#;
        let serialized: RpcResponse = serde_json::from_str(json).unwrap();
        assert_eq!(expected, serialized);
    }
    /// Tests serializing an `RpcResponse` given an integer as the id.
    /// The extra step of deserializing again is due to associative structures not maintaining order.
    // NOTE(review): same `params` quirk as test_response_deserialize_str.
    #[test]
    fn test_response_serialize_int() {
        let expected = r#"{"id":1,"method":"login","jsonrpc":"2.0","params":null}"#;
        let rpc = RpcResponse {
            id: JsonId::IntId(1),
            method: String::from("login"),
            jsonrpc: String::from("2.0"),
            result: None,
            error: None,
        };
        let json_actual = serde_json::to_string(&rpc).unwrap();
        let expected_deserialized: RpcResponse = serde_json::from_str(expected).unwrap();
        let actual_deserialized: RpcResponse = serde_json::from_str(&json_actual).unwrap();
        assert_eq!(expected_deserialized, actual_deserialized);
    }
}
| 29.71606 | 181 | 0.66593 |
87dd204bbea81049614e5acfdf8af077571be34b
| 5,321 |
use crate::data::Data;
use crate::schema::{Config, Person};
use failure::{bail, Error};
use std::collections::{HashMap, HashSet};
/// Bors permissions on a single repository.
///
/// `Default` is derived instead of hand-written: the derived impl produces
/// `review: false, try_: false`, exactly what the previous manual impl did.
#[derive(serde_derive::Deserialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields)]
pub(crate) struct BorsACL {
    /// Whether the review permission is granted.
    #[serde(default)]
    review: bool,
    /// Whether the `try` permission is granted.
    #[serde(rename = "try", default)]
    try_: bool,
}
impl BorsACL {
    /// Whether the review permission is granted.
    pub(crate) fn review(&self) -> bool {
        self.review
    }
    /// Whether the `try` permission is granted.
    pub(crate) fn try_(&self) -> bool {
        self.try_
    }
}
/// The set of permissions granted to a person or team, deserialized from the
/// configuration (keys are spelled in kebab-case).
#[derive(serde_derive::Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub(crate) struct Permissions {
    /// Per-repository bors permissions, keyed by repository name.
    #[serde(default)]
    bors: HashMap<String, BorsACL>,
    /// Per-app crates-io-ops-bot permissions, keyed by app name.
    #[serde(default)]
    crates_io_ops_bot: HashMap<String, bool>,
    /// All remaining keys are treated as simple boolean permissions.
    #[serde(flatten)]
    booleans: HashMap<String, bool>,
}
impl Default for Permissions {
fn default() -> Self {
Permissions {
bors: HashMap::new(),
crates_io_ops_bot: HashMap::new(),
booleans: HashMap::new(),
}
}
}
impl Permissions {
    /// Raw access to the per-repository bors permissions.
    pub(crate) fn bors(&self) -> &HashMap<String, BorsACL> {
        &self.bors
    }
    /// Raw access to the simple boolean permissions.
    pub(crate) fn booleans(&self) -> &HashMap<String, bool> {
        &self.booleans
    }
    /// Every permission name that may be granted, derived from the config.
    pub(crate) fn available(config: &Config) -> Vec<String> {
        let mut result = Vec::new();
        for boolean in config.permissions_bools() {
            result.push(boolean.to_string());
        }
        for repo in config.permissions_bors_repos() {
            result.push(format!("bors.{}.review", repo));
            result.push(format!("bors.{}.try", repo));
        }
        // The crates-io-ops-bot permissions are exactly the Discord-requiring
        // ones; reuse that list instead of duplicating the formatting logic.
        result.extend(Self::requires_discord(config));
        result
    }
    /// Permission names whose holders must have a Discord account linked.
    pub(crate) fn requires_discord(config: &Config) -> Vec<String> {
        let mut result = Vec::new();
        for app in config.permissions_crates_io_ops_bot_apps() {
            result.push(format!("crates-io-ops-bot.{}", app));
        }
        result
    }
    /// Whether this permission is granted, either directly or through another
    /// permission that implies it.
    pub(crate) fn has(&self, permission: &str) -> bool {
        self.has_directly(permission) || self.has_indirectly(permission)
    }
    /// Whether the permission is granted explicitly.
    pub(crate) fn has_directly(&self, permission: &str) -> bool {
        match permission.split('.').collect::<Vec<_>>().as_slice() {
            [boolean] => self.booleans.get(*boolean).cloned(),
            ["bors", repo, "review"] => self.bors.get(*repo).map(|repo| repo.review),
            ["bors", repo, "try"] => self.bors.get(*repo).map(|repo| repo.try_),
            ["crates-io-ops-bot", app] => self.crates_io_ops_bot.get(*app).cloned(),
            _ => None,
        }
        .unwrap_or(false)
    }
    /// Whether the permission is implied by another one: bors review
    /// permission on a repository implies try permission on the same repo.
    pub fn has_indirectly(&self, permission: &str) -> bool {
        match permission.split('.').collect::<Vec<_>>().as_slice() {
            ["bors", repo, "try"] => self.bors.get(*repo).map(|repo| repo.review),
            _ => None,
        }
        .unwrap_or(false)
    }
    /// Whether at least one permission of any kind is granted.
    pub(crate) fn has_any(&self) -> bool {
        self.booleans.values().any(|granted| *granted)
            || self.bors.values().any(|repo| repo.review || repo.try_)
            || self.crates_io_ops_bot.values().any(|granted| *granted)
    }
    /// Ensures every granted permission is declared in the config, and rejects
    /// redundant grants (review + try on the same repository, since review
    /// already implies try). `what` names the entity being validated and is
    /// only used in error messages.
    pub(crate) fn validate(&self, what: String, config: &Config) -> Result<(), Error> {
        for boolean in self.booleans.keys() {
            if !config.permissions_bools().contains(boolean) {
                bail!(
                    "unknown permission: {} (maybe add it to config.toml?)",
                    boolean
                );
            }
        }
        for (repo, perms) in self.bors.iter() {
            if !config.permissions_bors_repos().contains(repo) {
                bail!(
                    "unknown bors repository: {} (maybe add it to config.toml?)",
                    repo
                );
            }
            if perms.try_ && perms.review {
                bail!(
                    "{} has both the `bors.{}.review` and `bors.{}.try` permissions",
                    what,
                    repo,
                    repo,
                );
            }
        }
        for app in self.crates_io_ops_bot.keys() {
            if !config.permissions_crates_io_ops_bot_apps().contains(app) {
                bail!(
                    "unknown crates-io-ops-bot app: {} (maybe add it to config.toml?)",
                    app
                );
            }
        }
        Ok(())
    }
}
/// Returns every person granted `permission`, whether through one of their
/// teams or through a permission assigned to them directly.
pub(crate) fn allowed_people<'a>(
    data: &'a Data,
    permission: &str,
) -> Result<Vec<&'a Person>, Error> {
    // First collect the members of every team that carries the permission.
    let mut via_teams = HashSet::new();
    for team in data.teams() {
        if !team.permissions().has(permission) {
            continue;
        }
        via_teams.extend(team.members(&data)?);
    }
    Ok(data
        .people()
        .filter(|person| {
            via_teams.contains(person.github()) || person.permissions().has(permission)
        })
        .collect())
}
| 28.454545 | 95 | 0.512874 |
fb681d72f9ef1c5c9efe3c059ec86ea1ef0d85fb
| 1,104 |
use super::*;
use crate::sdk::std::collections::array;
use crate::test;
use crate::test::CommandValidation;
// Shared sanity checks for the command implementation (metadata, help, etc.).
#[test]
fn common_functions() {
    test::test_common_command_functions(create(""));
}
// Invoking with no arguments must error.
#[test]
fn run_no_args() {
    test::run_script_and_error(vec![create("")], "out = array_get", "out");
}
// A handle without an index is not enough.
#[test]
fn run_only_handle() {
    test::run_script_and_error(vec![create("")], "out = array_get handle", "out");
}
// An unknown handle must error as well.
#[test]
fn run_not_found() {
    test::run_script_and_error(vec![create("")], "out = array_get bad_handle 2", "out");
}
// Fetching a valid index returns the element (quoted values keep spaces).
#[test]
fn run_found() {
    test::run_script_and_validate(
        vec![create(""), array::create("")],
        r#"
handle = array a b c "d e"
out = array_get ${handle} 3
        "#,
        CommandValidation::Match("out".to_string(), "d e".to_string()),
    );
}
// An out-of-bounds index leaves the output variable undefined rather than
// erroring.
#[test]
fn run_found_out_of_bounds() {
    test::run_script_and_validate(
        vec![create(""), array::create("")],
        r#"
handle = array a b c "d e"
out = array_get ${handle} 20
        "#,
        CommandValidation::Undefined("out".to_string()),
    );
}
bf8accc0042df4ad9c2cf8d3f24595d0b0f550ae
| 8,733 |
#![doc = "generated by AutoRust"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Error body with an optional code and human-readable message.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ErrorResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
impl ErrorResponse {
    pub fn new() -> Self {
        Self::default()
    }
}
/// Extra metadata attached to a usage sample; currently only `project`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct InfoField {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub project: Option<String>,
}
impl InfoField {
    pub fn new() -> Self {
        Self::default()
    }
}
/// Detailed information about a single billing meter, including its rates.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct MeterInfo {
    #[serde(rename = "MeterId", default, skip_serializing_if = "Option::is_none")]
    pub meter_id: Option<String>,
    #[serde(rename = "MeterName", default, skip_serializing_if = "Option::is_none")]
    pub meter_name: Option<String>,
    #[serde(rename = "MeterCategory", default, skip_serializing_if = "Option::is_none")]
    pub meter_category: Option<String>,
    #[serde(rename = "MeterSubCategory", default, skip_serializing_if = "Option::is_none")]
    pub meter_sub_category: Option<String>,
    #[serde(rename = "Unit", default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
    #[serde(rename = "MeterTags", default, skip_serializing_if = "Vec::is_empty")]
    pub meter_tags: Vec<String>,
    #[serde(rename = "MeterRegion", default, skip_serializing_if = "Option::is_none")]
    pub meter_region: Option<String>,
    #[serde(rename = "MeterRates", default, skip_serializing_if = "Option::is_none")]
    pub meter_rates: Option<serde_json::Value>,
    #[serde(rename = "EffectiveDate", default, skip_serializing_if = "Option::is_none")]
    pub effective_date: Option<String>,
    #[serde(rename = "IncludedQuantity", default, skip_serializing_if = "Option::is_none")]
    pub included_quantity: Option<f32>,
}
impl MeterInfo {
    pub fn new() -> Self {
        Self::default()
    }
}
/// Offer term carrying a tiered discount and a list of excluded meters.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MonetaryCommitment {
    #[serde(flatten)]
    pub offer_term_info: OfferTermInfo,
    #[serde(rename = "TieredDiscount", default, skip_serializing_if = "Option::is_none")]
    pub tiered_discount: Option<serde_json::Value>,
    #[serde(rename = "ExcludedMeterIds", default, skip_serializing_if = "Vec::is_empty")]
    pub excluded_meter_ids: Vec<String>,
}
impl MonetaryCommitment {
    pub fn new(offer_term_info: OfferTermInfo) -> Self {
        Self {
            offer_term_info,
            tiered_discount: None,
            excluded_meter_ids: Vec::new(),
        }
    }
}
/// Offer term carrying a credit amount and a list of excluded meters.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MonetaryCredit {
    #[serde(flatten)]
    pub offer_term_info: OfferTermInfo,
    #[serde(rename = "Credit", default, skip_serializing_if = "Option::is_none")]
    pub credit: Option<f64>,
    #[serde(rename = "ExcludedMeterIds", default, skip_serializing_if = "Vec::is_empty")]
    pub excluded_meter_ids: Vec<String>,
}
impl MonetaryCredit {
    pub fn new(offer_term_info: OfferTermInfo) -> Self {
        Self {
            offer_term_info,
            credit: None,
            excluded_meter_ids: Vec::new(),
        }
    }
}
/// Fields shared by every offer term; `name` acts as the discriminator
/// (flattened into each concrete term type above).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OfferTermInfo {
    #[serde(rename = "Name")]
    pub name: offer_term_info::Name,
    #[serde(rename = "EffectiveDate", default, skip_serializing_if = "Option::is_none")]
    pub effective_date: Option<String>,
}
impl OfferTermInfo {
    pub fn new(name: offer_term_info::Name) -> Self {
        Self {
            name,
            effective_date: None,
        }
    }
}
pub mod offer_term_info {
    use super::*;
    /// The kind of offer term, matching the wire-level `Name` strings.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Name {
        #[serde(rename = "Recurring Charge")]
        RecurringCharge,
        #[serde(rename = "Monetary Commitment")]
        MonetaryCommitment,
        #[serde(rename = "Monetary Credit")]
        MonetaryCredit,
    }
}
/// Query parameters identifying the rate card to fetch (all required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RateCardQueryParameters {
    #[serde(rename = "OfferDurableId")]
    pub offer_durable_id: String,
    #[serde(rename = "Currency")]
    pub currency: String,
    #[serde(rename = "Locale")]
    pub locale: String,
    #[serde(rename = "RegionInfo")]
    pub region_info: String,
}
impl RateCardQueryParameters {
    pub fn new(offer_durable_id: String, currency: String, locale: String, region_info: String) -> Self {
        Self {
            offer_durable_id,
            currency,
            locale,
            region_info,
        }
    }
}
/// Offer term carrying a recurring charge amount.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecurringCharge {
    #[serde(flatten)]
    pub offer_term_info: OfferTermInfo,
    #[serde(rename = "RecurringCharge", default, skip_serializing_if = "Option::is_none")]
    pub recurring_charge: Option<i64>,
}
impl RecurringCharge {
    pub fn new(offer_term_info: OfferTermInfo) -> Self {
        Self {
            offer_term_info,
            recurring_charge: None,
        }
    }
}
/// A rate card: currency/locale information plus the offer terms and meters.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ResourceRateCardInfo {
    #[serde(rename = "Currency", default, skip_serializing_if = "Option::is_none")]
    pub currency: Option<String>,
    #[serde(rename = "Locale", default, skip_serializing_if = "Option::is_none")]
    pub locale: Option<String>,
    #[serde(rename = "IsTaxIncluded", default, skip_serializing_if = "Option::is_none")]
    pub is_tax_included: Option<bool>,
    #[serde(rename = "OfferTerms", default, skip_serializing_if = "Vec::is_empty")]
    pub offer_terms: Vec<OfferTermInfo>,
    #[serde(rename = "Meters", default, skip_serializing_if = "Vec::is_empty")]
    pub meters: Vec<MeterInfo>,
}
impl ResourceRateCardInfo {
    pub fn new() -> Self {
        Self::default()
    }
}
/// One aggregated usage record; `properties` holds the actual sample.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct UsageAggregation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<UsageSample>,
}
impl UsageAggregation {
    pub fn new() -> Self {
        Self::default()
    }
}
/// A page of usage aggregations, with `next_link` for pagination.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct UsageAggregationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<UsageAggregation>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
impl UsageAggregationListResult {
    pub fn new() -> Self {
        Self::default()
    }
}
/// Usage measured for one meter between `usage_start_time` and `usage_end_time`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct UsageSample {
    #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub subscription_id: Option<String>,
    #[serde(rename = "meterId", default, skip_serializing_if = "Option::is_none")]
    pub meter_id: Option<String>,
    #[serde(rename = "usageStartTime", default, skip_serializing_if = "Option::is_none")]
    pub usage_start_time: Option<String>,
    #[serde(rename = "usageEndTime", default, skip_serializing_if = "Option::is_none")]
    pub usage_end_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub quantity: Option<f32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
    #[serde(rename = "meterName", default, skip_serializing_if = "Option::is_none")]
    pub meter_name: Option<String>,
    #[serde(rename = "meterCategory", default, skip_serializing_if = "Option::is_none")]
    pub meter_category: Option<String>,
    #[serde(rename = "meterSubCategory", default, skip_serializing_if = "Option::is_none")]
    pub meter_sub_category: Option<String>,
    #[serde(rename = "meterRegion", default, skip_serializing_if = "Option::is_none")]
    pub meter_region: Option<String>,
    #[serde(rename = "infoFields", default, skip_serializing_if = "Option::is_none")]
    pub info_fields: Option<InfoField>,
    #[serde(rename = "instanceData", default, skip_serializing_if = "Option::is_none")]
    pub instance_data: Option<String>,
}
impl UsageSample {
    pub fn new() -> Self {
        Self::default()
    }
}
| 37.642241 | 105 | 0.670789 |
1dc8d4fa52a5cbb87b01a2e96e74b87f622b536c
| 87,312 |
//! [Flexible target specification.](https://github.com/rust-lang/rfcs/pull/131)
//!
//! Rust targets a wide variety of usecases, and in the interest of flexibility,
//! allows new target triples to be defined in configuration files. Most users
//! will not need to care about these, but this is invaluable when porting Rust
//! to a new platform, and allows for an unprecedented level of control over how
//! the compiler works.
//!
//! # Using custom targets
//!
//! A target triple, as passed via `rustc --target=TRIPLE`, will first be
//! compared against the list of built-in targets. This is to ease distributing
//! rustc (no need for configuration files) and also to hold these built-in
//! targets as immutable and sacred. If `TRIPLE` is not one of the built-in
//! targets, rustc will check if a file named `TRIPLE` exists. If it does, it
//! will be loaded as the target configuration. If the file does not exist,
//! rustc will search each directory in the environment variable
//! `RUST_TARGET_PATH` for a file named `TRIPLE.json`. The first one found will
//! be loaded. If no file is found in any of those directories, a fatal error
//! will be given.
//!
//! Projects defining their own targets should use
//! `--target=path/to/my-awesome-platform.json` instead of adding to
//! `RUST_TARGET_PATH`.
//!
//! # Defining a new target
//!
//! Targets are defined using [JSON](http://json.org/). The `Target` struct in
//! this module defines the format the JSON file should take, though each
//! underscore in the field names should be replaced with a hyphen (`-`) in the
//! JSON file. Some fields are required in every target specification, such as
//! `llvm-target`, `target-endian`, `target-pointer-width`, `data-layout`,
//! `arch`, and `os`. In general, options passed to rustc with `-C` override
//! the target's settings, though `target-feature` and `link-args` will *add*
//! to the list specified by the target, rather than replace.
use crate::abi::Endian;
use crate::spec::abi::{lookup as lookup_abi, Abi};
use crate::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_serialize::json::{Json, ToJson};
use rustc_span::symbol::{sym, Symbol};
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::{fmt, io};
use rustc_macros::HashStable_Generic;
pub mod abi;
pub mod crt_objects;
mod android_base;
mod apple_base;
mod apple_sdk_base;
mod arm_base;
mod avr_gnu_base;
mod dragonfly_base;
mod freebsd_base;
mod fuchsia_base;
mod haiku_base;
mod hermit_base;
mod hermit_kernel_base;
mod illumos_base;
mod l4re_base;
mod linux_base;
mod linux_gnu_base;
mod linux_kernel_base;
mod linux_musl_base;
mod linux_uclibc_base;
mod msvc_base;
mod netbsd_base;
mod openbsd_base;
mod redox_base;
mod riscv_base;
mod solaris_base;
mod thumb_base;
mod uefi_msvc_base;
mod vxworks_base;
mod wasm_base;
mod windows_gnu_base;
mod windows_msvc_base;
mod windows_uwp_gnu_base;
mod windows_uwp_msvc_base;
/// Linker families / drivers the compiler knows how to invoke.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum LinkerFlavor {
    Em,
    Gcc,
    Ld,
    Msvc,
    Lld(LldFlavor),
    PtxLinker,
}
/// The frontends LLD can emulate; selects how LLD is driven.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum LldFlavor {
    Wasm,
    Ld64,
    Ld,
    Link,
}
impl LldFlavor {
    /// Parses the JSON spelling of an LLD flavor; `None` for unknown names.
    fn from_str(s: &str) -> Option<Self> {
        Some(match s {
            "darwin" => LldFlavor::Ld64,
            "gnu" => LldFlavor::Ld,
            "link" => LldFlavor::Link,
            "wasm" => LldFlavor::Wasm,
            _ => return None,
        })
    }
}
impl ToJson for LldFlavor {
    fn to_json(&self) -> Json {
        // Keep these spellings in sync with `LldFlavor::from_str` above.
        match *self {
            LldFlavor::Ld64 => "darwin",
            LldFlavor::Ld => "gnu",
            LldFlavor::Link => "link",
            LldFlavor::Wasm => "wasm",
        }
        .to_json()
    }
}
impl ToJson for LinkerFlavor {
    fn to_json(&self) -> Json {
        self.desc().to_json()
    }
}
/// Generates `LinkerFlavor::one_of`, `from_str` and `desc` from a single
/// flavor <-> string table so the three can never drift apart.
macro_rules! flavor_mappings {
    ($((($($flavor:tt)*), $string:expr),)*) => (
        impl LinkerFlavor {
            pub const fn one_of() -> &'static str {
                concat!("one of: ", $($string, " ",)*)
            }
            pub fn from_str(s: &str) -> Option<Self> {
                Some(match s {
                    $($string => $($flavor)*,)*
                    _ => return None,
                })
            }
            pub fn desc(&self) -> &str {
                match *self {
                    $($($flavor)* => $string,)*
                }
            }
        }
    )
}
flavor_mappings! {
    ((LinkerFlavor::Em), "em"),
    ((LinkerFlavor::Gcc), "gcc"),
    ((LinkerFlavor::Ld), "ld"),
    ((LinkerFlavor::Msvc), "msvc"),
    ((LinkerFlavor::PtxLinker), "ptx-linker"),
    ((LinkerFlavor::Lld(LldFlavor::Wasm)), "wasm-ld"),
    ((LinkerFlavor::Lld(LldFlavor::Ld64)), "ld64.lld"),
    ((LinkerFlavor::Lld(LldFlavor::Ld)), "ld.lld"),
    ((LinkerFlavor::Lld(LldFlavor::Link)), "lld-link"),
}
/// Strategy for handling `panic!`: unwind the stack or abort the process.
#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable, HashStable_Generic)]
pub enum PanicStrategy {
    Unwind,
    Abort,
}
impl PanicStrategy {
    /// The user-facing spelling of this strategy.
    pub fn desc(&self) -> &str {
        match *self {
            PanicStrategy::Unwind => "unwind",
            PanicStrategy::Abort => "abort",
        }
    }
    /// Same as `desc`, but as an interned `Symbol`.
    pub fn desc_symbol(&self) -> Symbol {
        match *self {
            PanicStrategy::Unwind => sym::unwind,
            PanicStrategy::Abort => sym::abort,
        }
    }
}
impl ToJson for PanicStrategy {
    fn to_json(&self) -> Json {
        match *self {
            PanicStrategy::Abort => "abort".to_json(),
            PanicStrategy::Unwind => "unwind".to_json(),
        }
    }
}
/// RELRO level requested for the produced binary.
#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable)]
pub enum RelroLevel {
    Full,
    Partial,
    Off,
    // NOTE(review): both `Off` and `None` exist as distinct levels; `None`
    // presumably means "no preference specified" — confirm against how the
    // `Target` defaults consume this.
    None,
}
impl RelroLevel {
    /// The user-facing spelling of this level.
    pub fn desc(&self) -> &str {
        match *self {
            RelroLevel::Full => "full",
            RelroLevel::Partial => "partial",
            RelroLevel::Off => "off",
            RelroLevel::None => "none",
        }
    }
}
impl FromStr for RelroLevel {
    type Err = ();
    /// Inverse of `desc`; rejects unknown spellings.
    fn from_str(s: &str) -> Result<RelroLevel, ()> {
        match s {
            "full" => Ok(RelroLevel::Full),
            "partial" => Ok(RelroLevel::Partial),
            "off" => Ok(RelroLevel::Off),
            "none" => Ok(RelroLevel::None),
            _ => Err(()),
        }
    }
}
impl ToJson for RelroLevel {
    fn to_json(&self) -> Json {
        // Emit the same lowercase spellings that `RelroLevel::from_str` (and
        // `desc`) use. The previous implementation serialized `RelroLevel::None`
        // as "None", which `from_str` rejects, so an emitted target spec
        // containing it could not be parsed back.
        match *self {
            RelroLevel::Full => "full".to_json(),
            RelroLevel::Partial => "partial".to_json(),
            RelroLevel::Off => "off".to_json(),
            RelroLevel::None => "none".to_json(),
        }
    }
}
/// Function-merging setting: disabled, via trampolines, or via aliases.
#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable)]
pub enum MergeFunctions {
    Disabled,
    Trampolines,
    Aliases,
}
impl MergeFunctions {
    /// The user-facing spelling of this setting.
    pub fn desc(&self) -> &str {
        match *self {
            MergeFunctions::Disabled => "disabled",
            MergeFunctions::Trampolines => "trampolines",
            MergeFunctions::Aliases => "aliases",
        }
    }
}
impl FromStr for MergeFunctions {
    type Err = ();
    /// Inverse of `desc`; rejects unknown spellings.
    fn from_str(s: &str) -> Result<MergeFunctions, ()> {
        match s {
            "disabled" => Ok(MergeFunctions::Disabled),
            "trampolines" => Ok(MergeFunctions::Trampolines),
            "aliases" => Ok(MergeFunctions::Aliases),
            _ => Err(()),
        }
    }
}
impl ToJson for MergeFunctions {
    fn to_json(&self) -> Json {
        // Keep in sync with `FromStr` above so emitted specs round-trip.
        match *self {
            MergeFunctions::Disabled => "disabled".to_json(),
            MergeFunctions::Trampolines => "trampolines".to_json(),
            MergeFunctions::Aliases => "aliases".to_json(),
        }
    }
}
/// Supported relocation models.
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum RelocModel {
    Static,
    Pic,
    DynamicNoPic,
    Ropi,
    Rwpi,
    RopiRwpi,
}
impl FromStr for RelocModel {
    type Err = ();
    /// Parses the user-facing spelling of a relocation model; any unknown
    /// spelling is rejected with `Err(())`.
    fn from_str(s: &str) -> Result<RelocModel, ()> {
        match s {
            "static" => Ok(RelocModel::Static),
            "pic" => Ok(RelocModel::Pic),
            "dynamic-no-pic" => Ok(RelocModel::DynamicNoPic),
            "ropi" => Ok(RelocModel::Ropi),
            "rwpi" => Ok(RelocModel::Rwpi),
            "ropi-rwpi" => Ok(RelocModel::RopiRwpi),
            _ => Err(()),
        }
    }
}
impl ToJson for RelocModel {
    fn to_json(&self) -> Json {
        // Keep in sync with the spellings accepted by `RelocModel::from_str`.
        match *self {
            RelocModel::Static => "static",
            RelocModel::Pic => "pic",
            RelocModel::DynamicNoPic => "dynamic-no-pic",
            RelocModel::Ropi => "ropi",
            RelocModel::Rwpi => "rwpi",
            RelocModel::RopiRwpi => "ropi-rwpi",
        }
        .to_json()
    }
}
/// Supported code models.
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum CodeModel {
    Tiny,
    Small,
    Kernel,
    Medium,
    Large,
}
impl FromStr for CodeModel {
    type Err = ();
    /// Parses the user-facing spelling of a code model; any unknown spelling
    /// is rejected with `Err(())`.
    fn from_str(s: &str) -> Result<CodeModel, ()> {
        match s {
            "tiny" => Ok(CodeModel::Tiny),
            "small" => Ok(CodeModel::Small),
            "kernel" => Ok(CodeModel::Kernel),
            "medium" => Ok(CodeModel::Medium),
            "large" => Ok(CodeModel::Large),
            _ => Err(()),
        }
    }
}
impl ToJson for CodeModel {
    fn to_json(&self) -> Json {
        // Keep in sync with the spellings accepted by `CodeModel::from_str`.
        match *self {
            CodeModel::Tiny => "tiny",
            CodeModel::Small => "small",
            CodeModel::Kernel => "kernel",
            CodeModel::Medium => "medium",
            CodeModel::Large => "large",
        }
        .to_json()
    }
}
/// Supported thread-local storage models.
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum TlsModel {
    GeneralDynamic,
    LocalDynamic,
    InitialExec,
    LocalExec,
}
impl FromStr for TlsModel {
    type Err = ();
    fn from_str(s: &str) -> Result<TlsModel, ()> {
        match s {
            // The model itself is named "general dynamic", but the
            // user-facing option is spelled "global-dynamic" for consistency
            // with other compilers.
            "global-dynamic" => Ok(TlsModel::GeneralDynamic),
            "local-dynamic" => Ok(TlsModel::LocalDynamic),
            "initial-exec" => Ok(TlsModel::InitialExec),
            "local-exec" => Ok(TlsModel::LocalExec),
            _ => Err(()),
        }
    }
}
impl ToJson for TlsModel {
    fn to_json(&self) -> Json {
        // Keep in sync with the spellings accepted by `TlsModel::from_str`.
        match *self {
            TlsModel::GeneralDynamic => "global-dynamic",
            TlsModel::LocalDynamic => "local-dynamic",
            TlsModel::InitialExec => "initial-exec",
            TlsModel::LocalExec => "local-exec",
        }
        .to_json()
    }
}
/// Everything is flattened to a single enum to make the json encoding/decoding less annoying.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum LinkOutputKind {
    /// Dynamically linked non position-independent executable.
    DynamicNoPicExe,
    /// Dynamically linked position-independent executable.
    DynamicPicExe,
    /// Statically linked non position-independent executable.
    StaticNoPicExe,
    /// Statically linked position-independent executable.
    StaticPicExe,
    /// Regular dynamic library ("dynamically linked").
    DynamicDylib,
    /// Dynamic library with bundled libc ("statically linked").
    StaticDylib,
    /// WASI module with a lifetime past the _initialize entry point
    WasiReactorExe,
}
impl LinkOutputKind {
    /// The user-facing spelling of this output kind.
    fn as_str(&self) -> &'static str {
        match self {
            LinkOutputKind::DynamicNoPicExe => "dynamic-nopic-exe",
            LinkOutputKind::DynamicPicExe => "dynamic-pic-exe",
            LinkOutputKind::StaticNoPicExe => "static-nopic-exe",
            LinkOutputKind::StaticPicExe => "static-pic-exe",
            LinkOutputKind::DynamicDylib => "dynamic-dylib",
            LinkOutputKind::StaticDylib => "static-dylib",
            LinkOutputKind::WasiReactorExe => "wasi-reactor-exe",
        }
    }
    /// Inverse of `as_str`; returns `None` for unrecognized spellings.
    pub(super) fn from_str(s: &str) -> Option<LinkOutputKind> {
        Some(match s {
            "dynamic-nopic-exe" => LinkOutputKind::DynamicNoPicExe,
            "dynamic-pic-exe" => LinkOutputKind::DynamicPicExe,
            "static-nopic-exe" => LinkOutputKind::StaticNoPicExe,
            "static-pic-exe" => LinkOutputKind::StaticPicExe,
            "dynamic-dylib" => LinkOutputKind::DynamicDylib,
            "static-dylib" => LinkOutputKind::StaticDylib,
            "wasi-reactor-exe" => LinkOutputKind::WasiReactorExe,
            _ => return None,
        })
    }
}
impl fmt::Display for LinkOutputKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
/// Linker arguments, grouped by the linker flavor they apply to.
pub type LinkArgs = BTreeMap<LinkerFlavor, Vec<String>>;
/// Where split debug information ends up, if anywhere.
#[derive(Clone, Copy, Hash, Debug, PartialEq, Eq)]
pub enum SplitDebuginfo {
    /// No splitting: on platforms that support it (effectively only ELF) all
    /// debug information lives in the executable itself.
    ///
    /// * Windows - not supported
    /// * macOS - don't run `dsymutil`
    /// * ELF - `.dwarf_*` sections
    Off,
    /// Debug information is "packed" into a single artifact separate from the
    /// final binary. Supported on all platforms.
    ///
    /// * Windows - `*.pdb`
    /// * macOS - `*.dSYM` (run `dsymutil`)
    /// * ELF - `*.dwp` (run `rust-llvm-dwp`)
    Packed,
    /// Debug information stays scattered across the individual object files
    /// on the filesystem; the main executable may reference them.
    ///
    /// * Windows - not supported
    /// * macOS - supported, scattered object files
    /// * ELF - supported, scattered `*.dwo` files
    Unpacked,
}
impl SplitDebuginfo {
    /// The user-facing spelling of this variant.
    fn as_str(&self) -> &'static str {
        match self {
            SplitDebuginfo::Off => "off",
            SplitDebuginfo::Packed => "packed",
            SplitDebuginfo::Unpacked => "unpacked",
        }
    }
}
impl FromStr for SplitDebuginfo {
    type Err = ();
    /// Inverse of `as_str`; unknown spellings are rejected with `Err(())`.
    fn from_str(s: &str) -> Result<SplitDebuginfo, ()> {
        match s {
            "off" => Ok(SplitDebuginfo::Off),
            "packed" => Ok(SplitDebuginfo::Packed),
            "unpacked" => Ok(SplitDebuginfo::Unpacked),
            _ => Err(()),
        }
    }
}
impl ToJson for SplitDebuginfo {
fn to_json(&self) -> Json {
self.as_str().to_json()
}
}
impl fmt::Display for SplitDebuginfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.as_str())
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum StackProbeType {
/// Don't emit any stack probes.
None,
/// It is harmless to use this option even on targets that do not have backend support for
/// stack probes as the failure mode is the same as if no stack-probe option was specified in
/// the first place.
Inline,
/// Call `__rust_probestack` whenever stack needs to be probed.
Call,
/// Use inline option for LLVM versions later than specified in `min_llvm_version_for_inline`
/// and call `__rust_probestack` otherwise.
InlineOrCall { min_llvm_version_for_inline: (u32, u32, u32) },
}
impl StackProbeType {
    /// Parses a stack-probe specification out of a target-spec JSON object.
    ///
    /// Expects `{"kind": "none" | "inline" | "call"}` or
    /// `{"kind": "inline-or-call", "min-llvm-version-for-inline": [major, minor, patch]}`;
    /// missing version components default to `11.0.0`.
    fn from_json(json: &Json) -> Result<Self, String> {
        let object = json.as_object().ok_or_else(|| "expected a JSON object")?;
        let kind = object
            .get("kind")
            .and_then(|o| o.as_string())
            .ok_or_else(|| "expected `kind` to be a string")?;
        match kind {
            "none" => Ok(StackProbeType::None),
            "inline" => Ok(StackProbeType::Inline),
            "call" => Ok(StackProbeType::Call),
            "inline-or-call" => {
                let min_version = object
                    .get("min-llvm-version-for-inline")
                    .and_then(|o| o.as_array())
                    .ok_or_else(|| "expected `min-llvm-version-for-inline` to be an array")?;
                // Each array element must be an integer that fits in a `u32`.
                let parse_component = |v: &Json| -> Result<u32, &'static str> {
                    let int = v.as_u64().ok_or_else(
                        || "expected `min-llvm-version-for-inline` values to be integers",
                    )?;
                    u32::try_from(int)
                        .map_err(|_| "`min-llvm-version-for-inline` values don't convert to u32")
                };
                let mut components = min_version.into_iter().map(parse_component);
                // Absent components fall back to LLVM 11.0.0.
                let major = components.next().unwrap_or(Ok(11))?;
                let minor = components.next().unwrap_or(Ok(0))?;
                let patch = components.next().unwrap_or(Ok(0))?;
                Ok(StackProbeType::InlineOrCall {
                    min_llvm_version_for_inline: (major, minor, patch),
                })
            }
            _ => Err(String::from(
                "`kind` expected to be one of `none`, `inline`, `call` or `inline-or-call`",
            )),
        }
    }
}
impl ToJson for StackProbeType {
    /// Serializes into the same `{"kind": ..., ...}` object shape that
    /// `StackProbeType::from_json` accepts.
    fn to_json(&self) -> Json {
        let mut fields = Vec::new();
        match self {
            StackProbeType::None => fields.push((String::from("kind"), "none".to_json())),
            StackProbeType::Inline => fields.push((String::from("kind"), "inline".to_json())),
            StackProbeType::Call => fields.push((String::from("kind"), "call".to_json())),
            StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
                fields.push((String::from("kind"), "inline-or-call".to_json()));
                fields.push((
                    String::from("min-llvm-version-for-inline"),
                    min_llvm_version_for_inline.to_json(),
                ));
            }
        }
        Json::Object(fields.into_iter().collect())
    }
}
bitflags::bitflags! {
    /// A set of sanitizers, represented as a bitset so that several can be
    /// enabled or supported simultaneously.
    #[derive(Default, Encodable, Decodable)]
    pub struct SanitizerSet: u8 {
        const ADDRESS = 1 << 0;
        const LEAK = 1 << 1;
        const MEMORY = 1 << 2;
        const THREAD = 1 << 3;
        const HWADDRESS = 1 << 4;
    }
}
impl SanitizerSet {
    /// Return sanitizer's name
    ///
    /// Returns none if the flags is a set of sanitizers numbering not exactly one.
    fn as_str(self) -> Option<&'static str> {
        let name = match self {
            SanitizerSet::ADDRESS => "address",
            SanitizerSet::LEAK => "leak",
            SanitizerSet::MEMORY => "memory",
            SanitizerSet::THREAD => "thread",
            SanitizerSet::HWADDRESS => "hwaddress",
            // Empty sets and multi-sanitizer sets have no single name.
            _ => return None,
        };
        Some(name)
    }
}
/// Formats a sanitizer set as a comma separated list of sanitizers' names.
impl fmt::Display for SanitizerSet {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `IntoIterator` yields each contained sanitizer as a singleton set,
        // so `as_str` is guaranteed to succeed for every element.
        for (i, s) in (*self).into_iter().enumerate() {
            let name = s.as_str().unwrap_or_else(|| panic!("unrecognized sanitizer {:?}", s));
            if i != 0 {
                f.write_str(", ")?;
            }
            f.write_str(name)?;
        }
        Ok(())
    }
}
impl IntoIterator for SanitizerSet {
    type Item = SanitizerSet;
    type IntoIter = std::vec::IntoIter<SanitizerSet>;

    /// Iterates over the contained sanitizers, yielding each as its own
    /// singleton `SanitizerSet`, in the fixed order below.
    fn into_iter(self) -> Self::IntoIter {
        let all = [
            SanitizerSet::ADDRESS,
            SanitizerSet::LEAK,
            SanitizerSet::MEMORY,
            SanitizerSet::THREAD,
            SanitizerSet::HWADDRESS,
        ];
        let mut present = Vec::new();
        for &s in all.iter() {
            if self.contains(s) {
                present.push(s);
            }
        }
        present.into_iter()
    }
}
impl<CTX> HashStable<CTX> for SanitizerSet {
    /// Hashes the raw bit representation, which uniquely identifies the set.
    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
        let bits = self.bits();
        bits.hash_stable(ctx, hasher);
    }
}
impl ToJson for SanitizerSet {
    /// Serializes as a JSON array of sanitizer names. Falls back to an empty
    /// array if any element lacks a canonical name (which cannot happen for
    /// the singleton sets produced by `IntoIterator`).
    fn to_json(&self) -> Json {
        let names: Option<Vec<Json>> =
            self.into_iter().map(|s| s.as_str().map(|name| name.to_json())).collect();
        names.unwrap_or_default().to_json()
    }
}
/// Declares the set of built-in targets: one `(triple..., module)` entry per
/// specification module. Expands to the module declarations, the public
/// `TARGETS` list, the `load_builtin` lookup function, and one round-trip
/// test per target module.
macro_rules! supported_targets {
    ( $(($( $triple:literal, )+ $module:ident ),)+ ) => {
        // One private module per target specification.
        $(mod $module;)+

        /// List of supported targets
        pub const TARGETS: &[&str] = &[$($($triple),+),+];

        // Maps a triple to its built-in `Target`, or `None` for unknown triples.
        // Several triples may share one module (hence the `|`-joined patterns).
        fn load_builtin(target: &str) -> Option<Target> {
            let mut t = match target {
                $( $($triple)|+ => $module::target(), )+
                _ => return None,
            };
            t.is_builtin = true;
            debug!("got builtin target: {:?}", t);
            Some(t)
        }

        #[cfg(test)]
        mod tests {
            mod tests_impl;

            // Cannot put this into a separate file without duplication, make an exception.
            $(
                #[test] // `#[test]`
                fn $module() {
                    tests_impl::test_target(super::$module::target());
                }
            )+
        }
    };
}
// All targets built into rustc. Comments below only group the entries; they
// are stripped by the lexer before macro expansion and do not affect output.
supported_targets! {
    // Linux (glibc, musl, uclibc).
    ("x86_64-unknown-linux-gnu", x86_64_unknown_linux_gnu),
    ("x86_64-unknown-linux-gnux32", x86_64_unknown_linux_gnux32),
    ("i686-unknown-linux-gnu", i686_unknown_linux_gnu),
    ("i586-unknown-linux-gnu", i586_unknown_linux_gnu),
    ("mips-unknown-linux-gnu", mips_unknown_linux_gnu),
    ("mips64-unknown-linux-gnuabi64", mips64_unknown_linux_gnuabi64),
    ("mips64el-unknown-linux-gnuabi64", mips64el_unknown_linux_gnuabi64),
    ("mipsisa32r6-unknown-linux-gnu", mipsisa32r6_unknown_linux_gnu),
    ("mipsisa32r6el-unknown-linux-gnu", mipsisa32r6el_unknown_linux_gnu),
    ("mipsisa64r6-unknown-linux-gnuabi64", mipsisa64r6_unknown_linux_gnuabi64),
    ("mipsisa64r6el-unknown-linux-gnuabi64", mipsisa64r6el_unknown_linux_gnuabi64),
    ("mipsel-unknown-linux-gnu", mipsel_unknown_linux_gnu),
    ("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu),
    ("powerpc-unknown-linux-gnuspe", powerpc_unknown_linux_gnuspe),
    ("powerpc-unknown-linux-musl", powerpc_unknown_linux_musl),
    ("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu),
    ("powerpc64-unknown-linux-musl", powerpc64_unknown_linux_musl),
    ("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu),
    ("powerpc64le-unknown-linux-musl", powerpc64le_unknown_linux_musl),
    ("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu),
    ("s390x-unknown-linux-musl", s390x_unknown_linux_musl),
    ("sparc-unknown-linux-gnu", sparc_unknown_linux_gnu),
    ("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu),
    ("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi),
    ("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf),
    ("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi),
    ("arm-unknown-linux-musleabihf", arm_unknown_linux_musleabihf),
    ("armv4t-unknown-linux-gnueabi", armv4t_unknown_linux_gnueabi),
    ("armv5te-unknown-linux-gnueabi", armv5te_unknown_linux_gnueabi),
    ("armv5te-unknown-linux-musleabi", armv5te_unknown_linux_musleabi),
    ("armv5te-unknown-linux-uclibceabi", armv5te_unknown_linux_uclibceabi),
    ("armv7-unknown-linux-gnueabi", armv7_unknown_linux_gnueabi),
    ("armv7-unknown-linux-gnueabihf", armv7_unknown_linux_gnueabihf),
    ("thumbv7neon-unknown-linux-gnueabihf", thumbv7neon_unknown_linux_gnueabihf),
    ("thumbv7neon-unknown-linux-musleabihf", thumbv7neon_unknown_linux_musleabihf),
    ("armv7-unknown-linux-musleabi", armv7_unknown_linux_musleabi),
    ("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf),
    ("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu),
    ("aarch64-unknown-linux-musl", aarch64_unknown_linux_musl),
    ("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl),
    ("i686-unknown-linux-musl", i686_unknown_linux_musl),
    ("i586-unknown-linux-musl", i586_unknown_linux_musl),
    ("mips-unknown-linux-musl", mips_unknown_linux_musl),
    ("mipsel-unknown-linux-musl", mipsel_unknown_linux_musl),
    ("mips64-unknown-linux-muslabi64", mips64_unknown_linux_muslabi64),
    ("mips64el-unknown-linux-muslabi64", mips64el_unknown_linux_muslabi64),
    ("hexagon-unknown-linux-musl", hexagon_unknown_linux_musl),
    ("mips-unknown-linux-uclibc", mips_unknown_linux_uclibc),
    ("mipsel-unknown-linux-uclibc", mipsel_unknown_linux_uclibc),
    // Android.
    ("i686-linux-android", i686_linux_android),
    ("x86_64-linux-android", x86_64_linux_android),
    ("arm-linux-androideabi", arm_linux_androideabi),
    ("armv7-linux-androideabi", armv7_linux_androideabi),
    ("thumbv7neon-linux-androideabi", thumbv7neon_linux_androideabi),
    ("aarch64-linux-android", aarch64_linux_android),
    // Linux kernel modules.
    ("x86_64-unknown-none-linuxkernel", x86_64_unknown_none_linuxkernel),
    // BSD family and Haiku.
    ("aarch64-unknown-freebsd", aarch64_unknown_freebsd),
    ("armv6-unknown-freebsd", armv6_unknown_freebsd),
    ("armv7-unknown-freebsd", armv7_unknown_freebsd),
    ("i686-unknown-freebsd", i686_unknown_freebsd),
    ("powerpc64-unknown-freebsd", powerpc64_unknown_freebsd),
    ("x86_64-unknown-freebsd", x86_64_unknown_freebsd),
    ("x86_64-unknown-dragonfly", x86_64_unknown_dragonfly),
    ("aarch64-unknown-openbsd", aarch64_unknown_openbsd),
    ("i686-unknown-openbsd", i686_unknown_openbsd),
    ("sparc64-unknown-openbsd", sparc64_unknown_openbsd),
    ("x86_64-unknown-openbsd", x86_64_unknown_openbsd),
    ("powerpc-unknown-openbsd", powerpc_unknown_openbsd),
    ("aarch64-unknown-netbsd", aarch64_unknown_netbsd),
    ("armv6-unknown-netbsd-eabihf", armv6_unknown_netbsd_eabihf),
    ("armv7-unknown-netbsd-eabihf", armv7_unknown_netbsd_eabihf),
    ("i686-unknown-netbsd", i686_unknown_netbsd),
    ("powerpc-unknown-netbsd", powerpc_unknown_netbsd),
    ("sparc64-unknown-netbsd", sparc64_unknown_netbsd),
    ("x86_64-unknown-netbsd", x86_64_unknown_netbsd),
    ("i686-unknown-haiku", i686_unknown_haiku),
    ("x86_64-unknown-haiku", x86_64_unknown_haiku),
    // Apple platforms (macOS, iOS, tvOS).
    ("aarch64-apple-darwin", aarch64_apple_darwin),
    ("x86_64-apple-darwin", x86_64_apple_darwin),
    ("i686-apple-darwin", i686_apple_darwin),
    // Fuchsia, AVR, L4Re, Redox.
    ("aarch64-fuchsia", aarch64_fuchsia),
    ("x86_64-fuchsia", x86_64_fuchsia),
    ("avr-unknown-gnu-atmega328", avr_unknown_gnu_atmega328),
    ("x86_64-unknown-l4re-uclibc", x86_64_unknown_l4re_uclibc),
    ("aarch64-unknown-redox", aarch64_unknown_redox),
    ("x86_64-unknown-redox", x86_64_unknown_redox),
    ("i386-apple-ios", i386_apple_ios),
    ("x86_64-apple-ios", x86_64_apple_ios),
    ("aarch64-apple-ios", aarch64_apple_ios),
    ("armv7-apple-ios", armv7_apple_ios),
    ("armv7s-apple-ios", armv7s_apple_ios),
    ("x86_64-apple-ios-macabi", x86_64_apple_ios_macabi),
    ("aarch64-apple-ios-macabi", aarch64_apple_ios_macabi),
    ("aarch64-apple-ios-sim", aarch64_apple_ios_sim),
    ("aarch64-apple-tvos", aarch64_apple_tvos),
    ("x86_64-apple-tvos", x86_64_apple_tvos),
    // Bare-metal ARMv7-R.
    ("armebv7r-none-eabi", armebv7r_none_eabi),
    ("armebv7r-none-eabihf", armebv7r_none_eabihf),
    ("armv7r-none-eabi", armv7r_none_eabi),
    ("armv7r-none-eabihf", armv7r_none_eabihf),
    // Solaris / illumos.
    ("x86_64-pc-solaris", x86_64_pc_solaris),
    ("x86_64-sun-solaris", x86_64_sun_solaris),
    ("sparcv9-sun-solaris", sparcv9_sun_solaris),
    ("x86_64-unknown-illumos", x86_64_unknown_illumos),
    // Windows (GNU and MSVC toolchains, including UWP variants).
    ("x86_64-pc-windows-gnu", x86_64_pc_windows_gnu),
    ("i686-pc-windows-gnu", i686_pc_windows_gnu),
    ("i686-uwp-windows-gnu", i686_uwp_windows_gnu),
    ("x86_64-uwp-windows-gnu", x86_64_uwp_windows_gnu),
    ("aarch64-pc-windows-msvc", aarch64_pc_windows_msvc),
    ("aarch64-uwp-windows-msvc", aarch64_uwp_windows_msvc),
    ("x86_64-pc-windows-msvc", x86_64_pc_windows_msvc),
    ("x86_64-uwp-windows-msvc", x86_64_uwp_windows_msvc),
    ("i686-pc-windows-msvc", i686_pc_windows_msvc),
    ("i686-uwp-windows-msvc", i686_uwp_windows_msvc),
    ("i586-pc-windows-msvc", i586_pc_windows_msvc),
    ("i686-oldpc-windows-msvc", i686_oldpc_windows_msvc),
    ("i586-oldpc-windows-msvc", i586_oldpc_windows_msvc),
    ("thumbv7a-pc-windows-msvc", thumbv7a_pc_windows_msvc),
    ("thumbv7a-uwp-windows-msvc", thumbv7a_uwp_windows_msvc),
    // WebAssembly and Emscripten.
    ("asmjs-unknown-emscripten", asmjs_unknown_emscripten),
    ("wasm32-unknown-emscripten", wasm32_unknown_emscripten),
    ("wasm32-unknown-unknown", wasm32_unknown_unknown),
    ("wasm32-wasi", wasm32_wasi),
    ("wasm64-unknown-unknown", wasm64_unknown_unknown),
    // Bare-metal Thumb/ARM/MSP430 embedded targets.
    ("thumbv6m-none-eabi", thumbv6m_none_eabi),
    ("thumbv7m-none-eabi", thumbv7m_none_eabi),
    ("thumbv7em-none-eabi", thumbv7em_none_eabi),
    ("thumbv7em-none-eabihf", thumbv7em_none_eabihf),
    ("thumbv8m.base-none-eabi", thumbv8m_base_none_eabi),
    ("thumbv8m.main-none-eabi", thumbv8m_main_none_eabi),
    ("thumbv8m.main-none-eabihf", thumbv8m_main_none_eabihf),
    ("armv7a-none-eabi", armv7a_none_eabi),
    ("armv7a-none-eabihf", armv7a_none_eabihf),
    ("msp430-none-elf", msp430_none_elf),
    // Hermit unikernel.
    ("aarch64-unknown-hermit", aarch64_unknown_hermit),
    ("x86_64-unknown-hermit", x86_64_unknown_hermit),
    ("x86_64-unknown-none-hermitkernel", x86_64_unknown_none_hermitkernel),
    // RISC-V.
    ("riscv32i-unknown-none-elf", riscv32i_unknown_none_elf),
    ("riscv32imc-unknown-none-elf", riscv32imc_unknown_none_elf),
    ("riscv32imac-unknown-none-elf", riscv32imac_unknown_none_elf),
    ("riscv32gc-unknown-linux-gnu", riscv32gc_unknown_linux_gnu),
    ("riscv32gc-unknown-linux-musl", riscv32gc_unknown_linux_musl),
    ("riscv64imac-unknown-none-elf", riscv64imac_unknown_none_elf),
    ("riscv64gc-unknown-none-elf", riscv64gc_unknown_none_elf),
    ("riscv64gc-unknown-linux-gnu", riscv64gc_unknown_linux_gnu),
    ("riscv64gc-unknown-linux-musl", riscv64gc_unknown_linux_musl),
    // Bare-metal AArch64, SGX, UEFI, CUDA.
    ("aarch64-unknown-none", aarch64_unknown_none),
    ("aarch64-unknown-none-softfloat", aarch64_unknown_none_softfloat),
    ("x86_64-fortanix-unknown-sgx", x86_64_fortanix_unknown_sgx),
    ("x86_64-unknown-uefi", x86_64_unknown_uefi),
    ("i686-unknown-uefi", i686_unknown_uefi),
    ("nvptx64-nvidia-cuda", nvptx64_nvidia_cuda),
    // VxWorks.
    ("i686-wrs-vxworks", i686_wrs_vxworks),
    ("x86_64-wrs-vxworks", x86_64_wrs_vxworks),
    ("armv7-wrs-vxworks-eabihf", armv7_wrs_vxworks_eabihf),
    ("aarch64-wrs-vxworks", aarch64_wrs_vxworks),
    ("powerpc-wrs-vxworks", powerpc_wrs_vxworks),
    ("powerpc-wrs-vxworks-spe", powerpc_wrs_vxworks_spe),
    ("powerpc64-wrs-vxworks", powerpc64_wrs_vxworks),
    // Miscellaneous embedded and ILP32 targets.
    ("mipsel-sony-psp", mipsel_sony_psp),
    ("mipsel-unknown-none", mipsel_unknown_none),
    ("thumbv4t-none-eabi", thumbv4t_none_eabi),
    ("aarch64_be-unknown-linux-gnu", aarch64_be_unknown_linux_gnu),
    ("aarch64-unknown-linux-gnu_ilp32", aarch64_unknown_linux_gnu_ilp32),
    ("aarch64_be-unknown-linux-gnu_ilp32", aarch64_be_unknown_linux_gnu_ilp32),
}
/// Everything `rustc` knows about how to compile for a specific target.
///
/// Every field here must be specified, and has no default value.
#[derive(PartialEq, Clone, Debug)]
pub struct Target {
    /// Target triple to pass to LLVM.
    pub llvm_target: String,
    /// Number of bits in a pointer. Influences the `target_pointer_width` `cfg` variable.
    pub pointer_width: u32,
    /// Architecture to use for ABI considerations. Valid options include: "x86",
    /// "x86_64", "arm", "aarch64", "mips", "powerpc", "powerpc64", and others.
    pub arch: String,
    /// [Data layout](https://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM.
    pub data_layout: String,
    /// Optional settings with defaults.
    pub options: TargetOptions,
}
/// Abstraction over anything that can provide a [`Target`] specification,
/// so generic code does not have to thread a `Target` through explicitly.
pub trait HasTargetSpec {
    fn target_spec(&self) -> &Target;
}
impl HasTargetSpec for Target {
    // A `Target` trivially serves as its own target specification.
    fn target_spec(&self) -> &Target {
        self
    }
}
/// Optional aspects of a target specification.
///
/// This has an implementation of `Default`, see each field for what the default is. In general,
/// these try to take "minimal defaults" that don't assume anything about the runtime they run in.
///
/// `TargetOptions` as a separate structure is mostly an implementation detail of `Target`
/// construction, all its fields logically belong to `Target` and available from `Target`
/// through `Deref` impls.
#[derive(PartialEq, Clone, Debug)]
pub struct TargetOptions {
    /// Whether the target is built-in or loaded from a custom target specification.
    pub is_builtin: bool,
    /// Used as the `target_endian` `cfg` variable. Defaults to little endian.
    pub endian: Endian,
    /// Width of c_int type. Defaults to "32".
    pub c_int_width: String,
    /// OS name to use for conditional compilation (`target_os`). Defaults to "none".
    /// "none" implies a bare metal target without `std` library.
    /// A couple of targets having `std` also use "unknown" as an `os` value,
    /// but they are exceptions.
    pub os: String,
    /// Environment name to use for conditional compilation (`target_env`). Defaults to "".
    pub env: String,
    /// Vendor name to use for conditional compilation (`target_vendor`). Defaults to "unknown".
    pub vendor: String,
    /// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed
    /// on the command line. Defaults to `LinkerFlavor::Gcc`.
    pub linker_flavor: LinkerFlavor,
    /// Linker to invoke.
    pub linker: Option<String>,
    /// LLD flavor used if `lld` (or `rust-lld`) is specified as a linker
    /// without clarifying its flavor in any way.
    pub lld_flavor: LldFlavor,
    /// Linker arguments that are passed *before* any user-defined libraries.
    pub pre_link_args: LinkArgs,
    /// Objects to link before and after all other object code.
    pub pre_link_objects: CrtObjects,
    pub post_link_objects: CrtObjects,
    /// Same as `(pre|post)_link_objects`, but when we fail to pull the objects with help of the
    /// target's native gcc and fall back to the "self-contained" mode and pull them manually.
    /// See `crt_objects.rs` for some more detailed documentation.
    pub pre_link_objects_fallback: CrtObjects,
    pub post_link_objects_fallback: CrtObjects,
    /// Which logic to use to determine whether to fall back to the "self-contained" mode or not.
    pub crt_objects_fallback: Option<CrtObjectsFallback>,
    /// Linker arguments that are unconditionally passed after any
    /// user-defined but before post-link objects. Standard platform
    /// libraries that should always be linked to, usually go here.
    pub late_link_args: LinkArgs,
    /// Linker arguments used in addition to `late_link_args` if at least one
    /// Rust dependency is dynamically linked.
    pub late_link_args_dynamic: LinkArgs,
    /// Linker arguments used in addition to `late_link_args` if all Rust
    /// dependencies are statically linked.
    pub late_link_args_static: LinkArgs,
    /// Linker arguments that are unconditionally passed *after* any
    /// user-defined libraries.
    pub post_link_args: LinkArgs,
    /// Optional link script applied to `dylib` and `executable` crate types.
    /// This is a string containing the script, not a path. Can only be applied
    /// to linkers where `linker_is_gnu` is true.
    pub link_script: Option<String>,
    /// Environment variables to be set for the linker invocation.
    pub link_env: Vec<(String, String)>,
    /// Environment variables to be removed for the linker invocation.
    pub link_env_remove: Vec<String>,
    /// Extra arguments to pass to the external assembler (when used).
    pub asm_args: Vec<String>,
    /// Default CPU to pass to LLVM. Corresponds to `llc -mcpu=$cpu`. Defaults
    /// to "generic".
    pub cpu: String,
    /// Default target features to pass to LLVM. These features will *always* be
    /// passed, and cannot be disabled even via `-C`. Corresponds to `llc
    /// -mattr=$features`.
    pub features: String,
    /// Whether dynamic linking is available on this target. Defaults to false.
    pub dynamic_linking: bool,
    /// If dynamic linking is available, whether only cdylibs are supported.
    pub only_cdylib: bool,
    /// Whether executables are available on this target. iOS, for example, only allows static
    /// libraries. Defaults to false.
    pub executables: bool,
    /// Relocation model to use in object file. Corresponds to `llc
    /// -relocation-model=$relocation_model`. Defaults to `Pic`.
    pub relocation_model: RelocModel,
    /// Code model to use. Corresponds to `llc -code-model=$code_model`.
    /// Defaults to `None` which means "inherited from the base LLVM target".
    pub code_model: Option<CodeModel>,
    /// TLS model to use. Options are "global-dynamic" (default), "local-dynamic", "initial-exec"
    /// and "local-exec". This is similar to the -ftls-model option in GCC/Clang.
    pub tls_model: TlsModel,
    /// Do not emit code that uses the "red zone", if the ABI has one. Defaults to false.
    pub disable_redzone: bool,
    /// Eliminate frame pointers from stack frames if possible. Defaults to true.
    pub eliminate_frame_pointer: bool,
    /// Emit each function in its own section. Defaults to true.
    pub function_sections: bool,
    /// String to prepend to the name of every dynamic library. Defaults to "lib".
    pub dll_prefix: String,
    /// String to append to the name of every dynamic library. Defaults to ".so".
    pub dll_suffix: String,
    /// String to append to the name of every executable.
    pub exe_suffix: String,
    /// String to prepend to the name of every static library. Defaults to "lib".
    pub staticlib_prefix: String,
    /// String to append to the name of every static library. Defaults to ".a".
    pub staticlib_suffix: String,
    /// OS family to use for conditional compilation. Valid options: "unix", "windows".
    pub os_family: Option<String>,
    /// Whether the target toolchain's ABI supports returning small structs as an integer.
    pub abi_return_struct_as_int: bool,
    /// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS,
    /// in particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false.
    pub is_like_osx: bool,
    /// Whether the target toolchain is like Solaris's.
    /// Only useful for compiling against Illumos/Solaris,
    /// as they have a different set of linker flags. Defaults to false.
    pub is_like_solaris: bool,
    /// Whether the target is like Windows.
    /// This is a combination of several more specific properties represented as a single flag:
    ///   - The target uses a Windows ABI,
    ///   - uses PE/COFF as a format for object code,
    ///   - uses Windows-style dllexport/dllimport for shared libraries,
    ///   - uses import libraries and .def files for symbol exports,
    ///   - executables support setting a subsystem.
    pub is_like_windows: bool,
    /// Whether the target is like MSVC.
    /// This is a combination of several more specific properties represented as a single flag:
    ///   - The target has all the properties from `is_like_windows`
    ///     (for in-tree targets "is_like_msvc ⇒ is_like_windows" is ensured by a unit test),
    ///   - has some MSVC-specific Windows ABI properties,
    ///   - uses a link.exe-like linker,
    ///   - uses CodeView/PDB for debuginfo and natvis for its visualization,
    ///   - uses SEH-based unwinding,
    ///   - supports control flow guard mechanism.
    pub is_like_msvc: bool,
    /// Whether the target toolchain is like Emscripten's. Only useful for compiling with
    /// Emscripten toolchain.
    /// Defaults to false.
    pub is_like_emscripten: bool,
    /// Whether the target toolchain is like Fuchsia's.
    pub is_like_fuchsia: bool,
    /// Whether a target toolchain is like WASM.
    pub is_like_wasm: bool,
    /// Version of DWARF to use if not using the default.
    /// Useful because some platforms (osx, bsd) only want up to DWARF2.
    pub dwarf_version: Option<u32>,
    /// Whether the linker support GNU-like arguments such as -O. Defaults to false.
    pub linker_is_gnu: bool,
    /// The MinGW toolchain has a known issue that prevents it from correctly
    /// handling COFF object files with more than 2<sup>15</sup> sections. Since each weak
    /// symbol needs its own COMDAT section, weak linkage implies a large
    /// number of sections that easily exceeds the given limit for larger
    /// codebases. Consequently we want a way to disallow weak linkage on some
    /// platforms.
    pub allows_weak_linkage: bool,
    /// Whether the linker support rpaths or not. Defaults to false.
    pub has_rpath: bool,
    /// Whether to disable linking to the default libraries, typically corresponds
    /// to `-nodefaultlibs`. Defaults to true.
    pub no_default_libraries: bool,
    /// Dynamically linked executables can be compiled as position independent
    /// if the default relocation model of position independent code is not
    /// changed. This is a requirement to take advantage of ASLR, as otherwise
    /// the functions in the executable are not randomized and can be used
    /// during an exploit of a vulnerability in any code.
    pub position_independent_executables: bool,
    /// Executables that are both statically linked and position-independent are supported.
    pub static_position_independent_executables: bool,
    /// Determines if the target always requires using the PLT for indirect
    /// library calls or not. This controls the default value of the `-Z plt` flag.
    pub needs_plt: bool,
    /// Either partial, full, or off. Full RELRO makes the dynamic linker
    /// resolve all symbols at startup and marks the GOT read-only before
    /// starting the program, preventing overwriting the GOT.
    pub relro_level: RelroLevel,
    /// Format that archives should be emitted in. This affects whether we use
    /// LLVM to assemble an archive or fall back to the system linker, and
    /// currently only "gnu" is used to fall into LLVM. Unknown strings cause
    /// the system linker to be used.
    pub archive_format: String,
    /// Is asm!() allowed? Defaults to true.
    pub allow_asm: bool,
    /// Whether the runtime startup code requires the `main` function be passed
    /// `argc` and `argv` values.
    pub main_needs_argc_argv: bool,
    /// Flag indicating whether ELF TLS (e.g., #[thread_local]) is available for
    /// this target.
    pub has_elf_tls: bool,
    // This is mainly for easy compatibility with emscripten.
    // If we give emcc .o files that are actually .bc files it
    // will 'just work'.
    pub obj_is_bitcode: bool,
    /// Whether the target requires that emitted object code includes bitcode.
    pub forces_embed_bitcode: bool,
    /// Content of the LLVM cmdline section associated with embedded bitcode.
    pub bitcode_llvm_cmdline: String,
    /// Don't use this field; instead use the `.min_atomic_width()` method.
    pub min_atomic_width: Option<u64>,
    /// Don't use this field; instead use the `.max_atomic_width()` method.
    pub max_atomic_width: Option<u64>,
    /// Whether the target supports atomic CAS operations natively.
    pub atomic_cas: bool,
    /// Panic strategy: "unwind" or "abort".
    pub panic_strategy: PanicStrategy,
    /// A list of ABIs unsupported by the current target. Note that generic ABIs
    /// are considered to be supported on all platforms and cannot be marked
    /// unsupported.
    pub unsupported_abis: Vec<Abi>,
    /// Whether or not linking dylibs to a static CRT is allowed.
    pub crt_static_allows_dylibs: bool,
    /// Whether or not the CRT is statically linked by default.
    pub crt_static_default: bool,
    /// Whether or not crt-static is respected by the compiler (or is a no-op).
    pub crt_static_respected: bool,
    /// The implementation of stack probes to use.
    pub stack_probes: StackProbeType,
    /// The minimum alignment for global symbols.
    pub min_global_align: Option<u64>,
    /// Default number of codegen units to use in debug mode.
    pub default_codegen_units: Option<u64>,
    /// Whether to generate trap instructions in places where optimization would
    /// otherwise produce control flow that falls through into unrelated memory.
    pub trap_unreachable: bool,
    /// This target requires everything to be compiled with LTO to emit a final
    /// executable, aka there is no native linker for this target.
    pub requires_lto: bool,
    /// This target has no support for threads.
    pub singlethread: bool,
    /// Whether library functions call lowering/optimization is disabled in LLVM
    /// for this target unconditionally.
    pub no_builtins: bool,
    /// The default visibility for symbols in this target should be "hidden"
    /// rather than "default".
    pub default_hidden_visibility: bool,
    /// Whether a .debug_gdb_scripts section will be added to the output object file.
    pub emit_debug_gdb_scripts: bool,
    /// Whether or not to unconditionally `uwtable` attributes on functions,
    /// typically because the platform needs to unwind for things like stack
    /// unwinders.
    pub requires_uwtable: bool,
    /// Whether or not to emit `uwtable` attributes on functions if `-C force-unwind-tables`
    /// is not specified and `uwtable` is not required on this target.
    pub default_uwtable: bool,
    /// Whether or not SIMD types are passed by reference in the Rust ABI,
    /// typically required if a target can be compiled with a mixed set of
    /// target features. This is `true` by default, and `false` for targets like
    /// wasm32 where the whole program either has simd or not.
    pub simd_types_indirect: bool,
    /// Pass a list of symbol which should be exported in the dylib to the linker.
    pub limit_rdylib_exports: bool,
    /// If set, have the linker export exactly these symbols, instead of using
    /// the usual logic to figure this out from the crate itself.
    pub override_export_symbols: Option<Vec<String>>,
    /// Determines how or whether the MergeFunctions LLVM pass should run for
    /// this target. Either "disabled", "trampolines", or "aliases".
    /// The MergeFunctions pass is generally useful, but some targets may need
    /// to opt out. The default is "aliases".
    ///
    /// Workaround for: <https://github.com/rust-lang/rust/issues/57356>
    pub merge_functions: MergeFunctions,
    /// Use platform dependent mcount function.
    pub mcount: String,
    /// LLVM ABI name, corresponds to the '-mabi' parameter available in multilib C compilers.
    pub llvm_abiname: String,
    /// Whether or not RelaxElfRelocation flag will be passed to the linker.
    pub relax_elf_relocations: bool,
    /// Additional arguments to pass to LLVM, similar to the `-C llvm-args` codegen option.
    pub llvm_args: Vec<String>,
    /// Whether to use legacy .ctors initialization hooks rather than .init_array. Defaults
    /// to false (uses .init_array).
    pub use_ctors_section: bool,
    /// Whether the linker is instructed to add a `GNU_EH_FRAME` ELF header
    /// used to locate unwinding information is passed
    /// (only has effect if the linker is `ld`-like).
    pub eh_frame_header: bool,
    /// Is true if the target is an ARM architecture using thumb v1 which allows for
    /// thumb and arm interworking.
    pub has_thumb_interworking: bool,
    /// How to handle split debug information, if at all. Specifying `None` has
    /// target-specific meaning.
    pub split_debuginfo: SplitDebuginfo,
    /// The sanitizers supported by this target
    ///
    /// Note that the support here is at a codegen level. If the machine code with sanitizer
    /// enabled can be generated on this target, but the necessary supporting libraries are not
    /// distributed with the target, the sanitizer should still appear in this list for the target.
    pub supported_sanitizers: SanitizerSet,
    /// If present it's a default value to use for adjusting the C ABI.
    pub default_adjusted_cabi: Option<Abi>,
}
impl Default for TargetOptions {
    /// Creates a set of "sane defaults" for any target. This is still
    /// incomplete, and if used for compilation, will certainly not work.
    /// See the doc comments on the individual `TargetOptions` fields for
    /// what each default means.
    fn default() -> TargetOptions {
        TargetOptions {
            is_builtin: false,
            endian: Endian::Little,
            c_int_width: "32".to_string(),
            os: "none".to_string(),
            env: String::new(),
            vendor: "unknown".to_string(),
            linker_flavor: LinkerFlavor::Gcc,
            // The build system may bake in a default linker at compile time.
            linker: option_env!("CFG_DEFAULT_LINKER").map(|s| s.to_string()),
            lld_flavor: LldFlavor::Ld,
            pre_link_args: LinkArgs::new(),
            post_link_args: LinkArgs::new(),
            link_script: None,
            asm_args: Vec::new(),
            cpu: "generic".to_string(),
            features: String::new(),
            dynamic_linking: false,
            only_cdylib: false,
            executables: false,
            relocation_model: RelocModel::Pic,
            code_model: None,
            tls_model: TlsModel::GeneralDynamic,
            disable_redzone: false,
            eliminate_frame_pointer: true,
            function_sections: true,
            // Unix-style library naming by default.
            dll_prefix: "lib".to_string(),
            dll_suffix: ".so".to_string(),
            exe_suffix: String::new(),
            staticlib_prefix: "lib".to_string(),
            staticlib_suffix: ".a".to_string(),
            os_family: None,
            abi_return_struct_as_int: false,
            is_like_osx: false,
            is_like_solaris: false,
            is_like_windows: false,
            is_like_emscripten: false,
            is_like_msvc: false,
            is_like_fuchsia: false,
            is_like_wasm: false,
            dwarf_version: None,
            linker_is_gnu: false,
            allows_weak_linkage: true,
            has_rpath: false,
            no_default_libraries: true,
            position_independent_executables: false,
            static_position_independent_executables: false,
            needs_plt: false,
            relro_level: RelroLevel::None,
            pre_link_objects: Default::default(),
            post_link_objects: Default::default(),
            pre_link_objects_fallback: Default::default(),
            post_link_objects_fallback: Default::default(),
            crt_objects_fallback: None,
            late_link_args: LinkArgs::new(),
            late_link_args_dynamic: LinkArgs::new(),
            late_link_args_static: LinkArgs::new(),
            link_env: Vec::new(),
            link_env_remove: Vec::new(),
            archive_format: "gnu".to_string(),
            main_needs_argc_argv: true,
            allow_asm: true,
            has_elf_tls: false,
            obj_is_bitcode: false,
            forces_embed_bitcode: false,
            bitcode_llvm_cmdline: String::new(),
            min_atomic_width: None,
            max_atomic_width: None,
            atomic_cas: true,
            panic_strategy: PanicStrategy::Unwind,
            unsupported_abis: vec![],
            crt_static_allows_dylibs: false,
            crt_static_default: false,
            crt_static_respected: false,
            stack_probes: StackProbeType::None,
            min_global_align: None,
            default_codegen_units: None,
            trap_unreachable: true,
            requires_lto: false,
            singlethread: false,
            no_builtins: false,
            default_hidden_visibility: false,
            emit_debug_gdb_scripts: true,
            requires_uwtable: false,
            default_uwtable: false,
            simd_types_indirect: true,
            limit_rdylib_exports: true,
            override_export_symbols: None,
            merge_functions: MergeFunctions::Aliases,
            mcount: "mcount".to_string(),
            llvm_abiname: "".to_string(),
            relax_elf_relocations: false,
            llvm_args: vec![],
            use_ctors_section: false,
            eh_frame_header: true,
            has_thumb_interworking: false,
            split_debuginfo: SplitDebuginfo::Off,
            supported_sanitizers: SanitizerSet::empty(),
            default_adjusted_cabi: None,
        }
    }
}
/// `TargetOptions` being a separate type is basically an implementation detail of `Target` that is
/// used for providing defaults. Perhaps there's a way to merge `TargetOptions` into `Target` so
/// this `Deref` implementation is no longer necessary.
impl Deref for Target {
    type Target = TargetOptions;

    // Lets callers write `target.some_option` instead of `target.options.some_option`.
    fn deref(&self) -> &Self::Target {
        &self.options
    }
}
impl DerefMut for Target {
    // Mutable access to `options` fields directly through a `Target`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.options
    }
}
impl Target {
/// Given a function ABI, turn it into the correct ABI for this target.
pub fn adjust_abi(&self, abi: Abi) -> Abi {
    let windows = self.is_like_windows;
    let x86 = self.arch == "x86";
    match abi {
        // `system` means `stdcall` only on 32-bit x86 Windows; plain `C` everywhere else.
        Abi::System { unwind } => {
            if windows && x86 {
                Abi::Stdcall { unwind }
            } else {
                Abi::C { unwind }
            }
        }
        // These ABI kinds are ignored on non-x86 Windows targets.
        // See https://docs.microsoft.com/en-us/cpp/cpp/argument-passing-and-naming-conventions
        // and the individual pages for __stdcall et al.
        Abi::Stdcall { unwind } | Abi::Thiscall { unwind } => {
            if windows && !x86 { Abi::C { unwind } } else { abi }
        }
        Abi::Fastcall | Abi::Vectorcall => {
            if windows && !x86 { Abi::C { unwind: false } } else { abi }
        }
        // `efiapi` lowers to `win64` on x86_64 and to `C` on every other architecture.
        Abi::EfiApi => {
            if self.arch == "x86_64" { Abi::Win64 } else { Abi::C { unwind: false } }
        }
        // A target may declare a replacement ABI for plain `C`.
        Abi::C { unwind } => self.default_adjusted_cabi.unwrap_or(Abi::C { unwind }),
        other => other,
    }
}
/// Minimum integer size in bits that this target can perform atomic
/// operations on.
pub fn min_atomic_width(&self) -> u64 {
self.min_atomic_width.unwrap_or(8)
}
/// Maximum integer size in bits that this target can perform atomic
/// operations on.
pub fn max_atomic_width(&self) -> u64 {
self.max_atomic_width.unwrap_or_else(|| self.pointer_width.into())
}
pub fn is_abi_supported(&self, abi: Abi) -> bool {
abi.generic() || !self.unsupported_abis.contains(&abi)
}
    /// Loads a target descriptor from a JSON object.
    pub fn from_json(obj: Json) -> Result<Target, String> {
        // While ugly, this code must remain this way to retain
        // compatibility with existing JSON fields and the internal
        // expected naming of the Target and TargetOptions structs.
        // To ensure compatibility is retained, the built-in targets
        // are round-tripped through this code to catch cases where
        // the JSON parser is not updated to match the structs.
        // Looks up a mandatory string field; its absence is a hard error.
        let get_req_field = |name: &str| {
            obj.find(name)
                .and_then(Json::as_string)
                .map(str::to_string)
                .ok_or_else(|| format!("Field {} in target specification is required", name))
        };
        // The four required fields; every other field starts at its default
        // and is then overridden below by whatever the JSON object provides.
        let mut base = Target {
            llvm_target: get_req_field("llvm-target")?,
            pointer_width: get_req_field("target-pointer-width")?
                .parse::<u32>()
                .map_err(|_| "target-pointer-width must be an integer".to_string())?,
            data_layout: get_req_field("data-layout")?,
            arch: get_req_field("arch")?,
            options: Default::default(),
        };
        // Each arm of this macro parses one field "shape" out of the JSON
        // object (plain string, bool, optional integer, a specific enum,
        // string lists, maps of link args / CRT objects, etc.) and stores it
        // into the matching field of `base`. The JSON key is the field name
        // with `_` replaced by `-` unless an explicit key is given. Arms that
        // validate enum-like values expand to a `Result` which the call sites
        // below propagate with `?`.
        macro_rules! key {
            ($key_name:ident) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(s) = obj.find(&name).and_then(Json::as_string) {
                    base.$key_name = s.to_string();
                }
            } );
            ($key_name:ident = $json_name:expr) => ( {
                let name = $json_name;
                if let Some(s) = obj.find(&name).and_then(Json::as_string) {
                    base.$key_name = s.to_string();
                }
            } );
            ($key_name:ident, bool) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(s) = obj.find(&name).and_then(Json::as_boolean) {
                    base.$key_name = s;
                }
            } );
            // Option<u32> is currently only used for the DWARF version, so
            // this arm also range-checks the value.
            ($key_name:ident, Option<u32>) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(s) = obj.find(&name).and_then(Json::as_u64) {
                    if s < 1 || s > 5 {
                        return Err("Not a valid DWARF version number".to_string());
                    }
                    base.$key_name = Some(s as u32);
                }
            } );
            ($key_name:ident, Option<u64>) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(s) = obj.find(&name).and_then(Json::as_u64) {
                    base.$key_name = Some(s);
                }
            } );
            ($key_name:ident, MergeFunctions) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match s.parse::<MergeFunctions>() {
                        Ok(mergefunc) => base.$key_name = mergefunc,
                        _ => return Some(Err(format!("'{}' is not a valid value for \
                                                      merge-functions. Use 'disabled', \
                                                      'trampolines', or 'aliases'.",
                                                      s))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            ($key_name:ident, RelocModel) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match s.parse::<RelocModel>() {
                        Ok(relocation_model) => base.$key_name = relocation_model,
                        _ => return Some(Err(format!("'{}' is not a valid relocation model. \
                                                      Run `rustc --print relocation-models` to \
                                                      see the list of supported values.", s))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            ($key_name:ident, CodeModel) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match s.parse::<CodeModel>() {
                        Ok(code_model) => base.$key_name = Some(code_model),
                        _ => return Some(Err(format!("'{}' is not a valid code model. \
                                                      Run `rustc --print code-models` to \
                                                      see the list of supported values.", s))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            ($key_name:ident, TlsModel) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match s.parse::<TlsModel>() {
                        Ok(tls_model) => base.$key_name = tls_model,
                        _ => return Some(Err(format!("'{}' is not a valid TLS model. \
                                                      Run `rustc --print tls-models` to \
                                                      see the list of supported values.", s))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            ($key_name:ident, PanicStrategy) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match s {
                        "unwind" => base.$key_name = PanicStrategy::Unwind,
                        "abort" => base.$key_name = PanicStrategy::Abort,
                        _ => return Some(Err(format!("'{}' is not a valid value for \
                                                      panic-strategy. Use 'unwind' or 'abort'.",
                                                     s))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            ($key_name:ident, RelroLevel) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match s.parse::<RelroLevel>() {
                        Ok(level) => base.$key_name = level,
                        _ => return Some(Err(format!("'{}' is not a valid value for \
                                                      relro-level. Use 'full', 'partial, or 'off'.",
                                                      s))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            ($key_name:ident, SplitDebuginfo) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match s.parse::<SplitDebuginfo>() {
                        Ok(level) => base.$key_name = level,
                        _ => return Some(Err(format!("'{}' is not a valid value for \
                                                      split-debuginfo. Use 'off' or 'dsymutil'.",
                                                      s))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            ($key_name:ident, list) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(v) = obj.find(&name).and_then(Json::as_array) {
                    base.$key_name = v.iter()
                        .map(|a| a.as_string().unwrap().to_string())
                        .collect();
                }
            } );
            ($key_name:ident, opt_list) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(v) = obj.find(&name).and_then(Json::as_array) {
                    base.$key_name = Some(v.iter()
                        .map(|a| a.as_string().unwrap().to_string())
                        .collect());
                }
            } );
            ($key_name:ident, optional) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(o) = obj.find(&name[..]) {
                    base.$key_name = o
                        .as_string()
                        .map(|s| s.to_string() );
                }
            } );
            ($key_name:ident = $json_name:expr, optional) => ( {
                let name = $json_name;
                if let Some(o) = obj.find(name) {
                    base.$key_name = o
                        .as_string()
                        .map(|s| s.to_string() );
                }
            } );
            ($key_name:ident, LldFlavor) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    if let Some(flavor) = LldFlavor::from_str(&s) {
                        base.$key_name = flavor;
                    } else {
                        return Some(Err(format!(
                            "'{}' is not a valid value for lld-flavor. \
                             Use 'darwin', 'gnu', 'link' or 'wasm.",
                            s)))
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            ($key_name:ident, LinkerFlavor) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match LinkerFlavor::from_str(s) {
                        Some(linker_flavor) => base.$key_name = linker_flavor,
                        _ => return Some(Err(format!("'{}' is not a valid value for linker-flavor. \
                                                      Use {}", s, LinkerFlavor::one_of()))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            // StackProbeType does its own JSON decoding; only the error gets
            // wrapped with the offending key name here.
            ($key_name:ident, StackProbeType) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| match StackProbeType::from_json(o) {
                    Ok(v) => {
                        base.$key_name = v;
                        Some(Ok(()))
                    },
                    Err(s) => Some(Err(
                        format!("`{:?}` is not a valid value for `{}`: {}", o, name, s)
                    )),
                }).unwrap_or(Ok(()))
            } );
            // Sanitizers are given as an array of names and OR-ed into the
            // bitset one by one.
            ($key_name:ident, SanitizerSet) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_array()).and_then(|a| {
                    for s in a {
                        base.$key_name |= match s.as_string() {
                            Some("address") => SanitizerSet::ADDRESS,
                            Some("leak") => SanitizerSet::LEAK,
                            Some("memory") => SanitizerSet::MEMORY,
                            Some("thread") => SanitizerSet::THREAD,
                            Some("hwaddress") => SanitizerSet::HWADDRESS,
                            Some(s) => return Some(Err(format!("unknown sanitizer {}", s))),
                            _ => return Some(Err(format!("not a string: {:?}", s))),
                        };
                    }
                    Some(Ok(()))
                }).unwrap_or(Ok(()))
            } );
            ($key_name:ident, crt_objects_fallback) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match s.parse::<CrtObjectsFallback>() {
                        Ok(fallback) => base.$key_name = Some(fallback),
                        _ => return Some(Err(format!("'{}' is not a valid CRT objects fallback. \
                                                      Use 'musl', 'mingw' or 'wasm'", s))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
            // A map from CRT object kind (e.g. "dynamic-pic-exe") to a list of
            // object file names.
            ($key_name:ident, link_objects) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(val) = obj.find(&name[..]) {
                    let obj = val.as_object().ok_or_else(|| format!("{}: expected a \
                        JSON object with fields per CRT object kind.", name))?;
                    let mut args = CrtObjects::new();
                    for (k, v) in obj {
                        let kind = LinkOutputKind::from_str(&k).ok_or_else(|| {
                            format!("{}: '{}' is not a valid value for CRT object kind. \
                                     Use '(dynamic,static)-(nopic,pic)-exe' or \
                                     '(dynamic,static)-dylib' or 'wasi-reactor-exe'", name, k)
                        })?;
                        let v = v.as_array().ok_or_else(||
                            format!("{}.{}: expected a JSON array", name, k)
                        )?.iter().enumerate()
                            .map(|(i,s)| {
                                let s = s.as_string().ok_or_else(||
                                    format!("{}.{}[{}]: expected a JSON string", name, k, i))?;
                                Ok(s.to_owned())
                            })
                            .collect::<Result<Vec<_>, String>>()?;
                        args.insert(kind, v);
                    }
                    base.$key_name = args;
                }
            } );
            // A map from linker flavor (e.g. "gcc") to a list of link args.
            ($key_name:ident, link_args) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(val) = obj.find(&name[..]) {
                    let obj = val.as_object().ok_or_else(|| format!("{}: expected a \
                        JSON object with fields per linker-flavor.", name))?;
                    let mut args = LinkArgs::new();
                    for (k, v) in obj {
                        let flavor = LinkerFlavor::from_str(&k).ok_or_else(|| {
                            format!("{}: '{}' is not a valid value for linker-flavor. \
                                     Use 'em', 'gcc', 'ld' or 'msvc'", name, k)
                        })?;
                        let v = v.as_array().ok_or_else(||
                            format!("{}.{}: expected a JSON array", name, k)
                        )?.iter().enumerate()
                            .map(|(i,s)| {
                                let s = s.as_string().ok_or_else(||
                                    format!("{}.{}[{}]: expected a JSON string", name, k, i))?;
                                Ok(s.to_owned())
                            })
                            .collect::<Result<Vec<_>, String>>()?;
                        args.insert(flavor, v);
                    }
                    base.$key_name = args;
                }
            } );
            // An array of "KEY=VALUE" strings; malformed entries are silently
            // skipped.
            ($key_name:ident, env) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                if let Some(a) = obj.find(&name[..]).and_then(|o| o.as_array()) {
                    for o in a {
                        if let Some(s) = o.as_string() {
                            let p = s.split('=').collect::<Vec<_>>();
                            if p.len() == 2 {
                                let k = p[0].to_string();
                                let v = p[1].to_string();
                                base.$key_name.push((k, v));
                            }
                        }
                    }
                }
            } );
            ($key_name:ident, Option<Abi>) => ( {
                let name = (stringify!($key_name)).replace("_", "-");
                obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
                    match lookup_abi(s) {
                        Some(abi) => base.$key_name = Some(abi),
                        _ => return Some(Err(format!("'{}' is not a valid value for abi", s))),
                    }
                    Some(Ok(()))
                })).unwrap_or(Ok(()))
            } );
        }
        // Endianness is handled outside the macro since it goes through
        // `FromStr` and propagates its own error type.
        if let Some(s) = obj.find("target-endian").and_then(Json::as_string) {
            base.endian = s.parse()?;
        }
        key!(is_builtin, bool);
        key!(c_int_width = "target-c-int-width");
        key!(os);
        key!(env);
        key!(vendor);
        key!(linker_flavor, LinkerFlavor)?;
        key!(linker, optional);
        key!(lld_flavor, LldFlavor)?;
        key!(pre_link_objects, link_objects);
        key!(post_link_objects, link_objects);
        key!(pre_link_objects_fallback, link_objects);
        key!(post_link_objects_fallback, link_objects);
        key!(crt_objects_fallback, crt_objects_fallback)?;
        key!(pre_link_args, link_args);
        key!(late_link_args, link_args);
        key!(late_link_args_dynamic, link_args);
        key!(late_link_args_static, link_args);
        key!(post_link_args, link_args);
        key!(link_script, optional);
        key!(link_env, env);
        key!(link_env_remove, list);
        key!(asm_args, list);
        key!(cpu);
        key!(features);
        key!(dynamic_linking, bool);
        key!(only_cdylib, bool);
        key!(executables, bool);
        key!(relocation_model, RelocModel)?;
        key!(code_model, CodeModel)?;
        key!(tls_model, TlsModel)?;
        key!(disable_redzone, bool);
        key!(eliminate_frame_pointer, bool);
        key!(function_sections, bool);
        key!(dll_prefix);
        key!(dll_suffix);
        key!(exe_suffix);
        key!(staticlib_prefix);
        key!(staticlib_suffix);
        key!(os_family = "target-family", optional);
        key!(abi_return_struct_as_int, bool);
        key!(is_like_osx, bool);
        key!(is_like_solaris, bool);
        key!(is_like_windows, bool);
        key!(is_like_msvc, bool);
        key!(is_like_emscripten, bool);
        key!(is_like_fuchsia, bool);
        key!(is_like_wasm, bool);
        key!(dwarf_version, Option<u32>);
        key!(linker_is_gnu, bool);
        key!(allows_weak_linkage, bool);
        key!(has_rpath, bool);
        key!(no_default_libraries, bool);
        key!(position_independent_executables, bool);
        key!(static_position_independent_executables, bool);
        key!(needs_plt, bool);
        key!(relro_level, RelroLevel)?;
        key!(archive_format);
        key!(allow_asm, bool);
        key!(main_needs_argc_argv, bool);
        key!(has_elf_tls, bool);
        key!(obj_is_bitcode, bool);
        key!(forces_embed_bitcode, bool);
        key!(bitcode_llvm_cmdline);
        key!(max_atomic_width, Option<u64>);
        key!(min_atomic_width, Option<u64>);
        key!(atomic_cas, bool);
        key!(panic_strategy, PanicStrategy)?;
        key!(crt_static_allows_dylibs, bool);
        key!(crt_static_default, bool);
        key!(crt_static_respected, bool);
        key!(stack_probes, StackProbeType)?;
        key!(min_global_align, Option<u64>);
        key!(default_codegen_units, Option<u64>);
        key!(trap_unreachable, bool);
        key!(requires_lto, bool);
        key!(singlethread, bool);
        key!(no_builtins, bool);
        key!(default_hidden_visibility, bool);
        key!(emit_debug_gdb_scripts, bool);
        key!(requires_uwtable, bool);
        key!(default_uwtable, bool);
        key!(simd_types_indirect, bool);
        key!(limit_rdylib_exports, bool);
        key!(override_export_symbols, opt_list);
        key!(merge_functions, MergeFunctions)?;
        key!(mcount = "target-mcount");
        key!(llvm_abiname);
        key!(relax_elf_relocations, bool);
        key!(llvm_args, list);
        key!(use_ctors_section, bool);
        key!(eh_frame_header, bool);
        key!(has_thumb_interworking, bool);
        key!(split_debuginfo, SplitDebuginfo)?;
        key!(supported_sanitizers, SanitizerSet)?;
        key!(default_adjusted_cabi, Option<Abi>)?;
        // NB: The old name is deprecated, but support for it is retained for
        // compatibility.
        for name in ["abi-blacklist", "unsupported-abis"].iter() {
            if let Some(array) = obj.find(name).and_then(Json::as_array) {
                for name in array.iter().filter_map(|abi| abi.as_string()) {
                    match lookup_abi(name) {
                        Some(abi) => {
                            if abi.generic() {
                                return Err(format!(
                                    "The ABI \"{}\" is considered to be supported on all \
                                    targets and cannot be marked unsupported",
                                    abi
                                ));
                            }
                            base.unsupported_abis.push(abi)
                        }
                        None => {
                            return Err(format!(
                                "Unknown ABI \"{}\" in target specification",
                                name
                            ));
                        }
                    }
                }
            }
        }
        Ok(base)
    }
/// Search RUST_TARGET_PATH for a JSON file specifying the given target
/// triple. Note that it could also just be a bare filename already, so also
/// check for that. If one of the hardcoded targets we know about, just
/// return it directly.
///
/// The error string could come from any of the APIs called, including
/// filesystem access and JSON decoding.
pub fn search(target_triple: &TargetTriple) -> Result<Target, String> {
use rustc_serialize::json;
use std::env;
use std::fs;
fn load_file(path: &Path) -> Result<Target, String> {
let contents = fs::read(path).map_err(|e| e.to_string())?;
let obj = json::from_reader(&mut &contents[..]).map_err(|e| e.to_string())?;
Target::from_json(obj)
}
match *target_triple {
TargetTriple::TargetTriple(ref target_triple) => {
// check if triple is in list of built-in targets
if let Some(t) = load_builtin(target_triple) {
return Ok(t);
}
// search for a file named `target_triple`.json in RUST_TARGET_PATH
let path = {
let mut target = target_triple.to_string();
target.push_str(".json");
PathBuf::from(target)
};
let target_path = env::var_os("RUST_TARGET_PATH").unwrap_or_default();
// FIXME 16351: add a sane default search path?
for dir in env::split_paths(&target_path) {
let p = dir.join(&path);
if p.is_file() {
return load_file(&p);
}
}
Err(format!("Could not find specification for target {:?}", target_triple))
}
TargetTriple::TargetPath(ref target_path) => {
if target_path.is_file() {
return load_file(&target_path);
}
Err(format!("Target path {:?} is not a valid file", target_path))
}
}
}
}
impl ToJson for Target {
    fn to_json(&self) -> Json {
        let mut d = BTreeMap::new();
        // A default instance is built so option fields can be emitted only
        // when they differ from their default, keeping specs minimal.
        let default: TargetOptions = Default::default();
        // Serializes a mandatory `Target` field, optionally under an explicit
        // JSON key name.
        macro_rules! target_val {
            ($attr:ident) => {{
                let name = (stringify!($attr)).replace("_", "-");
                d.insert(name, self.$attr.to_json());
            }};
            ($attr:ident, $key_name:expr) => {{
                let name = $key_name;
                d.insert(name.to_string(), self.$attr.to_json());
            }};
        }
        // Serializes a `TargetOptions` field only when it is non-default.
        // The `link_args - ...` and `env - ...` forms convert the respective
        // maps / key-value pairs into their JSON representations (these must
        // stay in sync with the parsing arms in `Target::from_json`).
        macro_rules! target_option_val {
            ($attr:ident) => {{
                let name = (stringify!($attr)).replace("_", "-");
                if default.$attr != self.$attr {
                    d.insert(name, self.$attr.to_json());
                }
            }};
            ($attr:ident, $key_name:expr) => {{
                let name = $key_name;
                if default.$attr != self.$attr {
                    d.insert(name.to_string(), self.$attr.to_json());
                }
            }};
            (link_args - $attr:ident) => {{
                let name = (stringify!($attr)).replace("_", "-");
                if default.$attr != self.$attr {
                    let obj = self
                        .$attr
                        .iter()
                        .map(|(k, v)| (k.desc().to_owned(), v.clone()))
                        .collect::<BTreeMap<_, _>>();
                    d.insert(name, obj.to_json());
                }
            }};
            (env - $attr:ident) => {{
                let name = (stringify!($attr)).replace("_", "-");
                if default.$attr != self.$attr {
                    let obj = self
                        .$attr
                        .iter()
                        .map(|&(ref k, ref v)| k.clone() + "=" + &v)
                        .collect::<Vec<_>>();
                    d.insert(name, obj.to_json());
                }
            }};
        }
        // The four mandatory fields are always emitted.
        target_val!(llvm_target);
        d.insert("target-pointer-width".to_string(), self.pointer_width.to_string().to_json());
        target_val!(arch);
        target_val!(data_layout);
        target_option_val!(is_builtin);
        target_option_val!(endian, "target-endian");
        target_option_val!(c_int_width, "target-c-int-width");
        target_option_val!(os);
        target_option_val!(env);
        target_option_val!(vendor);
        target_option_val!(linker_flavor);
        target_option_val!(linker);
        target_option_val!(lld_flavor);
        target_option_val!(pre_link_objects);
        target_option_val!(post_link_objects);
        target_option_val!(pre_link_objects_fallback);
        target_option_val!(post_link_objects_fallback);
        target_option_val!(crt_objects_fallback);
        target_option_val!(link_args - pre_link_args);
        target_option_val!(link_args - late_link_args);
        target_option_val!(link_args - late_link_args_dynamic);
        target_option_val!(link_args - late_link_args_static);
        target_option_val!(link_args - post_link_args);
        target_option_val!(link_script);
        target_option_val!(env - link_env);
        target_option_val!(link_env_remove);
        target_option_val!(asm_args);
        target_option_val!(cpu);
        target_option_val!(features);
        target_option_val!(dynamic_linking);
        target_option_val!(only_cdylib);
        target_option_val!(executables);
        target_option_val!(relocation_model);
        target_option_val!(code_model);
        target_option_val!(tls_model);
        target_option_val!(disable_redzone);
        target_option_val!(eliminate_frame_pointer);
        target_option_val!(function_sections);
        target_option_val!(dll_prefix);
        target_option_val!(dll_suffix);
        target_option_val!(exe_suffix);
        target_option_val!(staticlib_prefix);
        target_option_val!(staticlib_suffix);
        target_option_val!(os_family, "target-family");
        target_option_val!(abi_return_struct_as_int);
        target_option_val!(is_like_osx);
        target_option_val!(is_like_solaris);
        target_option_val!(is_like_windows);
        target_option_val!(is_like_msvc);
        target_option_val!(is_like_emscripten);
        target_option_val!(is_like_fuchsia);
        target_option_val!(is_like_wasm);
        target_option_val!(dwarf_version);
        target_option_val!(linker_is_gnu);
        target_option_val!(allows_weak_linkage);
        target_option_val!(has_rpath);
        target_option_val!(no_default_libraries);
        target_option_val!(position_independent_executables);
        target_option_val!(static_position_independent_executables);
        target_option_val!(needs_plt);
        target_option_val!(relro_level);
        target_option_val!(archive_format);
        target_option_val!(allow_asm);
        target_option_val!(main_needs_argc_argv);
        target_option_val!(has_elf_tls);
        target_option_val!(obj_is_bitcode);
        target_option_val!(forces_embed_bitcode);
        target_option_val!(bitcode_llvm_cmdline);
        target_option_val!(min_atomic_width);
        target_option_val!(max_atomic_width);
        target_option_val!(atomic_cas);
        target_option_val!(panic_strategy);
        target_option_val!(crt_static_allows_dylibs);
        target_option_val!(crt_static_default);
        target_option_val!(crt_static_respected);
        target_option_val!(stack_probes);
        target_option_val!(min_global_align);
        target_option_val!(default_codegen_units);
        target_option_val!(trap_unreachable);
        target_option_val!(requires_lto);
        target_option_val!(singlethread);
        target_option_val!(no_builtins);
        target_option_val!(default_hidden_visibility);
        target_option_val!(emit_debug_gdb_scripts);
        target_option_val!(requires_uwtable);
        target_option_val!(default_uwtable);
        target_option_val!(simd_types_indirect);
        target_option_val!(limit_rdylib_exports);
        target_option_val!(override_export_symbols);
        target_option_val!(merge_functions);
        target_option_val!(mcount, "target-mcount");
        target_option_val!(llvm_abiname);
        target_option_val!(relax_elf_relocations);
        target_option_val!(llvm_args);
        target_option_val!(use_ctors_section);
        target_option_val!(eh_frame_header);
        target_option_val!(has_thumb_interworking);
        target_option_val!(split_debuginfo);
        target_option_val!(supported_sanitizers);
        // These two fields need custom serialization, so they are handled
        // outside the macro.
        if let Some(abi) = self.default_adjusted_cabi {
            d.insert("default-adjusted-cabi".to_string(), Abi::name(abi).to_json());
        }
        if default.unsupported_abis != self.unsupported_abis {
            d.insert(
                "unsupported-abis".to_string(),
                self.unsupported_abis
                    .iter()
                    .map(|&name| Abi::name(name).to_json())
                    .collect::<Vec<_>>()
                    .to_json(),
            );
        }
        Json::Object(d)
    }
}
/// Either a target triple string or a path to a JSON file.
#[derive(PartialEq, Clone, Debug, Hash, Encodable, Decodable)]
pub enum TargetTriple {
    /// A target triple string, e.g. `x86_64-unknown-linux-gnu`.
    TargetTriple(String),
    /// A path to a JSON target-specification file.
    TargetPath(PathBuf),
}
impl TargetTriple {
/// Creates a target triple from the passed target triple string.
pub fn from_triple(triple: &str) -> Self {
TargetTriple::TargetTriple(triple.to_string())
}
/// Creates a target triple from the passed target path.
pub fn from_path(path: &Path) -> Result<Self, io::Error> {
let canonicalized_path = path.canonicalize()?;
Ok(TargetTriple::TargetPath(canonicalized_path))
}
/// Returns a string triple for this target.
///
/// If this target is a path, the file name (without extension) is returned.
pub fn triple(&self) -> &str {
match *self {
TargetTriple::TargetTriple(ref triple) => triple,
TargetTriple::TargetPath(ref path) => path
.file_stem()
.expect("target path must not be empty")
.to_str()
.expect("target path must be valid unicode"),
}
}
/// Returns an extended string triple for this target.
///
/// If this target is a path, a hash of the path is appended to the triple returned
/// by `triple()`.
pub fn debug_triple(&self) -> String {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let triple = self.triple();
if let TargetTriple::TargetPath(ref path) = *self {
let mut hasher = DefaultHasher::new();
path.hash(&mut hasher);
let hash = hasher.finish();
format!("{}-{}", triple, hash)
} else {
triple.to_owned()
}
}
}
impl fmt::Display for TargetTriple {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Display the extended (debug) triple so path-based targets stay
        // unambiguous.
        f.write_str(&self.debug_triple())
    }
}
| 40.033012 | 100 | 0.57629 |
e4a09be3fbeef85943ad5e86d8bdea7c6dda5b48
| 2,259 |
//! # types
//!
//! Defines the various types and aliases used or exposed by the simple_redis library.
//!
#[cfg(test)]
#[path = "./types_test.rs"]
mod types_test;
use std::error::Error;
use std::fmt;
use std::fmt::Display;
#[derive(Debug)]
/// Holds the error information
pub enum RedisError {
    /// Root redis error, wrapping the error produced by the underlying redis crate
    RedisError(redis::RedisError),
    /// Description text of the error reason (static, library-defined message)
    Description(&'static str),
}
impl Display for RedisError {
    /// Formats the value using the given formatter.
    fn fmt(&self, format: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Delegate to the wrapped value's own Display implementation.
        match self {
            Self::RedisError(cause) => Display::fmt(cause, format),
            Self::Description(description) => Display::fmt(description, format),
        }
    }
}
impl Error for RedisError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        // Only the variant wrapping a redis-crate error carries an
        // underlying cause; plain descriptions have none.
        if let Self::RedisError(error) = self {
            Some(error)
        } else {
            None
        }
    }
}
/// Defines a redis command argument
///
/// Implemented for every primitive type that can be stringified and sent to
/// redis as a command argument.
pub trait RedisArg: Sized + ToString {}
// Helper macro: marks a type as usable as a redis command argument.
macro_rules! as_redis_arg {
    ($t:ty) => {
        impl RedisArg for $t {}
    };
}
// String slices plus all primitive integer, float and boolean types may be
// used directly as command arguments.
impl<'a> RedisArg for &'a str {}
as_redis_arg!(u8);
as_redis_arg!(i8);
as_redis_arg!(i16);
as_redis_arg!(u16);
as_redis_arg!(i32);
as_redis_arg!(u32);
as_redis_arg!(i64);
as_redis_arg!(u64);
as_redis_arg!(i128);
as_redis_arg!(u128);
as_redis_arg!(f32);
as_redis_arg!(f64);
as_redis_arg!(isize);
as_redis_arg!(usize);
as_redis_arg!(bool);
/// PubSub message as delivered by the underlying redis crate
pub type Message = redis::Msg;
/// Redis result which either holds a value or a Redis error
pub type RedisResult<T> = Result<T, RedisError>;
/// Holds empty result or error; used by commands with no meaningful return value
pub type RedisEmptyResult = RedisResult<()>;
/// Holds string result or error
pub type RedisStringResult = RedisResult<String>;
/// Holds bool result or error
pub type RedisBoolResult = RedisResult<bool>;
#[derive(Debug, Clone, Copy, Default)]
/// Enable to modify blocking operations.
pub struct Interrupts {
    /// Notify blocking operation to stop
    pub stop: bool,
    /// Next polling time in milliseconds
    pub next_polling_time: Option<u64>,
}
impl Interrupts {
/// Returns a new instance.
pub fn new() -> Interrupts {
Default::default()
}
}
| 22.818182 | 86 | 0.656485 |
7688d09c56bca936a885a0da5de260ec6f670160
| 1,447 |
fn main() {
    // Sample sentence to run the pig-latin conversion on.
    let mut sentence = String::from("Apple is a great company");
    conv_pig_latin(&mut sentence);
}
/*
Convert strings to pig latin. The first consonant of each word is moved to
the end of the word and “ay” is added, so “first” becomes “irst-fay.”
Words that start with a vowel have “hay” added to the end instead
(“apple” becomes “apple-hay”). Keep in mind the details about UTF-8 encoding!
*/
/// Splits `st` on single spaces, prints each word as it is visited, converts
/// every word to pig latin, and finally prints all converted words.
///
/// Fixes over the previous version:
/// * empty words (produced by consecutive, leading or trailing spaces) no
///   longer panic with an out-of-bounds index — they pass through unchanged;
/// * the ten-arm vowel `match` is collapsed into a single character-set test.
/// Output for well-formed input is unchanged (the `_` separator is kept).
fn conv_pig_latin(st: &mut str) {
    let mut result: Vec<String> = Vec::new();
    for word in st.split(' ') {
        println!("{}", word); // trace of the word being processed (kept from original)
        result.push(pig_latin_word(word));
    }
    for word in result {
        println!("{} ", word);
    }
}

/// Converts a single word to pig latin:
/// * vowel-initial words get `_hay` appended ("apple" -> "apple_hay"),
/// * consonant-initial words have their first character moved behind a `_`
///   with `ay` appended ("first" -> "irst_fay"),
/// * empty words are returned unchanged.
fn pig_latin_word(word: &str) -> String {
    let mut chars = word.chars();
    match chars.next() {
        // Empty word (can occur with consecutive spaces): nothing to convert.
        None => String::new(),
        // Word starts with a vowel: append the "_hay" suffix.
        Some(first) if "aeiouAEIOU".contains(first) => format!("{}_hay", word),
        // Word starts with a consonant: move it to the end, then add "ay".
        // `chars.as_str()` yields the rest of the word after the first char,
        // which stays correct for multi-byte UTF-8 characters.
        Some(first) => format!("{}_{}ay", chars.as_str(), first),
    }
}
| 32.155556 | 77 | 0.492053 |
696e3c560dfa38c78cdf9fe14a3dd4396e1ff36d
| 21,896 |
// Copyright 2020 Oxide Computer Company
/*!
* Dropshot is a general-purpose crate for exposing REST APIs from a Rust
* program. Planned highlights include:
*
* * Suitability for production use on a largely untrusted network.
* Dropshot-based systems should be high-performing, reliable, debuggable, and
* secure against basic denial of service attacks (intentional or otherwise).
*
* * First-class OpenAPI support, in the form of precise OpenAPI specs generated
* directly from code. This works because the functions that serve HTTP
* resources consume arguments and return values of specific types from which
* a schema can be statically generated.
*
* * Ease of integrating into a diverse team. An important use case for
* Dropshot consumers is to have a team of engineers where individuals might
* add a few endpoints at a time to a complex server, and it should be
* relatively easy to do this. Part of this means an emphasis on the
* principle of least surprise: like Rust itself, we may choose abstractions
* that require more time to learn up front in order to make it harder to
* accidentally build systems that will not perform, will crash in corner
* cases, etc.
*
* By "REST API", we primarily mean an API built atop existing HTTP primitives,
* organized into hierarchical resources, and providing consistent, idempotent
* mechanisms to create, update, list, and delete those resources. "REST" can
* mean a range of things depending on who you talk to, and some people are
* dogmatic about what is or isn't RESTy. We find such dogma not only
* unhelpful, but poorly defined. (Consider such a simple case as trying to
* update a resource in a REST API. Popular APIs sometimes use `PUT`, `PATCH`,
* or `POST` for the verb; and JSON Merge Patch or JSON Patch as the format.
* (sometimes without even knowing it!). There's hardly a clear standard, yet
* this is a really basic operation for any REST API.)
*
* For a discussion of alternative crates considered, see Oxide RFD 10.
*
* We hope Dropshot will be fairly general-purpose, but it's primarily intended
* to address the needs of the Oxide control plane.
*
*
* ## Usage
*
* The bare minimum might look like this:
*
* ```no_run
* use dropshot::ApiDescription;
* use dropshot::ConfigDropshot;
* use dropshot::ConfigLogging;
* use dropshot::ConfigLoggingLevel;
* use dropshot::HttpServer;
* use std::sync::Arc;
*
* #[tokio::main]
* async fn main() -> Result<(), String> {
* // Set up a logger.
* let log =
* ConfigLogging::StderrTerminal {
* level: ConfigLoggingLevel::Info,
* }
* .to_logger("minimal-example")
* .map_err(|e| e.to_string())?;
*
* // Describe the API.
* let mut api = ApiDescription::new();
* // Register API functions -- see detailed example or ApiDescription docs.
*
* // Start the server.
* let mut server =
* HttpServer::new(
* &ConfigDropshot {
* bind_address: "127.0.0.1:0".parse().unwrap(),
* request_body_max_bytes: 1024,
* },
* api,
* Arc::new(()),
* &log,
* )
* .map_err(|error| format!("failed to start server: {}", error))?;
*
* let server_task = server.run();
* server.wait_for_shutdown(server_task).await
* }
* ```
*
* This server returns a 404 for all resources because no API functions were
* registered. See `examples/basic.rs` for a simple, documented example that
* provides a few resources using shared state.
*
* For a given `ApiDescription`, you can also print out an OpenAPI spec
* describing the API. See [`ApiDescription::openapi`].
*
*
* ## API Handler Functions
*
* HTTP talks about **resources**. For a REST API, we often talk about
* **endpoints** or **operations**, which are identified by a combination of the
* HTTP method and the URI path.
*
* Example endpoints for a resource called a "project" might include:
*
* * `GET /projects` (list projects)
* * `POST /projects` (one way to create a project)
* * `GET /projects/my_project` (fetch one project)
* * `PUT /projects/my_project` (update (or possibly create) a project)
* * `DELETE /projects/my_project` (delete a project)
*
* With Dropshot, an incoming request for a given API endpoint is handled by a
* particular Rust function. That function is called an **entrypoint**, an
* **endpoint handler**, or a **handler function**. When you set up a Dropshot
* server, you configure the set of available API endpoints and which functions
* will handle each one by setting up an [`ApiDescription`].
*
* Typically, you define an endpoint with a handler function by using the
* [`endpoint`] macro. Here's an example of a single endpoint that lists
* a hardcoded project:
*
* ```
* use dropshot::endpoint;
* use dropshot::ApiDescription;
* use dropshot::HttpError;
* use dropshot::HttpResponseOk;
* use dropshot::RequestContext;
* use http::Method;
* use schemars::JsonSchema;
* use serde::Serialize;
* use std::sync::Arc;
*
* /** Represents a project in our API */
* #[derive(Serialize, JsonSchema)]
* struct Project {
* /** name of the project */
* name: String,
* }
*
* /** Fetch a project. */
* #[endpoint {
* method = GET,
* path = "/projects/project1",
* }]
* async fn myapi_projects_get_project(
* rqctx: Arc<RequestContext>,
* ) -> Result<HttpResponseOk<Project>, HttpError>
* {
* let project = Project { name: String::from("project1") };
* Ok(HttpResponseOk(project))
* }
*
* fn main() {
* let mut api = ApiDescription::new();
*
* /*
* * Register our endpoint and its handler function. The "endpoint" macro
* * specifies the HTTP method and URI path that identify the endpoint,
* * allowing this metadata to live right alongside the handler function.
* */
* api.register(myapi_projects_get_project).unwrap();
*
* /* ... (use `api` to set up an `HttpServer` ) */
* }
*
* ```
*
* There's quite a lot going on here:
*
* * The `endpoint` macro specifies the HTTP method and URI path. When we
* invoke `ApiDescription::register()`, this information is used to register
* the endpoint that will be handled by our function.
* * The signature of our function indicates that on success, it returns a
* `HttpResponseOk<Project>`. This means that the function will
* return an HTTP 200 status code ("OK") with an object of type `Project`.
* * The function itself has a Rustdoc comment that will be used to document
* this _endpoint_ in the OpenAPI schema.
*
* From this information, Dropshot can generate an OpenAPI specification for
* this API that describes the endpoint (which OpenAPI calls an "operation"),
* its documentation, the possible responses that it can return, and the schema
* for each type of response (which can also include documentation). This is
* largely known statically, though generated at runtime.
*
*
* ### `#[endpoint { ... }]` attribute parameters
*
 * The `endpoint` attribute accepts parameters that affect the operation of
* the endpoint as well as metadata that appears in the OpenAPI description
* of it.
*
* ```ignore
* #[endpoint {
* // Required fields
* method = { DELETE | GET | PATCH | POST | PUT },
* path = "/path/name/with/{named}/{variables}",
*
* // Optional fields
* tags = [ "all", "your", "OpenAPI", "tags" ],
* }]
* ```
*
* This is where you specify the HTTP method and path (including path variables)
* for the API endpoint. These are used as part of endpoint registration and
* appear in the OpenAPI spec output.
*
* The tags field is used to categorize API endpoints and only impacts the
* OpenAPI spec output.
*
*
* ### Function parameters
*
* In general, a handler function looks like this:
*
* ```ignore
* async fn f(
* rqctx: Arc<RequestContext>,
* [query_params: Query<Q>,]
* [path_params: Path<P>,]
* [body_param: TypedBody<J>,]
* [body_param: UntypedBody<J>,]
* ) -> Result<HttpResponse*, HttpError>
* ```
*
* Other than the RequestContext, parameters may appear in any order. The types
* `Query`, `Path`, `TypedBody`, and `UntypedBody` are called **Extractors**
* because they cause information to be pulled out of the request and made
* available to the handler function.
*
* * [`Query`]`<Q>` extracts parameters from a query string, deserializing them
* into an instance of type `Q`. `Q` must implement `serde::Deserialize` and
* `schemars::JsonSchema`.
* * [`Path`]`<P>` extracts parameters from HTTP path, deserializing them into
* an instance of type `P`. `P` must implement `serde::Deserialize` and
* `schemars::JsonSchema`.
* * [`TypedBody`]`<J>` extracts content from the request body by parsing the
* body as JSON and deserializing it into an instance of type `J`. `J` must
* implement `serde::Deserialize` and `schemars::JsonSchema`.
* * [`UntypedBody`] extracts the raw bytes of the request body.
*
* If the handler takes a `Query<Q>`, `Path<P>`, `TypedBody<J>`, or
* `UntypedBody`, and the corresponding extraction cannot be completed, the
* request fails with status code 400 and an error message reflecting a
* validation error.
*
* As with any serde-deserializable type, you can make fields optional by having
* the corresponding property of the type be an `Option`. Here's an example of
* an endpoint that takes two arguments via query parameters: "limit", a
* required u32, and "marker", an optional string:
*
* ```
* use http::StatusCode;
* use dropshot::HttpError;
* use dropshot::TypedBody;
* use dropshot::Query;
* use dropshot::RequestContext;
* use hyper::Body;
* use hyper::Response;
* use schemars::JsonSchema;
* use serde::Deserialize;
* use std::sync::Arc;
*
* #[derive(Deserialize, JsonSchema)]
* struct MyQueryArgs {
* limit: u32,
* marker: Option<String>
* }
*
* async fn myapi_projects_get(
* _: Arc<RequestContext>,
* query: Query<MyQueryArgs>)
* -> Result<Response<Body>, HttpError>
* {
* let query_args = query.into_inner();
* let limit: u32 = query_args.limit;
* let marker: Option<String> = query_args.marker;
* Ok(Response::builder()
* .status(StatusCode::OK)
* .body(format!("limit = {}, marker = {:?}\n", limit, marker).into())?)
* }
* ```
*
* ### Endpoint function return types
*
* Endpoint handler functions are async, so they always return a `Future`. When
* we say "return type" below, we use that as shorthand for the output of the
* future.
*
* An endpoint function must return a type that implements `HttpResponse`.
* Typically this should be a type that implements `HttpTypedResponse` (either
* one of the Dropshot-provided ones or one of your own creation).
*
* The more specific a type returned by the handler function, the more can be
* validated at build-time, and the more specific an OpenAPI schema can be
* generated from the source code. For example, a POST to an endpoint
* "/projects" might return `Result<HttpResponseCreated<Project>, HttpError>`.
* As you might expect, on success, this turns into an HTTP 201 "Created"
* response whose body is constructed by serializing the `Project`. In this
* example, OpenAPI tooling can identify at build time that this function
* produces a 201 "Created" response on success with a body whose schema matches
* `Project` (which we already said implements `Serialize`), and there would be
* no way to violate this contract at runtime.
*
 * These are the implementations of `HttpTypedResponse` and their associated
 * HTTP status codes:
*
* | Return Type | HTTP status code |
* | ----------- | ---------------- |
* | [`HttpResponseOk`] | 200 |
* | [`HttpResponseCreated`] | 201 |
* | [`HttpResponseAccepted`] | 202 |
* | [`HttpResponseDeleted`] | 204 |
* | [`HttpResponseUpdatedNoContent`] | 204 |
*
* In situations where the response schema is not fixed, the endpoint should
* return `Response<Body>`, which also implements `HttpResponse`. Note that
* the OpenAPI spec will not include any status code or type information in
* this case.
*
* ## What about generic handlers that run on all requests?
*
* There's no mechanism in Dropshot for this. Instead, it's recommended that
* users commonize code using regular Rust functions and calling them. See the
* design notes in the README for more on this.
*
*
* ## Support for paginated resources
*
* "Pagination" here refers to the interface pattern where HTTP resources (or
* API endpoints) that provide a list of the items in a collection return a
* relatively small maximum number of items per request, often called a "page"
* of results. Each page includes some metadata that the client can use to make
* another request for the next page of results. The client can repeat this
* until they've gotten all the results. Limiting the number of results
* returned per request helps bound the resource utilization and time required
 * for any request, which in turn facilitates horizontal scalability, high
* availability, and protection against some denial of service attacks
* (intentional or otherwise). For more background, see the comments in
* dropshot/src/pagination.rs.
*
* Pagination support in Dropshot implements this common pattern:
*
* * This server exposes an **API endpoint** that returns the **items**
* contained within a **collection**.
* * The client is not allowed to list the entire collection in one request.
* Instead, they list the collection using a sequence of requests to the one
* endpoint. We call this sequence of requests a **scan** of the collection,
* and we sometimes say that the client **pages through** the collection.
* * The initial request in the scan may specify the **scan parameters**, which
* typically specify how the results are to be sorted (i.e., by which
* field(s) and whether the sort is ascending or descending), any filters to
* apply, etc.
* * Each request returns a **page** of results at a time, along with a **page
* token** that's provided with the next request as a query parameter.
* * The scan parameters cannot change between requests that are part of the
* same scan.
* * With all requests: there's a default limit (e.g., 100 items returned at a
* time). Clients can request a higher limit using a query parameter (e.g.,
* `limit=1000`). This limit is capped by a hard limit on the server. If the
* client asks for more than the hard limit, the server can use the hard limit
* or reject the request.
*
* As an example, imagine that we have an API endpoint called `"/animals"`. Each
* item returned is an `Animal` object that might look like this:
*
* ```json
* {
* "name": "aardvark",
* "class": "mammal",
* "max_weight": "80", /* kilograms, typical */
* }
* ```
*
* There are at least 1.5 million known species of animal -- too many to return
* in one API call! Our API supports paginating them by `"name"`, which we'll
* say is a unique field in our data set.
*
* The first request to the API fetches `"/animals"` (with no querystring
* parameters) and returns:
*
* ```json
* {
* "page_token": "abc123...",
* "items": [
* {
* "name": "aardvark",
* "class": "mammal",
* "max_weight": "80",
* },
* ...
* {
* "name": "badger",
* "class": "mammal",
* "max_weight": "12",
* }
* ]
* }
* ```
*
* The subsequent request to the API fetches `"/animals?page_token=abc123..."`.
* The page token `"abc123..."` is an opaque token to the client, but typically
* encodes the scan parameters and the value of the last item seen
* (`"name=badger"`). The client knows it has completed the scan when it
* receives a response with no `page_token` in it.
*
* Our API endpoint can also support scanning in reverse order. In this case,
* when the client makes the first request, it should fetch
* `"/animals?sort=name-descending"`. Now the first result might be `"zebra"`.
* Again, the page token must include the scan parameters so that in subsequent
* requests, the API endpoint knows that we're scanning backwards, not forwards,
* from the value we were given. It's not allowed to change directions or sort
* order in the middle of a scan. (You can always start a new scan, but you
* can't pick up from where you were in the previous scan.)
*
* It's also possible to support sorting by multiple fields. For example, we
* could support `sort=class-name`, which we could define to mean that we'll
* sort the results first by the animal's class, then by name. Thus we'd get
* all the amphibians in sorted order, then all the mammals, then all the
* reptiles. The main requirement is that the combination of fields used for
* pagination must be unique. We cannot paginate by the animal's class alone.
* (To see why: there are over 6,000 mammals. If the page size is, say, 1000,
* then the page_token would say `"mammal"`, but there's not enough information
* there to see where we are within the list of mammals. It doesn't matter
* whether there are 2 mammals or 6,000 because clients can limit the page size
* to just one item if they want and that ought to work.)
*
*
* ### Dropshot interfaces for pagination
*
* We can think of pagination in two parts: the input (handling the pagination
* query parameters) and the output (emitting a page of results, including the
* page token).
*
* For input, a paginated API endpoint's handler function should accept a
* [`Query`]`<`[`PaginationParams`]`<ScanParams, PageSelector>>`, where
* `ScanParams` is a consumer-defined type specifying the parameters of the scan
* (typically including the sort fields, sort order, and filter options) and
* `PageSelector` is a consumer-defined type describing the page token. The
* PageSelector will be serialized to JSON and base64-encoded to construct the
* page token. This will be automatically parsed on the way back in.
*
* For output, a paginated API endpoint's handler function can return
* `Result<`[`HttpResponseOk`]<[`ResultsPage`]`<T>, HttpError>` where `T:
* Serialize` is the item listed by the endpoint. You can also use your own
* structure that contains a [`ResultsPage`] (possibly using
* `#[serde(flatten)]`), if that's the behavior you want.
*
* There are several complete, documented examples in the "examples" directory.
*
*
* ### Advanced usage notes
*
* It's possible to accept additional query parameters besides the pagination
* parameters by having your API endpoint handler function take two different
* arguments using `Query`, like this:
*
* ```
* use dropshot::HttpError;
* use dropshot::HttpResponseOk;
* use dropshot::PaginationParams;
* use dropshot::Query;
* use dropshot::RequestContext;
* use dropshot::ResultsPage;
* use dropshot::endpoint;
* use schemars::JsonSchema;
* use serde::Deserialize;
* use std::sync::Arc;
* # use serde::Serialize;
* # #[derive(Debug, Deserialize, JsonSchema)]
* # enum MyScanParams { A };
* # #[derive(Debug, Deserialize, JsonSchema, Serialize)]
* # enum MyPageSelector { A(String) };
* #[derive(Deserialize, JsonSchema)]
* struct MyExtraQueryParams {
* do_extra_stuff: bool,
* }
*
* #[endpoint {
* method = GET,
* path = "/list_stuff"
* }]
* async fn my_list_api(
* rqctx: Arc<RequestContext>,
* pag_params: Query<PaginationParams<MyScanParams, MyPageSelector>>,
* extra_params: Query<MyExtraQueryParams>,
* ) -> Result<HttpResponseOk<ResultsPage<String>>, HttpError>
* {
* # unimplemented!();
* /* ... */
* }
* ```
*
* You might expect that instead of doing this, you could define your own
* structure that includes a `PaginationParams` using `#[serde(flatten)]`, and
* this ought to work, but it currently doesn't due to serde_urlencoded#33,
* which is really serde#1183.
*/
/*
* Clippy's style advice is definitely valuable, but not worth the trouble for
* automated enforcement.
*/
#![allow(clippy::style)]
mod api_description;
mod config;
mod error;
mod from_map;
mod handler;
mod http_util;
mod logging;
mod pagination;
mod router;
mod server;
pub mod test_util;
#[macro_use]
extern crate slog;
pub use api_description::ApiDescription;
pub use api_description::ApiEndpoint;
pub use api_description::ApiEndpointParameter;
pub use api_description::ApiEndpointParameterLocation;
pub use api_description::ApiEndpointResponse;
pub use api_description::OpenApiDefinition;
pub use config::ConfigDropshot;
pub use error::HttpError;
pub use error::HttpErrorResponseBody;
pub use handler::Extractor;
pub use handler::HttpResponse;
pub use handler::HttpResponseAccepted;
pub use handler::HttpResponseCreated;
pub use handler::HttpResponseDeleted;
pub use handler::HttpResponseOk;
pub use handler::HttpResponseUpdatedNoContent;
pub use handler::Path;
pub use handler::Query;
pub use handler::RequestContext;
pub use handler::TypedBody;
pub use handler::UntypedBody;
pub use http_util::CONTENT_TYPE_JSON;
pub use http_util::CONTENT_TYPE_NDJSON;
pub use http_util::CONTENT_TYPE_OCTET_STREAM;
pub use http_util::HEADER_REQUEST_ID;
pub use logging::ConfigLogging;
pub use logging::ConfigLoggingIfExists;
pub use logging::ConfigLoggingLevel;
pub use pagination::EmptyScanParams;
pub use pagination::PaginationOrder;
pub use pagination::PaginationParams;
pub use pagination::ResultsPage;
pub use pagination::WhichPage;
pub use server::HttpServer;
/*
* Users of the `endpoint` macro need `http::Method` available.
*/
pub use http::Method;
extern crate dropshot_endpoint;
pub use dropshot_endpoint::endpoint;
| 39.169946 | 81 | 0.693369 |
0192872f97586cdf4dda7f35a34f95c716bddc2a
| 264 |
pub use self::abs::*;
pub use self::clamp::*;
pub use self::curve::*;
pub use self::exponent::*;
pub use self::negate::*;
pub use self::scale_bias::*;
pub use self::terrace::*;
mod abs;
mod clamp;
mod curve;
mod exponent;
mod negate;
mod scale_bias;
mod terrace;
| 16.5 | 28 | 0.67803 |
d7d88673bea59ad6ba98e4d550d869b7d222af5a
| 52 |
mod generate;
pub use generate::print_completions;
| 13 | 36 | 0.807692 |
2f23016e607a78fa823082f2ae02816935f913f5
| 3,290 |
//! This module implements configuration related stuff.
use std::fmt;
use std::env;
use std::io::Read;
use std::fs::File;
use std::path::Path;
use serde_json::{self, Map, Value};
/// The pencil `Config` type, We provide ways to fill it from JSON files:
///
/// ```rust,no_run
/// let mut app = chilli::Pencil::new("/demo");
/// app.config.from_jsonfile("yourconfig.json")
/// ```
///
/// You can also load configurations from an environment variable
/// pointing to a file:
///
/// ```rust,no_run
/// let mut app = chilli::Pencil::new("/demo");
/// app.config.from_envvar("YOURAPPLICATION_SETTINGS")
/// ```
///
/// In this case, you have to set this environment variable to the file
/// you want to use. On Linux and OS X you can use the export statement:
///
/// ```bash
/// export YOURAPPLICATION_SETTINGS="/path/to/config/file"
/// ```
#[derive(Clone)]
pub struct Config {
    // Backing store: a JSON object mapping configuration keys to values.
    config: serde_json::Map<String, Value>,
}
impl Default for Config {
    /// An empty configuration, identical to [`Config::new`].
    fn default() -> Self {
        Config::new()
    }
}
impl Config {
    /// Creates an empty `Config` object.
    pub fn new() -> Config {
        Config {
            config: Map::new(),
        }
    }
    /// Sets `value` for the key, replacing any previous value.
    pub fn set(&mut self, key: &str, value: Value) {
        self.config.insert(key.to_string(), value);
    }
    /// Returns a reference to the value corresponding to the key.
    pub fn get(&self, key: &str) -> Option<&Value> {
        // `Map::get` accepts `&str` directly; the previous
        // `&key.to_string()` allocated a fresh `String` on every lookup.
        self.config.get(key)
    }
    /// Gets a boolean configuration value. If the key doesn't exist
    /// or the value is not a JSON boolean, the default value
    /// will be returned.
    pub fn get_boolean(&self, key: &str, default: bool) -> bool {
        match self.get(key) {
            Some(&Value::Bool(value)) => value,
            _ => default,
        }
    }
    /// Loads a configuration from an environment variable pointing to
    /// a JSON configuration file.
    ///
    /// # Panics
    ///
    /// Panics if the environment variable is unset (or not valid Unicode),
    /// or if loading the file it points to fails.
    pub fn from_envvar(&mut self, variable_name: &str) {
        match env::var(variable_name) {
            Ok(value) => self.from_jsonfile(&value),
            Err(_) => panic!("The environment variable {} is not set.", variable_name),
        }
    }
    /// Updates the values in the config from a JSON file.
    ///
    /// # Panics
    ///
    /// Panics if the file cannot be opened or read, is not valid JSON, or
    /// its top-level value is not a JSON object.
    pub fn from_jsonfile(&mut self, filepath: &str) {
        let path = Path::new(filepath);
        let mut file = File::open(&path).expect("Failed to open the configuration file.");
        let mut content = String::new();
        file.read_to_string(&mut content)
            .expect("Failed to read the configuration file.");
        let object: Value =
            serde_json::from_str(&content).expect("The configuration file is not valid JSON.");
        match object {
            Value::Object(object) => {
                self.from_object(object);
            }
            _ => panic!("The configuration file is not a JSON object."),
        }
    }
    /// Updates the values from the given `Object`, consuming it.
    pub fn from_object(&mut self, object: Map<String, Value>) {
        // The map is owned here, so move the entries in directly instead of
        // cloning every value (as the previous `&object` iteration did).
        for (key, value) in object {
            self.config.insert(key, value);
        }
    }
}
impl fmt::Debug for Config {
    /// Renders the configuration as `<Pencil Config {..}>` for diagnostics.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(formatter, "<Pencil Config {:?}>", self.config)
    }
}
| 29.63964 | 87 | 0.5769 |
e277efe2e8c42fc09e640650251c71fd9ef06371
| 6,375 |
use crate::errors::ShellError;
#[cfg(not(coloring_in_tokens))]
use crate::parser::hir::syntax_shape::FlatShape;
use crate::parser::{
hir::syntax_shape::{
color_syntax, expand_atom, AtomicToken, ColorSyntax, ExpandContext, ExpansionRule,
MaybeSpaceShape,
},
TokenNode, TokensIterator,
};
use crate::{Span, Spanned, Text};
/// Expands the tokens of an external command into a list of spanned strings,
/// consuming expressions from `token_nodes` until none remain.
pub fn expand_external_tokens(
    token_nodes: &mut TokensIterator<'_>,
    source: &Text,
) -> Result<Vec<Spanned<String>>, ShellError> {
    let mut expanded: Vec<Spanned<String>> = Vec::new();
    // Keep pulling expressions until the iterator is exhausted.
    while let Some(span) = expand_next_expression(token_nodes)? {
        expanded.push(span.spanned_string(source));
    }
    Ok(expanded)
}
/// Syntax-coloring shape covering the whole token stream of an external
/// command.
#[derive(Debug, Copy, Clone)]
pub struct ExternalTokensShape;
/// Coloring in the mode where flat shapes are pushed into a caller-provided
/// `shapes` vector (cfg'd off when `coloring_in_tokens` is enabled).
#[cfg(not(coloring_in_tokens))]
impl ColorSyntax for ExternalTokensShape {
    type Info = ();
    type Input = ();
    fn color_syntax<'a, 'b>(
        &self,
        _input: &(),
        token_nodes: &'b mut TokensIterator<'a>,
        context: &ExpandContext,
        shapes: &mut Vec<Spanned<FlatShape>>,
    ) -> Self::Info {
        loop {
            // Allow a space
            color_syntax(&MaybeSpaceShape, token_nodes, context, shapes);
            // Process an external expression. External expressions are mostly words, with a
            // few exceptions (like $variables and path expansion rules)
            match color_syntax(&ExternalExpression, token_nodes, context, shapes).1 {
                ExternalExpressionResult::Eof => break,
                ExternalExpressionResult::Processed => continue,
            }
        }
    }
}
/// Coloring in the mode where shapes are stored on the token iterator itself
/// (enabled by the `coloring_in_tokens` cfg flag); same structure as above,
/// minus the external `shapes` vector.
#[cfg(coloring_in_tokens)]
impl ColorSyntax for ExternalTokensShape {
    type Info = ();
    type Input = ();
    fn color_syntax<'a, 'b>(
        &self,
        _input: &(),
        token_nodes: &'b mut TokensIterator<'a>,
        context: &ExpandContext,
    ) -> Self::Info {
        loop {
            // Allow a space
            color_syntax(&MaybeSpaceShape, token_nodes, context);
            // Process an external expression. External expressions are mostly words, with a
            // few exceptions (like $variables and path expansion rules)
            match color_syntax(&ExternalExpression, token_nodes, context).1 {
                ExternalExpressionResult::Eof => break,
                ExternalExpressionResult::Processed => continue,
            }
        }
    }
}
/// Consumes the next external expression from the stream and returns the
/// span it covers, or `Ok(None)` once the stream is exhausted.
pub fn expand_next_expression(
    token_nodes: &mut TokensIterator<'_>,
) -> Result<Option<Span>, ShellError> {
    // Skip leading whitespace; an empty stream means there is no expression.
    let head = match token_nodes.next_non_ws() {
        Some(node) => node,
        None => return Ok(None),
    };
    let first = triage_external_head(head)?;
    // Extend the span for as long as continuation tokens follow the head.
    let mut last = first;
    while let Some(continuation) = triage_continuation(token_nodes)? {
        last = continuation;
    }
    Ok(Some(first.until(last)))
}
fn triage_external_head(node: &TokenNode) -> Result<Span, ShellError> {
Ok(match node {
TokenNode::Token(token) => token.span,
TokenNode::Call(_call) => unimplemented!("TODO: OMG"),
TokenNode::Nodes(_nodes) => unimplemented!("TODO: OMG"),
TokenNode::Delimited(_delimited) => unimplemented!("TODO: OMG"),
TokenNode::Pipeline(_pipeline) => unimplemented!("TODO: OMG"),
TokenNode::Flag(flag) => flag.span,
TokenNode::Whitespace(_whitespace) => {
unreachable!("This function should be called after next_non_ws()")
}
TokenNode::Error(_error) => unimplemented!("TODO: OMG"),
})
}
/// Peeks at the next node and, if it can continue the current external
/// expression, commits (consumes) it and returns its span. Whitespace or
/// end-of-stream terminates the expression with `Ok(None)` without
/// consuming anything.
fn triage_continuation<'a, 'b>(
    nodes: &'a mut TokensIterator<'b>,
) -> Result<Option<Span>, ShellError> {
    let mut peeked = nodes.peek_any();
    let node = match peeked.node {
        None => return Ok(None),
        Some(node) => node,
    };
    match &node {
        // Whitespace ends the expression and is deliberately NOT committed.
        node if node.is_whitespace() => return Ok(None),
        // Tokens and flags are valid continuations.
        TokenNode::Token(..) | TokenNode::Flag(..) => {}
        TokenNode::Call(..) => unimplemented!("call"),
        TokenNode::Nodes(..) => unimplemented!("nodes"),
        TokenNode::Delimited(..) => unimplemented!("delimited"),
        TokenNode::Pipeline(..) => unimplemented!("pipeline"),
        TokenNode::Whitespace(..) => unimplemented!("whitespace"),
        TokenNode::Error(..) => unimplemented!("error"),
    }
    // Only consume the peeked node once we know it belongs to the expression.
    peeked.commit();
    Ok(Some(node.span()))
}
/// Outcome of coloring a single external expression.
#[must_use]
enum ExternalExpressionResult {
    /// End of input was reached; the caller should stop looping.
    Eof,
    /// One expression was colored; the caller may continue.
    Processed,
}
/// Syntax-coloring shape for a single external expression (mostly a word).
#[derive(Debug, Copy, Clone)]
struct ExternalExpression;
/// Coloring in the mode where shapes are pushed into a caller-provided
/// vector (cfg'd off when `coloring_in_tokens` is enabled).
#[cfg(not(coloring_in_tokens))]
impl ColorSyntax for ExternalExpression {
    type Info = ExternalExpressionResult;
    type Input = ();
    fn color_syntax<'a, 'b>(
        &self,
        _input: &(),
        token_nodes: &'b mut TokensIterator<'a>,
        context: &ExpandContext,
        shapes: &mut Vec<Spanned<FlatShape>>,
    ) -> ExternalExpressionResult {
        // Permissive expansion: any atom counts as an "external word".
        let atom = match expand_atom(
            token_nodes,
            "external word",
            context,
            ExpansionRule::permissive(),
        ) {
            Err(_) => unreachable!("TODO: separate infallible expand_atom"),
            // End of input: report Eof so the caller stops.
            Ok(Spanned {
                item: AtomicToken::Eof { .. },
                ..
            }) => return ExternalExpressionResult::Eof,
            Ok(atom) => atom,
        };
        atom.color_tokens(shapes);
        return ExternalExpressionResult::Processed;
    }
}
/// Coloring in the mode where shapes live on the token iterator itself
/// (enabled by the `coloring_in_tokens` cfg flag).
#[cfg(coloring_in_tokens)]
impl ColorSyntax for ExternalExpression {
    type Info = ExternalExpressionResult;
    type Input = ();
    fn color_syntax<'a, 'b>(
        &self,
        _input: &(),
        token_nodes: &'b mut TokensIterator<'a>,
        context: &ExpandContext,
    ) -> ExternalExpressionResult {
        // Permissive expansion: any atom counts as an "external word".
        let atom = match expand_atom(
            token_nodes,
            "external word",
            context,
            ExpansionRule::permissive(),
        ) {
            Err(_) => unreachable!("TODO: separate infallible expand_atom"),
            // End of input: report Eof so the caller stops.
            Ok(Spanned {
                item: AtomicToken::Eof { .. },
                ..
            }) => return ExternalExpressionResult::Eof,
            Ok(atom) => atom,
        };
        atom.color_tokens(token_nodes.mut_shapes());
        return ExternalExpressionResult::Processed;
    }
}
| 29.109589 | 92 | 0.583373 |
18d69339a611f6c17d61f5654cc97a74f3c6f749
| 373 |
mod sample_structs;
use cachem::Server;
use cachem_example::*;
use std::sync::Arc;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Bind the cache server to all interfaces on port 55555. `cnc_rec` is
    // presumably the command-and-control receiver half — confirm in cachem docs.
    let (cnc_rec, mut server) = Server::new("0.0.0.0:55555".into());
    // Register the example cache under the `A` cache name.
    server.add(CacheName::A, Arc::new(Box::new(ACache::new(cnc_rec))));
    // Start the command-and-control listener, then serve TCP clients; only
    // `listen_tcp` is awaited, so it is the call that keeps main alive.
    server.listen_cnc();
    server.listen_tcp().await;
    Ok(())
}
| 20.722222 | 71 | 0.627346 |
6a392da42638a015e271de630f66ec8107186db9
| 1,634 |
use super::error::ConvertError;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
/// Row headings recognized in the source sheet; each variant's serde rename
/// is the exact Japanese heading it (de)serializes as.
#[derive(Serialize, Deserialize, Debug)]
pub(crate) enum Subject {
    #[serde(rename = "ページング")]
    Paging,
    #[serde(rename = "表示")]
    Required,
    #[serde(rename = "表示条件1")]
    DisplayConditionFirst,
    #[serde(rename = "表示条件2")]
    DisplayConditionSecond,
    #[serde(rename = "表示条件3")]
    DisplayConditionThird,
    #[serde(rename = "タイプ")]
    Type,
    #[serde(rename = "最大")]
    Max,
    #[serde(rename = "最小")]
    Min,
    #[serde(rename = "ラベル")]
    Label,
    #[serde(rename = "プレースホルダ")]
    Placeholder,
    #[serde(rename = "入力指定")]
    InputSpec,
    #[serde(rename = "数字入力指定範囲(例:10~90)")]
    NumInputSpec,
    #[serde(rename = "入力指定エラー文言")]
    NumInputSpecError,
    // NOTE(review): `FromStr` additionally accepts any heading that merely
    // *starts* with this prefix, unlike the exact-match serde rename.
    #[serde(rename = "プルダウン")]
    Options,
}
impl FromStr for Subject {
    type Err = ConvertError;

    /// Maps a sheet heading to its `Subject`. Any heading beginning with
    /// "プルダウン" is treated as `Options`; anything else yields
    /// `ConvertError::IncorrectSubject` carrying the unknown heading.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let subject = match s {
            "ページング" => Subject::Paging,
            "表示" => Subject::Required,
            "表示条件1" => Subject::DisplayConditionFirst,
            "表示条件2" => Subject::DisplayConditionSecond,
            "表示条件3" => Subject::DisplayConditionThird,
            "タイプ" => Subject::Type,
            "最大" => Subject::Max,
            "最小" => Subject::Min,
            "ラベル" => Subject::Label,
            "プレースホルダ" => Subject::Placeholder,
            "入力指定" => Subject::InputSpec,
            "数字入力指定範囲(例:10~90)" => Subject::NumInputSpec,
            "入力指定エラー文言" => Subject::NumInputSpecError,
            prefixed if prefixed.starts_with("プルダウン") => Subject::Options,
            unknown => return Err(ConvertError::IncorrectSubject(unknown.to_owned())),
        };
        Ok(subject)
    }
}
| 27.233333 | 89 | 0.618115 |
f9d379abf3c1eddab868a5912f08c065538734b9
| 2,372 |
//! This module defines Concrete Syntax Tree (CST), used by rust-analyzer.
//!
//! The CST includes comments and whitespace, provides a single node type,
//! `SyntaxNode`, and a basic traversal API (parent, children, siblings).
//!
//! The *real* implementation is in the (language-agnostic) `rowan` crate, this
//! module just wraps its API.
use rowan::{GreenNodeBuilder, Language};
use crate::{Parse, SmolStr, SyntaxError, SyntaxKind, TextSize};
pub(crate) use rowan::{GreenNode, GreenToken};
/// Zero-sized marker type that ties rowan's generic tree machinery to the
/// Rust-specific [`SyntaxKind`] set.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum RustLanguage {}
impl Language for RustLanguage {
    type Kind = SyntaxKind;

    /// Converts rowan's raw kind into the Rust-specific [`SyntaxKind`].
    fn kind_from_raw(raw: rowan::SyntaxKind) -> SyntaxKind {
        raw.0.into()
    }

    /// Converts a Rust [`SyntaxKind`] back into rowan's raw representation.
    fn kind_to_raw(kind: SyntaxKind) -> rowan::SyntaxKind {
        rowan::SyntaxKind(kind.into())
    }
}
// Concrete tree types: rowan's generic node/token/element types specialized
// to `RustLanguage`, so the rest of the crate never spells out the parameter.
pub type SyntaxNode = rowan::SyntaxNode<RustLanguage>;
pub type SyntaxToken = rowan::SyntaxToken<RustLanguage>;
pub type SyntaxElement = rowan::SyntaxElement<RustLanguage>;
pub type SyntaxNodeChildren = rowan::SyntaxNodeChildren<RustLanguage>;
pub type SyntaxElementChildren = rowan::SyntaxElementChildren<RustLanguage>;
pub use rowan::{Direction, NodeOrToken};
/// Incrementally builds a green tree plus the syntax errors discovered while
/// parsing. Thin wrapper over rowan's [`GreenNodeBuilder`] that converts
/// between our `SyntaxKind` and rowan's raw kinds at the boundary.
#[derive(Default)]
pub struct SyntaxTreeBuilder {
    errors: Vec<SyntaxError>,
    inner: GreenNodeBuilder<'static>,
}

impl SyntaxTreeBuilder {
    /// Consumes the builder, yielding the finished green tree together with
    /// the accumulated errors.
    pub(crate) fn finish_raw(self) -> (GreenNode, Vec<SyntaxError>) {
        (self.inner.finish(), self.errors)
    }

    /// Finishes building and wraps the result in a [`Parse`].
    pub fn finish(self) -> Parse<SyntaxNode> {
        let (green, errors) = self.finish_raw();
        let root = SyntaxNode::new_root(green);
        // Debug builds double-check that `{`/`}` nesting in the produced
        // tree is well-formed.
        if cfg!(debug_assertions) {
            crate::validation::validate_block_structure(&root);
        }
        Parse::new(root.green().clone(), errors)
    }

    /// Attaches a leaf token of `kind` with the given text.
    pub fn token(&mut self, kind: SyntaxKind, text: SmolStr) {
        self.inner.token(RustLanguage::kind_to_raw(kind), text)
    }

    /// Opens an interior node; must be balanced by [`Self::finish_node`].
    pub fn start_node(&mut self, kind: SyntaxKind) {
        self.inner.start_node(RustLanguage::kind_to_raw(kind))
    }

    /// Closes the most recently opened node.
    pub fn finish_node(&mut self) {
        self.inner.finish_node()
    }

    /// Records a parse error at the given text offset.
    pub fn error(&mut self, error: ra_parser::ParseError, text_pos: TextSize) {
        self.errors.push(SyntaxError::new_at_offset(error.0, text_pos))
    }
}
| 31.210526 | 79 | 0.679595 |
2852c3b616df1a4afd84d3606d939d23f2cb0911
| 8,404 |
//! Collects trait impls for each item in the crate. For example, if a crate
//! defines a struct that implements a trait, this pass will note that the
//! struct implements that trait.
use super::Pass;
use crate::clean::*;
use crate::core::DocContext;
use crate::formats::cache::Cache;
use crate::visit::DocVisitor;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_hir::def_id::DefId;
use rustc_middle::ty::DefIdTree;
use rustc_span::symbol::sym;
/// Pass descriptor for [`collect_trait_impls`]; registered under the name
/// `"collect-trait-impls"`.
crate const COLLECT_TRAIT_IMPLS: Pass = Pass {
    name: "collect-trait-impls",
    run: collect_trait_impls,
    description: "retrieves trait impls for items in the crate",
};
/// Gathers every trait impl relevant to the crate being documented:
/// synthetic (auto-trait/blanket) impls, inlined external impls, primitive
/// impls, and local impls — then splices the survivors into the root module.
crate fn collect_trait_impls(mut krate: Crate, cx: &mut DocContext<'_>) -> Crate {
    // Synthesize auto-trait and blanket impls for local ADTs.
    let synth_impls = cx.sess().time("collect_synthetic_impls", || {
        let mut synth = SyntheticImplCollector { cx, impls: Vec::new() };
        synth.visit_crate(&krate);
        synth.impls
    });
    // Primitives and item ids present in this crate; consulted later to
    // decide which inlined impls are worth keeping.
    let prims: FxHashSet<PrimitiveType> = krate.primitives.iter().map(|p| p.1).collect();
    let crate_items = {
        let mut coll = ItemCollector::new();
        cx.sess().time("collect_items_for_trait_impls", || coll.visit_crate(&krate));
        coll.items
    };
    let mut new_items = Vec::new();
    // External trait impls.
    cx.with_all_trait_impls(|cx, all_trait_impls| {
        let _prof_timer = cx.tcx.sess.prof.generic_activity("build_extern_trait_impls");
        // NOTE(review): `skip_while(is_local)` assumes the list is sorted
        // local-first (the local half is handled further below) — confirm.
        for &impl_def_id in all_trait_impls.iter().skip_while(|def_id| def_id.is_local()) {
            inline::build_impl(cx, None, impl_def_id, None, &mut new_items);
        }
    });
    // Also try to inline primitive impls from other crates.
    cx.tcx.sess.prof.generic_activity("build_primitive_trait_impls").run(|| {
        for def_id in PrimitiveType::all_impls(cx.tcx) {
            if !def_id.is_local() {
                inline::build_impl(cx, None, def_id, None, &mut new_items);
                // FIXME(eddyb) is this `doc(hidden)` check needed?
                if !cx.tcx.is_doc_hidden(def_id) {
                    let impls = get_auto_trait_and_blanket_impls(cx, def_id);
                    // `cx.inlined` de-duplicates impls already built above.
                    new_items.extend(impls.filter(|i| cx.inlined.insert(i.def_id)));
                }
            }
        }
    });
    let mut cleaner = BadImplStripper { prims, items: crate_items, cache: &cx.cache };
    let mut type_did_to_deref_target: FxHashMap<DefId, &Type> = FxHashMap::default();
    // Follow all `Deref` targets of included items and recursively add them as valid
    fn add_deref_target(
        cx: &DocContext<'_>,
        map: &FxHashMap<DefId, &Type>,
        cleaner: &mut BadImplStripper<'_>,
        type_did: DefId,
    ) {
        if let Some(target) = map.get(&type_did) {
            debug!("add_deref_target: type {:?}, target {:?}", type_did, target);
            if let Some(target_prim) = target.primitive_type() {
                cleaner.prims.insert(target_prim);
            } else if let Some(target_did) = target.def_id(&cx.cache) {
                // `impl Deref<Target = S> for S`
                if target_did == type_did {
                    // Avoid infinite cycles
                    return;
                }
                cleaner.items.insert(target_did.into());
                add_deref_target(cx, map, cleaner, target_did);
            }
        }
    }
    // scan through included items ahead of time to splice in Deref targets to the "valid" sets
    for it in &new_items {
        if let ImplItem(Impl { ref for_, ref trait_, ref items, .. }) = *it.kind {
            if trait_.as_ref().map(|t| t.def_id()) == cx.tcx.lang_items().deref_trait()
                && cleaner.keep_impl(for_, true)
            {
                // A `Deref` impl's `Target` associated type tells us which
                // type's docs should show the deref'd methods.
                let target = items
                    .iter()
                    .find_map(|item| match *item.kind {
                        TypedefItem(ref t, true) => Some(&t.type_),
                        _ => None,
                    })
                    .expect("Deref impl without Target type");
                if let Some(prim) = target.primitive_type() {
                    cleaner.prims.insert(prim);
                } else if let Some(did) = target.def_id(&cx.cache) {
                    cleaner.items.insert(did.into());
                }
                if let Some(for_did) = for_.def_id(&cx.cache) {
                    if type_did_to_deref_target.insert(for_did, target).is_none() {
                        // Since only the `DefId` portion of the `Type` instances is known to be same for both the
                        // `Deref` target type and the impl for type positions, this map of types is keyed by
                        // `DefId` and for convenience uses a special cleaner that accepts `DefId`s directly.
                        if cleaner.keep_impl_with_def_id(for_did.into()) {
                            add_deref_target(cx, &type_did_to_deref_target, &mut cleaner, for_did);
                        }
                    }
                }
            }
        }
    }
    // Drop impls whose self type is neither documented here, a known
    // primitive, a generic, nor reachable through a kept `Deref` chain.
    new_items.retain(|it| {
        if let ImplItem(Impl { ref for_, ref trait_, ref kind, .. }) = *it.kind {
            cleaner.keep_impl(
                for_,
                trait_.as_ref().map(|t| t.def_id()) == cx.tcx.lang_items().deref_trait(),
            ) || trait_.as_ref().map_or(false, |t| cleaner.keep_impl_with_def_id(t.def_id().into()))
                || kind.is_blanket()
        } else {
            true
        }
    });
    // Local trait impls.
    cx.with_all_trait_impls(|cx, all_trait_impls| {
        let _prof_timer = cx.tcx.sess.prof.generic_activity("build_local_trait_impls");
        let mut attr_buf = Vec::new();
        for &impl_def_id in all_trait_impls.iter().take_while(|def_id| def_id.is_local()) {
            // Collect inherited `#[doc(cfg(...))]` attributes from every
            // enclosing scope of the impl.
            let mut parent = cx.tcx.parent(impl_def_id);
            while let Some(did) = parent {
                attr_buf.extend(
                    cx.tcx
                        .get_attrs(did)
                        .iter()
                        .filter(|attr| attr.has_name(sym::doc))
                        .filter(|attr| {
                            if let Some([attr]) = attr.meta_item_list().as_deref() {
                                attr.has_name(sym::cfg)
                            } else {
                                false
                            }
                        })
                        .cloned(),
                );
                parent = cx.tcx.parent(did);
            }
            inline::build_impl(cx, None, impl_def_id, Some(&attr_buf), &mut new_items);
            attr_buf.clear();
        }
    });
    // Splice everything into the crate's root module.
    if let ModuleItem(Module { items, .. }) = &mut *krate.module.kind {
        items.extend(synth_impls);
        items.extend(new_items);
    } else {
        panic!("collect-trait-impls can't run");
    };
    krate
}
/// Visitor that synthesizes auto-trait and blanket impls for every ADT
/// (struct/enum/union) item it encounters, accumulating them in `impls`.
struct SyntheticImplCollector<'a, 'tcx> {
    cx: &'a mut DocContext<'tcx>,
    impls: Vec<Item>,
}
impl<'a, 'tcx> DocVisitor for SyntheticImplCollector<'a, 'tcx> {
    fn visit_item(&mut self, i: &Item) {
        if i.is_struct() || i.is_enum() || i.is_union() {
            // FIXME(eddyb) is this `doc(hidden)` check needed?
            if !self.cx.tcx.is_doc_hidden(i.def_id.expect_def_id()) {
                self.impls
                    .extend(get_auto_trait_and_blanket_impls(self.cx, i.def_id.expect_def_id()));
            }
        }
        // Recurse into children regardless of this item's kind.
        self.visit_item_recur(i)
    }
}
/// Visitor that records the `ItemId` of every item in the crate; the
/// resulting set is used to decide which impls are worth keeping.
#[derive(Default)]
struct ItemCollector {
    items: FxHashSet<ItemId>,
}

impl ItemCollector {
    fn new() -> Self {
        Default::default()
    }
}

impl DocVisitor for ItemCollector {
    fn visit_item(&mut self, item: &Item) {
        self.items.insert(item.def_id);
        self.visit_item_recur(item)
    }
}
/// Decides which impls survive stripping: impls on generics, on primitives
/// present in the crate, or on items documented in this crate.
struct BadImplStripper<'a> {
    prims: FxHashSet<PrimitiveType>,
    items: FxHashSet<ItemId>,
    cache: &'a Cache,
}

impl<'a> BadImplStripper<'a> {
    fn keep_impl(&self, ty: &Type, is_deref: bool) -> bool {
        // Impls on bare type parameters are always retained.
        if matches!(ty, Generic(_)) {
            return true;
        }
        // Primitive impls are kept only for primitives this crate documents.
        if let Some(prim) = ty.primitive_type() {
            return self.prims.contains(&prim);
        }
        // Otherwise keep the impl when its self type is a documented item;
        // `Deref` impls are kept unconditionally when the type resolves.
        match ty.def_id(self.cache) {
            Some(did) => is_deref || self.keep_impl_with_def_id(did.into()),
            None => false,
        }
    }

    fn keep_impl_with_def_id(&self, did: ItemId) -> bool {
        self.items.contains(&did)
    }
}
| 36.224138 | 114 | 0.547358 |
011b318edc344b07cc40bc91d93e50fc9c11b257
| 8,724 |
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use super::{Class, Error, Result};
use crate::decl_defs::{DeclTy, FoldedClass};
use crate::folded_decl_provider::{DeclName, FoldedDeclProvider, Substitution};
use crate::reason::Reason;
use crate::typing_defs::ClassElt;
use once_cell::unsync::OnceCell;
use pos::{MethodName, MethodNameMap, PropName, PropNameMap, TypeName};
use std::cell::RefCell;
use std::fmt;
use std::rc::Rc;
use std::sync::Arc;
/// c.f. OCaml type `Typing_classes_heap.eager_members`
///
/// Per-class caches of member elements, filled lazily by `ClassType` the
/// first time each member is requested.
#[derive(Debug)]
struct Members<R: Reason> {
    props: PropNameMap<Rc<ClassElt<R>>>,
    static_props: PropNameMap<Rc<ClassElt<R>>>,
    methods: MethodNameMap<Rc<ClassElt<R>>>,
    static_methods: MethodNameMap<Rc<ClassElt<R>>>,
    // `OnceCell` because "no constructor" (`None`) is itself a cached answer.
    constructor: OnceCell<Option<Rc<ClassElt<R>>>>,
}
/// A typing `ClassType` (c.f. the `Eager` variant of OCaml type
/// `Typing_classes_heap.class_t`) contains a folded decl and a cache of class
/// members. The purpose of the class-member-cache is to abstract over the fact
/// that class elements in a folded decl don't contain their type (in hh_server,
/// the type is stored on a separate heap, to reduce overfetching and
/// duplication). When asked for a class member, the `ClassType` checks its
/// member-cache. If not present, it looks up the type of the member using the
/// `FoldedDeclProvider`, and populates its member-cache with a new `ClassElt`
/// containing that type and any other metadata from the `FoldedElt`.
pub struct ClassType<R: Reason> {
    provider: Arc<dyn FoldedDeclProvider<R>>,
    class: Arc<FoldedClass<R>>,
    // Interior mutability lets the read-only accessor methods memoize.
    members: RefCell<Members<R>>,
}
impl<R: Reason> fmt::Debug for ClassType<R> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Force every member into the cache first so the debug output shows
        // the complete member set, not just what happens to be cached.
        // NOTE(review): this `unwrap` panics if the provider fails mid-fetch;
        // acceptable only if `Debug` is diagnostics-only — confirm.
        self.fetch_all_members().unwrap();
        f.debug_struct("ClassType")
            .field("class", &self.class)
            .field("members", &self.members.borrow())
            .finish()
    }
}
impl<R: Reason> Members<R> {
    /// An empty member cache; every entry is populated on demand.
    fn new() -> Self {
        Self {
            props: Default::default(),
            static_props: Default::default(),
            methods: Default::default(),
            static_methods: Default::default(),
            constructor: Default::default(),
        }
    }
}
impl<R: Reason> ClassType<R> {
    /// Wraps a folded class decl with an (initially empty) member cache.
    pub fn new(provider: Arc<dyn FoldedDeclProvider<R>>, class: Arc<FoldedClass<R>>) -> Self {
        Self {
            provider,
            class,
            members: RefCell::new(Members::new()),
        }
    }
    /// Eagerly populates every member-cache entry (props, static props,
    /// methods, static methods, constructor). Used by the `Debug` impl.
    fn fetch_all_members(&self) -> Result<()> {
        for (&prop, _) in self.class.props.iter() {
            self.get_prop(self.class.name.into(), prop)?;
        }
        for (&prop, _) in self.class.static_props.iter() {
            self.get_static_prop(self.class.name.into(), prop)?;
        }
        for (&method, _) in self.class.methods.iter() {
            self.get_method(self.class.name.into(), method)?;
        }
        for (&method, _) in self.class.static_methods.iter() {
            self.get_static_method(self.class.name.into(), method)?;
        }
        self.get_constructor(self.class.name.into())?;
        Ok(())
    }
    // Invariant violation: we expect our provider to provide member types for any
    // member from a FoldedClass it returned. See docs for `FoldedDeclProvider`.
    // c.f. OCaml exception `Decl_heap_elems_bug`
    fn member_type_missing<T>(&self, kind: &str, origin: TypeName, name: impl AsRef<str>) -> T {
        panic!(
            "Could not find {kind} {origin}::{} (inherited by {})",
            name.as_ref(),
            self.class.name
        );
    }
    // If `self.class` has a substitution context for `origin`, apply the
    // associated substitution to `ty`.
    fn instantiate(&self, ty: DeclTy<R>, origin: TypeName) -> DeclTy<R> {
        match self.class.substs.get(&origin) {
            Some(ctx) => Substitution { subst: &ctx.subst }.instantiate(&ty),
            None => ty,
        }
    }
}
impl<R: Reason> Class<R> for ClassType<R> {
    /// Looks up an instance property, memoizing the built `ClassElt`.
    /// Returns `Ok(None)` when the folded class has no such property.
    fn get_prop(&self, dependent: DeclName, name: PropName) -> Result<Option<Rc<ClassElt<R>>>> {
        // Fast path: already cached.
        if let Some(class_elt) = self.members.borrow().props.get(&name) {
            return Ok(Some(Rc::clone(class_elt)));
        }
        let folded_elt = match self.class.props.get(&name) {
            Some(fe) => fe,
            None => return Ok(None),
        };
        // Fetch the type from the declaring class and apply any type-parameter
        // substitution recorded for that origin.
        let origin = folded_elt.origin;
        let ty = self.instantiate(
            self.provider
                .get_shallow_property_type(dependent, origin, name)?
                .unwrap_or_else(|| self.member_type_missing("property", origin, name)),
            origin,
        );
        let class_elt = Rc::new(ClassElt::new(folded_elt, ty));
        self.members
            .borrow_mut()
            .props
            .insert(name, Rc::clone(&class_elt));
        Ok(Some(class_elt))
    }
    /// Same as [`Self::get_prop`], for static properties.
    fn get_static_prop(
        &self,
        dependent: DeclName,
        name: PropName,
    ) -> Result<Option<Rc<ClassElt<R>>>> {
        if let Some(class_elt) = self.members.borrow().static_props.get(&name) {
            return Ok(Some(Rc::clone(class_elt)));
        }
        let folded_elt = match self.class.static_props.get(&name) {
            Some(fe) => fe,
            None => return Ok(None),
        };
        let origin = folded_elt.origin;
        let ty = self.instantiate(
            self.provider
                .get_shallow_static_property_type(dependent, origin, name)?
                .unwrap_or_else(|| self.member_type_missing("static property", origin, name)),
            origin,
        );
        let class_elt = Rc::new(ClassElt::new(folded_elt, ty));
        self.members
            .borrow_mut()
            .static_props
            .insert(name, Rc::clone(&class_elt));
        Ok(Some(class_elt))
    }
    /// Same as [`Self::get_prop`], for instance methods.
    fn get_method(&self, dependent: DeclName, name: MethodName) -> Result<Option<Rc<ClassElt<R>>>> {
        if let Some(class_elt) = self.members.borrow().methods.get(&name) {
            return Ok(Some(Rc::clone(class_elt)));
        }
        let folded_elt = match self.class.methods.get(&name) {
            Some(fe) => fe,
            None => return Ok(None),
        };
        let origin = folded_elt.origin;
        let ty = self.instantiate(
            self.provider
                .get_shallow_method_type(dependent, origin, name)?
                .unwrap_or_else(|| self.member_type_missing("method", origin, name)),
            origin,
        );
        let class_elt = Rc::new(ClassElt::new(folded_elt, ty));
        self.members
            .borrow_mut()
            .methods
            .insert(name, Rc::clone(&class_elt));
        Ok(Some(class_elt))
    }
    /// Same as [`Self::get_prop`], for static methods.
    fn get_static_method(
        &self,
        dependent: DeclName,
        name: MethodName,
    ) -> Result<Option<Rc<ClassElt<R>>>> {
        if let Some(class_elt) = self.members.borrow().static_methods.get(&name) {
            return Ok(Some(Rc::clone(class_elt)));
        }
        let folded_elt = match self.class.static_methods.get(&name) {
            Some(fe) => fe,
            None => return Ok(None),
        };
        let origin = folded_elt.origin;
        let ty = self.instantiate(
            self.provider
                .get_shallow_static_method_type(dependent, origin, name)?
                .unwrap_or_else(|| self.member_type_missing("static method", origin, name)),
            origin,
        );
        let class_elt = Rc::new(ClassElt::new(folded_elt, ty));
        self.members
            .borrow_mut()
            .static_methods
            .insert(name, Rc::clone(&class_elt));
        Ok(Some(class_elt))
    }
    /// Looks up the constructor; `Ok(None)` means the class declares none.
    /// Uses `OnceCell::get_or_try_init` so the absence is also cached.
    fn get_constructor(&self, dependent: DeclName) -> Result<Option<Rc<ClassElt<R>>>> {
        Ok(self
            .members
            .borrow_mut()
            .constructor
            .get_or_try_init::<_, Error>(|| {
                let folded_elt = match &self.class.constructor {
                    Some(fe) => fe,
                    None => return Ok(None),
                };
                let origin = folded_elt.origin;
                let ty = self.instantiate(
                    self.provider
                        .get_shallow_constructor_type(dependent, origin)?
                        .unwrap_or_else(|| {
                            self.member_type_missing("constructor", origin, "__construct")
                        }),
                    origin,
                );
                Ok(Some(Rc::new(ClassElt::new(folded_elt, ty))))
            })?
            .as_ref()
            .map(Rc::clone))
    }
}
| 36.655462 | 100 | 0.571756 |
4a7ad2edcbfe804519732714e0998672774c2117
| 3,885 |
use crate::test;
use super::super::*;
#[test]
fn content_length_on_str() {
    test::set_handler("/content_length_on_str", |_unit| {
        test::make_response(200, "OK", vec![], vec![])
    });
    let response = post("test://host/content_length_on_str")
        .send_string("Hello World!!!")
        .unwrap();
    // "Hello World!!!" is 14 bytes, so the request must carry
    // `Content-Length: 14`.
    let written = response.to_write_vec();
    let request_text = String::from_utf8_lossy(&written);
    assert!(request_text.contains("\r\nContent-Length: 14\r\n"));
}
#[test]
fn user_set_content_length_on_str() {
    test::set_handler("/user_set_content_length_on_str", |_unit| {
        test::make_response(200, "OK", vec![], vec![])
    });
    let response = post("test://host/user_set_content_length_on_str")
        .set("Content-Length", "12345")
        .send_string("Hello World!!!")
        .unwrap();
    // An explicit user-supplied Content-Length wins over the computed one.
    let written = response.to_write_vec();
    let request_text = String::from_utf8_lossy(&written);
    assert!(request_text.contains("\r\nContent-Length: 12345\r\n"));
}
#[test]
#[cfg(feature = "json")]
fn content_length_on_json() {
    test::set_handler("/content_length_on_json", |_unit| {
        test::make_response(200, "OK", vec![], vec![])
    });
    let mut body = SerdeMap::new();
    body.insert(
        "Hello".to_string(),
        SerdeValue::String("World!!!".to_string()),
    );
    let response = post("test://host/content_length_on_json")
        .send_json(SerdeValue::Object(body))
        .unwrap();
    // The serialized body `{"Hello":"World!!!"}` is 20 bytes long.
    let written = response.to_write_vec();
    let request_text = String::from_utf8_lossy(&written);
    assert!(request_text.contains("\r\nContent-Length: 20\r\n"));
}
#[test]
fn content_length_and_chunked() {
    test::set_handler("/content_length_and_chunked", |_unit| {
        test::make_response(200, "OK", vec![], vec![])
    });
    let resp = post("test://host/content_length_and_chunked")
        .set("Transfer-Encoding", "chunked")
        .send_string("Hello World!!!")
        .unwrap();
    let vec = resp.to_write_vec();
    let s = String::from_utf8_lossy(&vec);
    assert!(s.contains("Transfer-Encoding: chunked\r\n"));
    // Bug fix: the old pattern `"\r\nContent-Length:\r\n"` could only match a
    // Content-Length header with an *empty* value, so the assertion was
    // vacuously true and never detected a sent Content-Length. Matching the
    // header name alone actually asserts absence (RFC 7230 §3.3.3: a sender
    // must not send Content-Length alongside Transfer-Encoding).
    assert!(!s.contains("\r\nContent-Length:"));
}
#[test]
#[cfg(feature = "charset")]
fn str_with_encoding() {
    test::set_handler("/str_with_encoding", |_unit| {
        test::make_response(200, "OK", vec![], vec![])
    });
    let response = post("test://host/str_with_encoding")
        .set("Content-Type", "text/plain; charset=iso-8859-1")
        .send_string("Hällo Wörld!!!")
        .unwrap();
    // The body must be transcoded from UTF-8 to ISO-8859-1, so the final
    // 14 bytes of the written request are the single-byte Latin-1 form.
    let written = response.to_write_vec();
    let body_start = written.len() - 14;
    assert_eq!(
        &written[body_start..],
        //H  ä    l    l    o    _   W   ö    r    l    d    !   !   !
        [72, 228, 108, 108, 111, 32, 87, 246, 114, 108, 100, 33, 33, 33]
    );
}
#[test]
#[cfg(feature = "json")]
fn content_type_on_json() {
    test::set_handler("/content_type_on_json", |_unit| {
        test::make_response(200, "OK", vec![], vec![])
    });
    let mut body = SerdeMap::new();
    body.insert(
        "Hello".to_string(),
        SerdeValue::String("World!!!".to_string()),
    );
    let response = post("test://host/content_type_on_json")
        .send_json(SerdeValue::Object(body))
        .unwrap();
    // Sending JSON should set the JSON content type automatically.
    let written = response.to_write_vec();
    let request_text = String::from_utf8_lossy(&written);
    assert!(request_text.contains("\r\nContent-Type: application/json\r\n"));
}
#[test]
#[cfg(feature = "json")]
fn content_type_not_overriden_on_json() {
    test::set_handler("/content_type_not_overriden_on_json", |_unit| {
        test::make_response(200, "OK", vec![], vec![])
    });
    let mut body = SerdeMap::new();
    body.insert(
        "Hello".to_string(),
        SerdeValue::String("World!!!".to_string()),
    );
    let response = post("test://host/content_type_not_overriden_on_json")
        .set("content-type", "text/plain")
        .send_json(SerdeValue::Object(body))
        .unwrap();
    // A user-supplied content-type must not be replaced by the JSON default.
    let written = response.to_write_vec();
    let request_text = String::from_utf8_lossy(&written);
    assert!(request_text.contains("\r\ncontent-type: text/plain\r\n"));
}
| 31.844262 | 72 | 0.592278 |
4ae87229728d10524b05b4194194d1b1fcfdbf68
| 747 |
/*
*
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1.0.0
*
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LolClashClientFailedInvite {
    // Serialized as `exception`; omitted from JSON output when `None`.
    #[serde(rename = "exception", skip_serializing_if = "Option::is_none")]
    pub exception: Option<String>,
    // Serialized as `playerId`; omitted from JSON output when `None`.
    // NOTE(review): presumably identifies the invited player — confirm
    // against the LCU API schema.
    #[serde(rename = "playerId", skip_serializing_if = "Option::is_none")]
    pub player_id: Option<i64>,
}
impl LolClashClientFailedInvite {
pub fn new() -> LolClashClientFailedInvite {
LolClashClientFailedInvite {
exception: None,
player_id: None,
}
}
}
| 23.34375 | 109 | 0.67336 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.