hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
87e91fbdb8263ccd10c1d2a82b6a351a6d05afce
| 675 |
/// A simple tab bar: an ordered list of tab titles plus the index of
/// the currently selected tab.
#[derive(Debug, Default)]
pub struct TabBar {
    // Tab captions, in display order.
    titles: Vec<String>,
    // Currently selected tab, 0 by default.
    index: usize,
}
impl TabBar {
    /// Creates a tab bar with the given titles; the first tab is selected.
    pub fn new(titles: Vec<String>) -> TabBar {
        TabBar { titles, index: 0 }
    }
    /// Returns the index of the currently selected tab.
    pub fn index(&self) -> usize {
        self.index
    }
    /// Selects the next tab, wrapping around to the first one.
    ///
    /// Fix: the original `% self.titles.len()` panicked with a
    /// division by zero when `titles` was empty, which is reachable
    /// via `TabBar::default()`. An empty bar is now a no-op.
    pub fn next(&mut self) {
        if !self.titles.is_empty() {
            self.index = (self.index + 1) % self.titles.len();
        }
    }
    /// Selects the previous tab, wrapping around to the last one.
    ///
    /// Fix: the original computed `self.titles.len() - 1` when `index`
    /// was 0, which underflowed (panicked) for an empty bar.
    pub fn previous(&mut self) {
        if self.titles.is_empty() {
            return;
        }
        if self.index > 0 {
            self.index -= 1;
        } else {
            self.index = self.titles.len() - 1;
        }
    }
    /// Returns the tab titles as a shared slice.
    pub fn titles(&self) -> &[String] {
        self.titles.as_ref()
    }
}
| 19.285714 | 58 | 0.484444 |
1dca073d1dc4958f6435d3217cb0d5069ac591bc
| 1,805 |
use actix_web::{HttpResponse, ResponseError};
use drogue_client::error::ErrorInformation;
use drogue_cloud_database_common::{error::ServiceError, models::AdvanceError};
use drogue_cloud_registry_events::EventSenderError;
use drogue_cloud_service_api::webapp as actix_web;
use thiserror::Error;
/// Error type of the Postgres-backed management service, generic over
/// the event sender's payload error type `E`.
#[derive(Debug, Error)]
pub enum PostgresManagementServiceError<E>
where
    E: std::error::Error + std::fmt::Debug + 'static,
{
    /// Database / service-layer failure.
    #[error("Service error: {0}")]
    Service(#[from] ServiceError),
    /// Failure while emitting a registry change event.
    #[error("Event sender error: {0}")]
    EventSender(#[from] EventSenderError<E>),
}
impl<E> From<tokio_postgres::Error> for PostgresManagementServiceError<E>
where
E: std::error::Error + std::fmt::Debug + 'static,
{
fn from(err: tokio_postgres::Error) -> Self {
PostgresManagementServiceError::Service(err.into())
}
}
impl<E> From<deadpool_postgres::PoolError> for PostgresManagementServiceError<E>
where
E: std::error::Error + std::fmt::Debug + 'static,
{
fn from(err: deadpool_postgres::PoolError) -> Self {
PostgresManagementServiceError::Service(err.into())
}
}
impl<E> From<AdvanceError> for PostgresManagementServiceError<E>
where
E: std::error::Error + std::fmt::Debug + 'static,
{
fn from(err: AdvanceError) -> Self {
PostgresManagementServiceError::Service(err.into())
}
}
impl<E> ResponseError for PostgresManagementServiceError<E>
where
    E: std::error::Error + std::fmt::Debug + 'static,
{
    /// Maps service errors to their own HTTP responses and event-sender
    /// failures to 502 Bad Gateway with a structured error body.
    fn error_response(&self) -> HttpResponse {
        match self {
            Self::EventSender(err) => {
                let info = ErrorInformation {
                    error: "EventSenderError".into(),
                    message: err.to_string(),
                };
                HttpResponse::BadGateway().json(info)
            }
            Self::Service(err) => err.error_response(),
        }
    }
}
| 30.083333 | 88 | 0.675346 |
e5dd9644e45e1697b7f73515a400014c11e6ceb5
| 1,664 |
//! Converts maps to vecs for serialization.
//! from https://github.com/DenisKolodin/vectorize
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{iter::FromIterator, vec::Vec};
pub fn serialize<'a, T, K, V, S>(target: T, ser: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: IntoIterator<Item = (&'a K, &'a V)>,
K: Serialize + 'a,
V: Serialize + 'a,
{
let container: Vec<_> = target.into_iter().collect();
serde::Serialize::serialize(&container, ser)
}
pub fn deserialize<'de, T, K, V, D>(des: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: FromIterator<(K, V)>,
K: Deserialize<'de>,
V: Deserialize<'de>,
{
let container: Vec<_> = serde::Deserialize::deserialize(des)?;
Ok(container.into_iter().collect())
}
#[cfg(test)]
mod tests {
    use crate::vectorize;
    use serde::{Deserialize, Serialize};
    use std::collections::HashMap;
    // A non-string map key; this is exactly the case `vectorize` exists
    // for, since formats like postcard cannot encode struct map keys
    // directly.
    #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
    struct MyKey {
        one: String,
        two: u16,
        more: Vec<u8>,
    }
    #[derive(Debug, Serialize, Deserialize)]
    struct MyComplexType {
        #[serde(with = "vectorize")]
        map: HashMap<MyKey, String>,
    }
    // Round-trips a struct-keyed map through postcard and checks the
    // entry survives intact.
    #[test]
    fn it_works() -> Result<(), Box<dyn std::error::Error>> {
        let key = MyKey { one: "1".into(), two: 2, more: vec![1, 2, 3] };
        let mut map = HashMap::new();
        map.insert(key.clone(), "value".into());
        let instance = MyComplexType { map };
        let serialized = postcard::to_allocvec(&instance)?;
        let deserialized: MyComplexType = postcard::from_bytes(&serialized)?;
        let expected_value = "value".to_string();
        assert_eq!(deserialized.map.get(&key), Some(&expected_value));
        Ok(())
    }
}
| 27.278689 | 86 | 0.658654 |
01ab430856328c7426bab3c9dfeb3aa696a73c5a
| 1,979 |
mod client;
mod store;
use trussed::{
client::{
CounterClient as _,
// ManagementClient as _,
},
error::Result,
syscall,
types::Location::*,
store::counterstore::{
Counterstore as _,
ClientCounterstore,
},
};
// Exercises the Counterstore API directly against a store instance.
// The asserted values show counters starting at 256: the first
// increment returns 257.
#[test]
fn counter_implementation() {
    let result: Result<()> = store::get(|store| {
        let client_id = "test".into();
        let mut cstore = ClientCounterstore::new(client_id, *store);
        assert_eq!(cstore.increment_counter_zero(), 257);
        assert_eq!(cstore.increment_counter_zero(), 258);
        assert_eq!(cstore.increment_counter_zero(), 259);
        let id = cstore.create(Volatile).unwrap(); // counter zero is now at 4
        // NOTE(review): the next value is 261 (= 256 + 5), so create()
        // appears to consume one increment of counter zero — confirm.
        assert_eq!(cstore.increment_counter_zero(), 261);
        // The newly created counter starts independently at 256.
        assert_eq!(cstore.increment(id)?, 257);
        assert_eq!(cstore.increment(id)?, 258);
        assert_eq!(cstore.increment(id)?, 259);
        assert_eq!(cstore.increment_counter_zero(), 262);
        Ok(())
    });
    result.unwrap();
}
// Same behavior via the syscall-based client API: two counters in
// different storage locations (Volatile vs External) advance
// independently, each starting at 256.
#[test]
fn counter_client() {
    client::get(|client| {
        let id = syscall!(client.create_counter(Volatile)).id;
        assert_eq!(syscall!(client.increment_counter(id)).counter, 257);
        assert_eq!(syscall!(client.increment_counter(id)).counter, 258);
        assert_eq!(syscall!(client.increment_counter(id)).counter, 259);
        let jd = syscall!(client.create_counter(External)).id;
        assert_eq!(syscall!(client.increment_counter(jd)).counter, 257);
        assert_eq!(syscall!(client.increment_counter(jd)).counter, 258);
        assert_eq!(syscall!(client.increment_counter(id)).counter, 260);
        // Drive both counters far enough to cross byte boundaries.
        for i in 5..1_000 {
            assert_eq!(syscall!(client.increment_counter(id)).counter, 256 + i);
        }
        for j in 3..1_000 {
            assert_eq!(syscall!(client.increment_counter(jd)).counter, 256 + j);
        }
        // assert_eq!(syscall!(client.uptime()).uptime.as_nanos(), 10);
    });
}
| 29.537313 | 80 | 0.616473 |
1c6dba6cf441cfc81c809313446e86f24836d043
| 23,763 |
use libc::{c_char, c_int, c_void};
use std::fmt::{Debug, Display, Formatter};
use std::{mem, ptr, str};
pub mod ffi;
/// Version triple of the linked libzmq library.
pub struct Version {
    pub major: i32,
    pub minor: i32,
    pub patch: i32,
}
impl Version {
pub fn new() -> Self {
let mut major = 0i32;
let mut minor = 0i32;
let mut patch = 0i32;
unsafe {
ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
Version {
major,
minor,
patch,
}
}
}
impl Default for Version {
fn default() -> Self {
Self::new()
}
}
/// Typed view of the ØMQ errno values, including the ZMQ-specific
/// codes (EFSM, ENOCOMPATPROTO, ETERM, EMTHREAD). Codes not in this
/// list are preserved in `Unknown`.
pub enum ErrNo {
    ENOTSUP,
    EPROTONOSUPPORT,
    ENOBUFS,
    ENETDOWN,
    EADDRINUSE,
    EADDRNOTAVAIL,
    ECONNREFUSED,
    EINPROGRESS,
    ENOTSOCK,
    EMSGSIZE,
    EAFNOSUPPORT,
    ENETUNREACH,
    ECONNABORTED,
    ECONNRESET,
    ENOTCONN,
    ETIMEDOUT,
    EHOSTUNREACH,
    ENETRESET,
    EFSM,
    ENOCOMPATPROTO,
    ETERM,
    EMTHREAD,
    Unknown(i32),
}
impl From<i32> for ErrNo {
    /// Maps a raw errno to its typed variant; unrecognized values are
    /// preserved in `ErrNo::Unknown` rather than dropped.
    fn from(errno: i32) -> Self {
        match errno {
            ffi::ENOTSUP => ErrNo::ENOTSUP,
            ffi::EPROTONOSUPPORT => ErrNo::EPROTONOSUPPORT,
            ffi::ENOBUFS => ErrNo::ENOBUFS,
            ffi::ENETDOWN => ErrNo::ENETDOWN,
            ffi::EADDRINUSE => ErrNo::EADDRINUSE,
            ffi::EADDRNOTAVAIL => ErrNo::EADDRNOTAVAIL,
            ffi::ECONNREFUSED => ErrNo::ECONNREFUSED,
            ffi::EINPROGRESS => ErrNo::EINPROGRESS,
            ffi::ENOTSOCK => ErrNo::ENOTSOCK,
            ffi::EMSGSIZE => ErrNo::EMSGSIZE,
            ffi::EAFNOSUPPORT => ErrNo::EAFNOSUPPORT,
            ffi::ENETUNREACH => ErrNo::ENETUNREACH,
            ffi::ECONNABORTED => ErrNo::ECONNABORTED,
            ffi::ECONNRESET => ErrNo::ECONNRESET,
            ffi::ENOTCONN => ErrNo::ENOTCONN,
            ffi::ETIMEDOUT => ErrNo::ETIMEDOUT,
            ffi::EHOSTUNREACH => ErrNo::EHOSTUNREACH,
            ffi::ENETRESET => ErrNo::ENETRESET,
            ffi::EFSM => ErrNo::EFSM,
            ffi::ENOCOMPATPROTO => ErrNo::ENOCOMPATPROTO,
            ffi::ETERM => ErrNo::ETERM,
            ffi::EMTHREAD => ErrNo::EMTHREAD,
            other => ErrNo::Unknown(other),
        }
    }
}
impl ErrNo {
    /// Reads the calling thread's current ØMQ errno and converts it.
    pub fn get_errno() -> ErrNo {
        ErrNo::from(unsafe { ffi::zmq_errno() } as i32)
    }
    /// Returns the raw errno value (inverse of `From<i32>`).
    pub fn errno(&self) -> i32 {
        match self {
            ErrNo::ENOTSUP => ffi::ENOTSUP,
            ErrNo::EPROTONOSUPPORT => ffi::EPROTONOSUPPORT,
            ErrNo::ENOBUFS => ffi::ENOBUFS,
            ErrNo::ENETDOWN => ffi::ENETDOWN,
            ErrNo::EADDRINUSE => ffi::EADDRINUSE,
            ErrNo::EADDRNOTAVAIL => ffi::EADDRNOTAVAIL,
            ErrNo::ECONNREFUSED => ffi::ECONNREFUSED,
            ErrNo::EINPROGRESS => ffi::EINPROGRESS,
            ErrNo::ENOTSOCK => ffi::ENOTSOCK,
            ErrNo::EMSGSIZE => ffi::EMSGSIZE,
            ErrNo::EAFNOSUPPORT => ffi::EAFNOSUPPORT,
            ErrNo::ENETUNREACH => ffi::ENETUNREACH,
            ErrNo::ECONNABORTED => ffi::ECONNABORTED,
            ErrNo::ECONNRESET => ffi::ECONNRESET,
            ErrNo::ENOTCONN => ffi::ENOTCONN,
            ErrNo::ETIMEDOUT => ffi::ETIMEDOUT,
            ErrNo::EHOSTUNREACH => ffi::EHOSTUNREACH,
            ErrNo::ENETRESET => ffi::ENETRESET,
            ErrNo::EFSM => ffi::EFSM,
            ErrNo::ENOCOMPATPROTO => ffi::ENOCOMPATPROTO,
            ErrNo::ETERM => ffi::ETERM,
            ErrNo::EMTHREAD => ffi::EMTHREAD,
            ErrNo::Unknown(val) => *val,
        }
    }
    /// Returns libzmq's human-readable message for this errno.
    pub fn err_msg(&self) -> String {
        get_err_msg(self.errno())
    }
}
/// Returns libzmq's human-readable message for `errno`.
///
/// Fix: the original `.to_str().unwrap()` panicked if the message was
/// not valid UTF-8; a lossy conversion never panics and is equivalent
/// for the (ASCII) messages libzmq normally returns.
pub fn get_err_msg(errno: i32) -> String {
    // SAFETY: zmq_strerror returns a pointer to a static,
    // NUL-terminated C string (per the ØMQ API documentation).
    unsafe {
        std::ffi::CStr::from_ptr(ffi::zmq_strerror(errno))
            .to_string_lossy()
            .into_owned()
    }
}
/// A ØMQ error, stored as its raw errno value.
pub struct Error(i32);
impl Error {
    /// Captures the calling thread's current ØMQ errno.
    pub fn new() -> Self {
        Self(unsafe { ffi::zmq_errno() } as i32)
    }
    /// Returns the typed errno for this error.
    pub fn errno(&self) -> ErrNo {
        ErrNo::from(self.0)
    }
}
impl From<i32> for Error {
    fn from(errno: i32) -> Self {
        Error(errno)
    }
}
impl Default for Error {
    fn default() -> Self {
        Error::new()
    }
}
impl Display for Error {
    /// Renders as `[errno] message`.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "[{}] {}", self.0, get_err_msg(self.0))
    }
}
impl Debug for Error {
    // Identical rendering to Display; delegate instead of duplicating
    // the format string.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        Display::fmt(self, f)
    }
}
impl std::error::Error for Error {}
/// Convenience alias for results whose error type is [`Error`].
pub type Result<T> = std::result::Result<T, Error>;
// Returns early with the current ØMQ error when a libzmq call signals
// failure by returning -1 (the C convention used throughout this file).
macro_rules! try_err {
    ($ex: expr) => {
        if $ex == -1 {
            return Err(Error::new());
        }
    };
}
/// An owned ØMQ message (`zmq_msg_t`), closed on drop.
pub struct Message(ffi::ZmqMsgT);
impl Message {
    /// Creates an empty (zero-sized) message.
    pub fn new() -> Self {
        Self::with_capacity(0).unwrap()
    }
    /// Releases the underlying message storage. Also invoked by `Drop`;
    /// calling it twice is handed to libzmq as-is.
    pub fn close(&mut self) -> Result<()> {
        try_err!(unsafe { ffi::zmq_msg_close(&mut self.0) });
        Ok(())
    }
    /// Allocates a message with room for `size` bytes (uninitialized
    /// content, per `zmq_msg_init_size` semantics).
    pub fn with_capacity(size: usize) -> Result<Self> {
        let mut msg: ffi::ZmqMsgT = unsafe { mem::zeroed() };
        try_err!(unsafe { ffi::zmq_msg_init_size(&mut msg, size) });
        Ok(Message(msg))
    }
    /// Returns the message body as a byte slice; `None` only when the
    /// data pointer is null (that is what `as_ref()` on the raw slice
    /// pointer checks).
    pub fn as_bytes(&mut self) -> Option<&[u8]> {
        unsafe {
            let data = ffi::zmq_msg_data(&mut self.0);
            let len = ffi::zmq_msg_size(&self.0);
            ptr::slice_from_raw_parts(data as *const u8, len).as_ref()
        }
    }
    /// Returns the message body as UTF-8 text, or `None` if it is not
    /// valid UTF-8.
    pub fn as_str(&mut self) -> Option<&str> {
        self.as_bytes().and_then(|bytes| str::from_utf8(bytes).ok())
    }
}
impl Default for Message {
fn default() -> Self {
Self::new()
}
}
impl Clone for Message {
    /// Copies the message via `zmq_msg_copy` into a freshly initialized
    /// destination.
    fn clone(&self) -> Self {
        Self(unsafe {
            // zmq_msg_copy wants *mut for both arguments, so a bitwise
            // copy of the handle is made to obtain one from `&self`.
            // NOTE(review): duplicating a zmq_msg_t by bitwise copy is
            // not part of the documented API — verify this is sound for
            // long/refcounted messages.
            let mut src = self.0;
            let mut msg: ffi::ZmqMsgT = mem::zeroed();
            ffi::zmq_msg_init(&mut msg);
            ffi::zmq_msg_copy(&mut msg, &mut src);
            msg
        })
    }
}
impl From<&[u8]> for Message {
    /// Builds a message by copying `bytes` into a freshly allocated
    /// ØMQ message body.
    ///
    /// Fix: the original cast the source `as_ptr()` to `*mut c_void`
    /// (a needless const→mut cast) and copied through `c_void`,
    /// implicitly relying on `size_of::<c_void>() == 1`; copying as
    /// `u8` states the element size explicitly.
    fn from(bytes: &[u8]) -> Self {
        let mut msg = Message::with_capacity(bytes.len()).unwrap();
        // SAFETY: with_capacity allocated exactly `bytes.len()` bytes,
        // zmq_msg_data points at that buffer, and the regions cannot
        // overlap.
        unsafe {
            ptr::copy_nonoverlapping(
                bytes.as_ptr(),
                ffi::zmq_msg_data(&mut msg.0) as *mut u8,
                bytes.len(),
            );
        }
        msg
    }
}
impl From<&str> for Message {
    /// Builds a message from the string's UTF-8 bytes.
    fn from(msg: &str) -> Self {
        msg.as_bytes().into()
    }
}
impl Drop for Message {
    fn drop(&mut self) {
        // Best-effort close; errors cannot be surfaced from drop.
        let _ = self.close();
    }
}
/// The ØMQ socket patterns, mirroring the `ZMQ_*` socket-type
/// constants.
pub enum SocketType {
    PAIR,
    PUB,
    SUB,
    REQ,
    REP,
    DEALER,
    ROUTER,
    PULL,
    PUSH,
    XPUB,
    XSUB,
    STREAM,
}
impl SocketType {
    /// Returns the raw `ZMQ_*` constant for this socket type, as
    /// expected by `zmq_socket`.
    pub fn code(&self) -> c_int {
        match self {
            SocketType::PAIR => ffi::ZMQ_PAIR,
            SocketType::PUB => ffi::ZMQ_PUB,
            SocketType::SUB => ffi::ZMQ_SUB,
            SocketType::REQ => ffi::ZMQ_REQ,
            SocketType::REP => ffi::ZMQ_REP,
            SocketType::DEALER => ffi::ZMQ_DEALER,
            SocketType::ROUTER => ffi::ZMQ_ROUTER,
            SocketType::PULL => ffi::ZMQ_PULL,
            SocketType::PUSH => ffi::ZMQ_PUSH,
            SocketType::XPUB => ffi::ZMQ_XPUB,
            SocketType::XSUB => ffi::ZMQ_XSUB,
            SocketType::STREAM => ffi::ZMQ_STREAM,
        }
    }
}
/// Bit-flags for `Socket::send`/`send_msg` (`ZMQ_DONTWAIT`,
/// `ZMQ_SNDMORE`).
pub struct SendFlag(i32);
impl SendFlag {
    /// No flags (blocking send, final message part).
    pub fn new() -> Self {
        Self(0)
    }
    #[allow(non_snake_case)]
    pub fn DONTWAIT() -> Self {
        Self(ffi::ZMQ_DONTWAIT)
    }
    #[allow(non_snake_case)]
    pub fn SNDMORE() -> Self {
        Self(ffi::ZMQ_SNDMORE)
    }
    // The builder methods below return a new flag set with the bit
    // added; `self` is not mutated.
    pub fn dontwait(&self) -> Self {
        Self(self.0 | ffi::ZMQ_DONTWAIT)
    }
    pub fn sndmore(&self) -> Self {
        Self(self.0 | ffi::ZMQ_SNDMORE)
    }
}
impl Default for SendFlag {
    fn default() -> Self {
        Self::new()
    }
}
/// Bit-flags for `Socket::recv`/`recv_msg` (`ZMQ_DONTWAIT`).
pub struct RecvFlag(i32);
impl RecvFlag {
    /// No flags (blocking receive).
    pub fn new() -> Self {
        Self(0)
    }
    #[allow(non_snake_case)]
    pub fn DONTWAIT() -> Self {
        Self(ffi::ZMQ_DONTWAIT)
    }
    // Returns a new flag set with the bit added; `self` is not mutated.
    pub fn dontwait(&self) -> Self {
        Self(self.0 | ffi::ZMQ_DONTWAIT)
    }
}
impl Default for RecvFlag {
    fn default() -> Self {
        Self::new()
    }
}
// Generates typed getter methods around `zmq_getsockopt`.
//
// Fixes over the original:
// * String variant: `buffer[0..size - 1]` underflowed (panicked) when
//   libzmq reported a zero-sized value, and `from_utf8(..).unwrap()`
//   panicked on non-UTF-8 option text; both are now handled.
// * bool/integer variants: `&mut mem::size_of::<..>()` mutably
//   borrowed a temporary; a named local makes the out-parameter
//   explicit.
macro_rules! getsockopt {
    // String options: libzmq returns a NUL-terminated buffer and
    // reports a size that includes the terminator.
    ($name: ident, String, $option: path, $size: literal) => {
        pub fn $name(&self) -> Result<String> {
            let mut size = $size;
            let mut buffer = [0u8; $size];
            try_err!(unsafe {
                ffi::zmq_getsockopt(
                    self.0,
                    $option,
                    buffer.as_mut_ptr() as *mut c_void,
                    &mut size,
                )
            });
            // Strip the trailing NUL; saturate so a zero-sized reply
            // yields "" instead of an index underflow.
            let end = size.saturating_sub(1);
            Ok(String::from_utf8_lossy(&buffer[0..end]).into_owned())
        }
    };
    // Binary options (e.g. CURVE keys): the reported size is the exact
    // payload length.
    ($name: ident, Vec<u8>, $option: path, $size: literal) => {
        pub fn $name(&self) -> Result<Vec<u8>> {
            let mut size = $size;
            let mut buffer = [0u8; $size];
            try_err!(unsafe {
                ffi::zmq_getsockopt(
                    self.0,
                    $option,
                    buffer.as_mut_ptr() as *mut c_void,
                    &mut size,
                )
            });
            Ok(buffer[0..size].to_vec())
        }
    };
    // Boolean options are transported as a C int (0 or 1).
    ($name: ident, bool, $option: path) => {
        pub fn $name(&self) -> Result<bool> {
            let mut value: i32 = 0;
            let mut size = mem::size_of::<i32>();
            try_err!(unsafe {
                ffi::zmq_getsockopt(
                    self.0,
                    $option,
                    &mut value as *mut i32 as *mut c_void,
                    &mut size,
                )
            });
            Ok(value != 0)
        }
    };
    // Plain integer options.
    ($name: ident, $ty: ty, $option: path) => {
        pub fn $name(&self) -> Result<$ty> {
            let mut value: $ty = 0;
            let mut size = mem::size_of::<$ty>();
            try_err!(unsafe {
                ffi::zmq_getsockopt(
                    self.0,
                    $option,
                    &mut value as *mut $ty as *mut c_void,
                    &mut size,
                )
            });
            Ok(value)
        }
    };
}
// Generates typed setter methods around `zmq_setsockopt`.
//
// Fix: the bool and integer variants passed the *value itself* cast to
// a pointer (`flag as i32 as *const c_void`, `value as *const c_void`),
// so libzmq would read `size` bytes from that bogus address instead of
// from the value. zmq_setsockopt expects a pointer to the option value;
// both variants now pass `&value`.
macro_rules! setsockopt {
    ($name: ident, bool, $option: path) => {
        pub fn $name(&mut self, flag: bool) -> Result<()> {
            // Booleans are transported as a C int (0 or 1).
            let value: i32 = flag as i32;
            try_err!(unsafe {
                ffi::zmq_setsockopt(
                    self.0,
                    $option,
                    &value as *const i32 as *const c_void,
                    mem::size_of::<i32>(),
                )
            });
            Ok(())
        }
    };
    // String options: libzmq takes the byte buffer and its length; no
    // NUL terminator is required for setsockopt.
    ($name: ident, &str, $option: path) => {
        pub fn $name(&mut self, value: &str) -> Result<()> {
            let bytes = value.as_bytes();
            try_err!(unsafe {
                ffi::zmq_setsockopt(
                    self.0,
                    $option,
                    bytes.as_ptr() as *const c_void,
                    bytes.len(),
                )
            });
            Ok(())
        }
    };
    // Binary options (CURVE keys, routing ids, subscriptions).
    ($name: ident, &[u8], $option: path) => {
        pub fn $name(&mut self, value: &[u8]) -> Result<()> {
            try_err!(unsafe {
                ffi::zmq_setsockopt(
                    self.0,
                    $option,
                    value.as_ptr() as *const c_void,
                    value.len(),
                )
            });
            Ok(())
        }
    };
    // Plain integer options.
    ($name: ident, $ty: ty, $option: path) => {
        pub fn $name(&mut self, value: $ty) -> Result<()> {
            try_err!(unsafe {
                ffi::zmq_setsockopt(
                    self.0,
                    $option,
                    &value as *const $ty as *const c_void,
                    mem::size_of::<$ty>(),
                )
            });
            Ok(())
        }
    };
}
/// A ØMQ socket handle (owned pointer obtained from `zmq_socket`),
/// closed on drop.
pub struct Socket(*mut c_void);
impl Socket {
    /// Converts an endpoint string into an owned, NUL-terminated C
    /// string.
    ///
    /// Fix: the original endpoint methods passed `addr.as_bytes()`
    /// straight to libzmq, which expects a NUL-terminated C string —
    /// the terminator was missing, so libzmq could read past the end
    /// of the buffer. Addresses containing an interior NUL are now
    /// rejected with EINVAL.
    fn c_endpoint(addr: &str) -> Result<std::ffi::CString> {
        std::ffi::CString::new(addr).map_err(|_| Error::from(libc::EINVAL))
    }
    /// Binds the socket to a local endpoint (e.g. "tcp://*:5555").
    pub fn bind(&self, addr: &str) -> Result<()> {
        let addr = Self::c_endpoint(addr)?;
        try_err!(unsafe { ffi::zmq_bind(self.0, addr.as_ptr() as *const c_char) });
        Ok(())
    }
    /// Unbinds the socket from a previously bound endpoint.
    pub fn unbind(&self, addr: &str) -> Result<()> {
        let addr = Self::c_endpoint(addr)?;
        try_err!(unsafe { ffi::zmq_unbind(self.0, addr.as_ptr() as *const c_char) });
        Ok(())
    }
    /// Connects the socket to a remote endpoint.
    pub fn connect(&self, addr: &str) -> Result<()> {
        let addr = Self::c_endpoint(addr)?;
        try_err!(unsafe { ffi::zmq_connect(self.0, addr.as_ptr() as *const c_char) });
        Ok(())
    }
    /// Disconnects the socket from a previously connected endpoint.
    pub fn disconnect(&self, addr: &str) -> Result<()> {
        let addr = Self::c_endpoint(addr)?;
        try_err!(unsafe { ffi::zmq_disconnect(self.0, addr.as_ptr() as *const c_char) });
        Ok(())
    }
    /// Closes the socket. Also invoked by `Drop`.
    pub fn close(&self) -> Result<()> {
        try_err!(unsafe { ffi::zmq_close(self.0) });
        Ok(())
    }
    /// Sends `msg` as one message part; returns the number of bytes
    /// queued.
    pub fn send(&self, msg: &[u8], flags: SendFlag) -> Result<i32> {
        let len = msg.len();
        let rc = unsafe { ffi::zmq_send(self.0, msg.as_ptr() as *const c_void, len, flags.0) };
        try_err!(rc);
        Ok(rc as i32)
    }
    /// Sends a [`Message`]; on success libzmq takes ownership of the
    /// message content.
    pub fn send_msg(&self, msg: &mut Message, flags: SendFlag) -> Result<i32> {
        let rc = unsafe { ffi::zmq_msg_send(&mut msg.0, self.0, flags.0) };
        try_err!(rc);
        Ok(rc as i32)
    }
    /// Receives up to `bytes.len()` bytes into `bytes`; returns the
    /// size of the received message (which may exceed the buffer, in
    /// which case it was truncated).
    pub fn recv(&self, bytes: &mut [u8], flags: RecvFlag) -> Result<i32> {
        let len = bytes.len();
        // Fix: write through as_mut_ptr(); the original cast the const
        // pointer from as_ptr() to *mut for a write, which is unsound.
        let rc = unsafe { ffi::zmq_recv(self.0, bytes.as_mut_ptr() as *mut c_void, len, flags.0) };
        try_err!(rc);
        Ok(rc as i32)
    }
    /// Receives one message part into `msg`, replacing its content.
    pub fn recv_msg(&self, msg: &mut Message, flags: RecvFlag) -> Result<i32> {
        let rc = unsafe { ffi::zmq_msg_recv(&mut msg.0, self.0, flags.0) };
        try_err!(rc);
        Ok(rc as i32)
    }
    // TODO; Add all of options
    getsockopt!(get_affinity, u64, ffi::ZMQ_AFFINITY);
    getsockopt!(get_backlog, i32, ffi::ZMQ_BACKLOG);
    getsockopt!(get_bindtodevice, String, ffi::ZMQ_BINDTODEVICE, 255);
    getsockopt!(get_connect_timeout, i32, ffi::ZMQ_CONNECT_TIMEOUT);
    getsockopt!(get_curve_publickey, Vec<u8>, ffi::ZMQ_CURVE_PUBLICKEY, 255);
    getsockopt!(get_curve_secretkey, Vec<u8>, ffi::ZMQ_CURVE_SECRETKEY, 255);
    getsockopt!(get_curve_serverkey, Vec<u8>, ffi::ZMQ_CURVE_SERVERKEY, 255);
    getsockopt!(get_events, i32, ffi::ZMQ_EVENTS);
    getsockopt!(get_fd, i32, ffi::ZMQ_FD);
    getsockopt!(get_gssapi_plaintext, bool, ffi::ZMQ_GSSAPI_PLAINTEXT);
    getsockopt!(get_gssapi_principal, String, ffi::ZMQ_GSSAPI_PRINCIPAL, 255);
    getsockopt!(get_gssapi_server, bool, ffi::ZMQ_GSSAPI_SERVER);
    getsockopt!(
        get_gssapi_service_principal,
        String,
        ffi::ZMQ_GSSAPI_SERVICE_PRINCIPAL,
        255
    );
    getsockopt!(
        get_gssapi_service_principal_nametype,
        i32,
        ffi::ZMQ_GSSAPI_SERVICE_PRINCIPAL_NAMETYPE
    );
    getsockopt!(
        get_gssapi_principal_nametype,
        i32,
        ffi::ZMQ_GSSAPI_PRINCIPAL_NAMETYPE
    );
    getsockopt!(get_handshake_ivl, i32, ffi::ZMQ_HANDSHAKE_IVL);
    getsockopt!(get_immediate, bool, ffi::ZMQ_IMMEDIATE);
    getsockopt!(get_invert_matching, bool, ffi::ZMQ_INVERT_MATCHING);
    getsockopt!(get_ipv_6, bool, ffi::ZMQ_IPV6);
    getsockopt!(get_last_endpoint, String, ffi::ZMQ_LAST_ENDPOINT, 255);
    getsockopt!(get_linger, i32, ffi::ZMQ_LINGER);
    getsockopt!(get_maxmsgsize, i64, ffi::ZMQ_MAXMSGSIZE);
    getsockopt!(get_mechanism, i32, ffi::ZMQ_MECHANISM);
    getsockopt!(get_multicast_hops, i32, ffi::ZMQ_MULTICAST_HOPS);
    getsockopt!(get_multicast_maxtpdu, i32, ffi::ZMQ_MULTICAST_MAXTPDU);
    getsockopt!(get_plain_password, String, ffi::ZMQ_PLAIN_PASSWORD, 255);
    getsockopt!(get_plain_server, bool, ffi::ZMQ_PLAIN_SERVER);
    getsockopt!(get_plain_username, String, ffi::ZMQ_PLAIN_USERNAME, 255);
    getsockopt!(get_use_fd, i32, ffi::ZMQ_USE_FD);
    getsockopt!(get_rate, i32, ffi::ZMQ_RATE);
    getsockopt!(get_rcvbuf, i32, ffi::ZMQ_RCVBUF);
    getsockopt!(get_rcvhwm, i32, ffi::ZMQ_RCVHWM);
    getsockopt!(get_rcvmore, bool, ffi::ZMQ_RCVMORE);
    getsockopt!(get_rcvtimeo, i32, ffi::ZMQ_RCVTIMEO);
    getsockopt!(get_reconnect_ivl, i32, ffi::ZMQ_RECONNECT_IVL);
    getsockopt!(get_reconnect_ivl_max, i32, ffi::ZMQ_RECONNECT_IVL_MAX);
    getsockopt!(get_recovery_ivl, i32, ffi::ZMQ_RECOVERY_IVL);
    getsockopt!(get_routing_id, Vec<u8>, ffi::ZMQ_ROUTING_ID, 255);
    getsockopt!(get_sndbuf, i32, ffi::ZMQ_SNDBUF);
    getsockopt!(get_sndhwm, i32, ffi::ZMQ_SNDHWM);
    getsockopt!(get_sndtimeo, i32, ffi::ZMQ_SNDTIMEO);
    getsockopt!(get_socks_proxy, String, ffi::ZMQ_SOCKS_PROXY, 255);
    getsockopt!(get_tcp_keepalive, i32, ffi::ZMQ_TCP_KEEPALIVE);
    getsockopt!(get_tcp_keepalive_cnt, i32, ffi::ZMQ_TCP_KEEPALIVE_CNT);
    getsockopt!(get_tcp_keepalive_idle, i32, ffi::ZMQ_TCP_KEEPALIVE_IDLE);
    getsockopt!(get_tcp_keepalive_intvl, i32, ffi::ZMQ_TCP_KEEPALIVE_INTVL);
    getsockopt!(get_tcp_maxrt, i32, ffi::ZMQ_TCP_MAXRT);
    getsockopt!(get_thread_safe, bool, ffi::ZMQ_THREAD_SAFE);
    getsockopt!(get_tos, i32, ffi::ZMQ_TOS);
    getsockopt!(get_type, i32, ffi::ZMQ_TYPE);
    getsockopt!(get_zap_domain, String, ffi::ZMQ_ZAP_DOMAIN, 255);
    // getsockopt!(get_zap_enforce_domain, bool, ffi::ZMQ_ZAP_ENFORCE_DOMAIN);
    getsockopt!(get_vmci_buffer_size, u64, ffi::ZMQ_VMCI_BUFFER_SIZE);
    getsockopt!(get_vmci_buffer_min_size, u64, ffi::ZMQ_VMCI_BUFFER_MIN_SIZE);
    getsockopt!(get_vmci_buffer_max_size, u64, ffi::ZMQ_VMCI_BUFFER_MAX_SIZE);
    getsockopt!(get_vmci_connect_timeout, i32, ffi::ZMQ_VMCI_CONNECT_TIMEOUT);
    // getsockopt!(get_multicast_loop, bool, ffi::ZMQ_MULTICAST_LOOP);
    setsockopt!(set_affinity, u64, ffi::ZMQ_AFFINITY);
    setsockopt!(set_backlog, i32, ffi::ZMQ_BACKLOG);
    setsockopt!(set_bindtodevice, &str, ffi::ZMQ_BINDTODEVICE);
    setsockopt!(set_connect_routing_id, &[u8], ffi::ZMQ_CONNECT_ROUTING_ID);
    setsockopt!(set_conflate, bool, ffi::ZMQ_CONFLATE);
    setsockopt!(set_connect_timeout, i32, ffi::ZMQ_CONNECT_TIMEOUT);
    setsockopt!(set_curve_publickey, &[u8], ffi::ZMQ_CURVE_PUBLICKEY);
    setsockopt!(set_curve_secretkey, &[u8], ffi::ZMQ_CURVE_SECRETKEY);
    setsockopt!(set_curve_server, bool, ffi::ZMQ_CURVE_SERVER);
    setsockopt!(set_curve_serverkey, &[u8], ffi::ZMQ_CURVE_SERVERKEY);
    setsockopt!(set_gssapi_plaintext, bool, ffi::ZMQ_GSSAPI_PLAINTEXT);
    setsockopt!(set_gssapi_principal, &str, ffi::ZMQ_GSSAPI_PRINCIPAL);
    setsockopt!(set_gssapi_server, bool, ffi::ZMQ_GSSAPI_SERVER);
    setsockopt!(
        set_gssapi_service_principal,
        &str,
        ffi::ZMQ_GSSAPI_SERVICE_PRINCIPAL
    );
    setsockopt!(
        set_gssapi_service_principal_nametype,
        i32,
        ffi::ZMQ_GSSAPI_SERVICE_PRINCIPAL_NAMETYPE
    );
    setsockopt!(
        set_gssapi_principal_nametype,
        i32,
        ffi::ZMQ_GSSAPI_PRINCIPAL_NAMETYPE
    );
    setsockopt!(set_handshake_ivl, i32, ffi::ZMQ_HANDSHAKE_IVL);
    setsockopt!(set_heartbeat_ivl, i32, ffi::ZMQ_HEARTBEAT_IVL);
    setsockopt!(set_heartbeat_timeout, i32, ffi::ZMQ_HEARTBEAT_TIMEOUT);
    setsockopt!(set_heartbeat_ttl, i32, ffi::ZMQ_HEARTBEAT_TTL);
    setsockopt!(set_immediate, bool, ffi::ZMQ_IMMEDIATE);
    setsockopt!(set_invert_matching, bool, ffi::ZMQ_INVERT_MATCHING);
    setsockopt!(set_ipv6, bool, ffi::ZMQ_IPV6);
    setsockopt!(set_linger, i32, ffi::ZMQ_LINGER);
    setsockopt!(set_maxmsgsize, i64, ffi::ZMQ_MAXMSGSIZE);
    setsockopt!(set_multicast_hops, i32, ffi::ZMQ_MULTICAST_HOPS);
    setsockopt!(set_multicast_maxtpdu, i32, ffi::ZMQ_MULTICAST_MAXTPDU);
    setsockopt!(set_plain_password, &str, ffi::ZMQ_PLAIN_PASSWORD);
    setsockopt!(set_plain_server, bool, ffi::ZMQ_PLAIN_SERVER);
    setsockopt!(set_plain_username, &str, ffi::ZMQ_PLAIN_USERNAME);
    setsockopt!(set_use_fd, i32, ffi::ZMQ_USE_FD);
    setsockopt!(set_probe_router, bool, ffi::ZMQ_PROBE_ROUTER);
    setsockopt!(set_rate, i32, ffi::ZMQ_RATE);
    setsockopt!(set_rcvbuf, i32, ffi::ZMQ_RCVBUF);
    setsockopt!(set_rcvhwm, i32, ffi::ZMQ_RCVHWM);
    setsockopt!(set_rcvtimeo, i32, ffi::ZMQ_RCVTIMEO);
    setsockopt!(set_reconnect_ivl, i32, ffi::ZMQ_RECONNECT_IVL);
    setsockopt!(set_reconnect_ivl_max, i32, ffi::ZMQ_RECONNECT_IVL_MAX);
    setsockopt!(set_recovery_ivl, i32, ffi::ZMQ_RECOVERY_IVL);
    setsockopt!(set_req_correlate, bool, ffi::ZMQ_REQ_CORRELATE);
    setsockopt!(set_req_relaxed, bool, ffi::ZMQ_REQ_RELAXED);
    setsockopt!(set_router_handover, bool, ffi::ZMQ_ROUTER_HANDOVER);
    setsockopt!(set_router_mandatory, bool, ffi::ZMQ_ROUTER_MANDATORY);
    setsockopt!(set_router_raw, bool, ffi::ZMQ_ROUTER_RAW);
    setsockopt!(set_routing_id, &[u8], ffi::ZMQ_ROUTING_ID);
    setsockopt!(set_sndbuf, i32, ffi::ZMQ_SNDBUF);
    setsockopt!(set_sndhwm, i32, ffi::ZMQ_SNDHWM);
    setsockopt!(set_sndtimeo, i32, ffi::ZMQ_SNDTIMEO);
    setsockopt!(set_socks_proxy, &str, ffi::ZMQ_SOCKS_PROXY);
    setsockopt!(set_stream_notify, bool, ffi::ZMQ_STREAM_NOTIFY);
    setsockopt!(set_subscribe, &[u8], ffi::ZMQ_SUBSCRIBE);
    setsockopt!(set_tcp_keepalive, i32, ffi::ZMQ_TCP_KEEPALIVE);
    setsockopt!(set_tcp_keepalive_cnt, i32, ffi::ZMQ_TCP_KEEPALIVE_CNT);
    setsockopt!(set_tcp_keepalive_idle, i32, ffi::ZMQ_TCP_KEEPALIVE_IDLE);
    setsockopt!(set_tcp_keepalive_intvl, i32, ffi::ZMQ_TCP_KEEPALIVE_INTVL);
    setsockopt!(set_tcp_maxrt, i32, ffi::ZMQ_TCP_MAXRT);
    setsockopt!(set_tos, i32, ffi::ZMQ_TOS);
    setsockopt!(set_unsubscribe, &[u8], ffi::ZMQ_UNSUBSCRIBE);
    setsockopt!(set_xpub_verbose, bool, ffi::ZMQ_XPUB_VERBOSE);
    setsockopt!(set_xpub_verboser, bool, ffi::ZMQ_XPUB_VERBOSER);
    setsockopt!(set_xpub_manual, bool, ffi::ZMQ_XPUB_MANUAL);
    setsockopt!(set_xpub_nodrop, bool, ffi::ZMQ_XPUB_NODROP);
    setsockopt!(set_xpub_welcome_msg, &[u8], ffi::ZMQ_XPUB_WELCOME_MSG);
    setsockopt!(set_zap_domain, &str, ffi::ZMQ_ZAP_DOMAIN);
    setsockopt!(set_vmci_buffer_size, u64, ffi::ZMQ_VMCI_BUFFER_SIZE);
    setsockopt!(set_vmci_buffer_min_size, u64, ffi::ZMQ_VMCI_BUFFER_MIN_SIZE);
    setsockopt!(set_vmci_buffer_max_size, u64, ffi::ZMQ_VMCI_BUFFER_MAX_SIZE);
    setsockopt!(set_vmci_connect_timeout, i32, ffi::ZMQ_VMCI_CONNECT_TIMEOUT);
}
impl Drop for Socket {
    fn drop(&mut self) {
        // Best-effort close; errors cannot be surfaced from drop.
        let _ = self.close();
    }
}
/// Runs `zmq_proxy` between `frontend` and `backend` (no capture
/// socket). Blocks until the sockets' context is terminated.
pub fn proxy(frontend: &Socket, backend: &Socket) -> Result<()> {
    try_err!(unsafe { ffi::zmq_proxy(frontend.0, backend.0, ptr::null_mut()) });
    Ok(())
}
/// A ØMQ context handle, terminated on drop.
pub struct Context(*mut c_void);
impl Context {
    /// Creates a new ØMQ context.
    pub fn new() -> Self {
        Context(unsafe { ffi::zmq_ctx_new() })
    }
    /// Creates a socket of the given type in this context.
    // NOTE(review): zmq_socket can return NULL on failure; the result
    // is not checked here, so later calls on the Socket would fail with
    // ENOTSOCK — consider returning Result<Socket>.
    pub fn socket(&self, stype: SocketType) -> Socket {
        Socket(unsafe { ffi::zmq_socket(self.0, stype.code()) })
    }
    /// Terminates the context (blocks until sockets are closed). Also
    /// invoked by `Drop`.
    pub fn terminate(&self) -> Result<()> {
        try_err!(unsafe { ffi::zmq_ctx_term(self.0) });
        Ok(())
    }
    /// Shuts the context down without blocking; pending operations on
    /// its sockets start returning ETERM.
    pub fn shutdown(&self) -> Result<()> {
        try_err!(unsafe { ffi::zmq_ctx_shutdown(self.0) });
        Ok(())
    }
}
impl Default for Context {
fn default() -> Self {
Self::new()
}
}
impl Drop for Context {
    fn drop(&mut self) {
        // Best-effort termination; errors cannot be surfaced from drop.
        let _ = self.terminate();
    }
}
/// One entry for [`poll`], wrapping a `zmq_pollitem_t`.
///
/// Fix: `poll` casts `*mut PollItem` to `*mut ffi::ZmqPollitemT`; for a
/// newtype that cast is only guaranteed sound with
/// `#[repr(transparent)]`, which pins the wrapper to the exact layout
/// of its single field.
#[repr(transparent)]
pub struct PollItem(ffi::ZmqPollitemT);
impl PollItem {
    /// Creates a zeroed poll item (no socket, no events).
    pub fn new() -> Self {
        // SAFETY: zmq_pollitem_t is a plain C struct (socket pointer,
        // fd, event masks) for which all-zero bytes is a valid value.
        PollItem(unsafe { mem::zeroed() })
    }
    /// Creates a poll item watching `socket` for `events`
    /// (ZMQ_POLLIN/ZMQ_POLLOUT bits).
    pub fn from_socket(socket: &mut Socket, events: i16) -> Self {
        let mut instance = Self::new();
        instance.0.socket = socket.0;
        instance.0.events = events;
        instance
    }
    /// Returns the events that fired during the last [`poll`] call.
    pub fn get_revents(&self) -> i16 {
        self.0.revents
    }
}
impl Default for PollItem {
    fn default() -> Self {
        Self::new()
    }
}
/// Polls the given items for events, waiting up to `timeout`
/// milliseconds (-1 to block indefinitely). Returns the number of
/// items with events; results are written into each item's `revents`.
pub fn poll(items: &mut [PollItem], timeout: i64) -> Result<i32> {
    let rc = unsafe {
        // NOTE(review): this cast assumes PollItem has exactly the
        // layout of ZmqPollitemT, which Rust only guarantees for
        // #[repr(transparent)] newtypes — verify the struct definition.
        ffi::zmq_poll(
            items.as_mut_ptr() as *mut ffi::ZmqPollitemT,
            items.len() as c_int,
            timeout,
        )
    };
    try_err!(rc);
    Ok(rc as i32)
}
| 32.199187 | 95 | 0.578883 |
29b1aabc0e7bf23fcfda6ed124481319c1fdb552
| 25,792 |
//! Customizes the rendering of the elements.
use std::{fmt, io};
use console::{style, Style, StyledObject, Term};
/// Implements a theme for dialoguer.
pub trait Theme {
    /// Formats a prompt.
    #[inline]
    fn format_prompt(&self, f: &mut dyn fmt::Write, prompt: &str) -> fmt::Result {
        write!(f, "{}:", prompt)
    }
    /// Formats out an error.
    #[inline]
    fn format_error(&self, f: &mut dyn fmt::Write, err: &str) -> fmt::Result {
        write!(f, "error: {}", err)
    }
    /// Formats a confirm prompt.
    ///
    /// The y/n legend capitalizes the default answer, if any.
    fn format_confirm_prompt(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        default: Option<bool>,
    ) -> fmt::Result {
        if !prompt.is_empty() {
            write!(f, "{} ", &prompt)?;
        }
        match default {
            None => write!(f, "[y/n] ")?,
            Some(true) => write!(f, "[Y/n] ")?,
            Some(false) => write!(f, "[y/N] ")?,
        }
        Ok(())
    }
    /// Formats a confirm prompt after selection.
    fn format_confirm_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        selection: Option<bool>,
    ) -> fmt::Result {
        let selection = selection.map(|b| if b { "yes" } else { "no" });
        match selection {
            Some(selection) if prompt.is_empty() => {
                write!(f, "{}", selection)
            }
            Some(selection) => {
                write!(f, "{} {}", &prompt, selection)
            }
            None if prompt.is_empty() => Ok(()),
            None => {
                write!(f, "{}", &prompt)
            }
        }
    }
    /// Formats an input prompt.
    fn format_input_prompt(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        default: Option<&str>,
    ) -> fmt::Result {
        match default {
            Some(default) if prompt.is_empty() => write!(f, "[{}]: ", default),
            Some(default) => write!(f, "{} [{}]: ", prompt, default),
            None => write!(f, "{}: ", prompt),
        }
    }
    /// Formats an input prompt after selection.
    #[inline]
    fn format_input_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        sel: &str,
    ) -> fmt::Result {
        write!(f, "{}: {}", prompt, sel)
    }
    /// Formats a password prompt.
    #[inline]
    #[cfg(feature = "password")]
    fn format_password_prompt(&self, f: &mut dyn fmt::Write, prompt: &str) -> fmt::Result {
        self.format_input_prompt(f, prompt, None)
    }
    /// Formats a password prompt after selection; the value is masked
    /// as "[hidden]".
    #[inline]
    #[cfg(feature = "password")]
    fn format_password_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
    ) -> fmt::Result {
        self.format_input_prompt_selection(f, prompt, "[hidden]")
    }
    /// Formats a select prompt.
    #[inline]
    fn format_select_prompt(&self, f: &mut dyn fmt::Write, prompt: &str) -> fmt::Result {
        self.format_prompt(f, prompt)
    }
    /// Formats a select prompt after selection.
    #[inline]
    fn format_select_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        sel: &str,
    ) -> fmt::Result {
        self.format_input_prompt_selection(f, prompt, sel)
    }
    /// Formats a multi select prompt.
    #[inline]
    fn format_multi_select_prompt(&self, f: &mut dyn fmt::Write, prompt: &str) -> fmt::Result {
        self.format_prompt(f, prompt)
    }
    /// Formats a sort prompt.
    #[inline]
    fn format_sort_prompt(&self, f: &mut dyn fmt::Write, prompt: &str) -> fmt::Result {
        self.format_prompt(f, prompt)
    }
    /// Formats a multi_select prompt after selection, as a
    /// comma-separated list.
    fn format_multi_select_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        selections: &[&str],
    ) -> fmt::Result {
        write!(f, "{}: ", prompt)?;
        for (idx, sel) in selections.iter().enumerate() {
            write!(f, "{}{}", if idx == 0 { "" } else { ", " }, sel)?;
        }
        Ok(())
    }
    /// Formats a sort prompt after selection.
    #[inline]
    fn format_sort_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        selections: &[&str],
    ) -> fmt::Result {
        self.format_multi_select_prompt_selection(f, prompt, selections)
    }
    /// Formats a select prompt item; the active row gets a ">" marker.
    fn format_select_prompt_item(
        &self,
        f: &mut dyn fmt::Write,
        text: &str,
        active: bool,
    ) -> fmt::Result {
        write!(f, "{} {}", if active { ">" } else { " " }, text)
    }
    /// Formats a multi select prompt item: ">" marks the cursor row,
    /// "[x]" a checked entry.
    fn format_multi_select_prompt_item(
        &self,
        f: &mut dyn fmt::Write,
        text: &str,
        checked: bool,
        active: bool,
    ) -> fmt::Result {
        write!(
            f,
            "{} {}",
            match (checked, active) {
                (true, true) => "> [x]",
                (true, false) => "  [x]",
                (false, true) => "> [ ]",
                (false, false) => "  [ ]",
            },
            text
        )
    }
    /// Formats a sort prompt item: ">" marks the cursor row, "[x]" an
    /// entry currently picked up for moving.
    fn format_sort_prompt_item(
        &self,
        f: &mut dyn fmt::Write,
        text: &str,
        picked: bool,
        active: bool,
    ) -> fmt::Result {
        write!(
            f,
            "{} {}",
            match (picked, active) {
                (true, true) => "> [x]",
                (false, true) => "> [ ]",
                (_, false) => "  [ ]",
            },
            text
        )
    }
    /// Formats a fuzzy select prompt, rendering a "|" cursor inside the
    /// search term.
    #[cfg(feature = "fuzzy-select")]
    fn format_fuzzy_select_prompt(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        search_term: &str,
        cursor_pos: usize,
    ) -> fmt::Result {
        if !prompt.is_empty() {
            write!(f, "{} ", prompt,)?;
        }
        // NOTE(review): `cursor_pos` is used as a byte offset; slicing
        // panics if it is not on a UTF-8 character boundary (e.g. with
        // multi-byte input) — confirm callers pass byte indices on
        // boundaries.
        if cursor_pos < search_term.len() {
            let st_head = search_term[0..cursor_pos].to_string();
            let st_tail = search_term[cursor_pos..search_term.len()].to_string();
            let st_cursor = "|".to_string();
            write!(f, "{}{}{}", st_head, st_cursor, st_tail)
        } else {
            let cursor = "|".to_string();
            write!(f, "{}{}", search_term.to_string(), cursor)
        }
    }
}
/// The default theme.
pub struct SimpleTheme;
// SimpleTheme renders every prompt with the trait's plain-text
// defaults; no methods are overridden.
impl Theme for SimpleTheme {}
/// A colorful theme
pub struct ColorfulTheme {
    /// The style for default values
    pub defaults_style: Style,
    /// The style for prompt
    pub prompt_style: Style,
    /// Prompt prefix value and style
    pub prompt_prefix: StyledObject<String>,
    /// Prompt suffix value and style
    pub prompt_suffix: StyledObject<String>,
    /// Prompt on success prefix value and style
    pub success_prefix: StyledObject<String>,
    /// Prompt on success suffix value and style
    pub success_suffix: StyledObject<String>,
    /// Error prefix value and style
    pub error_prefix: StyledObject<String>,
    /// The style for error message
    pub error_style: Style,
    /// The style for hints
    pub hint_style: Style,
    /// The style for values on prompt success
    pub values_style: Style,
    /// The style for active items
    pub active_item_style: Style,
    /// The style for inactive items
    pub inactive_item_style: Style,
    /// Active item in select prefix value and style
    pub active_item_prefix: StyledObject<String>,
    /// Inactive item in select prefix value and style
    pub inactive_item_prefix: StyledObject<String>,
    /// Checked item in multi select prefix value and style
    pub checked_item_prefix: StyledObject<String>,
    /// Unchecked item in multi select prefix value and style
    pub unchecked_item_prefix: StyledObject<String>,
    /// Picked item in sort prefix value and style
    pub picked_item_prefix: StyledObject<String>,
    /// Unpicked item in sort prefix value and style
    pub unpicked_item_prefix: StyledObject<String>,
    /// Formats the cursor for a fuzzy select prompt
    #[cfg(feature = "fuzzy-select")]
    pub fuzzy_cursor_style: Style,
    /// Show the selections from certain prompts inline
    pub inline_selections: bool,
}
impl Default for ColorfulTheme {
    /// The stock colorful look: yellow "?" prompt prefix, green "✔"
    /// success/check marks, red "✘" errors, cyan active items. All
    /// styles target stderr, where dialoguer renders prompts.
    fn default() -> ColorfulTheme {
        ColorfulTheme {
            defaults_style: Style::new().for_stderr().cyan(),
            prompt_style: Style::new().for_stderr().bold(),
            prompt_prefix: style("?".to_string()).for_stderr().yellow(),
            prompt_suffix: style("›".to_string()).for_stderr().black().bright(),
            success_prefix: style("✔".to_string()).for_stderr().green(),
            success_suffix: style("·".to_string()).for_stderr().black().bright(),
            error_prefix: style("✘".to_string()).for_stderr().red(),
            error_style: Style::new().for_stderr().red(),
            hint_style: Style::new().for_stderr().black().bright(),
            values_style: Style::new().for_stderr().green(),
            active_item_style: Style::new().for_stderr().cyan(),
            inactive_item_style: Style::new().for_stderr(),
            active_item_prefix: style("❯".to_string()).for_stderr().green(),
            inactive_item_prefix: style(" ".to_string()).for_stderr(),
            checked_item_prefix: style("✔".to_string()).for_stderr().green(),
            // Same glyph as checked, but dimmed to read as "off".
            unchecked_item_prefix: style("✔".to_string()).for_stderr().black(),
            picked_item_prefix: style("❯".to_string()).for_stderr().green(),
            unpicked_item_prefix: style(" ".to_string()).for_stderr(),
            #[cfg(feature = "fuzzy-select")]
            fuzzy_cursor_style: Style::new().for_stderr().black().on_white(),
            inline_selections: true,
        }
    }
}
// Each method writes the styled prompt text into the supplied `fmt::Write`
// buffer; actual terminal output happens later in `TermThemeRenderer`.
impl Theme for ColorfulTheme {
    /// Formats a prompt.
    fn format_prompt(&self, f: &mut dyn fmt::Write, prompt: &str) -> fmt::Result {
        if !prompt.is_empty() {
            write!(
                f,
                "{} {} ",
                &self.prompt_prefix,
                self.prompt_style.apply_to(prompt)
            )?;
        }
        write!(f, "{}", &self.prompt_suffix)
    }
    /// Formats an error
    fn format_error(&self, f: &mut dyn fmt::Write, err: &str) -> fmt::Result {
        write!(
            f,
            "{} {}",
            &self.error_prefix,
            self.error_style.apply_to(err)
        )
    }
    /// Formats an input prompt.
    ///
    /// When a default is given it is rendered as a hint, e.g. `? name (bob) ›`.
    fn format_input_prompt(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        default: Option<&str>,
    ) -> fmt::Result {
        if !prompt.is_empty() {
            write!(
                f,
                "{} {} ",
                &self.prompt_prefix,
                self.prompt_style.apply_to(prompt)
            )?;
        }
        match default {
            Some(default) => write!(
                f,
                "{} {} ",
                self.hint_style.apply_to(&format!("({})", default)),
                &self.prompt_suffix
            ),
            None => write!(f, "{} ", &self.prompt_suffix),
        }
    }
    /// Formats a confirm prompt.
    ///
    /// Always shows the `(y/n)` hint; the default answer, if any, is appended
    /// after the prompt suffix.
    fn format_confirm_prompt(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        default: Option<bool>,
    ) -> fmt::Result {
        if !prompt.is_empty() {
            write!(
                f,
                "{} {} ",
                &self.prompt_prefix,
                self.prompt_style.apply_to(prompt)
            )?;
        }
        match default {
            None => write!(
                f,
                "{} {}",
                self.hint_style.apply_to("(y/n)"),
                &self.prompt_suffix
            ),
            Some(true) => write!(
                f,
                "{} {} {}",
                self.hint_style.apply_to("(y/n)"),
                &self.prompt_suffix,
                self.defaults_style.apply_to("yes")
            ),
            Some(false) => write!(
                f,
                "{} {} {}",
                self.hint_style.apply_to("(y/n)"),
                &self.prompt_suffix,
                self.defaults_style.apply_to("no")
            ),
        }
    }
    /// Formats a confirm prompt after selection.
    fn format_confirm_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        selection: Option<bool>,
    ) -> fmt::Result {
        if !prompt.is_empty() {
            write!(
                f,
                "{} {} ",
                &self.success_prefix,
                self.prompt_style.apply_to(prompt)
            )?;
        }
        // Map the boolean answer to the display words before rendering.
        let selection = selection.map(|b| if b { "yes" } else { "no" });
        match selection {
            Some(selection) => {
                write!(
                    f,
                    "{} {}",
                    &self.success_suffix,
                    self.values_style.apply_to(selection)
                )
            }
            None => {
                write!(f, "{}", &self.success_suffix)
            }
        }
    }
    /// Formats an input prompt after selection.
    fn format_input_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        sel: &str,
    ) -> fmt::Result {
        if !prompt.is_empty() {
            write!(
                f,
                "{} {} ",
                &self.success_prefix,
                self.prompt_style.apply_to(prompt)
            )?;
        }
        write!(
            f,
            "{} {}",
            &self.success_suffix,
            self.values_style.apply_to(sel)
        )
    }
    /// Formats a password prompt after selection.
    ///
    /// Delegates to the input variant with a fixed mask so the secret never
    /// appears on screen.
    #[cfg(feature = "password")]
    fn format_password_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
    ) -> fmt::Result {
        self.format_input_prompt_selection(f, prompt, "********")
    }
    /// Formats a multi select prompt after selection.
    ///
    /// Lists the chosen values comma-separated only when `inline_selections`
    /// is enabled.
    fn format_multi_select_prompt_selection(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        selections: &[&str],
    ) -> fmt::Result {
        if !prompt.is_empty() {
            write!(
                f,
                "{} {} ",
                &self.success_prefix,
                self.prompt_style.apply_to(prompt)
            )?;
        }
        write!(f, "{} ", &self.success_suffix)?;
        if self.inline_selections {
            for (idx, sel) in selections.iter().enumerate() {
                write!(
                    f,
                    "{}{}",
                    if idx == 0 { "" } else { ", " },
                    self.values_style.apply_to(sel)
                )?;
            }
        }
        Ok(())
    }
    /// Formats a select prompt item.
    fn format_select_prompt_item(
        &self,
        f: &mut dyn fmt::Write,
        text: &str,
        active: bool,
    ) -> fmt::Result {
        // (prefix, styled text) pair depends on whether the row is highlighted.
        let details = match active {
            true => (
                &self.active_item_prefix,
                self.active_item_style.apply_to(text),
            ),
            false => (
                &self.inactive_item_prefix,
                self.inactive_item_style.apply_to(text),
            ),
        };
        write!(f, "{} {}", details.0, details.1)
    }
    /// Formats a multi select prompt item.
    fn format_multi_select_prompt_item(
        &self,
        f: &mut dyn fmt::Write,
        text: &str,
        checked: bool,
        active: bool,
    ) -> fmt::Result {
        // `checked` picks the marker, `active` picks the text style.
        let details = match (checked, active) {
            (true, true) => (
                &self.checked_item_prefix,
                self.active_item_style.apply_to(text),
            ),
            (true, false) => (
                &self.checked_item_prefix,
                self.inactive_item_style.apply_to(text),
            ),
            (false, true) => (
                &self.unchecked_item_prefix,
                self.active_item_style.apply_to(text),
            ),
            (false, false) => (
                &self.unchecked_item_prefix,
                self.inactive_item_style.apply_to(text),
            ),
        };
        write!(f, "{} {}", details.0, details.1)
    }
    /// Formats a sort prompt item.
    fn format_sort_prompt_item(
        &self,
        f: &mut dyn fmt::Write,
        text: &str,
        picked: bool,
        active: bool,
    ) -> fmt::Result {
        let details = match (picked, active) {
            (true, true) => (
                &self.picked_item_prefix,
                self.active_item_style.apply_to(text),
            ),
            (false, true) => (
                &self.unpicked_item_prefix,
                self.active_item_style.apply_to(text),
            ),
            // Inactive rows look the same whether picked or not.
            (_, false) => (
                &self.unpicked_item_prefix,
                self.inactive_item_style.apply_to(text),
            ),
        };
        write!(f, "{} {}", details.0, details.1)
    }
    /// Formats a fuzzy select prompt, highlighting the character under the
    /// cursor with `fuzzy_cursor_style`.
    ///
    /// NOTE(review): the slicing below uses byte indices; this assumes
    /// `cursor_pos` always lands on a `char` boundary (true for ASCII input) —
    /// TODO confirm for multi-byte search terms.
    #[cfg(feature = "fuzzy-select")]
    fn format_fuzzy_select_prompt(
        &self,
        f: &mut dyn fmt::Write,
        prompt: &str,
        search_term: &str,
        cursor_pos: usize,
    ) -> fmt::Result {
        if !prompt.is_empty() {
            write!(
                f,
                "{} {} ",
                &self.prompt_prefix,
                self.prompt_style.apply_to(prompt)
            )?;
        }
        if cursor_pos < search_term.len() {
            // Cursor sits on an existing character: split around it and
            // restyle that single character.
            let st_head = search_term[0..cursor_pos].to_string();
            let st_tail = search_term[cursor_pos + 1..search_term.len()].to_string();
            let st_cursor = self
                .fuzzy_cursor_style
                .apply_to(search_term.to_string().chars().nth(cursor_pos).unwrap());
            write!(
                f,
                "{} {}{}{}",
                &self.prompt_suffix, st_head, st_cursor, st_tail
            )
        } else {
            // Cursor is past the end of the term: render a styled blank cell.
            let cursor = self.fuzzy_cursor_style.apply_to(" ");
            write!(
                f,
                "{} {}{}",
                &self.prompt_suffix,
                search_term.to_string(),
                cursor
            )
        }
    }
}
/// Helper struct to conveniently render a theme to a term.
pub(crate) struct TermThemeRenderer<'a> {
    /// Terminal the formatted output is written to.
    term: &'a Term,
    /// Theme used to format every prompt/item.
    theme: &'a dyn Theme,
    /// Number of lines written since the last clear (used by `clear`).
    height: usize,
    /// Number of lines occupied by the last prompt (tracked separately so a
    /// prompt can survive item redraws).
    prompt_height: usize,
    /// When true, writing a prompt moves `height` into `prompt_height`
    /// and restarts the line count at zero.
    prompts_reset_height: bool,
}
impl<'a> TermThemeRenderer<'a> {
    /// Creates a renderer over `term` using `theme`, with empty line counters.
    pub fn new(term: &'a Term, theme: &'a dyn Theme) -> TermThemeRenderer<'a> {
        TermThemeRenderer {
            term,
            theme,
            height: 0,
            prompt_height: 0,
            prompts_reset_height: true,
        }
    }
    #[cfg(feature = "password")]
    pub fn set_prompts_reset_height(&mut self, val: bool) {
        self.prompts_reset_height = val;
    }
    #[cfg(feature = "password")]
    pub fn term(&self) -> &Term {
        self.term
    }
    /// Accounts for one extra line written outside this renderer.
    pub fn add_line(&mut self) {
        self.height += 1;
    }
    /// Runs a theme formatter into a string buffer, counts embedded newlines
    /// toward `height`, then writes the buffer without a trailing newline.
    fn write_formatted_str<
        F: FnOnce(&mut TermThemeRenderer, &mut dyn fmt::Write) -> fmt::Result,
    >(
        &mut self,
        f: F,
    ) -> io::Result<()> {
        let mut buf = String::new();
        f(self, &mut buf).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
        self.height += buf.chars().filter(|&x| x == '\n').count();
        self.term.write_str(&buf)
    }
    /// Same as `write_formatted_str` but terminates the output with a newline
    /// (hence the extra `+ 1` on the height count).
    fn write_formatted_line<
        F: FnOnce(&mut TermThemeRenderer, &mut dyn fmt::Write) -> fmt::Result,
    >(
        &mut self,
        f: F,
    ) -> io::Result<()> {
        let mut buf = String::new();
        f(self, &mut buf).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
        self.height += buf.chars().filter(|&x| x == '\n').count() + 1;
        self.term.write_line(&buf)
    }
    /// Writes a prompt line and, unless disabled, moves the accumulated
    /// height into `prompt_height` so later clears keep the prompt visible.
    fn write_formatted_prompt<
        F: FnOnce(&mut TermThemeRenderer, &mut dyn fmt::Write) -> fmt::Result,
    >(
        &mut self,
        f: F,
    ) -> io::Result<()> {
        self.write_formatted_line(f)?;
        if self.prompts_reset_height {
            self.prompt_height = self.height;
            self.height = 0;
        }
        Ok(())
    }
    /// Appends a `[Page x/y]` marker used by paginated prompts.
    fn write_paging_info(buf: &mut dyn fmt::Write, paging_info: (usize, usize)) -> fmt::Result {
        write!(buf, " [Page {}/{}] ", paging_info.0, paging_info.1)
    }
    pub fn error(&mut self, err: &str) -> io::Result<()> {
        self.write_formatted_line(|this, buf| this.theme.format_error(buf, err))
    }
    pub fn confirm_prompt(&mut self, prompt: &str, default: Option<bool>) -> io::Result<()> {
        self.write_formatted_str(|this, buf| this.theme.format_confirm_prompt(buf, prompt, default))
    }
    pub fn confirm_prompt_selection(&mut self, prompt: &str, sel: Option<bool>) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme.format_confirm_prompt_selection(buf, prompt, sel)
        })
    }
    #[cfg(feature = "fuzzy-select")]
    pub fn fuzzy_select_prompt(
        &mut self,
        prompt: &str,
        search_term: &str,
        cursor_pos: usize,
    ) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme
                .format_fuzzy_select_prompt(buf, prompt, search_term, cursor_pos)
        })
    }
    pub fn input_prompt(&mut self, prompt: &str, default: Option<&str>) -> io::Result<()> {
        self.write_formatted_str(|this, buf| this.theme.format_input_prompt(buf, prompt, default))
    }
    pub fn input_prompt_selection(&mut self, prompt: &str, sel: &str) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme.format_input_prompt_selection(buf, prompt, sel)
        })
    }
    #[cfg(feature = "password")]
    pub fn password_prompt(&mut self, prompt: &str) -> io::Result<()> {
        self.write_formatted_str(|this, buf| {
            // Carriage return rewinds to column 0 so re-prompts overwrite in place.
            write!(buf, "\r")?;
            this.theme.format_password_prompt(buf, prompt)
        })
    }
    #[cfg(feature = "password")]
    pub fn password_prompt_selection(&mut self, prompt: &str) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme.format_password_prompt_selection(buf, prompt)
        })
    }
    pub fn select_prompt(
        &mut self,
        prompt: &str,
        paging_info: Option<(usize, usize)>,
    ) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme.format_select_prompt(buf, prompt)?;
            if let Some(paging_info) = paging_info {
                TermThemeRenderer::write_paging_info(buf, paging_info)?;
            }
            Ok(())
        })
    }
    pub fn select_prompt_selection(&mut self, prompt: &str, sel: &str) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme.format_select_prompt_selection(buf, prompt, sel)
        })
    }
    pub fn select_prompt_item(&mut self, text: &str, active: bool) -> io::Result<()> {
        self.write_formatted_line(|this, buf| {
            this.theme.format_select_prompt_item(buf, text, active)
        })
    }
    pub fn multi_select_prompt(
        &mut self,
        prompt: &str,
        paging_info: Option<(usize, usize)>,
    ) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme.format_multi_select_prompt(buf, prompt)?;
            if let Some(paging_info) = paging_info {
                TermThemeRenderer::write_paging_info(buf, paging_info)?;
            }
            Ok(())
        })
    }
    pub fn multi_select_prompt_selection(&mut self, prompt: &str, sel: &[&str]) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme
                .format_multi_select_prompt_selection(buf, prompt, sel)
        })
    }
    pub fn multi_select_prompt_item(
        &mut self,
        text: &str,
        checked: bool,
        active: bool,
    ) -> io::Result<()> {
        self.write_formatted_line(|this, buf| {
            this.theme
                .format_multi_select_prompt_item(buf, text, checked, active)
        })
    }
    pub fn sort_prompt(
        &mut self,
        prompt: &str,
        paging_info: Option<(usize, usize)>,
    ) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme.format_sort_prompt(buf, prompt)?;
            if let Some(paging_info) = paging_info {
                TermThemeRenderer::write_paging_info(buf, paging_info)?;
            }
            Ok(())
        })
    }
    pub fn sort_prompt_selection(&mut self, prompt: &str, sel: &[&str]) -> io::Result<()> {
        self.write_formatted_prompt(|this, buf| {
            this.theme.format_sort_prompt_selection(buf, prompt, sel)
        })
    }
    pub fn sort_prompt_item(&mut self, text: &str, picked: bool, active: bool) -> io::Result<()> {
        self.write_formatted_line(|this, buf| {
            this.theme
                .format_sort_prompt_item(buf, text, picked, active)
        })
    }
    /// Erases everything drawn since construction (items plus prompt) and
    /// resets the item-line counter.
    pub fn clear(&mut self) -> io::Result<()> {
        self.term
            .clear_last_lines(self.height + self.prompt_height)?;
        self.height = 0;
        Ok(())
    }
    /// Erases the drawn items but keeps the prompt line on screen.
    pub fn clear_preserve_prompt(&mut self, size_vec: &[usize]) -> io::Result<()> {
        let mut new_height = self.height;
        //Check each item size, increment on finding an overflow
        // NOTE(review): assumes `term.size().1` is the terminal width in
        // columns, so an item longer than that wraps onto an extra line —
        // TODO confirm against the console crate docs.
        for size in size_vec {
            if *size > self.term.size().1 as usize {
                new_height += 1;
            }
        }
        self.term.clear_last_lines(new_height)?;
        self.height = 0;
        Ok(())
    }
}
| 29.921114 | 100 | 0.510275 |
877cb607e421b95737176c8e65ac24ef69135eeb
| 1,167 |
// Auto-generated (svd2rust-style) accessor code for the
// PRINCE_REGION2_IV_CODE3 register: reader/writer type aliases, the reset
// value, and a single 32-bit-wide FIELD spanning bits 0..=31.
#[doc = "Reader of register PRINCE_REGION2_IV_CODE3"]
pub type R = crate::R<u32, super::PRINCE_REGION2_IV_CODE3>;
#[doc = "Writer for register PRINCE_REGION2_IV_CODE3"]
pub type W = crate::W<u32, super::PRINCE_REGION2_IV_CODE3>;
#[doc = "Register PRINCE_REGION2_IV_CODE3 `reset()`'s with value 0"]
impl crate::ResetValue for super::PRINCE_REGION2_IV_CODE3 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `FIELD`"]
pub type FIELD_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `FIELD`"]
pub struct FIELD_W<'a> {
    w: &'a mut W,
}
impl<'a> FIELD_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        // Mask is the full 32 bits (field covers the whole register); the
        // generated clear-then-set pattern is kept for uniformity.
        self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:31 - ."]
    #[inline(always)]
    pub fn field(&self) -> FIELD_R {
        FIELD_R::new((self.bits & 0xffff_ffff) as u32)
    }
}
impl W {
    #[doc = "Bits 0:31 - ."]
    #[inline(always)]
    pub fn field(&mut self) -> FIELD_W {
        FIELD_W { w: self }
    }
}
18052a0392a5707a922e4d6ccbb0942405137abb
| 19,988 |
use crate::{
cli::connect::jwt_token::*, cli::connect::*, command::Command, system_services::*, ConfigError,
};
use rumqttc::QoS::AtLeastOnce;
use rumqttc::{Event, Incoming, MqttOptions, Outgoing, Packet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use tedge_config::*;
use tedge_users::UserManager;
use tedge_utils::paths::{create_directories, ok_if_not_found, DraftFile};
use which::which;
// Host of the local MQTT broker (mosquitto) that the bridge talks through.
pub(crate) const DEFAULT_HOST: &str = "localhost";
// How long the connection check may take before we give up (seconds).
const WAIT_FOR_CHECK_SECONDS: u64 = 2;
// Bridge configuration file names, per cloud.
const C8Y_CONFIG_FILENAME: &str = "c8y-bridge.conf";
const AZURE_CONFIG_FILENAME: &str = "az-bridge.conf";
// MQTT keep-alive / response timeout used by the connection checks.
pub(crate) const RESPONSE_TIMEOUT: Duration = Duration::from_secs(10);
// Grace period after restarting mosquitto before proceeding (seconds).
const MOSQUITTO_RESTART_TIMEOUT_SECONDS: u64 = 5;
// Standard MQTT-over-TLS port used for the cloud endpoints.
const MQTT_TLS_PORT: u16 = 8883;
// Directory (under the tedge config root) holding generated bridge configs.
const TEDGE_BRIDGE_CONF_DIR_PATH: &str = "mosquitto-conf";
/// Implements `tedge connect`: configures a mosquitto bridge to the chosen
/// cloud (or, with `is_test_connection`, only verifies an existing bridge).
pub struct ConnectCommand {
    pub config_location: TEdgeConfigLocation,
    pub config_repository: TEdgeConfigRepository,
    // Which cloud to connect to (Azure or Cumulocity).
    pub cloud: Cloud,
    pub common_mosquitto_config: CommonMosquittoConfig,
    // When true, only test an already-configured connection; don't create one.
    pub is_test_connection: bool,
    pub service_manager: Arc<dyn SystemServiceManager>,
    pub user_manager: UserManager,
}
/// Outcome of a cloud connection check.
pub enum DeviceStatus {
    // The cloud answered as expected: the device is known/connected.
    AlreadyExists,
    // The request was sent but no (conclusive) response arrived.
    Unknown,
}
/// The cloud endpoint a bridge can be established to.
#[derive(Debug)]
pub enum Cloud {
    Azure,
    C8y,
}
// Consolidated into a single `impl` block: the two methods were previously
// split across two adjacent `impl Cloud` blocks for no reason.
impl Cloud {
    /// The mapper daemon that translates thin-edge messages for this cloud.
    fn dependent_mapper_service(&self) -> SystemService {
        match self {
            Cloud::Azure => SystemService::TEdgeMapperAz,
            Cloud::C8y => SystemService::TEdgeMapperC8y,
        }
    }

    /// Human-readable cloud name used in user-facing messages.
    fn as_str(&self) -> &'static str {
        match self {
            Self::Azure => "Azure",
            Self::C8y => "Cumulocity",
        }
    }
}
impl Command for ConnectCommand {
    fn description(&self) -> String {
        if self.is_test_connection {
            format!("test connection to {} cloud.", self.cloud.as_str())
        } else {
            format!("connect {} cloud.", self.cloud.as_str())
        }
    }
    /// Creates (or, in test mode, only verifies) the bridge to the selected
    /// cloud: persists defaults, writes the mosquitto bridge config, restarts
    /// mosquitto, then checks connectivity and starts the matching mapper.
    fn execute(&self) -> anyhow::Result<()> {
        let mut config = self.config_repository.load()?;
        // Test-only mode: verify an existing bridge and return early.
        if self.is_test_connection {
            let br_config = self.bridge_config(&config)?;
            if self.check_if_bridge_exists(&br_config) {
                return match self.check_connection(&config) {
                    Ok(_) => {
                        let cloud = br_config.cloud_name.clone();
                        println!("Connection check to {} cloud is successfull.\n", cloud);
                        Ok(())
                    }
                    Err(err) => Err(err.into()),
                };
            } else {
                // No bridge config on disk means the device was never connected.
                return Err((ConnectError::DeviceNotConnected {
                    cloud: self.cloud.as_str().into(),
                })
                .into());
            }
        }
        // XXX: Do we really need to persist the defaults?
        // Materialize the root-cert default for the selected cloud in the config.
        match self.cloud {
            Cloud::Azure => assign_default(&mut config, AzureRootCertPathSetting)?,
            Cloud::C8y => assign_default(&mut config, C8yRootCertPathSetting)?,
        }
        let bridge_config = self.bridge_config(&config)?;
        // Build the common mosquitto config from the internal port plus any
        // externally-exposed listener options (all optional).
        let updated_mosquitto_config = self
            .common_mosquitto_config
            .clone()
            .with_internal_opts(config.query(MqttPortSetting)?.into())
            .with_external_opts(
                config.query(MqttExternalPortSetting).ok().map(|x| x.into()),
                config.query(MqttExternalBindAddressSetting).ok(),
                config.query(MqttExternalBindInterfaceSetting).ok(),
                config
                    .query(MqttExternalCAPathSetting)
                    .ok()
                    .map(|x| x.to_string()),
                config
                    .query(MqttExternalCertfileSetting)
                    .ok()
                    .map(|x| x.to_string()),
                config
                    .query(MqttExternalKeyfileSetting)
                    .ok()
                    .map(|x| x.to_string()),
            );
        self.config_repository.store(&config)?;
        let device_type = config.query(DeviceTypeSetting)?;
        new_bridge(
            &bridge_config,
            &updated_mosquitto_config,
            self.service_manager.as_ref(),
            self.user_manager.clone(),
            &self.config_location,
            &device_type,
        )?;
        // A failed check here is only a warning: the bridge is configured and
        // may come up later.
        match self.check_connection(&config) {
            Ok(DeviceStatus::AlreadyExists) => {
                println!("Connection check is successfull.\n");
            }
            _ => {
                println!(
                    "Warning: Bridge has been configured, but {} connection check failed.\n",
                    self.cloud.as_str()
                );
            }
        }
        if bridge_config.use_mapper {
            println!("Checking if tedge-mapper is installed.\n");
            if which("tedge_mapper").is_err() {
                println!("Warning: tedge_mapper is not installed.\n");
            } else {
                self.service_manager.as_ref().start_and_enable_service(
                    self.cloud.dependent_mapper_service(),
                    std::io::stdout(),
                );
            }
        }
        // Cumulocity extras: tenant-URL sanity check and software management.
        if let Cloud::C8y = self.cloud {
            check_connected_c8y_tenant_as_configured(
                &config.query_string(C8yUrlSetting)?,
                config.query(MqttPortSetting)?.into(),
            );
            enable_software_management(&bridge_config, self.service_manager.as_ref());
        }
        Ok(())
    }
}
impl ConnectCommand {
    /// Builds the cloud-specific bridge configuration from the tedge config
    /// (device cert/key paths, root CA, remote client id, TLS port).
    fn bridge_config(&self, config: &TEdgeConfig) -> Result<BridgeConfig, ConfigError> {
        match self.cloud {
            Cloud::Azure => {
                let params = BridgeConfigAzureParams {
                    connect_url: config.query(AzureUrlSetting)?,
                    mqtt_tls_port: MQTT_TLS_PORT,
                    config_file: AZURE_CONFIG_FILENAME.into(),
                    bridge_root_cert_path: config.query(AzureRootCertPathSetting)?,
                    remote_clientid: config.query(DeviceIdSetting)?,
                    bridge_certfile: config.query(DeviceCertPathSetting)?,
                    bridge_keyfile: config.query(DeviceKeyPathSetting)?,
                };
                Ok(BridgeConfig::from(params))
            }
            Cloud::C8y => {
                let params = BridgeConfigC8yParams {
                    connect_url: config.query(C8yUrlSetting)?,
                    mqtt_tls_port: MQTT_TLS_PORT,
                    config_file: C8Y_CONFIG_FILENAME.into(),
                    bridge_root_cert_path: config.query(C8yRootCertPathSetting)?,
                    remote_clientid: config.query(DeviceIdSetting)?,
                    bridge_certfile: config.query(DeviceCertPathSetting)?,
                    bridge_keyfile: config.query(DeviceKeyPathSetting)?,
                };
                Ok(BridgeConfig::from(params))
            }
        }
    }
    /// Probes the cloud through the local broker to see if the device is known.
    fn check_connection(&self, config: &TEdgeConfig) -> Result<DeviceStatus, ConnectError> {
        let port = config.query(MqttPortSetting)?.into();
        println!(
            "Sending packets to check connection. This may take up to {} seconds.\n",
            WAIT_FOR_CHECK_SECONDS
        );
        match self.cloud {
            Cloud::Azure => check_device_status_azure(port),
            Cloud::C8y => check_device_status_c8y(config),
        }
    }
    /// True when a bridge configuration file for this cloud is already on disk.
    fn check_if_bridge_exists(&self, br_config: &BridgeConfig) -> bool {
        let bridge_conf_path = self
            .config_location
            .tedge_config_root_path
            .join(TEDGE_BRIDGE_CONF_DIR_PATH)
            .join(br_config.config_file.clone());
        Path::new(&bridge_conf_path).exists()
    }
}
// XXX: Improve naming
/// Reads the current (possibly defaulted) value of `setting` and writes it
/// back, so the default becomes an explicit entry in the persisted config.
fn assign_default<T: ConfigSetting + Copy>(
    config: &mut TEdgeConfig,
    setting: T,
) -> Result<(), ConfigError>
where
    TEdgeConfig: ConfigSettingAccessor<T>,
{
    let current = config.query(setting)?;
    config.update(setting, current)?;
    Ok(())
}
// Check the connection by using the jwt token retrieval over mqtt.
// If successful in getting the jwt token '71,xxxxx', the connection is established.
fn check_device_status_c8y(tedge_config: &TEdgeConfig) -> Result<DeviceStatus, ConnectError> {
    const C8Y_TOPIC_BUILTIN_JWT_TOKEN_DOWNSTREAM: &str = "c8y/s/dat";
    const C8Y_TOPIC_BUILTIN_JWT_TOKEN_UPSTREAM: &str = "c8y/s/uat";
    const CLIENT_ID: &str = "check_connection_c8y";
    let mut options = MqttOptions::new(
        CLIENT_ID,
        DEFAULT_HOST,
        tedge_config.query(MqttPortSetting)?.into(),
    );
    // Keep-alive doubles as the overall response timeout: if nothing arrives,
    // the outgoing PingReq event below terminates the loop.
    options.set_keep_alive(RESPONSE_TIMEOUT);
    let (mut client, mut connection) = rumqttc::Client::new(options, 10);
    let mut acknowledged = false;
    client.subscribe(C8Y_TOPIC_BUILTIN_JWT_TOKEN_DOWNSTREAM, AtLeastOnce)?;
    for event in connection.iter() {
        match event {
            Ok(Event::Incoming(Packet::SubAck(_))) => {
                // We are ready to get the response, hence send the request
                client.publish(C8Y_TOPIC_BUILTIN_JWT_TOKEN_UPSTREAM, AtLeastOnce, false, "")?;
            }
            Ok(Event::Incoming(Packet::PubAck(_))) => {
                // The request has been sent
                acknowledged = true;
            }
            Ok(Event::Incoming(Packet::Publish(response))) => {
                // We got a response
                let token = String::from_utf8(response.payload.to_vec()).unwrap();
                // NOTE(review): `contains("71")` matches "71" anywhere in the
                // payload; the success message starts with "71,", so
                // `starts_with("71")` would be stricter — confirm before tightening.
                if token.contains("71") {
                    return Ok(DeviceStatus::AlreadyExists);
                }
            }
            Ok(Event::Outgoing(Outgoing::PingReq)) => {
                // No messages have been received for a while
                println!("Local MQTT publish has timed out.");
                break;
            }
            Ok(Event::Incoming(Incoming::Disconnect)) => {
                eprintln!("ERROR: Disconnected");
                break;
            }
            Err(err) => {
                eprintln!("ERROR: {:?}", err);
                break;
            }
            _ => {}
        }
    }
    if acknowledged {
        // The request has been sent but without a response
        Ok(DeviceStatus::Unknown)
    } else {
        // The request has not even been sent
        println!("\nMake sure mosquitto is running.");
        Err(ConnectError::TimeoutElapsedError)
    }
}
// Here We check the az device twin properties over mqtt to check if connection has been open.
// First the mqtt client will subscribe to a topic az/$iothub/twin/res/#, listen to the
// device twin property output.
// Empty payload will be published to az/$iothub/twin/GET/?$rid=1, here 1 is request ID.
// The result will be published by the iothub on the az/$iothub/twin/res/{status}/?$rid={request id}.
// Here if the status is 200 then it's success.
fn check_device_status_azure(port: u16) -> Result<DeviceStatus, ConnectError> {
    const AZURE_TOPIC_DEVICE_TWIN_DOWNSTREAM: &str = r##"az/twin/res/#"##;
    const AZURE_TOPIC_DEVICE_TWIN_UPSTREAM: &str = r#"az/twin/GET/?$rid=1"#;
    const CLIENT_ID: &str = "check_connection_az";
    const REGISTRATION_PAYLOAD: &[u8] = b"";
    const REGISTRATION_OK: &str = "200";
    let mut options = MqttOptions::new(CLIENT_ID, DEFAULT_HOST, port);
    // Keep-alive doubles as the response timeout; a PingReq event means
    // nothing arrived in time.
    options.set_keep_alive(RESPONSE_TIMEOUT);
    let (mut client, mut connection) = rumqttc::Client::new(options, 10);
    let mut acknowledged = false;
    client.subscribe(AZURE_TOPIC_DEVICE_TWIN_DOWNSTREAM, AtLeastOnce)?;
    for event in connection.iter() {
        match event {
            Ok(Event::Incoming(Packet::SubAck(_))) => {
                // We are ready to get the response, hence send the request
                client.publish(
                    AZURE_TOPIC_DEVICE_TWIN_UPSTREAM,
                    AtLeastOnce,
                    false,
                    REGISTRATION_PAYLOAD,
                )?;
            }
            Ok(Event::Incoming(Packet::PubAck(_))) => {
                // The request has been sent
                acknowledged = true;
            }
            Ok(Event::Incoming(Packet::Publish(response))) => {
                // We got a response; the HTTP-like status code is embedded in
                // the response topic, not the payload.
                if response.topic.contains(REGISTRATION_OK) {
                    println!("Received expected response message, connection check is successful.");
                    return Ok(DeviceStatus::AlreadyExists);
                } else {
                    break;
                }
            }
            Ok(Event::Outgoing(Outgoing::PingReq)) => {
                // No messages have been received for a while
                println!("Local MQTT publish has timed out.");
                break;
            }
            Ok(Event::Incoming(Incoming::Disconnect)) => {
                eprintln!("ERROR: Disconnected");
                break;
            }
            Err(err) => {
                eprintln!("ERROR: {:?}", err);
                break;
            }
            _ => {}
        }
    }
    if acknowledged {
        // The request has been sent but without a response
        Ok(DeviceStatus::Unknown)
    } else {
        // The request has not even been sent
        println!("Make sure mosquitto is running.");
        Err(ConnectError::TimeoutElapsedError)
    }
}
/// Creates the mosquitto bridge: validates certificates, (for c8y) registers
/// the device, writes the bridge + common mosquitto config files, restarts
/// and enables mosquitto. On failure after files were written, the generated
/// bridge config is cleaned up again so the system is left unchanged.
fn new_bridge(
    bridge_config: &BridgeConfig,
    common_mosquitto_config: &CommonMosquittoConfig,
    service_manager: &dyn SystemServiceManager,
    user_manager: UserManager,
    config_location: &TEdgeConfigLocation,
    device_type: &str,
) -> Result<(), ConnectError> {
    println!("Checking if {} is available.\n", service_manager.name());
    // Deferred: an unavailable service manager only becomes an error after
    // the configuration files have been written (see below).
    let service_manager_result = service_manager.check_operational();
    if let Err(SystemServiceError::ServiceManagerUnavailable { cmd: _, name }) =
        &service_manager_result
    {
        println!(
            "Warning: '{}' service manager is not available on the system.\n",
            name
        );
    }
    println!("Checking if configuration for requested bridge already exists.\n");
    let () = bridge_config_exists(config_location, bridge_config)?;
    println!("Validating the bridge certificates.\n");
    let () = bridge_config.validate()?;
    if bridge_config.cloud_name.eq("c8y") {
        println!("Creating the device in Cumulocity cloud.\n");
        let () = c8y_direct_connection::create_device_with_direct_connection(
            user_manager,
            bridge_config,
            &device_type,
        )?;
    }
    println!("Saving configuration for requested bridge.\n");
    if let Err(err) =
        write_bridge_config_to_file(config_location, bridge_config, common_mosquitto_config)
    {
        // We want to preserve previous errors and therefore discard result of this function.
        let _ = clean_up(config_location, bridge_config);
        return Err(err);
    }
    if let Err(err) = service_manager_result {
        println!("'tedge connect' configured the necessary tedge components, but you will have to start the required services on your own.");
        println!("Start/restart mosquitto and other thin edge components.");
        println!("thin-edge.io works seamlessly with 'systemd'.\n");
        return Err(err.into());
    }
    restart_mosquitto(bridge_config, service_manager, config_location)?;
    println!(
        "Awaiting mosquitto to start. This may take up to {} seconds.\n",
        MOSQUITTO_RESTART_TIMEOUT_SECONDS
    );
    std::thread::sleep(std::time::Duration::from_secs(
        MOSQUITTO_RESTART_TIMEOUT_SECONDS,
    ));
    println!("Enabling mosquitto service on reboots.\n");
    if let Err(err) = service_manager.enable_service(SystemService::Mosquitto) {
        clean_up(config_location, bridge_config)?;
        return Err(err.into());
    }
    println!("Successfully created bridge connection!\n");
    Ok(())
}
/// Restarts the mosquitto service; on failure the freshly written bridge
/// configuration is removed before the error is propagated.
fn restart_mosquitto(
    bridge_config: &BridgeConfig,
    service_manager: &dyn SystemServiceManager,
    config_location: &TEdgeConfigLocation,
) -> Result<(), ConnectError> {
    println!("Restarting mosquitto service.\n");
    match service_manager.restart_service(SystemService::Mosquitto) {
        Ok(()) => Ok(()),
        Err(restart_err) => {
            clean_up(config_location, bridge_config)?;
            Err(restart_err.into())
        }
    }
}
/// Starts and enables the software-management services (agent + SM mapper)
/// when the bridge requests an agent and `tedge_agent` is installed.
fn enable_software_management(
    bridge_config: &BridgeConfig,
    service_manager: &dyn SystemServiceManager,
) {
    println!("Enabling software management.\n");
    if !bridge_config.use_agent {
        return;
    }
    println!("Checking if tedge-agent is installed.\n");
    if which("tedge_agent").is_err() {
        println!("Info: Software management is not installed. So, skipping enabling related components.\n");
        return;
    }
    service_manager
        .start_and_enable_service(SystemService::TEdgeSMAgent, std::io::stdout());
    service_manager
        .start_and_enable_service(SystemService::TEdgeSMMapperC8Y, std::io::stdout());
}
// To preserve error chain and not discard other errors we need to ignore error here
// (don't use '?' with the call to this function to preserve original error).
/// Removes the generated bridge configuration file; a missing file is not an
/// error (`ok_if_not_found`).
fn clean_up(
    config_location: &TEdgeConfigLocation,
    bridge_config: &BridgeConfig,
) -> Result<(), ConnectError> {
    let bridge_conf = get_bridge_config_file_path(config_location, bridge_config);
    std::fs::remove_file(&bridge_conf).or_else(ok_if_not_found)?;
    Ok(())
}
/// Fails with [`ConnectError::ConfigurationExists`] when a bridge
/// configuration file for this cloud is already present on disk.
fn bridge_config_exists(
    config_location: &TEdgeConfigLocation,
    bridge_config: &BridgeConfig,
) -> Result<(), ConnectError> {
    let path = get_bridge_config_file_path(config_location, bridge_config);
    // `path` is already a `PathBuf`; the previous `Path::new(&path)` wrapper
    // was redundant.
    if path.exists() {
        return Err(ConnectError::ConfigurationExists {
            cloud: bridge_config.cloud_name.to_string(),
        });
    }
    Ok(())
}
/// Writes both the common mosquitto config and the cloud-specific bridge
/// config under the tedge bridge config directory, creating it if needed.
/// Files are written via `DraftFile` and atomically persisted.
fn write_bridge_config_to_file(
    config_location: &TEdgeConfigLocation,
    bridge_config: &BridgeConfig,
    common_mosquitto_config: &CommonMosquittoConfig,
) -> Result<(), ConnectError> {
    let dir_path = config_location
        .tedge_config_root_path
        .join(TEDGE_BRIDGE_CONF_DIR_PATH);
    // This will forcefully create directory structure if it doesn't exist, we should find better way to do it, maybe config should deal with it?
    let _ = create_directories(&dir_path)?;
    let common_config_path =
        get_common_mosquitto_config_file_path(config_location, common_mosquitto_config);
    let mut common_draft = DraftFile::new(&common_config_path)?;
    common_mosquitto_config.serialize(&mut common_draft)?;
    let () = common_draft.persist()?;
    let config_path = get_bridge_config_file_path(config_location, bridge_config);
    let mut config_draft = DraftFile::new(config_path)?;
    bridge_config.serialize(&mut config_draft)?;
    let () = config_draft.persist()?;
    Ok(())
}
/// Path of the cloud-specific bridge configuration file inside the tedge
/// bridge configuration directory.
fn get_bridge_config_file_path(
    config_location: &TEdgeConfigLocation,
    bridge_config: &BridgeConfig,
) -> PathBuf {
    let mut file_path = config_location.tedge_config_root_path.clone();
    file_path.push(TEDGE_BRIDGE_CONF_DIR_PATH);
    file_path.push(&bridge_config.config_file);
    file_path
}
/// Path of the shared mosquitto configuration file inside the tedge bridge
/// configuration directory.
fn get_common_mosquitto_config_file_path(
    config_location: &TEdgeConfigLocation,
    common_mosquitto_config: &CommonMosquittoConfig,
) -> PathBuf {
    let mut file_path = config_location.tedge_config_root_path.clone();
    file_path.push(TEDGE_BRIDGE_CONF_DIR_PATH);
    file_path.push(&common_mosquitto_config.config_file);
    file_path
}
// To confirm the connected c8y tenant is the one that user configured.
// Mismatches and lookup failures only print warnings; they never fail the command.
fn check_connected_c8y_tenant_as_configured(configured_url: &str, port: u16) {
    match get_connected_c8y_url(port) {
        // Connected tenant matches the configuration — nothing to report.
        Ok(url) if url == configured_url => {}
        Ok(url) => println!(
            "Warning: Connecting to {}, but the configured URL is {}.\n\
            The device certificate has to be removed from the former tenant.\n",
            url, configured_url
        ),
        Err(_) => println!("Failed to get the connected tenant URL from Cumulocity.\n"),
    }
}
| 35.756708 | 145 | 0.605313 |
891a4fa20485735f9303cec5d06d94daeb5ab666
| 115 |
//! General purpose DNA routines.
/// Valid nucleotide 1-letter codes (adenine, cytosine, guanine, thymine).
// `'static` is implied for the type of a `const` string slice, so the
// explicit lifetime was redundant.
pub const MONOMERS: &str = "ACGT";
| 23 | 42 | 0.704348 |
fe0db681d041892dc821ded58acde0ba0d793e74
| 25,663 |
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use std::cell::UnsafeCell;
use std::fmt;
use std::time::Duration;
use std::{error, ptr, result};
use engine::rocks::TablePropertiesCollection;
use engine::IterOption;
use engine::{CfName, CF_DEFAULT};
use keys::{Key, Value};
use kvproto::errorpb::Error as ErrorHeader;
use kvproto::kvrpcpb::Context;
use crate::into_other::IntoOther;
use crate::raftstore::coprocessor::SeekRegionCallback;
mod btree_engine;
mod compact_listener;
mod cursor;
mod perf_context;
mod rocksdb_engine;
mod stats;
pub use self::btree_engine::{BTreeEngine, BTreeEngineIterator, BTreeEngineSnapshot};
pub use self::compact_listener::{CompactedEvent, CompactionListener};
pub use self::cursor::{Cursor, CursorBuilder};
pub use self::perf_context::{PerfStatisticsDelta, PerfStatisticsInstant};
pub use self::rocksdb_engine::{RocksEngine, RocksSnapshot, TestEngineBuilder};
pub use self::stats::{
CfStatistics, FlowStatistics, FlowStatsReporter, Statistics, StatisticsSummary,
};
// Maximum number of `next()` steps a cursor may take before falling back to
// a `seek` (see cursor implementations in this module).
pub const SEEK_BOUND: u64 = 8;
// Timeout (seconds) used by the blocking `write`/`snapshot` wrappers below.
const DEFAULT_TIMEOUT_SECS: u64 = 5;
// Completion callback invoked with the callback context plus the outcome.
pub type Callback<T> = Box<dyn FnOnce((CbContext, Result<T>)) + Send>;
// Module-local result alias over this module's `Error` type.
pub type Result<T> = result::Result<T, Error>;
/// Context handed to engine callbacks alongside the operation result.
#[derive(Debug)]
pub struct CbContext {
    /// Raft term at which the operation was processed, when known.
    pub term: Option<u64>,
}

impl CbContext {
    /// Creates an empty context with no term recorded.
    pub fn new() -> CbContext {
        CbContext { term: None }
    }
}

// `new()` without `Default` trips clippy's `new_without_default`; provide the
// conventional impl so `CbContext::default()` works in generic code.
impl Default for CbContext {
    fn default() -> Self {
        CbContext::new()
    }
}
/// A single mutation applied to a column family by `Engine::write`.
#[derive(Debug)]
pub enum Modify {
    // Delete one key from the given column family.
    Delete(CfName, Key),
    // Put one key/value pair into the given column family.
    Put(CfName, Key, Value),
    // cf_name, start_key, end_key, notify_only
    DeleteRange(CfName, Key, Key, bool),
}
/// A key-value storage engine. Implementors only provide the async
/// `async_write`/`async_snapshot` primitives; the synchronous and
/// convenience methods are derived from them as default methods.
pub trait Engine: Send + Clone + 'static {
    type Snap: Snapshot;
    fn async_write(&self, ctx: &Context, batch: Vec<Modify>, callback: Callback<()>) -> Result<()>;
    fn async_snapshot(&self, ctx: &Context, callback: Callback<Self::Snap>) -> Result<()>;
    /// Blocking write: waits up to `DEFAULT_TIMEOUT_SECS` for `async_write`.
    fn write(&self, ctx: &Context, batch: Vec<Modify>) -> Result<()> {
        let timeout = Duration::from_secs(DEFAULT_TIMEOUT_SECS);
        match wait_op!(|cb| self.async_write(ctx, batch, cb), timeout) {
            Some((_, res)) => res,
            None => Err(Error::from(ErrorInner::Timeout(timeout))),
        }
    }
    /// Blocking snapshot: waits up to `DEFAULT_TIMEOUT_SECS` for `async_snapshot`.
    fn snapshot(&self, ctx: &Context) -> Result<Self::Snap> {
        let timeout = Duration::from_secs(DEFAULT_TIMEOUT_SECS);
        match wait_op!(|cb| self.async_snapshot(ctx, cb), timeout) {
            Some((_, res)) => res,
            None => Err(Error::from(ErrorInner::Timeout(timeout))),
        }
    }
    /// Puts into the default column family.
    fn put(&self, ctx: &Context, key: Key, value: Value) -> Result<()> {
        self.put_cf(ctx, CF_DEFAULT, key, value)
    }
    /// Single-key put expressed as a one-element write batch.
    fn put_cf(&self, ctx: &Context, cf: CfName, key: Key, value: Value) -> Result<()> {
        self.write(ctx, vec![Modify::Put(cf, key, value)])
    }
    /// Deletes from the default column family.
    fn delete(&self, ctx: &Context, key: Key) -> Result<()> {
        self.delete_cf(ctx, CF_DEFAULT, key)
    }
    /// Single-key delete expressed as a one-element write batch.
    fn delete_cf(&self, ctx: &Context, cf: CfName, key: Key) -> Result<()> {
        self.write(ctx, vec![Modify::Delete(cf, key)])
    }
}
/// A consistent, read-only view of an [`Engine`].
pub trait Snapshot: Send + Clone {
    /// The raw iterator type wrapped by [`Cursor`].
    type Iter: Iterator;
    /// Reads a key from the default column family.
    fn get(&self, key: &Key) -> Result<Option<Value>>;
    /// Reads a key from the given column family.
    fn get_cf(&self, cf: CfName, key: &Key) -> Result<Option<Value>>;
    /// Creates a cursor over the default column family.
    fn iter(&self, iter_opt: IterOption, mode: ScanMode) -> Result<Cursor<Self::Iter>>;
    /// Creates a cursor over the given column family.
    fn iter_cf(
        &self,
        cf: CfName,
        iter_opt: IterOption,
        mode: ScanMode,
    ) -> Result<Cursor<Self::Iter>>;
    /// Table properties of the default column family.
    fn get_properties(&self) -> Result<TablePropertiesCollection> {
        self.get_properties_cf(CF_DEFAULT)
    }
    /// Table properties of the given column family; errors by default for
    /// engines that do not expose user properties.
    fn get_properties_cf(&self, _: CfName) -> Result<TablePropertiesCollection> {
        Err(box_err!("no user properties"))
    }
    // The minimum key this snapshot can retrieve.
    #[inline]
    fn lower_bound(&self) -> Option<&[u8]> {
        None
    }
    // Any key fetched from this snapshot is strictly less than this upper bound.
    #[inline]
    fn upper_bound(&self) -> Option<&[u8]> {
        None
    }
    /// Retrieves a version that represents the modification status of the underlying data.
    /// Version should be changed when underlying data is changed.
    ///
    /// If the engine does not support data version, then `None` is returned.
    #[inline]
    fn get_data_version(&self) -> Option<u64> {
        None
    }
}
/// A low-level key-value iterator; wrapped by `Cursor` for higher-level
/// seek operations. The seek methods return whether a valid position was
/// reached; `key`/`value` read the current position.
pub trait Iterator: Send {
    /// Moves forward one entry; returns whether the new position is valid.
    fn next(&mut self) -> bool;
    /// Moves backward one entry; returns whether the new position is valid.
    fn prev(&mut self) -> bool;
    /// Positions at the first entry >= `key`.
    fn seek(&mut self, key: &Key) -> Result<bool>;
    /// Positions at the last entry <= `key`.
    fn seek_for_prev(&mut self, key: &Key) -> Result<bool>;
    /// Positions at the first entry of the range.
    fn seek_to_first(&mut self) -> bool;
    /// Positions at the last entry of the range.
    fn seek_to_last(&mut self) -> bool;
    /// Whether the iterator currently points at an entry.
    fn valid(&self) -> bool;
    /// Surfaces any error state of the underlying iterator.
    fn status(&self) -> Result<()>;
    /// Hook for implementations to reject keys; accepts everything by default.
    fn validate_key(&self, _: &Key) -> Result<()> {
        Ok(())
    }
    // NOTE(review): `key`/`value` are presumably only meaningful while
    // `valid()` returns true — confirm against implementors.
    fn key(&self) -> &[u8];
    fn value(&self) -> &[u8];
}
pub trait RegionInfoProvider: Send + Clone + 'static {
/// Find the first region `r` whose range contains or greater than `from_key` and the peer on
/// this TiKV satisfies `filter(peer)` returns true.
fn seek_region(&self, from: &[u8], filter: SeekRegionCallback) -> Result<()>;
}
/// The direction(s) of movement a cursor is permitted to make
/// (the tests pair `Forward` with `seek`, `Backward` with
/// `reverse_seek`/`seek_for_prev`, and `Mixed` with both).
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum ScanMode {
    /// Forward movement only.
    Forward,
    /// Backward movement only.
    Backward,
    /// Movement in both directions.
    Mixed,
}
quick_error! {
#[derive(Debug)]
pub enum ErrorInner {
Request(err: ErrorHeader) {
from()
description("request to underhook engine failed")
display("{:?}", err)
}
Timeout(d: Duration) {
description("request timeout")
display("timeout after {:?}", d)
}
EmptyRequest {
description("an empty request")
display("an empty request")
}
Other(err: Box<dyn error::Error + Send + Sync>) {
from()
cause(err.as_ref())
description(err.description())
display("unknown error {:?}", err)
}
}
}
impl From<engine::Error> for ErrorInner {
fn from(err: engine::Error) -> ErrorInner {
ErrorInner::Request(err.into())
}
}
impl From<engine_traits::Error> for ErrorInner {
fn from(err: engine_traits::Error) -> ErrorInner {
ErrorInner::Request(err.into_other())
}
}
impl ErrorInner {
    /// Returns a copy of this error when the variant supports it.
    ///
    /// `Other` holds a boxed trait object with no `Clone` bound, so it
    /// cannot be duplicated and yields `None`.
    pub fn maybe_clone(&self) -> Option<ErrorInner> {
        match self {
            ErrorInner::Request(e) => Some(ErrorInner::Request(e.clone())),
            ErrorInner::Timeout(d) => Some(ErrorInner::Timeout(*d)),
            ErrorInner::EmptyRequest => Some(ErrorInner::EmptyRequest),
            ErrorInner::Other(_) => None,
        }
    }
}
/// The error type for engine operations. The payload is boxed so that
/// `Result<T>` stays pointer-sized regardless of variant size.
pub struct Error(pub Box<ErrorInner>);
impl Error {
    /// Forwards to [`ErrorInner::maybe_clone`]; `None` when the inner
    /// variant cannot be cloned.
    pub fn maybe_clone(&self) -> Option<Error> {
        self.0.maybe_clone().map(Error::from)
    }
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.0, f)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl std::error::Error for Error {
    // `description` is deprecated in favour of `Display`, but it is kept
    // to forward older callers to the inner error's description.
    fn description(&self) -> &str {
        std::error::Error::description(&self.0)
    }
    // Delegate the error chain to the boxed inner error.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        std::error::Error::source(&self.0)
    }
}
// Direct conversion from the inner error: just box it.
impl From<ErrorInner> for Error {
    #[inline]
    fn from(e: ErrorInner) -> Self {
        Error(Box::new(e))
    }
}
// Blanket conversion for anything convertible to `ErrorInner`. The
// `default fn` marks this as specializable (the impl above is the
// specialization) — this requires the nightly `specialization` feature.
impl<T: Into<ErrorInner>> From<T> for Error {
    #[inline]
    default fn from(err: T) -> Self {
        let err = err.into();
        err.into()
    }
}
thread_local! {
// A pointer to thread local engine. Use raw pointer and `UnsafeCell` to reduce runtime check.
static TLS_ENGINE_ANY: UnsafeCell<*mut ()> = UnsafeCell::new(ptr::null_mut());
}
/// Execute the closure on the thread local engine.
///
/// # Safety
///
/// Precondition: `TLS_ENGINE_ANY` is non-null, i.e. `set_tls_engine` must
/// have been called on this thread, and the stored engine must actually be
/// of type `E` — the erased pointer is cast back without any check.
pub unsafe fn with_tls_engine<E: Engine, F, R>(f: F) -> R
where
    F: FnOnce(&E) -> R,
{
    TLS_ENGINE_ANY.with(|e| {
        // Reinterpret the type-erased pointer as the concrete engine type.
        let engine = &*(*e.get() as *const E);
        f(engine)
    })
}
/// Set the thread local engine.
///
/// Postcondition: `TLS_ENGINE_ANY` is non-null.
pub fn set_tls_engine<E: Engine>(engine: E) {
// Safety: we check that `TLS_ENGINE_ANY` is null to ensure we don't leak an existing
// engine; we ensure there are no other references to `engine`.
TLS_ENGINE_ANY.with(move |e| unsafe {
if (*e.get()).is_null() {
let engine = Box::into_raw(Box::new(engine)) as *mut ();
*e.get() = engine;
}
});
}
/// Destroy the thread local engine.
///
/// Safety: the current tls engine must have the same type as `E` (or at least
/// there destructors must be compatible).
/// Postcondition: `TLS_ENGINE_ANY` is null.
pub unsafe fn destroy_tls_engine<E: Engine>() {
// Safety: we check that `TLS_ENGINE_ANY` is non-null, we must ensure that references
// to `TLS_ENGINE_ANY` can never be stored outside of `TLS_ENGINE_ANY`.
TLS_ENGINE_ANY.with(|e| {
let ptr = *e.get();
if !ptr.is_null() {
drop(Box::from_raw(ptr as *mut E));
*e.get() = ptr::null_mut();
}
});
}
#[cfg(test)]
pub mod tests {
use super::*;
use tikv_util::codec::bytes;
pub const TEST_ENGINE_CFS: &[CfName] = &["cf"];
pub fn must_put<E: Engine>(engine: &E, key: &[u8], value: &[u8]) {
engine
.put(&Context::default(), Key::from_raw(key), value.to_vec())
.unwrap();
}
pub fn must_put_cf<E: Engine>(engine: &E, cf: CfName, key: &[u8], value: &[u8]) {
engine
.put_cf(&Context::default(), cf, Key::from_raw(key), value.to_vec())
.unwrap();
}
pub fn must_delete<E: Engine>(engine: &E, key: &[u8]) {
engine
.delete(&Context::default(), Key::from_raw(key))
.unwrap();
}
pub fn must_delete_cf<E: Engine>(engine: &E, cf: CfName, key: &[u8]) {
engine
.delete_cf(&Context::default(), cf, Key::from_raw(key))
.unwrap();
}
pub fn assert_has<E: Engine>(engine: &E, key: &[u8], value: &[u8]) {
let snapshot = engine.snapshot(&Context::default()).unwrap();
assert_eq!(snapshot.get(&Key::from_raw(key)).unwrap().unwrap(), value);
}
pub fn assert_has_cf<E: Engine>(engine: &E, cf: CfName, key: &[u8], value: &[u8]) {
let snapshot = engine.snapshot(&Context::default()).unwrap();
assert_eq!(
snapshot.get_cf(cf, &Key::from_raw(key)).unwrap().unwrap(),
value
);
}
pub fn assert_none<E: Engine>(engine: &E, key: &[u8]) {
let snapshot = engine.snapshot(&Context::default()).unwrap();
assert_eq!(snapshot.get(&Key::from_raw(key)).unwrap(), None);
}
pub fn assert_none_cf<E: Engine>(engine: &E, cf: CfName, key: &[u8]) {
let snapshot = engine.snapshot(&Context::default()).unwrap();
assert_eq!(snapshot.get_cf(cf, &Key::from_raw(key)).unwrap(), None);
}
fn assert_seek<E: Engine>(engine: &E, key: &[u8], pair: (&[u8], &[u8])) {
let snapshot = engine.snapshot(&Context::default()).unwrap();
let mut cursor = snapshot
.iter(IterOption::default(), ScanMode::Mixed)
.unwrap();
let mut statistics = CfStatistics::default();
cursor.seek(&Key::from_raw(key), &mut statistics).unwrap();
assert_eq!(cursor.key(&mut statistics), &*bytes::encode_bytes(pair.0));
assert_eq!(cursor.value(&mut statistics), pair.1);
}
fn assert_reverse_seek<E: Engine>(engine: &E, key: &[u8], pair: (&[u8], &[u8])) {
let snapshot = engine.snapshot(&Context::default()).unwrap();
let mut cursor = snapshot
.iter(IterOption::default(), ScanMode::Mixed)
.unwrap();
let mut statistics = CfStatistics::default();
cursor
.reverse_seek(&Key::from_raw(key), &mut statistics)
.unwrap();
assert_eq!(cursor.key(&mut statistics), &*bytes::encode_bytes(pair.0));
assert_eq!(cursor.value(&mut statistics), pair.1);
}
fn assert_near_seek<I: Iterator>(cursor: &mut Cursor<I>, key: &[u8], pair: (&[u8], &[u8])) {
let mut statistics = CfStatistics::default();
assert!(
cursor
.near_seek(&Key::from_raw(key), &mut statistics)
.unwrap(),
hex::encode_upper(key)
);
assert_eq!(cursor.key(&mut statistics), &*bytes::encode_bytes(pair.0));
assert_eq!(cursor.value(&mut statistics), pair.1);
}
fn assert_near_reverse_seek<I: Iterator>(
cursor: &mut Cursor<I>,
key: &[u8],
pair: (&[u8], &[u8]),
) {
let mut statistics = CfStatistics::default();
assert!(
cursor
.near_reverse_seek(&Key::from_raw(key), &mut statistics)
.unwrap(),
hex::encode_upper(key)
);
assert_eq!(cursor.key(&mut statistics), &*bytes::encode_bytes(pair.0));
assert_eq!(cursor.value(&mut statistics), pair.1);
}
    /// Runs the full suite of basic CRUD checks against `engine`.
    /// (NOTE: "curd" in the name is a long-standing typo for "crud"; the
    /// function is public, so renaming would break external callers.)
    pub fn test_base_curd_options<E: Engine>(engine: &E) {
        test_get_put(engine);
        test_batch(engine);
        test_empty_seek(engine);
        test_seek(engine);
        test_near_seek(engine);
        test_cf(engine);
        test_empty_write(engine);
    }
fn test_get_put<E: Engine>(engine: &E) {
assert_none(engine, b"x");
must_put(engine, b"x", b"1");
assert_has(engine, b"x", b"1");
must_put(engine, b"x", b"2");
assert_has(engine, b"x", b"2");
}
fn test_batch<E: Engine>(engine: &E) {
engine
.write(
&Context::default(),
vec![
Modify::Put(CF_DEFAULT, Key::from_raw(b"x"), b"1".to_vec()),
Modify::Put(CF_DEFAULT, Key::from_raw(b"y"), b"2".to_vec()),
],
)
.unwrap();
assert_has(engine, b"x", b"1");
assert_has(engine, b"y", b"2");
engine
.write(
&Context::default(),
vec![
Modify::Delete(CF_DEFAULT, Key::from_raw(b"x")),
Modify::Delete(CF_DEFAULT, Key::from_raw(b"y")),
],
)
.unwrap();
assert_none(engine, b"y");
assert_none(engine, b"y");
}
fn test_seek<E: Engine>(engine: &E) {
must_put(engine, b"x", b"1");
assert_seek(engine, b"x", (b"x", b"1"));
assert_seek(engine, b"a", (b"x", b"1"));
assert_reverse_seek(engine, b"x1", (b"x", b"1"));
must_put(engine, b"z", b"2");
assert_seek(engine, b"y", (b"z", b"2"));
assert_seek(engine, b"x\x00", (b"z", b"2"));
assert_reverse_seek(engine, b"y", (b"x", b"1"));
assert_reverse_seek(engine, b"z", (b"x", b"1"));
let snapshot = engine.snapshot(&Context::default()).unwrap();
let mut iter = snapshot
.iter(IterOption::default(), ScanMode::Mixed)
.unwrap();
let mut statistics = CfStatistics::default();
assert!(!iter
.seek(&Key::from_raw(b"z\x00"), &mut statistics)
.unwrap());
assert!(!iter
.reverse_seek(&Key::from_raw(b"x"), &mut statistics)
.unwrap());
must_delete(engine, b"x");
must_delete(engine, b"z");
}
fn test_near_seek<E: Engine>(engine: &E) {
must_put(engine, b"x", b"1");
must_put(engine, b"z", b"2");
let snapshot = engine.snapshot(&Context::default()).unwrap();
let mut cursor = snapshot
.iter(IterOption::default(), ScanMode::Mixed)
.unwrap();
assert_near_seek(&mut cursor, b"x", (b"x", b"1"));
assert_near_seek(&mut cursor, b"a", (b"x", b"1"));
assert_near_reverse_seek(&mut cursor, b"z1", (b"z", b"2"));
assert_near_reverse_seek(&mut cursor, b"x1", (b"x", b"1"));
assert_near_seek(&mut cursor, b"y", (b"z", b"2"));
assert_near_seek(&mut cursor, b"x\x00", (b"z", b"2"));
let mut statistics = CfStatistics::default();
assert!(!cursor
.near_seek(&Key::from_raw(b"z\x00"), &mut statistics)
.unwrap());
// Insert many key-values between 'x' and 'z' then near_seek will fallback to seek.
for i in 0..super::SEEK_BOUND {
let key = format!("y{}", i);
must_put(engine, key.as_bytes(), b"3");
}
let snapshot = engine.snapshot(&Context::default()).unwrap();
let mut cursor = snapshot
.iter(IterOption::default(), ScanMode::Mixed)
.unwrap();
assert_near_seek(&mut cursor, b"x", (b"x", b"1"));
assert_near_seek(&mut cursor, b"z", (b"z", b"2"));
must_delete(engine, b"x");
must_delete(engine, b"z");
for i in 0..super::SEEK_BOUND {
let key = format!("y{}", i);
must_delete(engine, key.as_bytes());
}
}
fn test_empty_seek<E: Engine>(engine: &E) {
let snapshot = engine.snapshot(&Context::default()).unwrap();
let mut cursor = snapshot
.iter(IterOption::default(), ScanMode::Mixed)
.unwrap();
let mut statistics = CfStatistics::default();
assert!(!cursor
.near_reverse_seek(&Key::from_raw(b"x"), &mut statistics)
.unwrap());
assert!(!cursor
.near_reverse_seek(&Key::from_raw(b"z"), &mut statistics)
.unwrap());
assert!(!cursor
.near_reverse_seek(&Key::from_raw(b"w"), &mut statistics)
.unwrap());
assert!(!cursor
.near_seek(&Key::from_raw(b"x"), &mut statistics)
.unwrap());
assert!(!cursor
.near_seek(&Key::from_raw(b"z"), &mut statistics)
.unwrap());
assert!(!cursor
.near_seek(&Key::from_raw(b"w"), &mut statistics)
.unwrap());
}
macro_rules! assert_seek {
($cursor:ident, $func:ident, $k:expr, $res:ident) => {{
let mut statistics = CfStatistics::default();
assert_eq!(
$cursor.$func(&$k, &mut statistics).unwrap(),
$res.is_some(),
"assert_seek {} failed exp {:?}",
$k,
$res
);
if let Some((ref k, ref v)) = $res {
assert_eq!(
$cursor.key(&mut statistics),
bytes::encode_bytes(k.as_bytes()).as_slice()
);
assert_eq!($cursor.value(&mut statistics), v.as_bytes());
}
}};
}
#[derive(PartialEq, Eq, Clone, Copy)]
enum SeekMode {
Normal,
Reverse,
ForPrev,
}
// use step to control the distance between target key and current key in cursor.
fn test_linear_seek<S: Snapshot>(
snapshot: &S,
mode: ScanMode,
seek_mode: SeekMode,
start_idx: usize,
step: usize,
) {
let mut cursor = snapshot.iter(IterOption::default(), mode).unwrap();
let mut near_cursor = snapshot.iter(IterOption::default(), mode).unwrap();
let limit = (SEEK_BOUND as usize * 10 + 50 - 1) * 2;
for (_, mut i) in (start_idx..(SEEK_BOUND as usize * 30))
.enumerate()
.filter(|&(i, _)| i % step == 0)
{
if seek_mode != SeekMode::Normal {
i = SEEK_BOUND as usize * 30 - 1 - i;
}
let key = format!("key_{:03}", i);
let seek_key = Key::from_raw(key.as_bytes());
let exp_kv = if i <= 100 {
match seek_mode {
SeekMode::Reverse => None,
SeekMode::ForPrev if i < 100 => None,
SeekMode::Normal | SeekMode::ForPrev => {
Some(("key_100".to_owned(), "value_50".to_owned()))
}
}
} else if i <= limit {
if seek_mode == SeekMode::Reverse {
Some((
format!("key_{}", (i - 1) / 2 * 2),
format!("value_{}", (i - 1) / 2),
))
} else if seek_mode == SeekMode::ForPrev {
Some((format!("key_{}", i / 2 * 2), format!("value_{}", i / 2)))
} else {
Some((
format!("key_{}", (i + 1) / 2 * 2),
format!("value_{}", (i + 1) / 2),
))
}
} else if seek_mode != SeekMode::Normal {
Some((
format!("key_{:03}", limit),
format!("value_{:03}", limit / 2),
))
} else {
None
};
match seek_mode {
SeekMode::Reverse => {
assert_seek!(cursor, reverse_seek, seek_key, exp_kv);
assert_seek!(near_cursor, near_reverse_seek, seek_key, exp_kv);
}
SeekMode::Normal => {
assert_seek!(cursor, seek, seek_key, exp_kv);
assert_seek!(near_cursor, near_seek, seek_key, exp_kv);
}
SeekMode::ForPrev => {
assert_seek!(cursor, seek_for_prev, seek_key, exp_kv);
assert_seek!(near_cursor, near_seek_for_prev, seek_key, exp_kv);
}
}
}
}
pub fn test_linear<E: Engine>(engine: &E) {
for i in 50..50 + SEEK_BOUND * 10 {
let key = format!("key_{}", i * 2);
let value = format!("value_{}", i);
must_put(engine, key.as_bytes(), value.as_bytes());
}
let snapshot = engine.snapshot(&Context::default()).unwrap();
for step in 1..SEEK_BOUND as usize * 3 {
for start in 0..10 {
test_linear_seek(
&snapshot,
ScanMode::Forward,
SeekMode::Normal,
start * SEEK_BOUND as usize,
step,
);
test_linear_seek(
&snapshot,
ScanMode::Backward,
SeekMode::Reverse,
start * SEEK_BOUND as usize,
step,
);
test_linear_seek(
&snapshot,
ScanMode::Backward,
SeekMode::ForPrev,
start * SEEK_BOUND as usize,
step,
);
}
}
for &seek_mode in &[SeekMode::Reverse, SeekMode::Normal, SeekMode::ForPrev] {
for step in 1..SEEK_BOUND as usize * 3 {
for start in 0..10 {
test_linear_seek(
&snapshot,
ScanMode::Mixed,
seek_mode,
start * SEEK_BOUND as usize,
step,
);
}
}
}
}
fn test_cf<E: Engine>(engine: &E) {
assert_none_cf(engine, "cf", b"key");
must_put_cf(engine, "cf", b"key", b"value");
assert_has_cf(engine, "cf", b"key", b"value");
must_delete_cf(engine, "cf", b"key");
assert_none_cf(engine, "cf", b"key");
}
fn test_empty_write<E: Engine>(engine: &E) {
engine.write(&Context::default(), vec![]).unwrap_err();
}
pub fn test_cfs_statistics<E: Engine>(engine: &E) {
must_put(engine, b"foo", b"bar1");
must_put(engine, b"foo2", b"bar2");
must_put(engine, b"foo3", b"bar3"); // deleted
must_put(engine, b"foo4", b"bar4");
must_put(engine, b"foo42", b"bar42"); // deleted
must_put(engine, b"foo5", b"bar5"); // deleted
must_put(engine, b"foo6", b"bar6");
must_delete(engine, b"foo3");
must_delete(engine, b"foo42");
must_delete(engine, b"foo5");
let snapshot = engine.snapshot(&Context::default()).unwrap();
let mut iter = snapshot
.iter(IterOption::default(), ScanMode::Forward)
.unwrap();
let mut statistics = CfStatistics::default();
iter.seek(&Key::from_raw(b"foo30"), &mut statistics)
.unwrap();
assert_eq!(iter.key(&mut statistics), &*bytes::encode_bytes(b"foo4"));
assert_eq!(iter.value(&mut statistics), b"bar4");
assert_eq!(statistics.seek, 1);
let mut statistics = CfStatistics::default();
iter.near_seek(&Key::from_raw(b"foo55"), &mut statistics)
.unwrap();
assert_eq!(iter.key(&mut statistics), &*bytes::encode_bytes(b"foo6"));
assert_eq!(iter.value(&mut statistics), b"bar6");
assert_eq!(statistics.seek, 0);
assert_eq!(statistics.next, 1);
let mut statistics = CfStatistics::default();
iter.prev(&mut statistics);
assert_eq!(iter.key(&mut statistics), &*bytes::encode_bytes(b"foo4"));
assert_eq!(iter.value(&mut statistics), b"bar4");
assert_eq!(statistics.prev, 1);
iter.prev(&mut statistics);
assert_eq!(iter.key(&mut statistics), &*bytes::encode_bytes(b"foo2"));
assert_eq!(iter.value(&mut statistics), b"bar2");
assert_eq!(statistics.prev, 2);
iter.prev(&mut statistics);
assert_eq!(iter.key(&mut statistics), &*bytes::encode_bytes(b"foo"));
assert_eq!(iter.value(&mut statistics), b"bar1");
assert_eq!(statistics.prev, 3);
}
}
| 33.945767 | 99 | 0.540895 |
f4edde0acd9d5affbec1459d8771a88bee1e30bf
| 1,037 |
use serde::{Deserialize, Serialize};
use crate::apps::{CommunicationMethod, LogSettings, MonitoringSettings};
/// Top-level configuration for the partial-update engine service.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PartialUpdateEngineSettings {
    // How this service communicates with the rest of the system.
    pub communication_method: CommunicationMethod,
    // Length of the sleep phase (units not visible here — presumably
    // seconds; TODO confirm against the consumer of this setting).
    pub sleep_phase_length: u64,
    // Kafka producer settings for the egest topic.
    pub kafka: PartialUpdateEngineKafkaSettings,
    // Kafka consumer settings for incoming notifications.
    pub notification_consumer: PartialUpdateEngineNotificationConsumerSettings,
    // Endpoints of dependent services.
    pub services: PartialUpdateEngineServicesSettings,
    pub monitoring: MonitoringSettings,
    // Optional in the config file; falls back to `LogSettings::default()`.
    #[serde(default)]
    pub log: LogSettings,
}
/// Kafka settings for publishing engine output.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PartialUpdateEngineKafkaSettings {
    // Comma-separated broker address list, as accepted by Kafka clients.
    pub brokers: String,
    // Topic that engine results are written to.
    pub egest_topic: String,
}
/// Kafka consumer settings for the notification stream.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PartialUpdateEngineNotificationConsumerSettings {
    // Comma-separated broker address list.
    pub brokers: String,
    // Kafka consumer group id.
    pub group_id: String,
    // Source topic (or stream identifier) of notifications — confirm at
    // the consumer construction site.
    pub source: String,
}
/// Endpoints of external services the engine depends on.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PartialUpdateEngineServicesSettings {
    // Base URL of the schema registry service.
    pub schema_registry_url: String,
}
| 28.027027 | 79 | 0.775313 |
22cb6b211fcac0861bfb6b588138c7172e8f77bd
| 5,502 |
// Shared constants (presumably test fixtures, per the FIXME below).
pub const SEQ_NO: i32 = 1;
pub const PROTOCOL_VERSION: usize = 2;
pub const WALLET: &'static str = "wallet_1"; // FIXME never use global names
// Wallet storage backends: the default one and the in-memory variant.
pub const TYPE: &'static str = "default";
pub const INMEM_TYPE: &'static str = "inmem";
// Signature scheme identifier ("CL" — presumably Camenisch-Lysyanskaya;
// confirm against the signing code).
pub const SIGNATURE_TYPE: &'static str = "CL";
pub const TRUSTEE_SEED: &'static str = "000000000000000000000000Trustee1";
pub const STEWARD_SEED: &'static str = "000000000000000000000000Steward1";
pub const MY1_SEED: &'static str = "00000000000000000000000000000My1";
pub const MY2_SEED: &'static str = "00000000000000000000000000000My2";
pub const ISSUER_DID: &'static str = "NcYxiDXkpYi6ov5FcYDi1e";
pub const ISSUER_DID_2: &'static str = "CnEDk9HrMnmiHXEV1WFgbVCRteYnPqsJwrTdcZaNhFVW";
pub const DID: &'static str = "CnEDk9HrMnmiHXEV1WFgbVCRteYnPqsJwrTdcZaNhFVW";
pub const DID_MY1: &'static str = "VsKV7grR1BUE29mG2Fm2kX";
pub const DID_MY2: &'static str = "2PRyVHmkXQnQzJQKxHxnXC";
pub const DID_TRUSTEE: &'static str = "V4SGRU86Z58d6TV7PBUe6f";
pub const INVALID_BASE58_DID: &'static str = "invalid_base58string";
pub const IDENTIFIER: &'static str = "Th7MpTaRZVRYnPiabds81Y";
pub const INVALID_IDENTIFIER: &'static str = "invalid_base58_identifier";
pub const DEST: &'static str = "FYmoFw55GeQH7SRFa37dkx1d2dZ3zUF8ckg7wmL7ofN4";
pub const GVT_SCHEMA_NAME: &'static str = "gvt";
pub const XYZ_SCHEMA_NAME: &'static str = "xyz";
pub const SCHEMA_VERSION: &'static str = "1.0";
pub const GVT_SCHEMA_ATTRIBUTES: &'static str = r#"["name", "age", "sex", "height"]"#;
pub const XYZ_SCHEMA_ATTRIBUTES: &'static str = r#"["status", "period"]"#;
pub const SCHEMA_DATA: &'static str = r#"{"id":"id", "name":"gvt","version":"1.0","attrNames":["name"],"ver":"1.0"}"#;
pub const ENDPOINT: &'static str = "127.0.0.1:9700";
pub const VERKEY: &'static str = "CnEDk9HrMnmiHXEV1WFgbVCRteYnPqsJwrTdcZaNhFVW";
pub const VERKEY_MY1: &'static str = "GjZWsBLgZCR18aL468JAT7w9CZRiBnpxUPPgyQxh4voa";
pub const INVALID_VERKEY_LENGTH: &'static str = "invalidVerkeyLength";
pub const INVALID_BASE58_VERKEY: &'static str = "CnEDk___MnmiHXEV1WFgbV___eYnPqs___TdcZaNhFVW";
pub const NONCE: &'static [u8; 24] = &[242, 246, 53, 153, 106, 37, 185, 65, 212, 14, 109, 131, 200, 169, 94, 110, 51, 47, 101, 89, 0, 171, 105, 183];
pub const VERKEY_MY2: &'static str = "kqa2HyagzfMAq42H5f9u3UMwnSBPQx2QfrSyXbUPxMn";
pub const VERKEY_TRUSTEE: &'static str = "GJ1SzoWzavQYfNL9XkaJdrQejfztN4XqdsiV4ct3LXKL";
pub const METADATA: &'static str = "some_metadata";
pub const MESSAGE: &'static str = r#"{"reqId":1496822211362017764}"#;
pub const REQUEST: &'static str = r#"{"reqId":1496822211362017764,"identifier":"GJ1SzoWzavQYfNL9XkaJdrQejfztN4XqdsiV4ct3LXKL","operation":{"type":"1","dest":"VsKV7grR1BUE29mG2Fm2kX","verkey":"GjZWsBLgZCR18aL468JAT7w9CZRiBnpxUPPgyQxh4voa"}}"#;
pub const REQUEST_FROM_TRUSTEE: &'static str = r#"{"reqId":1496822211362017764,"identifier":"V4SGRU86Z58d6TV7PBUe6f","operation":{"type":"1","dest":"VsKV7grR1BUE29mG2Fm2kX","verkey":"GjZWsBLgZCR18aL468JAT7w9CZRiBnpxUPPgyQxh4voa"}}"#;
pub const GET_SCHEMA_DATA: &'static str = r#"{"name":"name","version":"1.0"}"#;
pub const ATTRIB_RAW_DATA: &'static str = r#"{"endpoint":{"ha":"127.0.0.1:5555"}}"#;
pub const ATTRIB_HASH_DATA: &'static str = r#"83d907821df1c87db829e96569a11f6fc2e7880acba5e43d07ab786959e13bd3"#;
pub const ATTRIB_ENC_DATA: &'static str = r#"aa3f41f619aa7e5e6b6d0de555e05331787f9bf9aa672b94b57ab65b9b66c3ea960b18a98e3834b1fc6cebf49f463b81fd6e3181"#;
pub const NODE_DATA: &'static str = r#"{"alias":"Node5","blskey":"4N8aUNHSgjQVgkpm8nhNEfDf6txHznoYREg9kirmJrkivgL4oSEimFF6nsQ6M41QvhM2Z33nves5vfSn9n1UwNFJBYtWVnHYMATn76vLuL3zU88KyeAYcHfsih3He6UHcXDxcaecHVz6jhCYz1P2UZn2bDVruL5wXpehgBfBaLKm3Ba","blskey_pop":"RahHYiCvoNCtPTrVtP7nMC5eTYrsUA8WjXbdhNc8debh1agE9bGiJxWBXYNFbnJXoXhWFMvyqhqhRoq737YQemH5ik9oL7R4NTTCz2LEZhkgLJzB3QRQqJyBNyv7acbdHrAT8nQ9UkLbaVL9NBpnWXBTw4LEMePaSHEw66RzPNdAX1","client_ip":"10.0.0.100","client_port":1,"node_ip":"10.0.0.100","node_port":2,"services":["VALIDATOR"]}"#;
pub const TAG_1: &'static str = "TAG_1";
pub const TAG_2: &'static str = "TAG_2";
pub const REVOC_REG_TYPE: &'static str = "CL_ACCUM";
pub const WALLET_CREDENTIALS: &'static str = r#"{"key":"8dvfYSt5d1taSd6yJdpjq4emkwsPDDLYxkNFysFD2cZY", "key_derivation_method":"RAW"}"#;
pub const WALLET_CREDENTIALS_ARGON2I_MOD: &'static str = r#"{"key":"key", "key_derivation_method":"ARGON2I_MOD"}"#;
pub const WALLET_CREDENTIALS_ARGON2I_INT: &'static str = r#"{"key":"key", "key_derivation_method":"ARGON2I_INT"}"#;
pub const WALLET_CREDENTIALS_RAW: &'static str = r#"{"key":"8dvfYSt5d1taSd6yJdpjq4emkwsPDDLYxkNFysFD2cZY", "key_derivation_method":"RAW"}"#;
pub const DEFAULT_WALLET_CONFIG: &'static str = r#"{"id":"default_wallet_1","storage_type":"default"}"#; // FIXME never use global names
pub const INMEM_WALLET_CONFIG: &'static str = r#"{"id":"inmem_wallet_1","storage_type":"inmem"}"#; // FIXME never use global names
pub const UNKNOWN_WALLET_CONFIG: &'static str = r#"{"id":"unknown_wallet_1","storage_type":"unknown"}"#; // FIXME never use global names
pub const AGENT_MESSAGE: &'static str = r#"{ "@id": "123456780","@type":"did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/basicmessage/1.0/message","sent_time": "2019-01-15 18:42:01Z","content": "Your hovercraft is full of eels."}"#;
pub const SCHEMA_ID: &'static str = "NcYxiDXkpYi6ov5FcYDi1e:2:gvt:1.0";
pub const CRED_DEF_ID: &'static str = "NcYxiDXkpYi6ov5FcYDi1e:3:CL:1";
pub const REV_REG_ID: &'static str = "NcYxiDXkpYi6ov5FcYDi1e:4:NcYxiDXkpYi6ov5FcYDi1e:3:CL:1:CL_ACCUM:TAG_1";
| 94.862069 | 539 | 0.765176 |
0848007e76c00d4d042a23bfd4fefa6afd41a518
| 1,783 |
use crate::*;
/// Returns the printable (REPL) representation of a value.
///
/// Atoms render as themselves, strings are re-quoted, opaque values render
/// as `#[kind detail]`, and the quoting forms reattach their reader prefix
/// (`'`, `` ` ``, `,`, `,@`). Pairs and vectors recurse via the helpers below.
pub fn repr(expr: &JVal) -> String {
    match &*expr {
        JVal::Nil => "()".to_string(),
        JVal::Int(n) => format!("{}", n),
        JVal::Float(x) => format!("{}", x),
        JVal::Bool(b) => (if *b { "true" } else { "false" }).to_string(),
        JVal::Symbol(s) => s.to_string(),
        JVal::String(s) => format!("\"{}\"", s),
        JVal::Error(e) => format!("#[error {}]", e),
        JVal::Builtin(b) => format!("#[function {}]", b),
        JVal::SpecialForm(b) => format!("#[specialform {}]", b),
        JVal::Lambda(l) => format!("#[lambda {}]", l),
        JVal::Macro(l) => format!("#[macro {}]", l),
        JVal::Pair(c) => repr_pair(c),
        JVal::Vector(v) => repr_vec(v),
        JVal::Quote(val) => format!("'{}", repr(&*val)),
        JVal::Quasiquote(val) => format!("`{}", repr(&*val)),
        JVal::Unquote(val) => format!(",{}", repr(&*val)),
        JVal::UnquoteSplice(val) => format!(",@{}", repr(&*val)),
        JVal::Env(env) => format!("{}", env),
        JVal::Token(t) => format!("#[token {}]", t),
        JVal::TokenMatcher(tm) => format!("#[tokenmatcher {}]", tm),
    }
}
/// Renders a vector value as `#(elem1 elem2 ...)`.
fn repr_vec(v: &JVector) -> String {
    let parts: Vec<String> = v.borrow().iter().map(|item| repr(item)).collect();
    format!("#({})", parts.join(" "))
}
/// Renders a pair: proper lists become `(a b c)`, while an improper list
/// (one whose final cdr is not nil) is shown as a dotted pair `(a . b)`.
fn repr_pair(cell: &JPair) -> String {
    if let Ok(items) = cell.iter() {
        let inner: Vec<String> = items.map(|v| repr(&v)).collect();
        format!("({})", inner.join(" "))
    } else {
        // `iter()` failed, so this is not a proper list.
        format!("({} . {})", repr(&cell.car()), repr(&cell.cdr()))
    }
}
| 32.418182 | 77 | 0.415031 |
8f5b5e440c6caa004b6b0db7883e81cc5bcbed55
| 27,289 |
//! # Staking Module
//! Based on the [Scalable Reward Distribution](https://solmaz.io/2019/02/24/scalable-reward-changing/) algorithm.
#![deny(warnings)]
#![cfg_attr(test, feature(proc_macro_hygiene))]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
use codec::{Decode, Encode, EncodeLike};
use frame_support::{
dispatch::{DispatchError, DispatchResult},
traits::Get,
};
use primitives::TruncateFixedPointToInt;
use sp_arithmetic::{FixedPointNumber, FixedPointOperand};
use sp_runtime::traits::{CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, MaybeSerializeDeserialize, One, Zero};
use sp_std::{cmp, marker::PhantomData};
pub(crate) type SignedFixedPoint<T> = <T as Config>::SignedFixedPoint;
pub use pallet::*;
#[frame_support::pallet]
pub mod pallet {
use super::*;
use frame_support::pallet_prelude::*;
/// ## Configuration
/// The pallet's configuration trait.
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
/// The `Inner` type of the `SignedFixedPoint`.
type SignedInner: CheckedDiv + Ord + FixedPointOperand;
/// Signed fixed point type.
type SignedFixedPoint: FixedPointNumber<Inner = Self::SignedInner>
+ TruncateFixedPointToInt
+ Encode
+ EncodeLike
+ Decode;
/// The currency ID type.
type CurrencyId: Parameter + Member + Copy + MaybeSerializeDeserialize + Ord;
}
// The pallet's events
#[pallet::event]
#[pallet::generate_deposit(pub(crate) fn deposit_event)]
#[pallet::metadata(
T::CurrencyId = "CurrencyId",
T::AccountId = "AccountId",
T::SignedFixedPoint = "SignedFixedPoint",
T::Index = "Index"
)]
pub enum Event<T: Config> {
DepositStake(T::AccountId, T::AccountId, T::SignedFixedPoint),
DistributeReward(T::CurrencyId, T::AccountId, T::SignedFixedPoint),
WithdrawStake(T::AccountId, T::AccountId, T::SignedFixedPoint),
WithdrawReward(T::Index, T::CurrencyId, T::AccountId, T::AccountId, T::SignedFixedPoint),
ForceRefund(T::AccountId),
IncreaseNonce(T::AccountId, T::Index),
}
#[pallet::error]
pub enum Error<T> {
ArithmeticOverflow,
ArithmeticUnderflow,
TryIntoIntError,
InsufficientFunds,
}
#[pallet::hooks]
impl<T: Config> Hooks<T::BlockNumber> for Pallet<T> {}
/// The total stake - this will increase on deposit and decrease on withdrawal.
#[pallet::storage]
#[pallet::getter(fn total_stake_at_index)]
pub type TotalStake<T: Config> = StorageDoubleMap<
_,
Blake2_128Concat,
T::Index,
Blake2_128Concat,
T::AccountId,
SignedFixedPoint<T>,
ValueQuery,
>;
/// The total stake - this will increase on deposit and decrease on withdrawal or slashing.
#[pallet::storage]
#[pallet::getter(fn total_current_stake_at_index)]
pub type TotalCurrentStake<T: Config> = StorageDoubleMap<
_,
Blake2_128Concat,
T::Index,
Blake2_128Concat,
T::AccountId,
SignedFixedPoint<T>,
ValueQuery,
>;
/// The total unclaimed rewards distributed to this reward pool.
/// NOTE: this is currently only used for integration tests.
#[pallet::storage]
#[pallet::getter(fn total_rewards)]
pub type TotalRewards<T: Config> = StorageDoubleMap<
_,
Blake2_128Concat,
T::CurrencyId,
Blake2_128Concat,
(T::Index, T::AccountId),
SignedFixedPoint<T>,
ValueQuery,
>;
/// Used to compute the rewards for a participant's stake.
#[pallet::storage]
#[pallet::getter(fn reward_per_token)]
pub type RewardPerToken<T: Config> = StorageDoubleMap<
_,
Blake2_128Concat,
T::CurrencyId,
Blake2_128Concat,
(T::Index, T::AccountId),
SignedFixedPoint<T>,
ValueQuery,
>;
/// Used to compute the amount to slash from a participant's stake.
#[pallet::storage]
pub type SlashPerToken<T: Config> = StorageDoubleMap<
_,
Blake2_128Concat,
T::Index,
Blake2_128Concat,
T::AccountId,
SignedFixedPoint<T>,
ValueQuery,
>;
/// The stake of a participant in this reward pool.
#[pallet::storage]
pub type Stake<T: Config> = StorageDoubleMap<
_,
Blake2_128Concat,
T::Index,
Blake2_128Concat,
(T::AccountId, T::AccountId),
SignedFixedPoint<T>,
ValueQuery,
>;
/// Accounts for previous changes in stake size.
#[pallet::storage]
pub type RewardTally<T: Config> = StorageDoubleMap<
_,
Blake2_128Concat,
T::CurrencyId,
Blake2_128Concat,
(T::Index, T::AccountId, T::AccountId),
SignedFixedPoint<T>,
ValueQuery,
>;
/// Accounts for previous changes in stake size.
#[pallet::storage]
pub type SlashTally<T: Config> = StorageDoubleMap<
_,
Blake2_128Concat,
T::Index,
Blake2_128Concat,
(T::AccountId, T::AccountId),
SignedFixedPoint<T>,
ValueQuery,
>;
/// The nonce of the current staking pool, used in force refunds.
/// This is a strictly increasing value.
#[pallet::storage]
pub type Nonce<T: Config> = StorageMap<_, Blake2_128Concat, T::AccountId, T::Index, ValueQuery>;
#[pallet::pallet]
pub struct Pallet<T>(_);
// The pallet's dispatchable functions.
#[pallet::call]
impl<T: Config> Pallet<T> {}
}
/// Checked in-place addition on a storage entry: adds `$amount`, writes
/// the sum back, and evaluates to the new value. Fails the enclosing
/// function with `Error::<T>::ArithmeticOverflow` on overflow.
///
/// The first arm targets a single-key storage map; the second arm targets
/// a double map keyed by `($currency, $account)`.
macro_rules! checked_add_mut {
    ($storage:ty, $currency:expr, $amount:expr) => {
        <$storage>::mutate($currency, |value| {
            *value = value.checked_add($amount).ok_or(Error::<T>::ArithmeticOverflow)?;
            Ok::<_, Error<T>>(*value)
        })?
    };
    ($storage:ty, $currency:expr, $account:expr, $amount:expr) => {
        <$storage>::mutate($currency, $account, |value| {
            *value = value.checked_add($amount).ok_or(Error::<T>::ArithmeticOverflow)?;
            Ok::<_, Error<T>>(*value)
        })?
    };
}
/// Checked in-place subtraction on a storage entry: subtracts `$amount`,
/// writes the difference back, and evaluates to the new value. Fails the
/// enclosing function with `Error::<T>::ArithmeticUnderflow` on underflow.
///
/// Mirrors `checked_add_mut`: single-key map arm first, double-map arm second.
macro_rules! checked_sub_mut {
    ($storage:ty, $currency:expr, $amount:expr) => {
        <$storage>::mutate($currency, |value| {
            *value = value.checked_sub($amount).ok_or(Error::<T>::ArithmeticUnderflow)?;
            Ok::<_, Error<T>>(*value)
        })?
    };
    ($storage:ty, $currency:expr, $account:expr, $amount:expr) => {
        <$storage>::mutate($currency, $account, |value| {
            *value = value.checked_sub($amount).ok_or(Error::<T>::ArithmeticUnderflow)?;
            Ok::<_, Error<T>>(*value)
        })?
    };
}
// "Internal" functions, callable by code.
impl<T: Config> Pallet<T> {
    /// Get the stake associated with a vault / nominator.
    pub fn stake(vault_id: &T::AccountId, nominator_id: &T::AccountId) -> SignedFixedPoint<T> {
        let nonce = Self::nonce(vault_id);
        Self::stake_at_index(nonce, vault_id, nominator_id)
    }
    /// Get the stake associated with a vault / nominator at an explicit pool nonce.
    fn stake_at_index(nonce: T::Index, vault_id: &T::AccountId, nominator_id: &T::AccountId) -> SignedFixedPoint<T> {
        <Stake<T>>::get(nonce, (vault_id, nominator_id))
    }
    /// Get the total stake *after* slashing.
    pub fn total_current_stake(
        vault_id: &T::AccountId,
    ) -> Result<<SignedFixedPoint<T> as FixedPointNumber>::Inner, DispatchError> {
        let nonce = Self::nonce(vault_id);
        let total = Self::total_current_stake_at_index(nonce, vault_id);
        // `TryIntoIntError` if the fixed-point value cannot be represented
        // by the inner integer type.
        total.truncate_to_inner().ok_or(Error::<T>::TryIntoIntError.into())
    }
    /// Get the nominator's reward tally at an explicit pool nonce.
    fn reward_tally(
        nonce: T::Index,
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> SignedFixedPoint<T> {
        <RewardTally<T>>::get(currency_id, (nonce, vault_id, nominator_id))
    }
    /// Get the nominator's `slash_tally` for the staking pool.
    pub fn slash_tally(vault_id: &T::AccountId, nominator_id: &T::AccountId) -> SignedFixedPoint<T> {
        let nonce = Self::nonce(vault_id);
        Self::slash_tally_at_index(nonce, vault_id, nominator_id)
    }
    /// Get the nominator's `slash_tally` at an explicit pool nonce.
    fn slash_tally_at_index(
        nonce: T::Index,
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> SignedFixedPoint<T> {
        <SlashTally<T>>::get(nonce, (vault_id, nominator_id))
    }
    /// Get the newest nonce for the staking pool.
    pub fn nonce(vault_id: &T::AccountId) -> T::Index {
        <Nonce<T>>::get(vault_id)
    }
    /// Get the vault's `slash_per_token` for the staking pool.
    pub fn slash_per_token(vault_id: &T::AccountId) -> SignedFixedPoint<T> {
        let nonce = Self::nonce(vault_id);
        Self::slash_per_token_at_index(nonce, vault_id)
    }
    /// Get the vault's `slash_per_token` at an explicit pool nonce.
    fn slash_per_token_at_index(nonce: T::Index, vault_id: &T::AccountId) -> SignedFixedPoint<T> {
        <SlashPerToken<T>>::get(nonce, vault_id)
    }
    /// Deposit an `amount` of stake to the `vault_id` for the `nominator_id`.
    pub fn deposit_stake(
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
        amount: SignedFixedPoint<T>,
    ) -> DispatchResult {
        let nonce = Self::nonce(vault_id);
        // Settle any outstanding slash first so the tallies below are
        // computed against the post-slash stake.
        Self::apply_slash(vault_id, nominator_id)?;
        checked_add_mut!(Stake<T>, nonce, (vault_id, nominator_id), &amount);
        checked_add_mut!(TotalStake<T>, nonce, vault_id, &amount);
        checked_add_mut!(TotalCurrentStake<T>, nonce, vault_id, &amount);
        // Offset the slash tally by `slash_per_token * amount` so the newly
        // deposited stake is unaffected by slashes that happened before it.
        <SlashTally<T>>::mutate(nonce, (vault_id, nominator_id), |slash_tally| {
            let slash_per_token = Self::slash_per_token_at_index(nonce, vault_id);
            let slash_per_token_mul_amount = slash_per_token
                .checked_mul(&amount)
                .ok_or(Error::<T>::ArithmeticOverflow)?;
            *slash_tally = slash_tally
                .checked_add(&slash_per_token_mul_amount)
                .ok_or(Error::<T>::ArithmeticOverflow)?;
            Ok::<_, Error<T>>(())
        })?;
        // Likewise offset the reward tally so the new stake does not earn
        // rewards distributed before the deposit.
        <RewardTally<T>>::mutate(currency_id, (nonce, vault_id, nominator_id), |reward_tally| {
            let reward_per_token = Self::reward_per_token(currency_id, (nonce, vault_id));
            let reward_per_token_mul_amount = reward_per_token
                .checked_mul(&amount)
                .ok_or(Error::<T>::ArithmeticOverflow)?;
            *reward_tally = reward_tally
                .checked_add(&reward_per_token_mul_amount)
                .ok_or(Error::<T>::ArithmeticOverflow)?;
            Ok::<_, Error<T>>(())
        })?;
        Self::deposit_event(Event::<T>::DepositStake(vault_id.clone(), nominator_id.clone(), amount));
        Ok(())
    }
    /// Slash an `amount` of stake from the `vault_id`.
    ///
    /// Errors with `InsufficientFunds` if the pool has no stake at all.
    pub fn slash_stake(
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        amount: SignedFixedPoint<T>,
    ) -> DispatchResult {
        let nonce = Self::nonce(vault_id);
        let total_stake = Self::total_stake_at_index(nonce, vault_id);
        if amount.is_zero() {
            // Nothing to slash; avoid a division below.
            return Ok(());
        } else if total_stake.is_zero() {
            return Err(Error::<T>::InsufficientFunds.into());
        }
        // Spread the slash proportionally over all stake in the pool.
        let amount_div_total_stake = amount
            .checked_div(&total_stake)
            .ok_or(Error::<T>::ArithmeticUnderflow)?;
        checked_add_mut!(SlashPerToken<T>, nonce, vault_id, &amount_div_total_stake);
        checked_sub_mut!(TotalCurrentStake<T>, nonce, vault_id, &amount);
        // A slash means reward per token is no longer representative of the rewards
        // since `amount * reward_per_token` will be lost from the system. As such,
        // replenish rewards by the amount of reward lost with this slash
        Self::increase_rewards(
            nonce,
            currency_id,
            vault_id,
            Self::reward_per_token(currency_id, (nonce, vault_id))
                .checked_mul(&amount)
                .ok_or(Error::<T>::ArithmeticOverflow)?,
        )?;
        Ok(())
    }
    /// Compute the outstanding slash for a nominator:
    /// `stake * slash_per_token - slash_tally`.
    fn compute_amount_to_slash(
        stake: SignedFixedPoint<T>,
        slash_per_token: SignedFixedPoint<T>,
        slash_tally: SignedFixedPoint<T>,
    ) -> Result<SignedFixedPoint<T>, DispatchError> {
        let stake_mul_slash_per_token = stake
            .checked_mul(&slash_per_token)
            .ok_or(Error::<T>::ArithmeticOverflow)?;
        let to_slash = stake_mul_slash_per_token
            .checked_sub(&slash_tally)
            .ok_or(Error::<T>::ArithmeticUnderflow)?;
        Ok(to_slash)
    }
    /// Delegates to `compute_stake_at_index` with the current nonce.
    pub fn compute_stake(
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> Result<<SignedFixedPoint<T> as FixedPointNumber>::Inner, DispatchError> {
        let nonce = Self::nonce(vault_id);
        Self::compute_stake_at_index(nonce, vault_id, nominator_id)
    }
    /// Compute the stake in `vault_id` owned by `nominator_id`.
    ///
    /// Returns the post-slash stake, clamped at zero.
    pub fn compute_stake_at_index(
        nonce: T::Index,
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> Result<<SignedFixedPoint<T> as FixedPointNumber>::Inner, DispatchError> {
        let stake = Self::stake_at_index(nonce, vault_id, nominator_id);
        let slash_per_token = Self::slash_per_token_at_index(nonce, vault_id);
        let slash_tally = Self::slash_tally_at_index(nonce, vault_id, nominator_id);
        let to_slash = Self::compute_amount_to_slash(stake, slash_per_token, slash_tally)?;
        let stake_sub_to_slash = stake
            .checked_sub(&to_slash)
            .ok_or(Error::<T>::ArithmeticUnderflow)?
            .truncate_to_inner()
            .ok_or(Error::<T>::TryIntoIntError)?;
        Ok(cmp::max(Zero::zero(), stake_sub_to_slash))
    }
    /// Increase `reward_per_token` by `reward / total_current_stake`.
    ///
    /// No-op (returning zero) when the pool holds no current stake.
    fn increase_rewards(
        nonce: T::Index,
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        reward: SignedFixedPoint<T>,
    ) -> Result<SignedFixedPoint<T>, DispatchError> {
        let total_current_stake = Self::total_current_stake_at_index(nonce, vault_id);
        if total_current_stake.is_zero() {
            return Ok(Zero::zero());
        }
        let reward_div_total_current_stake = reward
            .checked_div(&total_current_stake)
            .ok_or(Error::<T>::ArithmeticUnderflow)?;
        checked_add_mut!(
            RewardPerToken<T>,
            currency_id,
            (nonce, vault_id),
            &reward_div_total_current_stake
        );
        Ok(reward)
    }
    /// Distribute the `reward` to all participants.
    pub fn distribute_reward(
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        reward: SignedFixedPoint<T>,
    ) -> Result<SignedFixedPoint<T>, DispatchError> {
        let nonce = Self::nonce(vault_id);
        let reward = Self::increase_rewards(nonce, currency_id, vault_id, reward)?;
        if reward.is_zero() {
            return Ok(Zero::zero());
        }
        checked_add_mut!(TotalRewards<T>, currency_id, (nonce, vault_id), &reward);
        Self::deposit_event(Event::<T>::DistributeReward(currency_id, vault_id.clone(), reward));
        Ok(reward)
    }
    /// Delegates to `compute_reward_at_index` with the current nonce.
    pub fn compute_reward(
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> Result<<SignedFixedPoint<T> as FixedPointNumber>::Inner, DispatchError> {
        let nonce = Self::nonce(vault_id);
        Self::compute_reward_at_index(nonce, currency_id, vault_id, nominator_id)
    }
    /// Compute the expected reward for `nominator_id` who is nominating `vault_id`.
    ///
    /// Computed as `stake * reward_per_token - reward_tally`, clamped at zero.
    pub fn compute_reward_at_index(
        nonce: T::Index,
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> Result<<SignedFixedPoint<T> as FixedPointNumber>::Inner, DispatchError> {
        let stake =
            SignedFixedPoint::<T>::checked_from_integer(Self::compute_stake_at_index(nonce, vault_id, nominator_id)?)
                .ok_or(Error::<T>::TryIntoIntError)?;
        let reward_per_token = Self::reward_per_token(currency_id, (nonce, vault_id));
        // FIXME: this can easily overflow with large numbers
        let stake_mul_reward_per_token = stake
            .checked_mul(&reward_per_token)
            .ok_or(Error::<T>::ArithmeticOverflow)?;
        let reward_tally = Self::reward_tally(nonce, currency_id, vault_id, nominator_id);
        // TODO: this can probably be saturated
        let reward = stake_mul_reward_per_token
            .checked_sub(&reward_tally)
            .ok_or(Error::<T>::ArithmeticUnderflow)?
            .truncate_to_inner()
            .ok_or(Error::<T>::TryIntoIntError)?;
        Ok(cmp::max(Zero::zero(), reward))
    }
    /// Apply the nominator's outstanding slash: reduce their (and the total)
    /// stake by the pending amount and reset the slash tally accordingly.
    /// Returns the remaining stake.
    fn apply_slash(vault_id: &T::AccountId, nominator_id: &T::AccountId) -> Result<SignedFixedPoint<T>, DispatchError> {
        let nonce = Self::nonce(vault_id);
        let stake = Self::stake_at_index(nonce, vault_id, nominator_id);
        let slash_per_token = Self::slash_per_token_at_index(nonce, vault_id);
        let slash_tally = Self::slash_tally_at_index(nonce, vault_id, nominator_id);
        let to_slash = Self::compute_amount_to_slash(stake, slash_per_token, slash_tally)?;
        checked_sub_mut!(TotalStake<T>, nonce, vault_id, &to_slash);
        let stake = checked_sub_mut!(Stake<T>, nonce, (vault_id, nominator_id), &to_slash);
        // Reset the tally to `stake * slash_per_token` so the pending slash
        // is now zero for this nominator.
        <SlashTally<T>>::insert(
            nonce,
            (vault_id, nominator_id),
            stake
                .checked_mul(&slash_per_token)
                .ok_or(Error::<T>::ArithmeticOverflow)?,
        );
        return Ok(stake);
    }
    /// Withdraw an `amount` of stake from the `vault_id` for the `nominator_id`.
    ///
    /// `index` selects an older pool by nonce; defaults to the current pool.
    pub fn withdraw_stake(
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
        amount: SignedFixedPoint<T>,
        index: Option<T::Index>,
    ) -> DispatchResult {
        let nonce = index.unwrap_or(Self::nonce(vault_id));
        // Settle the pending slash first; `stake` is the post-slash balance.
        let stake = Self::apply_slash(vault_id, nominator_id)?;
        if amount.is_zero() {
            return Ok(());
        } else if amount > stake {
            return Err(Error::<T>::InsufficientFunds.into());
        }
        checked_sub_mut!(Stake<T>, nonce, (vault_id, nominator_id), &amount);
        checked_sub_mut!(TotalStake<T>, nonce, vault_id, &amount);
        checked_sub_mut!(TotalCurrentStake<T>, nonce, vault_id, &amount);
        // Mirror of the tally adjustments in `deposit_stake`, in reverse.
        <SlashTally<T>>::mutate(nonce, (vault_id, nominator_id), |slash_tally| {
            let slash_per_token = Self::slash_per_token_at_index(nonce, vault_id);
            let slash_per_token_mul_amount = slash_per_token
                .checked_mul(&amount)
                .ok_or(Error::<T>::ArithmeticOverflow)?;
            *slash_tally = slash_tally
                .checked_sub(&slash_per_token_mul_amount)
                .ok_or(Error::<T>::ArithmeticUnderflow)?;
            Ok::<_, Error<T>>(())
        })?;
        <RewardTally<T>>::mutate(currency_id, (nonce, vault_id, nominator_id), |reward_tally| {
            let reward_per_token = Self::reward_per_token(currency_id, (nonce, vault_id));
            let reward_per_token_mul_amount = reward_per_token
                .checked_mul(&amount)
                .ok_or(Error::<T>::ArithmeticOverflow)?;
            *reward_tally = reward_tally
                .checked_sub(&reward_per_token_mul_amount)
                .ok_or(Error::<T>::ArithmeticUnderflow)?;
            Ok::<_, Error<T>>(())
        })?;
        Self::deposit_event(Event::<T>::WithdrawStake(
            vault_id.clone(),
            nominator_id.clone(),
            amount,
        ));
        Ok(())
    }
    /// Delegates to `withdraw_reward_at_index` with the current nonce.
    pub fn withdraw_reward(
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> Result<<SignedFixedPoint<T> as FixedPointNumber>::Inner, DispatchError> {
        let nonce = Self::nonce(vault_id);
        Self::withdraw_reward_at_index(nonce, currency_id, vault_id, nominator_id)
    }
    /// Withdraw all rewards earned by `vault_id` for the `nominator_id`.
    pub fn withdraw_reward_at_index(
        nonce: T::Index,
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> Result<<SignedFixedPoint<T> as FixedPointNumber>::Inner, DispatchError> {
        let reward = Self::compute_reward_at_index(nonce, currency_id, vault_id, nominator_id)?;
        let reward_as_fixed = SignedFixedPoint::<T>::checked_from_integer(reward).ok_or(Error::<T>::TryIntoIntError)?;
        checked_sub_mut!(TotalRewards<T>, currency_id, (nonce, vault_id), &reward_as_fixed);
        // Reset the tally to `stake * reward_per_token` so the pending
        // reward is now zero for this nominator.
        let stake = Self::stake_at_index(nonce, vault_id, nominator_id);
        let reward_per_token = Self::reward_per_token(currency_id, (nonce, vault_id));
        <RewardTally<T>>::insert(
            currency_id,
            (nonce, vault_id, nominator_id),
            stake
                .checked_mul(&reward_per_token)
                .ok_or(Error::<T>::ArithmeticOverflow)?,
        );
        Self::deposit_event(Event::<T>::WithdrawReward(
            nonce,
            currency_id,
            vault_id.clone(),
            nominator_id.clone(),
            reward_as_fixed,
        ));
        Ok(reward)
    }
    /// Force refund the entire nomination to `vault_id` by depositing it as reward. It
    /// returns the amount of collateral that is refunded
    pub fn force_refund(
        currency_id: T::CurrencyId,
        vault_id: &T::AccountId,
    ) -> Result<<SignedFixedPoint<T> as FixedPointNumber>::Inner, DispatchError> {
        let nonce = Self::nonce(vault_id);
        let total_current_stake = Self::total_current_stake_at_index(nonce, vault_id);
        // only withdraw the vault's stake from the current pool
        // nominators must withdraw manually using the nonce
        let stake =
            SignedFixedPoint::<T>::checked_from_integer(Self::compute_stake_at_index(nonce, vault_id, vault_id)?)
                .ok_or(Error::<T>::TryIntoIntError)?;
        Self::withdraw_stake(currency_id, vault_id, vault_id, stake, Some(nonce))?;
        Self::increment_nonce(vault_id)?;
        // only deposit vault stake after increasing the nonce
        Self::deposit_stake(currency_id, vault_id, vault_id, stake)?;
        Self::deposit_event(Event::<T>::ForceRefund(vault_id.clone()));
        // The refund is everything in the old pool except the vault's own
        // stake, which was moved into the new pool.
        let refunded_collateral = total_current_stake
            .checked_sub(&stake)
            .ok_or(Error::<T>::ArithmeticUnderflow)?
            .truncate_to_inner()
            .ok_or(Error::<T>::TryIntoIntError)?;
        Ok(refunded_collateral)
    }
    /// Advance the vault's pool nonce by one, emitting `IncreaseNonce`.
    pub fn increment_nonce(vault_id: &T::AccountId) -> DispatchResult {
        <Nonce<T>>::mutate(vault_id, |nonce| {
            *nonce = nonce
                .checked_add(&T::Index::one())
                .ok_or(Error::<T>::ArithmeticOverflow)?;
            Ok::<_, Error<T>>(())
        })?;
        Self::deposit_event(Event::<T>::IncreaseNonce(vault_id.clone(), Self::nonce(vault_id)));
        Ok(())
    }
}
/// Staking interface exposed to other pallets, generic over the account
/// identifier and the pool nonce (`Index`) types.
pub trait Staking<AccountId, Index> {
    /// Signed fixed point type.
    type SignedFixedPoint: FixedPointNumber;
    /// Deposit an `amount` of stake to the `vault_id` for the `nominator_id`.
    fn deposit_stake(vault_id: &AccountId, nominator_id: &AccountId, amount: Self::SignedFixedPoint) -> DispatchResult;
    /// Slash an `amount` of stake from the `vault_id`.
    fn slash_stake(vault_id: &AccountId, amount: Self::SignedFixedPoint) -> DispatchResult;
    /// Compute the stake in `vault_id` owned by `nominator_id`.
    fn compute_stake(
        vault_id: &AccountId,
        nominator_id: &AccountId,
    ) -> Result<<Self::SignedFixedPoint as FixedPointNumber>::Inner, DispatchError>;
    /// Distribute the `reward` to all participants.
    fn distribute_reward(
        vault_id: &AccountId,
        reward: Self::SignedFixedPoint,
    ) -> Result<Self::SignedFixedPoint, DispatchError>;
    /// Compute the expected reward for `nominator_id` who is nominating `vault_id`.
    fn compute_reward(
        vault_id: &AccountId,
        nominator_id: &AccountId,
    ) -> Result<<Self::SignedFixedPoint as FixedPointNumber>::Inner, DispatchError>;
    /// Withdraw an `amount` of stake from the `vault_id` for the `nominator_id`.
    fn withdraw_stake(vault_id: &AccountId, nominator_id: &AccountId, amount: Self::SignedFixedPoint)
        -> DispatchResult;
    /// Withdraw all rewards earned by `vault_id` for the `nominator_id`.
    ///
    /// `index` optionally selects an older pool nonce.
    fn withdraw_reward(
        vault_id: &AccountId,
        nominator_id: &AccountId,
        index: Option<Index>,
    ) -> Result<<Self::SignedFixedPoint as FixedPointNumber>::Inner, DispatchError>;
}
/// Adapter implementing [`Staking`] for a single currency, fixed at the
/// type level via `GetCurrencyId`. Every method delegates to the pallet
/// with `GetCurrencyId::get()` as the currency.
pub struct StakingCurrencyAdapter<T, GetCurrencyId>(PhantomData<(T, GetCurrencyId)>);
impl<T, GetCurrencyId> Staking<T::AccountId, T::Index> for StakingCurrencyAdapter<T, GetCurrencyId>
where
    T: Config,
    GetCurrencyId: Get<T::CurrencyId>,
{
    type SignedFixedPoint = SignedFixedPoint<T>;
    fn deposit_stake(
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
        amount: Self::SignedFixedPoint,
    ) -> DispatchResult {
        Pallet::<T>::deposit_stake(GetCurrencyId::get(), vault_id, nominator_id, amount)
    }
    fn slash_stake(vault_id: &T::AccountId, amount: Self::SignedFixedPoint) -> DispatchResult {
        Pallet::<T>::slash_stake(GetCurrencyId::get(), vault_id, amount)
    }
    fn compute_stake(
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> Result<<Self::SignedFixedPoint as FixedPointNumber>::Inner, DispatchError> {
        Pallet::<T>::compute_stake(vault_id, nominator_id)
    }
    fn distribute_reward(
        vault_id: &T::AccountId,
        reward: Self::SignedFixedPoint,
    ) -> Result<Self::SignedFixedPoint, DispatchError> {
        Pallet::<T>::distribute_reward(GetCurrencyId::get(), vault_id, reward)
    }
    fn compute_reward(
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
    ) -> Result<<Self::SignedFixedPoint as FixedPointNumber>::Inner, DispatchError> {
        Pallet::<T>::compute_reward(GetCurrencyId::get(), vault_id, nominator_id)
    }
    fn withdraw_stake(
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
        amount: Self::SignedFixedPoint,
    ) -> DispatchResult {
        // `None` index: always withdraw from the current pool.
        Pallet::<T>::withdraw_stake(GetCurrencyId::get(), vault_id, nominator_id, amount, None)
    }
    fn withdraw_reward(
        vault_id: &T::AccountId,
        nominator_id: &T::AccountId,
        index: Option<T::Index>,
    ) -> Result<<Self::SignedFixedPoint as FixedPointNumber>::Inner, DispatchError> {
        let nonce = index.unwrap_or(Pallet::<T>::nonce(vault_id));
        Pallet::<T>::withdraw_reward_at_index(nonce, GetCurrencyId::get(), vault_id, nominator_id)
    }
}
| 37.127891 | 120 | 0.624537 |
5dad2babfaf175e53ce1fee4c7be3d5333686806
| 10,991 |
//! A concurrent, lock-free, FIFO list.
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize};
use crate::loom::thread;
use crate::sync::mpsc::block::{self, Block};
use std::fmt;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
/// List queue transmit handle
pub(crate) struct Tx<T> {
    /// Tail in the `Block` mpmc list.
    block_tail: AtomicPtr<Block<T>>,
    /// Position to push the next message. This references a block and an
    /// offset into the block.
    tail_position: AtomicUsize,
}
/// List queue receive handle
pub(crate) struct Rx<T> {
    /// Pointer to the block being processed
    head: NonNull<Block<T>>,
    /// Next slot index to process
    index: usize,
    /// Pointer to the next block pending release. Trails `head`; blocks
    /// between `free_head` and `head` are fully consumed but not yet
    /// returned to the sender side (see `reclaim_blocks`).
    free_head: NonNull<Block<T>>,
}
/// Creates a new list queue, returning the transmit and receive halves.
///
/// Both halves initially reference the same freshly allocated block.
pub(crate) fn channel<T>() -> (Tx<T>, Rx<T>) {
    // Allocate the first block and leak it to a raw pointer; it is shared
    // between the sender and receiver sides.
    let first_block = Box::into_raw(Box::new(Block::new(0)));
    let head = NonNull::new(first_block).unwrap();
    (
        Tx {
            block_tail: AtomicPtr::new(first_block),
            tail_position: AtomicUsize::new(0),
        },
        Rx {
            head,
            index: 0,
            free_head: head,
        },
    )
}
impl<T> Tx<T> {
    /// Pushes a value into the list.
    pub(crate) fn push(&self, value: T) {
        // First, claim a slot for the value. `Acquire` is used here to
        // synchronize with the `fetch_add` in `reclaim_blocks`.
        let slot_index = self.tail_position.fetch_add(1, Acquire);
        // Load the current block and write the value
        let block = self.find_block(slot_index);
        unsafe {
            // Write the value to the block
            block.as_ref().write(slot_index, value);
        }
    }
    /// Closes the send half of the list
    ///
    /// Similar process as pushing a value, but instead of writing the value &
    /// setting the ready flag, the TX_CLOSED flag is set on the block.
    pub(crate) fn close(&self) {
        // First, claim a slot for the value. This is the last slot that will be
        // claimed.
        let slot_index = self.tail_position.fetch_add(1, Acquire);
        let block = self.find_block(slot_index);
        unsafe { block.as_ref().tx_close() }
    }
    /// Returns the block containing the slot at `slot_index`, walking the
    /// linked list from `block_tail` and growing it as needed. Also
    /// opportunistically advances `block_tail` past finalized blocks.
    fn find_block(&self, slot_index: usize) -> NonNull<Block<T>> {
        // The start index of the block that contains `index`.
        let start_index = block::start_index(slot_index);
        // The index offset into the block
        let offset = block::offset(slot_index);
        // Load the current head of the block
        let mut block_ptr = self.block_tail.load(Acquire);
        let block = unsafe { &*block_ptr };
        // Calculate the distance between the tail ptr and the target block
        let distance = block.distance(start_index);
        // Decide if this call to `find_block` should attempt to update the
        // `block_tail` pointer.
        //
        // Updating `block_tail` is not always performed in order to reduce
        // contention.
        //
        // When set, as the routine walks the linked list, it attempts to update
        // `block_tail`. If the update cannot be performed, `try_updating_tail`
        // is unset.
        let mut try_updating_tail = distance > offset;
        // Walk the linked list of blocks until the block with `start_index` is
        // found.
        loop {
            let block = unsafe { &(*block_ptr) };
            if block.is_at_index(start_index) {
                return unsafe { NonNull::new_unchecked(block_ptr) };
            }
            let next_block = block
                .load_next(Acquire)
                // There is no allocated next block, grow the linked list.
                .unwrap_or_else(|| block.grow());
            // If the block is **not** final, then the tail pointer cannot be
            // advanced any more.
            try_updating_tail &= block.is_final();
            if try_updating_tail {
                // Advancing `block_tail` must happen when walking the linked
                // list. `block_tail` may not advance passed any blocks that are
                // not "final". At the point a block is finalized, it is unknown
                // if there are any prior blocks that are unfinalized, which
                // makes it impossible to advance `block_tail`.
                //
                // While walking the linked list, `block_tail` can be advanced
                // as long as finalized blocks are traversed.
                //
                // Release ordering is used to ensure that any subsequent reads
                // are able to see the memory pointed to by `block_tail`.
                //
                // Acquire is not needed as any "actual" value is not accessed.
                // At this point, the linked list is walked to acquire blocks.
                if self
                    .block_tail
                    .compare_exchange(block_ptr, next_block.as_ptr(), Release, Relaxed)
                    .is_ok()
                {
                    // Synchronize with any senders
                    let tail_position = self.tail_position.fetch_add(0, Release);
                    unsafe {
                        block.tx_release(tail_position);
                    }
                } else {
                    // A concurrent sender is also working on advancing
                    // `block_tail` and this thread is falling behind.
                    //
                    // Stop trying to advance the tail pointer
                    try_updating_tail = false;
                }
            }
            block_ptr = next_block.as_ptr();
            thread::yield_now();
        }
    }
    /// Reclaims `block`, either re-inserting it at the end of the linked
    /// list for reuse or freeing it.
    ///
    /// # Safety
    ///
    /// The caller must have exclusive ownership of `block`, i.e. it must
    /// already be unlinked from the list (see the comment below).
    pub(crate) unsafe fn reclaim_block(&self, mut block: NonNull<Block<T>>) {
        // The block has been removed from the linked list and ownership
        // is reclaimed.
        //
        // Before dropping the block, see if it can be reused by
        // inserting it back at the end of the linked list.
        //
        // First, reset the data
        block.as_mut().reclaim();
        let mut reused = false;
        // Attempt to insert the block at the end
        //
        // Walk at most three times
        //
        let curr_ptr = self.block_tail.load(Acquire);
        // The pointer can never be null
        debug_assert!(!curr_ptr.is_null());
        let mut curr = NonNull::new_unchecked(curr_ptr);
        // TODO: Unify this logic with Block::grow
        for _ in 0..3 {
            match curr.as_ref().try_push(&mut block, AcqRel, Acquire) {
                Ok(_) => {
                    reused = true;
                    break;
                }
                Err(next) => {
                    curr = next;
                }
            }
        }
        // Re-insertion failed: drop the block for real.
        if !reused {
            let _ = Box::from_raw(block.as_ptr());
        }
    }
}
impl<T> fmt::Debug for Tx<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Relaxed loads are sufficient here: this output is purely
        // diagnostic and makes no synchronization guarantees.
        let tail_ptr = self.block_tail.load(Relaxed);
        let tail_pos = self.tail_position.load(Relaxed);
        let mut builder = fmt.debug_struct("Tx");
        builder.field("block_tail", &tail_ptr);
        builder.field("tail_position", &tail_pos);
        builder.finish()
    }
}
impl<T> Rx<T> {
    /// Pops the next value off the queue
    pub(crate) fn pop(&mut self, tx: &Tx<T>) -> Option<block::Read<T>> {
        // Advance `head`, if needed
        if !self.try_advancing_head() {
            return None;
        }
        // Return fully-consumed blocks to the sender for reuse.
        self.reclaim_blocks(tx);
        unsafe {
            let block = self.head.as_ref();
            let ret = block.read(self.index);
            // Only consume the slot when an actual value was read.
            if let Some(block::Read::Value(..)) = ret {
                self.index = self.index.wrapping_add(1);
            }
            ret
        }
    }
    /// Tries advancing the block pointer to the block referenced by `self.index`.
    ///
    /// Returns `true` if successful, `false` if there is no next block to load.
    fn try_advancing_head(&mut self) -> bool {
        let block_index = block::start_index(self.index);
        loop {
            let next_block = {
                let block = unsafe { self.head.as_ref() };
                if block.is_at_index(block_index) {
                    return true;
                }
                block.load_next(Acquire)
            };
            let next_block = match next_block {
                Some(next_block) => next_block,
                None => {
                    return false;
                }
            };
            self.head = next_block;
            thread::yield_now();
        }
    }
    /// Releases fully-consumed blocks (between `free_head` and `head`)
    /// back to the sender side for reuse.
    fn reclaim_blocks(&mut self, tx: &Tx<T>) {
        while self.free_head != self.head {
            unsafe {
                // Get a handle to the block that will be freed and update
                // `free_head` to point to the next block.
                let block = self.free_head;
                let observed_tail_position = block.as_ref().observed_tail_position();
                let required_index = match observed_tail_position {
                    Some(i) => i,
                    None => return,
                };
                // Stop once we reach a block we have not fully consumed.
                if required_index > self.index {
                    return;
                }
                // We may read the next pointer with `Relaxed` ordering as it is
                // guaranteed that the `reclaim_blocks` routine trails the `recv`
                // routine. Any memory accessed by `reclaim_blocks` has already
                // been acquired by `recv`.
                let next_block = block.as_ref().load_next(Relaxed);
                // Update the free list head
                self.free_head = next_block.unwrap();
                // Push the emptied block onto the back of the queue, making it
                // available to senders.
                tx.reclaim_block(block);
            }
            thread::yield_now();
        }
    }
    /// Effectively `Drop` all the blocks. Should only be called once, when
    /// the list is dropping.
    pub(super) unsafe fn free_blocks(&mut self) {
        debug_assert_ne!(self.free_head, NonNull::dangling());
        let mut cur = Some(self.free_head);
        #[cfg(debug_assertions)]
        {
            // to trigger the debug assert above so as to catch that we
            // don't call `free_blocks` more than once.
            self.free_head = NonNull::dangling();
            self.head = NonNull::dangling();
        }
        while let Some(block) = cur {
            cur = block.as_ref().load_next(Relaxed);
            drop(Box::from_raw(block.as_ptr()));
        }
    }
}
impl<T> fmt::Debug for Rx<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Rx")
.field("head", &self.head)
.field("index", &self.index)
.field("free_head", &self.free_head)
.finish()
}
}
| 32.326471 | 87 | 0.540351 |
712842b46d08718769924ec8c79e2021863d774b
| 646 |
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use isin;
// const ISIN_STRINGS: [&str; 3] = [
// "AA0000000005", // The least taxing input for the functional style because digit expansion is rarely needed
// "US0378331005", // A typical input (this is the payload for the Apple (AAPL) commons stock ISIN)
// "ZZZZZZZZZZZ5", // The most taxing input for the functional style because digit expansion is maximized
// ];
/// Benchmarks `isin::parse` on a typical input (the Apple common stock
/// ISIN). `black_box` prevents the input from being constant-folded away.
fn bench_parses(c: &mut Criterion) {
    c.bench_function("x", |b| b.iter(|| isin::parse(black_box("US0378331005"))));
}
criterion_group!(benches, bench_parses);
criterion_main!(benches);
| 38 | 114 | 0.716718 |
72043938f53511f0e441e6c2b8bd6e1045d376a3
| 1,576 |
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This is a test that the `#![feature(nll)]` opt-in overrides the
// migration mode. The intention here is to emulate the goal behavior
// that `--edition 2018` effects on borrowck (modeled here by `-Z
// borrowck=migrate`) are themselves overridden by the
// `#![feature(nll)]` opt-in.
//
// Therefore, for developer convenience, under `#[feature(nll)]` the
// NLL checks will be emitted as errors *even* in the presence of `-Z
// borrowck=migrate`.
// revisions: zflag edition
// [zflag]compile-flags: -Z borrowck=migrate
// [edition]compile-flags: --edition 2018
#![feature(nll)]
// Match on an `Option<&i32>`; the guard moves `foo` (a `ref mut` binding
// into the matched value) into a closure and calls `take()` on it, which
// the borrow checker must reject with E0507 (see the error annotations
// below — their `~^`/`~^^` markers are line-relative and must stay
// directly beneath the offending expression).
fn main() {
    match Some(&4) {
        None => {},
        ref mut foo
            if {
                (|| { let bar = foo; bar.take() })();
                //[zflag]~^ ERROR cannot move out of borrowed content [E0507]
                //[edition]~^^ ERROR cannot move out of borrowed content [E0507]
                false
            } => {},
        Some(ref _s) => println!("Note this arm is bogus; the `Some` became `None` in the guard."),
        _ => println!("Here is some supposedly unreachable code."),
    }
}
| 38.439024 | 99 | 0.640228 |
1c4d53701734676556b253c0b66c147d2f419ee2
| 2,393 |
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![cfg(test)]
use {
anyhow::Error,
fuchsia_async as fasync,
fuchsia_component::{client, fuchsia_single_component_package_url, server::ServiceFs},
fuchsia_zircon as zx,
futures::prelude::*,
};
/// Component URL of the `network-speed-test` package under test.
const NETWORK_SPEED_TEST_URL: &'static str =
    fuchsia_single_component_package_url!("network-speed-test");
/// Spawns a task that serves the given `fuchsia.net.http.Loader` request
/// stream with canned responses: every `Fetch` gets an HTTP 200 whose body
/// socket is fed 100 chunks of 100 bytes each. Non-`Fetch` requests fail
/// the task.
fn start_fake_loader(stream: fidl_fuchsia_net_http::LoaderRequestStream) {
    fasync::spawn(async move {
        stream
            .err_into()
            .try_for_each_concurrent(None, |message| async move {
                match message {
                    fidl_fuchsia_net_http::LoaderRequest::Fetch { responder, .. } => {
                        // Body is delivered over a socket pair: we keep the
                        // write end (`tx`) and hand the read end to the client.
                        let (tx, rx) = zx::Socket::create(zx::SocketOpts::STREAM)?;
                        responder.send(fidl_fuchsia_net_http::Response {
                            error: None,
                            body: Some(rx),
                            final_url: Some("http://www.test.com".as_bytes().to_vec()),
                            status_code: Some(200),
                            status_line: Some("ok".as_bytes().to_vec()),
                            headers: None,
                            redirect: None,
                        })?;
                        // Stream the fake body; write errors are ignored
                        // (the client may close the socket early).
                        fasync::spawn(async move {
                            for i in 0..100 {
                                let _ =
                                    tx.write(&std::iter::repeat(i).take(100).collect::<Vec<u8>>());
                            }
                        });
                        Ok(())
                    }
                    _ => Err(anyhow::anyhow!("Unhandled")),
                }
            })
            .await
            .unwrap()
    });
}
/// Launches the network-speed-test component inside a nested environment
/// whose HTTP loader is the fake above, and asserts that the component
/// exits successfully.
#[fasync::run_singlethreaded(test)]
async fn test_run_network_speed_test() -> Result<(), Error> {
    let mut fs = ServiceFs::new();
    fs.add_fidl_service(start_fake_loader);
    let env = fs.create_salted_nested_environment("network-speed-test_integration_test_env")?;
    fasync::spawn(fs.collect());
    assert!(client::AppBuilder::new(NETWORK_SPEED_TEST_URL)
        .arg("-u")
        .arg("http://www.test.com")
        .status(env.launcher())?
        .await?
        .success());
    Ok(())
}
| 33.704225 | 99 | 0.506059 |
bf24045387187265fc0ddacac93488593fb2084f
| 28,611 |
// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::account_address::AccountAddress;
use crate::block_metadata::BlockMetadata;
use crate::genesis_config::{ChainId, ConsensusStrategy};
use crate::language_storage::CORE_CODE_ADDRESS;
use crate::transaction::SignedUserTransaction;
use crate::U256;
use bcs_ext::Sample;
use serde::de::Error;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub use starcoin_accumulator::accumulator_info::AccumulatorInfo;
use starcoin_crypto::hash::{ACCUMULATOR_PLACEHOLDER_HASH, SPARSE_MERKLE_PLACEHOLDER_HASH};
use starcoin_crypto::{
hash::{CryptoHash, CryptoHasher, PlainCryptoHash},
HashValue,
};
use starcoin_vm_types::account_config::genesis_address;
use starcoin_vm_types::transaction::authenticator::AuthenticationKey;
use std::fmt::Formatter;
/// Type for block number.
pub type BlockNumber = u64;
/// Type for block header extra
///
/// A fixed 4-byte field carried in the block header; serialized as hex in
/// human-readable formats (see the `Serialize`/`Deserialize` impls below).
#[derive(Clone, Default, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BlockHeaderExtra([u8; 4]);
impl BlockHeaderExtra {
pub fn new(extra: [u8; 4]) -> Self {
Self(extra)
}
pub fn to_vec(&self) -> Vec<u8> {
self.0.to_vec()
}
pub fn as_slice(&self) -> &[u8; 4] {
&self.0
}
}
impl std::fmt::Display for BlockHeaderExtra {
    /// Displays the extra field using the `Debug` rendering of its 4-byte
    /// array, e.g. `[0, 0, 0, 0]`.
    fn fmt(&self, formatter: &mut Formatter<'_>) -> std::fmt::Result {
        let bytes: [u8; 4] = self.0;
        write!(formatter, "{:?}", bytes)
    }
}
impl<'de> Deserialize<'de> for BlockHeaderExtra {
    /// Deserializes from a hex string of exactly 8 hex characters (with an
    /// optional `0x` prefix) for human-readable formats, or from a 4-byte
    /// newtype struct for binary formats — mirroring the `Serialize` impl.
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let s = <String>::deserialize(deserializer)?;
            // Accept both "0xdeadbeef" and "deadbeef".
            let literal = s.strip_prefix("0x").unwrap_or(&s);
            if literal.len() != 8 {
                return Err(D::Error::custom("Invalid block header extra len"));
            }
            let result = hex::decode(literal).map_err(D::Error::custom)?;
            // Defensive re-check: 8 hex chars decode to exactly 4 bytes.
            if result.len() != 4 {
                return Err(D::Error::custom("Invalid block header extra len"));
            }
            let mut extra = [0u8; 4];
            extra.copy_from_slice(&result);
            Ok(BlockHeaderExtra::new(extra))
        } else {
            // Binary formats use a newtype struct wrapper so the wire
            // format matches `serialize_newtype_struct` on the serializer.
            #[derive(::serde::Deserialize)]
            #[serde(rename = "BlockHeaderExtra")]
            struct Value([u8; 4]);
            let value = Value::deserialize(deserializer)?;
            Ok(BlockHeaderExtra::new(value.0))
        }
    }
}
impl Serialize for BlockHeaderExtra {
    /// Serializes as a `0x`-prefixed lowercase hex string for
    /// human-readable formats, or as a 4-byte newtype struct for binary
    /// formats — mirroring the `Deserialize` impl.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            format!("0x{}", hex::encode(self.0)).serialize(serializer)
        } else {
            serializer.serialize_newtype_struct("BlockHeaderExtra", &self.0)
        }
    }
}
/// A block's hash paired with its height.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Deserialize, Serialize)]
pub struct BlockIdAndNumber {
    /// The block's hash.
    pub id: HashValue,
    /// The block's height.
    pub number: BlockNumber,
}
impl BlockIdAndNumber {
pub fn new(id: HashValue, number: BlockNumber) -> Self {
Self { id, number }
}
pub fn id(&self) -> HashValue {
self.id
}
pub fn number(&self) -> BlockNumber {
self.number
}
}
impl From<BlockHeader> for BlockIdAndNumber {
    /// Extracts the (id, number) pair from a full header.
    fn from(header: BlockHeader) -> Self {
        let id = header.id();
        let number = header.number();
        Self { id, number }
    }
}
/// block timestamp allowed future times
pub const ALLOWED_FUTURE_BLOCKTIME: u64 = 30000; // 30 seconds; value appears to be in milliseconds — confirm against timestamp units
// NOTE: only `Serialize` is derived; `Deserialize` is hand-written below so the
// cached `id` can be recomputed on load.
#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, CryptoHasher, CryptoHash)]
pub struct BlockHeader {
    /// Cached hash of this header. `None` only transiently during construction;
    /// skipped by serde and restored by the custom `Deserialize` impl.
    #[serde(skip)]
    id: Option<HashValue>,
    /// Parent hash.
    parent_hash: HashValue,
    /// Block timestamp.
    timestamp: u64,
    /// Block number.
    number: BlockNumber,
    /// Block author.
    author: AccountAddress,
    /// Block author auth key.
    author_auth_key: Option<AuthenticationKey>,
    /// The transaction accumulator root hash after executing this block.
    txn_accumulator_root: HashValue,
    /// The parent block info's block accumulator root hash.
    block_accumulator_root: HashValue,
    /// The last transaction state_root of this block after execute.
    state_root: HashValue,
    /// Gas used for contracts execution.
    gas_used: u64,
    /// Block difficulty
    difficulty: U256,
    /// hash for block body
    body_hash: HashValue,
    /// The chain id
    chain_id: ChainId,
    /// Consensus nonce field.
    nonce: u32,
    /// block header extra
    extra: BlockHeaderExtra,
}
impl BlockHeader {
pub fn new(
parent_hash: HashValue,
timestamp: u64,
number: BlockNumber,
author: AccountAddress,
author_auth_key: Option<AuthenticationKey>,
txn_accumulator_root: HashValue,
block_accumulator_root: HashValue,
state_root: HashValue,
gas_used: u64,
difficulty: U256,
body_hash: HashValue,
chain_id: ChainId,
nonce: u32,
extra: BlockHeaderExtra,
) -> BlockHeader {
let mut header = BlockHeader {
id: None,
parent_hash,
block_accumulator_root,
number,
timestamp,
author,
author_auth_key,
txn_accumulator_root,
state_root,
gas_used,
difficulty,
nonce,
body_hash,
chain_id,
extra,
};
header.id = Some(header.crypto_hash());
header
}
pub fn as_pow_header_blob(&self) -> Vec<u8> {
let mut blob = Vec::new();
let raw_header: RawBlockHeader = self.to_owned().into();
let raw_header_hash = raw_header.crypto_hash();
let mut diff_bytes = [0u8; 32];
raw_header.difficulty.to_big_endian(&mut diff_bytes);
let extend_and_nonce = [0u8; 12];
blob.extend_from_slice(raw_header_hash.to_vec().as_slice());
blob.extend_from_slice(&extend_and_nonce);
blob.extend_from_slice(&diff_bytes);
blob
}
pub fn id(&self) -> HashValue {
self.id
.expect("BlockHeader id should bean Some after init.")
}
pub fn parent_hash(&self) -> HashValue {
self.parent_hash
}
pub fn timestamp(&self) -> u64 {
self.timestamp
}
pub fn number(&self) -> BlockNumber {
self.number
}
pub fn author(&self) -> AccountAddress {
self.author
}
pub fn author_auth_key(&self) -> Option<AuthenticationKey> {
self.author_auth_key
}
pub fn txn_accumulator_root(&self) -> HashValue {
self.txn_accumulator_root
}
pub fn state_root(&self) -> HashValue {
self.state_root
}
pub fn gas_used(&self) -> u64 {
self.gas_used
}
pub fn nonce(&self) -> u32 {
self.nonce
}
pub fn difficulty(&self) -> U256 {
self.difficulty
}
pub fn block_accumulator_root(&self) -> HashValue {
self.block_accumulator_root
}
pub fn body_hash(&self) -> HashValue {
self.body_hash
}
pub fn chain_id(&self) -> ChainId {
self.chain_id
}
pub fn extra(&self) -> &BlockHeaderExtra {
&self.extra
}
pub fn is_genesis(&self) -> bool {
self.number == 0
}
pub fn genesis_block_header(
parent_hash: HashValue,
timestamp: u64,
txn_accumulator_root: HashValue,
state_root: HashValue,
difficulty: U256,
body_hash: HashValue,
chain_id: ChainId,
) -> Self {
Self::new(
parent_hash,
timestamp,
0,
CORE_CODE_ADDRESS,
None,
txn_accumulator_root,
*ACCUMULATOR_PLACEHOLDER_HASH,
state_root,
0,
difficulty,
body_hash,
chain_id,
0,
BlockHeaderExtra::default(),
)
}
pub fn random() -> Self {
Self::new(
HashValue::random(),
rand::random(),
rand::random(),
AccountAddress::random(),
None,
HashValue::random(),
HashValue::random(),
HashValue::random(),
rand::random(),
U256::max_value(),
HashValue::random(),
ChainId::test(),
0,
BlockHeaderExtra([0u8; 4]),
)
}
pub fn as_builder(&self) -> BlockHeaderBuilder {
BlockHeaderBuilder::new_with(self.clone())
}
}
impl<'de> Deserialize<'de> for BlockHeader {
    fn deserialize<D>(deserializer: D) -> Result<Self, <D as Deserializer<'de>>::Error>
    where
        D: Deserializer<'de>,
    {
        // Mirror of `BlockHeader` without the cached `id` field.
        #[derive(Deserialize)]
        #[serde(rename = "BlockHeader")]
        struct BlockHeaderData {
            parent_hash: HashValue,
            timestamp: u64,
            number: BlockNumber,
            author: AccountAddress,
            author_auth_key: Option<AuthenticationKey>,
            txn_accumulator_root: HashValue,
            block_accumulator_root: HashValue,
            state_root: HashValue,
            gas_used: u64,
            difficulty: U256,
            body_hash: HashValue,
            chain_id: ChainId,
            nonce: u32,
            extra: BlockHeaderExtra,
        }
        let header = BlockHeaderData::deserialize(deserializer)?;
        // Route through `Self::new` so the cached `id` is recomputed.
        Ok(Self::new(
            header.parent_hash,
            header.timestamp,
            header.number,
            header.author,
            header.author_auth_key,
            header.txn_accumulator_root,
            header.block_accumulator_root,
            header.state_root,
            header.gas_used,
            header.difficulty,
            header.body_hash,
            header.chain_id,
            header.nonce,
            header.extra,
        ))
    }
}
impl Default for BlockHeader {
    /// An all-zero header on the test chain; mainly useful as a placeholder
    /// (e.g. for `BlockHeaderBuilder::default`).
    fn default() -> Self {
        Self::new(
            HashValue::zero(),
            0,
            0,
            AccountAddress::ZERO,
            None,
            HashValue::zero(),
            HashValue::zero(),
            HashValue::zero(),
            0,
            0.into(),
            HashValue::zero(),
            ChainId::test(),
            0,
            BlockHeaderExtra([0u8; 4]),
        )
    }
}
impl Sample for BlockHeader {
    /// A deterministic genesis-like header (number 0, genesis author,
    /// placeholder accumulator/state roots, fixed timestamp) for tests
    /// and golden fixtures.
    fn sample() -> Self {
        Self::new(
            HashValue::zero(),
            1610110515000,
            0,
            genesis_address(),
            None,
            *ACCUMULATOR_PLACEHOLDER_HASH,
            *ACCUMULATOR_PLACEHOLDER_HASH,
            *SPARSE_MERKLE_PLACEHOLDER_HASH,
            0,
            U256::from(1),
            BlockBody::sample().crypto_hash(),
            ChainId::test(),
            0,
            BlockHeaderExtra([0u8; 4]),
        )
    }
}
/// Strips a full header down to its raw (pre-PoW) form.
///
/// Implemented as `From` rather than `Into` (clippy `from_over_into`): the
/// standard library's blanket impl still provides
/// `Into<RawBlockHeader> for BlockHeader`, so existing `.into()` callers work.
impl From<BlockHeader> for RawBlockHeader {
    fn from(header: BlockHeader) -> RawBlockHeader {
        RawBlockHeader {
            parent_hash: header.parent_hash,
            timestamp: header.timestamp,
            number: header.number,
            author: header.author,
            author_auth_key: header.author_auth_key,
            accumulator_root: header.txn_accumulator_root,
            parent_block_accumulator_root: header.block_accumulator_root,
            state_root: header.state_root,
            gas_used: header.gas_used,
            difficulty: header.difficulty,
            body_hash: header.body_hash,
            chain_id: header.chain_id,
        }
    }
}
/// Header form used as the proof-of-work preimage: the same fields as
/// `BlockHeader` minus the cached id, the nonce and the extra bytes.
#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)]
pub struct RawBlockHeader {
    /// Parent hash.
    pub parent_hash: HashValue,
    /// Block timestamp.
    pub timestamp: u64,
    /// Block number.
    pub number: BlockNumber,
    /// Block author.
    pub author: AccountAddress,
    /// Block author auth key.
    pub author_auth_key: Option<AuthenticationKey>,
    /// The transaction accumulator root hash after executing this block.
    pub accumulator_root: HashValue,
    /// The parent block accumulator root hash.
    pub parent_block_accumulator_root: HashValue,
    /// The last transaction state_root of this block after execute.
    pub state_root: HashValue,
    /// Gas used for contracts execution.
    pub gas_used: u64,
    /// Block difficulty
    pub difficulty: U256,
    /// hash for block body
    pub body_hash: HashValue,
    /// The chain id
    pub chain_id: ChainId,
}
/// Chainable builder over a buffered `BlockHeader`; `build` recomputes the
/// cached id after the setters have mutated the buffer.
#[derive(Default)]
pub struct BlockHeaderBuilder {
    // Working copy mutated by the with_* setters.
    buffer: BlockHeader,
}
impl BlockHeaderBuilder {
    /// Starts from an all-zero (`BlockHeader::default`) buffer.
    pub fn new() -> Self {
        Self::default()
    }
    /// Starts from a fully random header; useful in tests.
    pub fn random() -> Self {
        Self {
            buffer: BlockHeader::random(),
        }
    }
    // Used by `BlockHeader::as_builder` to seed the buffer with a copy.
    fn new_with(buffer: BlockHeader) -> Self {
        Self { buffer }
    }
    // Each `with_*` setter below overwrites one field of the buffered header
    // and returns the builder for chaining.
    pub fn with_parent_hash(mut self, parent_hash: HashValue) -> Self {
        self.buffer.parent_hash = parent_hash;
        self
    }
    pub fn with_timestamp(mut self, timestamp: u64) -> Self {
        self.buffer.timestamp = timestamp;
        self
    }
    pub fn with_number(mut self, number: BlockNumber) -> Self {
        self.buffer.number = number;
        self
    }
    pub fn with_author(mut self, author: AccountAddress) -> Self {
        self.buffer.author = author;
        self
    }
    pub fn with_author_auth_key(mut self, author_auth_key: Option<AuthenticationKey>) -> Self {
        self.buffer.author_auth_key = author_auth_key;
        self
    }
    // Sets the *transaction* accumulator root.
    pub fn with_accumulator_root(mut self, accumulator_root: HashValue) -> Self {
        self.buffer.txn_accumulator_root = accumulator_root;
        self
    }
    // Sets the *block* accumulator root.
    pub fn with_parent_block_accumulator_root(
        mut self,
        parent_block_accumulator_root: HashValue,
    ) -> Self {
        self.buffer.block_accumulator_root = parent_block_accumulator_root;
        self
    }
    pub fn with_state_root(mut self, state_root: HashValue) -> Self {
        self.buffer.state_root = state_root;
        self
    }
    pub fn with_gas_used(mut self, gas_used: u64) -> Self {
        self.buffer.gas_used = gas_used;
        self
    }
    pub fn with_difficulty(mut self, difficulty: U256) -> Self {
        self.buffer.difficulty = difficulty;
        self
    }
    pub fn with_body_hash(mut self, body_hash: HashValue) -> Self {
        self.buffer.body_hash = body_hash;
        self
    }
    pub fn with_chain_id(mut self, chain_id: ChainId) -> Self {
        self.buffer.chain_id = chain_id;
        self
    }
    pub fn with_nonce(mut self, nonce: u32) -> Self {
        self.buffer.nonce = nonce;
        self
    }
    pub fn with_extra(mut self, extra: BlockHeaderExtra) -> Self {
        self.buffer.extra = extra;
        self
    }
    /// Finalizes the header, recomputing the cached id to reflect any
    /// fields changed through the setters.
    pub fn build(mut self) -> BlockHeader {
        self.buffer.id = Some(self.buffer.crypto_hash());
        self.buffer
    }
}
/// Payload of a block: its user transactions and optional uncle headers.
#[derive(
    Default, Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash,
)]
pub struct BlockBody {
    /// The transactions in this block.
    pub transactions: Vec<SignedUserTransaction>,
    /// uncles block header
    pub uncles: Option<Vec<BlockHeader>>,
}
impl BlockBody {
    /// Builds a body from its transactions and optional uncle headers.
    pub fn new(transactions: Vec<SignedUserTransaction>, uncles: Option<Vec<BlockHeader>>) -> Self {
        BlockBody {
            transactions,
            uncles,
        }
    }
    /// Looks up the transaction at `index`, if any.
    pub fn get_txn(&self, index: usize) -> Option<&SignedUserTransaction> {
        self.transactions.get(index)
    }
    /// Just for test
    pub fn new_empty() -> BlockBody {
        Self::new(Vec::new(), None)
    }
    /// Crypto hash of the body, as referenced by `BlockHeader::body_hash`.
    pub fn hash(&self) -> HashValue {
        self.crypto_hash()
    }
}
/// An uncle-less body from a bare transaction list.
///
/// Implemented as `From` rather than `Into` (clippy `from_over_into`); the
/// blanket impl keeps `Into<BlockBody>` working for callers such as
/// `Block::new`.
impl From<Vec<SignedUserTransaction>> for BlockBody {
    fn from(transactions: Vec<SignedUserTransaction>) -> BlockBody {
        BlockBody {
            transactions,
            uncles: None,
        }
    }
}
/// Consumes a body, yielding only its transactions (uncles are dropped).
///
/// Implemented as `From` rather than `Into` (clippy `from_over_into`); the
/// blanket impl keeps existing `.into()` call sites working.
impl From<BlockBody> for Vec<SignedUserTransaction> {
    fn from(body: BlockBody) -> Vec<SignedUserTransaction> {
        body.transactions
    }
}
impl Sample for BlockBody {
fn sample() -> Self {
Self {
transactions: vec![],
uncles: None,
}
}
}
/// A block, encoded as it is on the block chain.
#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)]
pub struct Block {
    /// The header of this block.
    pub header: BlockHeader,
    /// The body of this block.
    pub body: BlockBody,
}
impl Block {
    /// Assembles a block from a header and anything convertible into a body
    /// (e.g. a bare `Vec<SignedUserTransaction>`).
    pub fn new<B>(header: BlockHeader, body: B) -> Self
    where
        B: Into<BlockBody>,
    {
        Block {
            header,
            body: body.into(),
        }
    }
    /// Hash of the block, i.e. of its header.
    pub fn id(&self) -> HashValue {
        self.header.id()
    }
    pub fn header(&self) -> &BlockHeader {
        &self.header
    }
    pub fn transactions(&self) -> &[SignedUserTransaction] {
        self.body.transactions.as_slice()
    }
    /// Uncle headers, if any.
    pub fn uncles(&self) -> Option<&[BlockHeader]> {
        // `as_deref` replaces the manual Some/None match (clippy manual_map).
        self.body.uncles.as_deref()
    }
    /// Ids of all uncle headers; empty when there are none.
    pub fn uncle_ids(&self) -> Vec<HashValue> {
        self.uncles()
            .map(|uncles| uncles.iter().map(|header| header.id()).collect())
            .unwrap_or_default()
    }
    /// Decomposes the block into its header and body.
    pub fn into_inner(self) -> (BlockHeader, BlockBody) {
        (self.header, self.body)
    }
    /// Builds the genesis block: a single genesis transaction, no uncles, and
    /// a header produced by `BlockHeader::genesis_block_header`.
    pub fn genesis_block(
        parent_hash: HashValue,
        timestamp: u64,
        accumulator_root: HashValue,
        state_root: HashValue,
        difficulty: U256,
        genesis_txn: SignedUserTransaction,
    ) -> Self {
        let chain_id = genesis_txn.chain_id();
        let block_body = BlockBody::new(vec![genesis_txn], None);
        let header = BlockHeader::genesis_block_header(
            parent_hash,
            timestamp,
            accumulator_root,
            state_root,
            difficulty,
            block_body.hash(),
            chain_id,
        );
        Self {
            header,
            body: block_body,
        }
    }
    /// Derives the `BlockMetadata` (prologue transaction input) for this
    /// block, given the gas used by its parent.
    pub fn to_metadata(&self, parent_gas_used: u64) -> BlockMetadata {
        let uncles = self
            .body
            .uncles
            .as_ref()
            .map(|uncles| uncles.len() as u64)
            .unwrap_or(0);
        BlockMetadata::new(
            self.header.parent_hash(),
            self.header.timestamp,
            self.header.author,
            self.header.author_auth_key,
            uncles,
            self.header.number,
            self.header.chain_id,
            parent_gas_used,
        )
    }
}
impl std::fmt::Display for Block {
    /// Renders a compact JSON-ish one-liner:
    /// `Block{id:"..", number:"..", parent_id:"..",uncles:[..],transactions:[..]}`.
    /// The `uncles` section is omitted entirely when there are none.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Block{{id:\"{}\", number:\"{}\", parent_id:\"{}\",",
            self.id(),
            self.header().number(),
            self.header().parent_hash()
        )?;
        if let Some(uncles) = &self.body.uncles {
            write!(f, "uncles:[")?;
            for uncle in uncles {
                write!(f, "\"{}\",", uncle.id())?;
            }
            write!(f, "],")?;
        }
        write!(f, "transactions:[")?;
        for txn in &self.body.transactions {
            write!(f, "\"{}\",", txn.id())?;
        }
        write!(f, "]}}")
    }
}
impl Sample for Block {
fn sample() -> Self {
Self {
header: BlockHeader::sample(),
body: BlockBody::sample(),
}
}
}
/// `BlockInfo` is the object we store in the storage. It consists of the
/// block as well as the execution result of this block.
#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)]
pub struct BlockInfo {
    /// Block id
    pub block_id: HashValue,
    /// The total difficulty.
    pub total_difficulty: U256,
    /// The transaction accumulator info
    pub txn_accumulator_info: AccumulatorInfo,
    /// The block accumulator info.
    pub block_accumulator_info: AccumulatorInfo,
}
impl BlockInfo {
pub fn new(
block_id: HashValue,
total_difficulty: U256,
txn_accumulator_info: AccumulatorInfo,
block_accumulator_info: AccumulatorInfo,
) -> Self {
Self {
block_id,
total_difficulty,
txn_accumulator_info,
block_accumulator_info,
}
}
pub fn id(&self) -> HashValue {
self.crypto_hash()
}
pub fn get_total_difficulty(&self) -> U256 {
self.total_difficulty
}
pub fn get_block_accumulator_info(&self) -> &AccumulatorInfo {
&self.block_accumulator_info
}
pub fn get_txn_accumulator_info(&self) -> &AccumulatorInfo {
&self.txn_accumulator_info
}
pub fn block_id(&self) -> &HashValue {
&self.block_id
}
}
impl Sample for BlockInfo {
fn sample() -> Self {
Self {
block_id: BlockHeader::sample().id(),
total_difficulty: 0.into(),
txn_accumulator_info: AccumulatorInfo::sample(),
block_accumulator_info: AccumulatorInfo::sample(),
}
}
}
/// A not-yet-mined block: everything a miner needs except the nonce and the
/// extra bytes, which are supplied later via `into_block`/`into_block_header`.
#[derive(Clone, Debug)]
pub struct BlockTemplate {
    /// Parent hash.
    pub parent_hash: HashValue,
    /// Block timestamp.
    pub timestamp: u64,
    /// Block number.
    pub number: BlockNumber,
    /// Block author.
    pub author: AccountAddress,
    /// Block author auth key.
    pub author_auth_key: Option<AuthenticationKey>,
    /// The transaction accumulator root hash after executing this block.
    pub txn_accumulator_root: HashValue,
    /// The block accumulator root hash.
    pub block_accumulator_root: HashValue,
    /// The last transaction state_root of this block after execute.
    pub state_root: HashValue,
    /// Gas used for contracts execution.
    pub gas_used: u64,
    /// hash for block body
    pub body_hash: HashValue,
    /// body of the block
    pub body: BlockBody,
    /// The chain id
    pub chain_id: ChainId,
    /// Block difficulty
    pub difficulty: U256,
    /// Block consensus strategy
    pub strategy: ConsensusStrategy,
}
impl BlockTemplate {
    /// Assembles a template from execution results plus the metadata of the
    /// block being produced; `body_hash` is derived from `body`.
    pub fn new(
        parent_block_accumulator_root: HashValue,
        accumulator_root: HashValue,
        state_root: HashValue,
        gas_used: u64,
        body: BlockBody,
        chain_id: ChainId,
        difficulty: U256,
        strategy: ConsensusStrategy,
        block_metadata: BlockMetadata,
    ) -> Self {
        // The ignored fields are uncle count, chain id and parent gas used,
        // which are either recomputed or supplied separately here.
        let (parent_hash, timestamp, author, author_auth_key, _, number, _, _) =
            block_metadata.into_inner();
        Self {
            parent_hash,
            block_accumulator_root: parent_block_accumulator_root,
            timestamp,
            number,
            author,
            author_auth_key,
            txn_accumulator_root: accumulator_root,
            state_root,
            gas_used,
            body_hash: body.hash(),
            body,
            chain_id,
            difficulty,
            strategy,
        }
    }
    /// Seals the template into a full `Block` with the mined nonce/extra.
    pub fn into_block(self, nonce: u32, extra: BlockHeaderExtra) -> Block {
        let header = BlockHeader::new(
            self.parent_hash,
            self.timestamp,
            self.number,
            self.author,
            self.author_auth_key,
            self.txn_accumulator_root,
            self.block_accumulator_root,
            self.state_root,
            self.gas_used,
            self.difficulty,
            self.body_hash,
            self.chain_id,
            nonce,
            extra,
        );
        Block {
            header,
            body: self.body,
        }
    }
    /// The raw (pre-PoW) header view of this template.
    pub fn as_raw_block_header(&self) -> RawBlockHeader {
        RawBlockHeader {
            parent_hash: self.parent_hash,
            timestamp: self.timestamp,
            number: self.number,
            author: self.author,
            author_auth_key: self.author_auth_key,
            accumulator_root: self.txn_accumulator_root,
            parent_block_accumulator_root: self.block_accumulator_root,
            state_root: self.state_root,
            gas_used: self.gas_used,
            body_hash: self.body_hash,
            difficulty: self.difficulty,
            chain_id: self.chain_id,
        }
    }
    /// Mining input blob; must stay byte-compatible with
    /// `BlockHeader::as_pow_header_blob`: raw-header hash (32 bytes),
    /// extension/nonce placeholder (12 bytes), big-endian difficulty (32 bytes).
    pub fn as_pow_header_blob(&self) -> Vec<u8> {
        let mut blob = Vec::new();
        let raw_header = self.as_raw_block_header();
        let raw_header_hash = raw_header.crypto_hash();
        let mut dh = [0u8; 32];
        raw_header.difficulty.to_big_endian(&mut dh);
        let extend_and_nonce = [0u8; 12];
        blob.extend_from_slice(raw_header_hash.to_vec().as_slice());
        blob.extend_from_slice(&extend_and_nonce);
        blob.extend_from_slice(&dh);
        blob
    }
    /// Like `into_block` but yields only the sealed header.
    pub fn into_block_header(self, nonce: u32, extra: BlockHeaderExtra) -> BlockHeader {
        BlockHeader::new(
            self.parent_hash,
            self.timestamp,
            self.number,
            self.author,
            self.author_auth_key,
            self.txn_accumulator_root,
            self.block_accumulator_root,
            self.state_root,
            self.gas_used,
            self.difficulty,
            self.body_hash,
            self.chain_id,
            nonce,
            extra,
        )
    }
    /// Reconstructs a template from an existing block (nonce/extra are dropped).
    pub fn from_block(block: Block, strategy: ConsensusStrategy) -> Self {
        BlockTemplate {
            parent_hash: block.header().parent_hash,
            block_accumulator_root: block.header().block_accumulator_root(),
            timestamp: block.header().timestamp,
            number: block.header().number,
            author: block.header().author,
            author_auth_key: block.header().author_auth_key,
            txn_accumulator_root: block.header().txn_accumulator_root,
            state_root: block.header().state_root,
            gas_used: block.header().gas_used,
            body: block.body,
            body_hash: block.header.body_hash,
            chain_id: block.header.chain_id,
            difficulty: block.header.difficulty,
            strategy,
        }
    }
}
/// A block paired with the `BlockInfo` produced by executing it.
#[derive(Clone, Debug, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)]
pub struct ExecutedBlock {
    pub block: Block,
    pub block_info: BlockInfo,
}
impl ExecutedBlock {
    /// Pairs a block with its execution result.
    pub fn new(block: Block, block_info: BlockInfo) -> Self {
        ExecutedBlock { block, block_info }
    }
    /// The underlying block.
    pub fn block(&self) -> &Block {
        &self.block
    }
    /// The execution result record.
    pub fn block_info(&self) -> &BlockInfo {
        &self.block_info
    }
    /// Shortcut to the block's header.
    pub fn header(&self) -> &BlockHeader {
        self.block.header()
    }
    /// Cumulative chain difficulty, taken from the block info.
    pub fn total_difficulty(&self) -> U256 {
        self.block_info.get_total_difficulty()
    }
}
/// A header together with the uncle headers of the same block.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BlockSummary {
    pub block_header: BlockHeader,
    pub uncles: Vec<BlockHeader>,
}
impl BlockSummary {
    /// The block's own header.
    pub fn header(&self) -> &BlockHeader {
        &self.block_header
    }
    /// The uncle headers (possibly empty).
    pub fn uncles(&self) -> &[BlockHeader] {
        self.uncles.as_slice()
    }
}
impl From<Block> for BlockSummary {
fn from(block: Block) -> Self {
Self {
block_header: block.header,
uncles: block.body.uncles.unwrap_or_default(),
}
}
}
/// Splits a summary into its (header, uncles) pair.
///
/// Implemented as `From` rather than `Into` (clippy `from_over_into`); the
/// blanket impl keeps existing `.into()` call sites working.
impl From<BlockSummary> for (BlockHeader, Vec<BlockHeader>) {
    fn from(summary: BlockSummary) -> (BlockHeader, Vec<BlockHeader>) {
        (summary.block_header, summary.uncles)
    }
}
/// Aggregate uncle statistics; the averages are the sums divided by `uncles`
/// (integer division, 0 when there are no uncles).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct UncleSummary {
    /// total uncle
    pub uncles: u64,
    /// sum(number of the block which contain uncle block - uncle parent block number).
    pub sum: u64,
    // sum / uncles, computed in `new`.
    pub avg: u64,
    pub time_sum: u64,
    // time_sum / uncles, computed in `new`.
    pub time_avg: u64,
}
impl UncleSummary {
    /// Derives the averages from the raw counts; `checked_div` makes the
    /// zero-uncle case yield 0 instead of panicking.
    pub fn new(uncles: u64, sum: u64, time_sum: u64) -> Self {
        let avg = sum.checked_div(uncles).unwrap_or_default();
        let time_avg = time_sum.checked_div(uncles).unwrap_or_default();
        Self {
            uncles,
            sum,
            avg,
            time_sum,
            time_avg,
        }
    }
}
/// Uncle statistics for one epoch, split into per-number and per-epoch views.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EpochUncleSummary {
    /// epoch number
    pub epoch: u64,
    pub number_summary: UncleSummary,
    pub epoch_summary: UncleSummary,
}
impl EpochUncleSummary {
pub fn new(epoch: u64, number_summary: UncleSummary, epoch_summary: UncleSummary) -> Self {
Self {
epoch,
number_summary,
epoch_summary,
}
}
}
| 27.068117 | 100 | 0.576981 |
de61ca2a8421c0d60fd0f69de8aa59e881df953b
| 8,039 |
use nom::error::ErrorKind;
use std::fmt;
/// Parser error that can print itself in a human-readable format.
#[derive(Clone, PartialEq)]
pub struct PrettyParseError<'a> {
    /// Inner error
    pub error: ParseError,
    /// Input to the parser
    pub input: &'a str,
    /// Remaining input after partially tokenizing
    /// (a suffix of `input`; used to locate the failure position).
    pub remaining: &'a str,
}
/// Simple offset calculator to determine where to place the carrot for indicating an error.
///
/// `substring` is expected to be a suffix of `input` (the parser's remaining
/// text); the offset is how many bytes were consumed. Saturates to 0 rather
/// than panicking on debug-mode underflow if `substring` is unexpectedly
/// longer than `input`.
fn offset(input: &str, substring: &str) -> usize {
    input.len().saturating_sub(substring.len())
}
impl<'a> fmt::Debug for PrettyParseError<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Could not parse route.")?;
f.write_str("\n")?;
let route_str: &str = "Route: ";
f.write_str(route_str)?;
f.write_str(self.input)?;
f.write_str("\n")?;
let offset = offset(self.input, self.remaining);
let offset = offset + self.error.offset;
let pad = (0..offset + route_str.len())
.map(|_| '-')
.collect::<String>();
f.write_str(&format!("{}^", pad))?;
f.write_str("\n")?;
if !self.error.expected.is_empty() {
f.write_str("Expected: ")?;
self.error.expected[..self.error.expected.len() - 1]
.iter()
.try_for_each(|expected| {
<ExpectedToken as fmt::Display>::fmt(expected, f)
.and_then(|_| f.write_str(", "))
})?;
self.error
.expected
.last()
.map(|expected| <ExpectedToken as fmt::Display>::fmt(expected, f))
.transpose()?;
f.write_str("\n")?;
}
if let Some(reason) = self.error.reason {
f.write_str("Reason: ")?;
<ParserErrorReason as fmt::Display>::fmt(&reason, f)?;
}
Ok(())
}
}
/// Error for parsing the route
#[derive(Debug, Clone, PartialEq)]
pub struct ParseError {
    /// A concrete reason why the parse failed.
    pub reason: Option<ParserErrorReason>,
    /// Expected token sequences
    pub expected: Vec<ExpectedToken>,
    /// Additional offset for failures within sub-parsers.
    /// Eg. if `{` parses, but then a bad ident is presented, some offset is needed here then.
    pub offset: usize,
}
impl ParseError {
pub(crate) fn expected(expected: ExpectedToken) -> Self {
ParseError {
reason: None,
expected: vec![expected],
offset: 0,
}
}
}
impl nom::error::ParseError<&str> for ParseError {
    /// Discards nom's generic `ErrorKind`; our errors start empty and are
    /// enriched by the route-specific parsers.
    fn from_error_kind(_input: &str, _kind: ErrorKind) -> Self {
        ParseError {
            reason: None,
            expected: vec![],
            offset: 0,
        }
    }
    /// Ignores nom's appended context; the existing error is kept unchanged.
    fn append(_input: &str, _kind: ErrorKind, other: Self) -> Self {
        other
    }
    /// Merges two failed alternatives: unions the expected-token lists and
    /// prefers the right-hand side's reason and offset.
    fn or(mut self, other: Self) -> Self {
        // It is assumed that there aren't duplicates.
        self.expected.extend(other.expected);
        ParseError {
            reason: other.reason.or(self.reason), // Take the right most reason
            expected: self.expected,
            offset: other.offset, /* Defer to the "other"'s offset. TODO it might make sense if the offsets are different, only show the other's "expected". */
        }
    }
}
/// The token (or token class) a failed parser was looking for; `Display`
/// renders the literal character, or an `<angle-bracket>` class name.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ExpectedToken {
    /// /
    Separator,
    /// specific string.
    Literal,
    /// ?
    QueryBegin,
    /// &
    QuerySeparator,
    /// \#
    FragmentBegin,
    /// !
    End,
    /// identifier within {}
    Ident,
    /// {
    OpenBracket,
    /// }
    CloseBracket,
    /// =
    Equals,
    /// *
    Star,
    /// :
    Colon,
}
impl fmt::Display for ExpectedToken {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Map each variant to its textual form, then emit it in one call.
        let text = match self {
            ExpectedToken::Separator => "/",
            ExpectedToken::Literal => "<literal>",
            ExpectedToken::QueryBegin => "?",
            ExpectedToken::QuerySeparator => "&",
            ExpectedToken::FragmentBegin => "#",
            ExpectedToken::End => "!",
            ExpectedToken::Ident => "<ident>",
            ExpectedToken::OpenBracket => "{",
            ExpectedToken::CloseBracket => "}",
            ExpectedToken::Equals => "=",
            ExpectedToken::Star => "*",
            ExpectedToken::Colon => ":",
        };
        f.write_str(text)
    }
}
/// A concrete reason why a parse failed
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ParserErrorReason {
    /// Some token encountered after the end token.
    TokensAfterEndToken,
    /// Two slashes are able to occur next to each other.
    DoubleSlash,
    /// End after a {}
    EndAfterCapture,
    /// A & appears before a ?
    AndBeforeQuestion,
    /// Captures can't be next to each other
    AdjacentCaptures,
    /// There can only be one question mark in the query section
    MultipleQuestions,
    /// The provided ident within a capture group could never match with a valid rust identifier.
    BadRustIdent(char),
    /// A bad literal.
    BadLiteral,
    /// Invalid state
    InvalidState,
    /// Can't have capture sections for unit structs/variants
    CapturesInUnit,
    /// Internal check on valid state transitions
    /// This should never actually be created.
    NotAllowedStateTransition,
    /// Expected a specific token
    Expected(ExpectedToken),
}
impl fmt::Display for ParserErrorReason {
    /// A full-sentence explanation for each failure reason.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ParserErrorReason::TokensAfterEndToken => {
                f.write_str("Characters appeared after the end token (!).")?;
            }
            ParserErrorReason::DoubleSlash => {
                f.write_str("Two slashes are not allowed to be next to each other (//).")?;
            }
            ParserErrorReason::AndBeforeQuestion => {
                f.write_str("The first query must be indicated with a '?', not a '&'.")?;
            }
            ParserErrorReason::AdjacentCaptures => {
                f.write_str("Capture groups can't be next to each other. There must be some character in between the '}' and '{' characters.")?;
            }
            ParserErrorReason::InvalidState => {
                f.write_str("Library Error: The parser was able to enter into an invalid state.")?;
            }
            ParserErrorReason::NotAllowedStateTransition => {
                f.write_str("Library Error: A state transition was attempted that would put the parser in an invalid state")?;
            }
            ParserErrorReason::MultipleQuestions => {
                f.write_str("There can only be one question mark in the query section. `&` should be used to separate other queries.")?;
            }
            ParserErrorReason::BadRustIdent(c) => {
                // `write!` avoids the `write_str(&format!(...))` intermediate
                // allocation (clippy perf).
                write!(
                    f,
                    "The character: '{}' could not be used as a Rust identifier.",
                    c
                )?;
            }
            ParserErrorReason::EndAfterCapture => {
                f.write_str("The end token (!) can't appear after a capture ({}).")?;
            }
            ParserErrorReason::Expected(expected) => {
                write!(f, "Expected: {}", expected)?;
            }
            ParserErrorReason::BadLiteral => {
                f.write_str("Malformed literal.")?;
            }
            ParserErrorReason::CapturesInUnit => {
                f.write_str("Cannot have a capture section for a unit struct or variant.")?;
            }
        }
        Ok(())
    }
}
pub(crate) fn get_reason(err: &mut nom::Err<ParseError>) -> &mut Option<ParserErrorReason> {
match err {
nom::Err::Error(err) | nom::Err::Failure(err) => &mut err.reason,
nom::Err::Incomplete(_) => panic!("Incomplete not possible"),
}
}
| 33.495833 | 159 | 0.561637 |
08608c9af851f170ff5639c13a5369dcf4c245c6
| 12,199 |
/*!
For Rust-to-Rust ffi,
with a focus on creating libraries loaded at program startup,
and with load-time type-checking.
This library allows defining Rust libraries that can be loaded at runtime,
even if they were built with a different Rust version than the crate that depends on it.
These are some use cases for this library:
- Converting a Rust dependency tree from compiling statically into a single binary,
into one binary (and potentially) many dynamic libraries,
allowing separate re-compilation on changes.
- Creating a plugin system (without support for unloading).
# Features
Currently this library has these features:
- Features the [`sabi_trait`] attribute macro, for creating ffi-safe trait objects.
- Ffi-safe equivalent of some trait objects with [`DynTrait`].
- Provides ffi-safe alternatives/wrappers for many standard library types,
in the [`std_types`] module.
- Provides ffi-safe wrappers for some types defined in external crates,
in the [`external_types`] module.
- Provides the [`StableAbi`] trait for asserting that types are ffi-safe.
- The [prefix types] feature for building extensible modules and vtables,
without breaking ABI compatibility.
- Supports ffi-safe [nonexhaustive enums], wrapped in [`NonExhaustive`].
- Checking at load-time that the types in the dynamic library have the expected layout,
allowing for semver compatible changes while checking the layout of types.
- Provides the [`StableAbi` derive] macro
to both assert that the type is ffi compatible,
and to get the layout of the type at load-time to check that it is still compatible.
# Examples
For **examples** of using `abi_stable` you can look at [the readme example],
or for the crates in the examples directory in the repository for this crate.
This crate also has examples in the docs for most features.
To run the example crates you'll generally have to build the `*_impl` crate,
then run the `*_user` crate (all `*_user` crates should have a help message and a readme.md).
# Cargo Features
If it becomes possible to disable build scripts,
you can manually enable support for Rust past 1.41.0 features with the `rust_*_*` cargo features.
These are default cargo features that enable optional crates :
- "channels":
Depends on `crossbeam-channel`,
wrapping channels from it for ffi in `abi_stable::external_types::crossbeam_channel` .
- "serde_json":
Depends on `serde_json`,
providing ffi-safe equivalents of
`&serde_json::value::RawValue` and `Box<serde_json::value::RawValue>`,
in `abi_stable::external_types::serde_json` .
To disable the default features use:
```text
[dependencies.abi_stable]
version = "<current_version>"
default-features = false
features = [ ]
```
enabling the features you need in the `features` array.
### Manually enabled
These are features to manually enable support for newer language features,
required until this library is updated to automatically detect them,
every one of which has a `nightly_*` equivalent.
Features:
- "const_params":
Enables impls which require using const generics,
including implementing StableAbi for arrays of all lengths, requires a Rust version
where const generics are stable.
- "nightly_const_params":
Enables impls which require using const generics,
including implementing StableAbi for arrays of all lengths, needed for
nightly Rust versions where const generics are unstable.
# Glossary
`interface crate`: the crate that declares the public functions, types, and traits that
are necessary to load the library at runtime.
`implementation crate`: A crate that implements all the functions in the interface crate.
`user crate`: A crate that depends on an `interface crate` and
loads 1 or more `implementation crate`s for it.
`module`: refers to a struct of function pointers and other static values.
The root module implement the [`RootModule`] trait.
These are declared in the `interface crate`,exported in the `implementation crate`,
and loaded in the `user crate`.
# Rust-to-Rust FFI types.
Types must implement [`StableAbi`] to be safely passed through the FFI boundary,
which can be done using the [`StableAbi` derive] macro.
For how to evolve dynamically loaded libraries you can look at the [library_evolution] module.
These are the kinds of types passed through FFI:
- Value kind:<br>
This is the default kind when deriving StableAbi.
The layout of these types must not change in a minor versions.
- [Nonexhaustive enums] :<br>
Enums wrapped inside [`NonExhaustive`],
which can add variants in minor versions of the library.
- [Trait objects] :<br>
Trait object-like types generated using the [`sabi_trait`] attribute macro,
which erase the type of the value they wrap, implement the methods of the trait,
and can only be unwrapped back to the original type in the dynamic library/binary
that created it.
- Opaque kind:<br>
Types wrapped in [`DynTrait`],
whose layout can change in any version of the library,
and can only be unwrapped back to the original type in the dynamic library/binary
that created it.
- [Prefix types] :<br>
Types only accessible through some custom pointer types,
most commonly vtables and modules,
which can be extended in minor versions while staying ABI compatible,
by adding fields at the end.
# Extra documentation
- [Unsafe code guidelines] :<br>
Describes how to write unsafe code relating to this library.
- [Troubleshooting] :<br>
Some problems and their solutions.
# Macros (derive and attribute)
- [`sabi_trait`] attribute macro:<br>
For generating ffi-safe trait objects.
- [`StableAbi` derive] :<br>
For asserting abi-stability of a type,
and obtaining the layout of the type at runtime.
- [Nonexhaustive enums] :<br>
Details for how to declare nonexhaustive enums.
- [Prefix types] \(using the StableAbi derive macro):<br>
The method by which *vtables* and *modules* are implemented,
allowing extending them in minor versions of a library.
[`std_types`]: ./std_types/index.html
[`external_types`]: ./external_types/index.html
[prefix types]: ./docs/prefix_types/index.html
[Prefix types]: ./docs/prefix_types/index.html
[nonexhaustive enums]: ./docs/sabi_nonexhaustive/index.html
[Nonexhaustive enums]: ./docs/sabi_nonexhaustive/index.html
[library_evolution]: ./docs/library_evolution/index.html
[`NonExhaustive`]: ./nonexhaustive_enum/struct.NonExhaustive.html
[the readme example]:
https://github.com/rodrimati1992/abi_stable_crates/blob/master/readme.md#readme_example
[`RootModule`]: ./library/trait.RootModule.html
[`StableAbi`]: ./abi_stability/stable_abi_trait/trait.StableAbi.html
[`sabi_trait`]: ./docs/sabi_trait_attribute/index.html
[Trait objects]: ./docs/sabi_trait_attribute/index.html
[`StableAbi` derive]: ./docs/stable_abi_derive/index.html
[`DynTrait`]: ./struct.DynTrait.html
[Troubleshooting]: ./docs/troubleshooting/index.html
[Unsafe code guidelines]: ./docs/unsafe_code_guidelines/index.html
*/
// `improper_ctypes` is way too noisy of a lint,
// every single warning was a false positive.
// the true positives are caught by the StableAbi trait.
#![allow(improper_ctypes)]
#![allow(improper_ctypes_definitions)]
#![allow(unused_unsafe)]
#![allow(non_camel_case_types)]
#![deny(unused_must_use)]
#![warn(rust_2018_idioms)]
#![allow(clippy::declare_interior_mutable_const)]
#![allow(clippy::needless_doctest_main)]
#![allow(clippy::redundant_closure_call)]
#![allow(clippy::suspicious_assignment_formatting)]
#![allow(clippy::zero_prefixed_literal)]
#![allow(clippy::type_complexity)]
// This lint is telling me to use `#[non_exhaustive]` for structs that will never change,
// that is very silly.
#![allow(clippy::manual_non_exhaustive)]
#![allow(clippy::ptr_offset_with_cast)]
#![allow(clippy::empty_loop)]
#![deny(clippy::missing_safety_doc)]
// Left here for nightly Rust users before this got stabilized.
// Necessary for array impls of all sizes.
#![cfg_attr(
feature="nightly_const_params",
feature(min_const_generics)
)]
#![cfg_attr(feature = "docsrs", feature(doc_cfg))]
#[allow(unused_imports)]
#[cfg(test)]
use abi_stable_shared::file_span;
#[macro_use]
extern crate serde_derive;
#[macro_use(StableAbi)]
extern crate abi_stable_derive;
extern crate self as abi_stable;
#[doc(inline)]
pub use abi_stable_derive::{
StableAbi,
GetStaticEquivalent,
};
#[doc(inline)]
pub use abi_stable_derive::{
sabi_trait,
sabi_extern_fn,
};
#[doc(inline)]
pub use abi_stable_derive::export_root_module;
use abi_stable_derive::{
impl_InterfaceType,
};
#[doc(hidden)]
pub use abi_stable_derive::{
get_root_module_static,
};
#[macro_use]
mod impls;
#[macro_use]
mod internal_macros;
#[macro_use]
mod macros;
#[cfg(test)]
#[macro_use]
mod test_macros;
#[cfg(feature = "testing")]
#[macro_use]
pub mod test_utils;
#[cfg(test)]
mod misc_tests;
#[macro_use]
pub mod utils;
#[macro_use]
pub mod const_utils;
#[macro_use]
pub mod traits;
pub mod for_examples;
#[macro_use]
pub mod abi_stability;
#[macro_use]
pub mod erased_types;
pub mod external_types;
#[macro_use]
pub mod library;
pub mod marker_type;
mod multikey_map;
pub mod nonexhaustive_enum;
pub mod pointer_trait;
pub mod prefix_type;
pub mod type_layout;
pub mod inline_storage;
#[doc(hidden)]
pub mod derive_macro_reexports;
// `pmr` is what I call "private" reexport for macros in newer crates.
#[doc(hidden)]
pub use self::derive_macro_reexports as pmr;
pub mod std_types;
pub mod sabi_types;
pub mod reflection;
pub mod type_level;
pub mod docs;
pub mod sabi_trait;
/// The header used to identify the version number of abi_stable
/// that a dynamic library uses.
pub static LIB_HEADER:library::AbiHeader=library::AbiHeader::VALUE;
/// Miscellaneous items re-exported from core_extensions.
pub mod reexports{
pub use core_extensions::SelfOps;
pub use core_extensions::type_level_bool::{True, False};
}
#[doc(hidden)]
pub const ABI_STABLE_VERSION:sabi_types::VersionStrings=package_version_strings!();
/*
I am using this static as the `identity` of this dynamic library/executable,
this assumes that private static variables don't get merged between
Rust dynamic libraries that have a different global allocator.
If the address of this is the same among dynamic libraries that have *different*
allocators, please create an issue for this.
*/
use std::sync::atomic::AtomicUsize;
static EXECUTABLE_IDENTITY: AtomicUsize = AtomicUsize::new(1);
#[doc(inline)]
pub use crate::{
abi_stability::StableAbi,
erased_types::{DynTrait,ImplType, InterfaceType},
};
#[doc(hidden)]
pub mod globals{
    //! Process-wide state shared across every dynamic library loaded into this
    //! process, most importantly the layout-checking function, so that all of
    //! them agree on how ABI compatibility is verified.
    use crate::{
        abi_stability::{
            abi_checking::{check_layout_compatibility_for_ffi},
        },
        sabi_types::LateStaticRef,
        std_types::{RResult,RBoxError},
        type_layout::TypeLayout,
        utils::leak_value,
    };
    /// Table of globals exchanged across the FFI boundary (`#[repr(C)]` +
    /// `StableAbi` so its layout itself is checkable).
    #[repr(C)]
    #[derive(StableAbi)]
    // #[sabi(debug_print)]
    pub struct Globals{
        // Checks that two type layouts are ABI-compatible, returning the
        // error as an ffi-safe `RResult`.
        pub layout_checking:
            extern "C" fn(&'static TypeLayout,&'static TypeLayout) -> RResult<(), RBoxError> ,
    }
    impl Globals{
        /// Builds this binary's own `Globals` and leaks it to get a
        /// `'static` reference.
        pub fn new()->&'static Self{
            leak_value(Globals{
                layout_checking:check_layout_compatibility_for_ffi,
            })
        }
    }
    // Set at most once; either lazily to our own `Globals` or explicitly to a
    // host binary's via `initialize_globals_with`.
    pub(crate)static GLOBALS:LateStaticRef<&Globals>=LateStaticRef::new();
    /// Returns the process globals, initializing them with this binary's own
    /// `Globals` if nothing was installed yet.
    // NOTE(review): `#[inline(never)]` presumably keeps one identifiable copy
    // of this function per binary — confirm before changing.
    #[inline(never)]
    pub fn initialized_globals()->&'static Globals{
        GLOBALS.init(|| Globals::new() )
    }
    /// Installs globals received from another binary (called across the FFI
    /// boundary, hence `extern "C"`). A later lazy init is then a no-op.
    #[inline(never)]
    pub extern "C" fn initialize_globals_with(globs:&'static Globals){
        GLOBALS.init(|| globs );
    }
}
#[cfg(all(test, not(feature = "testing")))]
compile_error! { "tests must be run with the \"testing\" feature" }
#[cfg(miri)]
extern "Rust" {
/// Miri-provided extern function to mark the block `ptr` points to as a "root"
/// for some static memory. This memory and everything reachable by it is not
/// considered leaking even if it still exists when the program terminates.
///
/// `ptr` has to point to the beginning of an allocated block.
fn miri_static_root(ptr: *const u8);
}
| 28.435897 | 97 | 0.739241 |
2951f5128e07402229b190d5fef9a64b4324d575
| 165 |
// rustdoc UI test: pulls in the shared primitive-impl fixture and checks (via
// the `@has` htmldocck directive below) that the blanket `ToString` impl is
// rendered on the `i32` primitive page.
#![crate_name = "foo"]
include!("primitive/primitive-generic-impl.rs");
// @has foo/primitive.i32.html '//div[@id="impl-ToString"]//code' 'impl<T> ToString for T'
| 27.5 | 90 | 0.672727 |
624a4361d7d24a1789721582480e776a19bafee7
| 1,706 |
pub fn compute() {
let mut sprawl = sprawl::Sprawl::new();
let node0 = sprawl
.new_node(
sprawl::style::Style {
flex_grow: 1f32,
flex_basis: sprawl::style::Dimension::Points(50f32),
size: sprawl::geometry::Size { height: sprawl::style::Dimension::Points(20f32), ..Default::default() },
..Default::default()
},
&[],
)
.unwrap();
let node1 = sprawl
.new_node(
sprawl::style::Style {
flex_grow: 1f32,
size: sprawl::geometry::Size { height: sprawl::style::Dimension::Points(10f32), ..Default::default() },
..Default::default()
},
&[],
)
.unwrap();
let node2 = sprawl
.new_node(
sprawl::style::Style {
flex_grow: 1f32,
size: sprawl::geometry::Size { height: sprawl::style::Dimension::Points(10f32), ..Default::default() },
..Default::default()
},
&[],
)
.unwrap();
let node = sprawl
.new_node(
sprawl::style::Style {
flex_direction: sprawl::style::FlexDirection::Column,
size: sprawl::geometry::Size {
width: sprawl::style::Dimension::Points(100f32),
height: sprawl::style::Dimension::Points(113f32),
..Default::default()
},
..Default::default()
},
&[node0, node1, node2],
)
.unwrap();
sprawl.compute_layout(node, sprawl::geometry::Size::undefined()).unwrap();
}
| 34.12 | 119 | 0.466589 |
38061606b01e09401ccfa3ce01235ee3efaea45c
| 1,052 |
use arrayvec::ArrayString;
use core::fmt::Write;
/// DDL that creates the two bookkeeping tables (`_oapth_migration_group` and
/// `_oapth_migration`) if they don't already exist; the shared column lists
/// come from the `oapth_migration*_columns!` macros.
// NOTE: the literal below is kept exactly as-is — line breaks inside it are
// part of the generated SQL (whitespace is insignificant to SQLite).
pub(crate) const CREATE_MIGRATION_TABLES: &str = concat!(
"CREATE TABLE IF NOT EXISTS _oapth_migration_group (",
oapth_migration_group_columns!(),
"); \
CREATE TABLE IF NOT EXISTS _oapth_migration (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
created_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,",
oapth_migration_columns!(),
");"
);
/// Drops every user table reported by the back end (dev-tools only).
///
/// All `DROP TABLE` statements are concatenated into one buffer and executed
/// as a single batch.
// NOTE(review): assumes the combined statements fit in 1024 bytes —
// `write_fmt` returns an error (propagated by `?`) once capacity is exceeded.
#[oapth_macros::_dev_tools]
#[inline]
pub(crate) async fn clean<B>(back_end: &mut B) -> crate::Result<()>
where
    B: crate::BackEnd,
{
    let mut buffer: ArrayString<1024> = ArrayString::new();
    for table in back_end.tables("").await? {
        buffer.write_fmt(format_args!("DROP TABLE {};", table))?;
    }
    back_end.execute(&buffer).await?;
    Ok(())
}
/// Builds the query that lists user tables from `sqlite_master`, filtering out
/// SQLite's internal `sqlite_*` tables. The schema argument is ignored.
#[inline]
pub(crate) fn tables(_: &str) -> crate::Result<ArrayString<128>> {
    let mut sql = ArrayString::new();
    // The statement has no interpolated arguments, so a plain `write_str`
    // suffices; capacity overflow still surfaces as a `fmt::Error` via `?`.
    sql.write_str(
        "SELECT tbl_name generic_column FROM sqlite_master tables WHERE type='table' AND tbl_name NOT LIKE 'sqlite_%';",
    )?;
    Ok(sql)
}
| 26.974359 | 115 | 0.697719 |
1deadc7467827f3194472da1a27e21f0ce015e37
| 3,201 |
//! The same behavior as blink, but implemented using interrupts. Don't worry that it looks quite a
//! bit different -- try shaking the device to see that it does blink. Having a lower blink rate
//! would require using a timer that's currently not implemented in the HAL crate.
//!
//! This requires nightly as it uses a statically initialized queue (would be needlessly
//! verbose without const-fn).
#![no_main]
#![no_std]
#![feature(const_fn)]
extern crate panic_semihosting;
use efr32xg1::interrupt;
use cortex_m_rt::entry;
use embedded_hal::blocking::delay::DelayMs;
/// Entry point: hands the board peripherals to the TIMER1 interrupt handler
/// via the static SPSC queue, then arms the timer and spins forever — all
/// blinking/button logic happens in `TIMER1`.
#[entry]
fn main() -> ! {
    let board = thunderboard_sltb001a::Board::new();
    let mut leds = board.leds;
    let buttons = board.buttons;
    // NOTE(review): `delay` is bound but never used here — presumably only
    // needed so it is moved out of `board`; confirm before removing.
    let mut delay = board.delay;
    let mut pic = board.pic;
    let mut nvic = board.nvic;
    nvic.enable(efr32xg1::Interrupt::TIMER1);
    // Show that nothing bad happens even if we call this too early
    // (the handler just dequeues nothing and returns).
    cortex_m::peripheral::NVIC::pend(efr32xg1::Interrupt::TIMER1);
    // unsafe: The SPSC documentation does it too.
    let mut for_timer1 = unsafe { FOR_TIMER1.split().0 };
    // Move the peripherals into the queue so the interrupt can own them.
    for_timer1.enqueue((buttons, leds, pic)).ok().unwrap();
    // Matter of taste: I rather make sure all the initialization work is done before we really
    // start spinning
    cortex_m::peripheral::NVIC::pend(efr32xg1::Interrupt::TIMER1);
    let mut timer1 = board.timer1;
    timer1.enable_outputcompare(0);
    timer1.interrupt_enable(efm32gg_hal::timer::InterruptFlag::CC0);
    timer1.start();
    // Everything from here on is interrupt-driven.
    loop {
    }
}
use heapless::spsc::Queue;
use heapless::consts::U1;
use thunderboard_sltb001a::{button::Buttons, pic::PIC, RefCellDelay};
#[cfg(feature = "led-pwm")]
use thunderboard_sltb001a::led_pwm::LEDs;
#[cfg(not(feature = "led-pwm"))]
use thunderboard_sltb001a::led::LEDs;
// Queue along which peripherals are moved into the timer.
// See https://github.com/rust-embedded/wg/issues/294 for future safe directions.
// It would feel a tad more safe to .split() this right away, but the signature 'd get ugly.
static mut FOR_TIMER1: Queue<(Buttons, LEDs, PIC<RefCellDelay>), U1> = Queue::new();
/// TIMER1 compare-match handler: drives the blink pattern.
///
/// On the first invocation(s) it only tries to dequeue the peripherals that
/// `main` pushed into `FOR_TIMER1`; afterwards it owns them in the
/// handler-local `stuff` static. `halfcount` cycles 0..8, encoding four PIC
/// LED positions x two phases per position.
#[interrupt]
fn TIMER1() {
    static mut stuff: Option<(Buttons, LEDs, PIC<RefCellDelay>)> = None;
    static mut halfcount: i32 = 0;
    // Acknowledge the interrupt so it doesn't immediately re-fire.
    efm32gg_hal::timer::Timer0::interrupt_unpend(efm32gg_hal::timer::InterruptFlag::CC0);
    if let Some((buttons, leds, pic)) = stuff {
        let count = *halfcount / 2;
        let phase = *halfcount % 2;
        match phase {
            0 => {
                // Light exactly one of the four PIC LEDs, walking with `count`.
                pic.set_leds(count == 0, count == 1, count == 2, count == 3);
                leds.led1_off();
                // Button 1 suppresses LED0 while held.
                if buttons.button1_pressed() {
                    leds.led0_off();
                } else {
                    leds.led0_on();
                }
            },
            1 => {
                // Button 0 suppresses LED1 while held.
                if !buttons.button0_pressed() {
                    leds.led1_on();
                }
            },
            _ => unreachable!(),
        }
        *halfcount = (*halfcount + 1) % 8;
    } else {
        // Peripherals not received yet: try to take them from `main`.
        // unsafe: The SPSC documentation does it too.
        let mut for_timer1 = unsafe { FOR_TIMER1.split().1 };
        *stuff = for_timer1.dequeue();
    }
}
| 31.693069 | 99 | 0.626679 |
ef8470d36e100a98d19d23ca208f3359dc5c445e
| 38 |
org.jfree.chart.needle.LongNeedleTest
| 19 | 37 | 0.868421 |
eb82804f291a3af06b82ee656a38a4d7d4b9c5b7
| 1,222 |
#[macro_use]
extern crate nom;
use nom::character::streaming::digit1 as digit;
use std::str;
use std::str::FromStr;
// Parses an unsigned decimal float: digits on at least one side of a
// mandatory `.` (accepts `123.456`, `123.`, and `.456`), then converts the
// recognized bytes through UTF-8 and `f32::from_str`.
named!(
    unsigned_float<f32>,
    map_res!(
        map_res!(
            recognize!(alt!(
                delimited!(digit, tag!("."), opt!(digit)) | delimited!(opt!(digit), tag!("."), digit)
            )),
            str::from_utf8
        ),
        FromStr::from_str
    )
);
// Adds an optional leading sign to `unsigned_float`: `-` negates the value,
// `+` or no sign leaves it unchanged (multiplier 1.0).
named!(
    float<f32>,
    map!(
        pair!(opt!(alt!(tag!("+") | tag!("-"))), unsigned_float),
        |(sign, value): (Option<&[u8]>, f32)| sign
            .and_then(|s| if s[0] == b'-' { Some(-1f32) } else { None })
            .unwrap_or(1f32) * value
    )
);
/// All accepted digit/`.` shapes parse; the trailing `;` tells the streaming
/// parser the number is complete and is returned as the unconsumed remainder.
#[test]
fn unsigned_float_test() {
    assert_eq!(unsigned_float(&b"123.456;"[..]), Ok((&b";"[..], 123.456)));
    assert_eq!(unsigned_float(&b"0.123;"[..]), Ok((&b";"[..], 0.123)));
    assert_eq!(unsigned_float(&b"123.0;"[..]), Ok((&b";"[..], 123.0)));
    assert_eq!(unsigned_float(&b"123.;"[..]), Ok((&b";"[..], 123.0)));
    assert_eq!(unsigned_float(&b".123;"[..]), Ok((&b";"[..], 0.123)));
}
/// Signed wrapper: `+` and no sign are equivalent, `-` negates.
#[test]
fn float_test() {
    assert_eq!(float(&b"123.456;"[..]), Ok((&b";"[..], 123.456)));
    assert_eq!(float(&b"+123.456;"[..]), Ok((&b";"[..], 123.456)));
    assert_eq!(float(&b"-123.456;"[..]), Ok((&b";"[..], -123.456)));
}
| 26 | 93 | 0.520458 |
23f7b1acb54d470f73c08386288aeca37538f675
| 18,867 |
//! Intrinsics and other functions that the miri engine executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.
use syntax::symbol::Symbol;
use syntax_pos::Span;
use rustc::ty;
use rustc::ty::layout::{LayoutOf, Primitive, Size};
use rustc::ty::subst::SubstsRef;
use rustc::hir::def_id::DefId;
use rustc::ty::TyCtxt;
use rustc::mir::BinOp;
use rustc::mir::interpret::{InterpResult, Scalar, GlobalId, ConstValue};
use super::{
Machine, PlaceTy, OpTy, InterpCx, ImmTy,
};
mod caller_location;
mod type_name;
/// Evaluates one of the bit-manipulation intrinsics (`ctpop`, `ctlz`, `cttz`,
/// `bswap`, `bitreverse`) on an integer operand.
///
/// `bits` holds the operand zero-extended into the low bits of a `u128`;
/// `kind` supplies its real width. ICEs (via `bug!`) for a non-integer `kind`
/// or an unknown intrinsic name.
fn numeric_intrinsic<'tcx, Tag>(
    name: &str,
    bits: u128,
    kind: Primitive,
) -> InterpResult<'tcx, Scalar<Tag>> {
    let size = match kind {
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, bits),
    };
    // Padding bits the `u128` has beyond the operand's real width. Shifting
    // left by `extra` moves the value to the top of the `u128` so that
    // byte/bit order operations act on the correct positions.
    let extra = 128 - size.bits() as u128;
    let bits_out = match name {
        "ctpop" => bits.count_ones() as u128,
        // `leading_zeros` counts across all 128 bits; drop the padding's share.
        "ctlz" => bits.leading_zeros() as u128 - extra,
        // Shifting left adds `extra` trailing zeros, subtracted back out.
        "cttz" => (bits << extra).trailing_zeros() as u128 - extra,
        "bswap" => (bits << extra).swap_bytes(),
        "bitreverse" => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
    };
    Ok(Scalar::from_uint(bits_out, size))
}
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, &'tcx ty::Const<'tcx>> {
    // Every nullary intrinsic takes the type it operates on as its single
    // generic argument.
    let tp_ty = substs.type_at(0);
    let name = &*tcx.item_name(def_id).as_str();
    Ok(match name {
        "type_name" => {
            // The rendered name lives in an interned allocation, returned as a
            // `&'static str` slice constant.
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            tcx.mk_const(ty::Const {
                val: ty::ConstKind::Value(ConstValue::Slice {
                    data: alloc,
                    start: 0,
                    end: alloc.len(),
                }),
                ty: tcx.mk_static_str(),
            })
        },
        "needs_drop" => ty::Const::from_bool(tcx, tp_ty.needs_drop(tcx, param_env)),
        "size_of" |
        "min_align_of" |
        "pref_align_of" => {
            // These require the monomorphized layout; layout errors are
            // surfaced as interpretation errors.
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            let n = match name {
                "pref_align_of" => layout.align.pref.bytes(),
                "min_align_of" => layout.align.abi.bytes(),
                "size_of" => layout.size.bytes(),
                _ => bug!(),
            };
            ty::Const::from_usize(tcx, n)
        },
        "type_id" => ty::Const::from_bits(
            tcx,
            tcx.type_id_hash(tp_ty).into(),
            param_env.and(tcx.types.u64),
        ),
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Emulates a call to an intrinsic whose semantics are shared by CTFE and
/// Miri, writing the result into `dest`.
///
/// Returns `true` if emulation happened; `false` means the intrinsic is not
/// handled here (diverging intrinsics — `dest == None` — are never handled).
pub fn emulate_intrinsic(
    &mut self,
    span: Span,
    instance: ty::Instance<'tcx>,
    args: &[OpTy<'tcx, M::PointerTag>],
    dest: Option<PlaceTy<'tcx, M::PointerTag>>,
) -> InterpResult<'tcx, bool> {
    let substs = instance.substs;
    // We currently do not handle any diverging intrinsics.
    let dest = match dest {
        Some(dest) => dest,
        None => return Ok(false)
    };
    let intrinsic_name = &*self.tcx.item_name(instance.def_id()).as_str();
    match intrinsic_name {
        "caller_location" => {
            // Walk out of macro expansions so the reported location is the
            // expansion cause, falling back to `span` itself.
            let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
            let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
            let location = self.alloc_caller_location(
                Symbol::intern(&caller.file.name.to_string()),
                caller.line as u32,
                // Columns are reported 1-based.
                caller.col_display as u32 + 1,
            )?;
            self.write_scalar(location.ptr, dest)?;
        }
        "min_align_of" |
        "pref_align_of" |
        "needs_drop" |
        "size_of" |
        "type_id" |
        "type_name" => {
            // Nullary intrinsics: evaluated as constants
            // (see `eval_nullary_intrinsic`) and copied into `dest`.
            let gid = GlobalId {
                instance,
                promoted: None,
            };
            let val = self.tcx.const_eval(self.param_env.and(gid))?;
            let val = self.eval_const_to_op(val, None)?;
            self.copy_op(val, dest)?;
        }
        | "ctpop"
        | "cttz"
        | "cttz_nonzero"
        | "ctlz"
        | "ctlz_nonzero"
        | "bswap"
        | "bitreverse" => {
            let ty = substs.type_at(0);
            let layout_of = self.layout_of(ty)?;
            let val = self.read_scalar(args[0])?.not_undef()?;
            let bits = self.force_bits(val, layout_of.size)?;
            let kind = match layout_of.abi {
                ty::layout::Abi::Scalar(ref scalar) => scalar.value,
                _ => throw_unsup!(TypeNotPrimitive(ty)),
            };
            let out_val = if intrinsic_name.ends_with("_nonzero") {
                // The `_nonzero` variants are UB on a zero input.
                if bits == 0 {
                    throw_ub_format!("`{}` called on 0", intrinsic_name);
                }
                numeric_intrinsic(intrinsic_name.trim_end_matches("_nonzero"), bits, kind)?
            } else {
                numeric_intrinsic(intrinsic_name, bits, kind)?
            };
            self.write_scalar(out_val, dest)?;
        }
        | "wrapping_add"
        | "wrapping_sub"
        | "wrapping_mul"
        | "add_with_overflow"
        | "sub_with_overflow"
        | "mul_with_overflow" => {
            let lhs = self.read_immediate(args[0])?;
            let rhs = self.read_immediate(args[1])?;
            let (bin_op, ignore_overflow) = match intrinsic_name {
                "wrapping_add" => (BinOp::Add, true),
                "wrapping_sub" => (BinOp::Sub, true),
                "wrapping_mul" => (BinOp::Mul, true),
                "add_with_overflow" => (BinOp::Add, false),
                "sub_with_overflow" => (BinOp::Sub, false),
                "mul_with_overflow" => (BinOp::Mul, false),
                _ => bug!("Already checked for int ops")
            };
            if ignore_overflow {
                // `wrapping_*`: discard the overflow flag.
                self.binop_ignore_overflow(bin_op, lhs, rhs, dest)?;
            } else {
                // `*_with_overflow`: the overflow flag is part of the result.
                self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
            }
        }
        "saturating_add" | "saturating_sub" => {
            let l = self.read_immediate(args[0])?;
            let r = self.read_immediate(args[1])?;
            let is_add = intrinsic_name == "saturating_add";
            let (val, overflowed, _ty) = self.overflowing_binary_op(if is_add {
                BinOp::Add
            } else {
                BinOp::Sub
            }, l, r)?;
            // On overflow, clamp to the type's extremum; otherwise keep the
            // wrapped result (which is exact when no overflow occurred).
            let val = if overflowed {
                let num_bits = l.layout.size.bits();
                if l.layout.abi.is_signed() {
                    // For signed ints the saturated value depends on the sign of the first
                    // term since the sign of the second term can be inferred from this and
                    // the fact that the operation has overflowed (if either is 0 no
                    // overflow can occur)
                    let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
                    let first_term_positive = first_term & (1 << (num_bits-1)) == 0;
                    if first_term_positive {
                        // Negative overflow not possible since the positive first term
                        // can only increase an (in range) negative term for addition
                        // or corresponding negated positive term for subtraction
                        Scalar::from_uint((1u128 << (num_bits - 1)) - 1, // max positive
                            Size::from_bits(num_bits))
                    } else {
                        // Positive overflow not possible for similar reason
                        // max negative
                        Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                    }
                } else { // unsigned
                    if is_add {
                        // max unsigned
                        Scalar::from_uint(u128::max_value() >> (128 - num_bits),
                            Size::from_bits(num_bits))
                    } else { // underflow to 0
                        Scalar::from_uint(0u128, Size::from_bits(num_bits))
                    }
                }
            } else {
                val
            };
            self.write_scalar(val, dest)?;
        }
        "unchecked_shl" | "unchecked_shr" => {
            let l = self.read_immediate(args[0])?;
            let r = self.read_immediate(args[1])?;
            let bin_op = match intrinsic_name {
                "unchecked_shl" => BinOp::Shl,
                "unchecked_shr" => BinOp::Shr,
                _ => bug!("Already checked for int ops")
            };
            let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
            if overflowed {
                // A shift amount >= the bit width is UB for the `unchecked_*`
                // variants, so report it instead of wrapping.
                let layout = self.layout_of(substs.type_at(0))?;
                let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
                throw_ub_format!("Overflowing shift by {} in `{}`", r_val, intrinsic_name);
            }
            self.write_scalar(val, dest)?;
        }
        "rotate_left" | "rotate_right" => {
            // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
            // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
            let layout = self.layout_of(substs.type_at(0))?;
            let val = self.read_scalar(args[0])?.not_undef()?;
            let val_bits = self.force_bits(val, layout.size)?;
            let raw_shift = self.read_scalar(args[1])?.not_undef()?;
            let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
            let width_bits = layout.size.bits() as u128;
            let shift_bits = raw_shift_bits % width_bits;
            // The outer `% width_bits` keeps the inverse shift in range when
            // `shift_bits == 0`.
            let inv_shift_bits = (width_bits - shift_bits) % width_bits;
            let result_bits = if intrinsic_name == "rotate_left" {
                (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
            } else {
                (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
            };
            let truncated_bits = self.truncate(result_bits, layout);
            let result = Scalar::from_uint(truncated_bits, layout.size);
            self.write_scalar(result, dest)?;
        }
        "ptr_offset_from" => {
            let isize_layout = self.layout_of(self.tcx.types.isize)?;
            let a = self.read_immediate(args[0])?.to_scalar()?;
            let b = self.read_immediate(args[1])?.to_scalar()?;
            // Special case: if both scalars are *equal integers*
            // and not NULL, we pretend there is an allocation of size 0 right there,
            // and their offset is 0. (There's never a valid object at NULL, making it an
            // exception from the exception.)
            // This is the dual to the special exception for offset-by-0
            // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
            if a.is_bits() && b.is_bits() {
                let a = a.to_machine_usize(self)?;
                let b = b.to_machine_usize(self)?;
                if a == b && a != 0 {
                    self.write_scalar(Scalar::from_int(0, isize_layout.size), dest)?;
                    return Ok(true);
                }
            }
            // General case: we need two pointers.
            let a = self.force_ptr(a)?;
            let b = self.force_ptr(b)?;
            if a.alloc_id != b.alloc_id {
                throw_ub_format!(
                    "ptr_offset_from cannot compute offset of pointers into different \
                    allocations.",
                );
            }
            let usize_layout = self.layout_of(self.tcx.types.usize)?;
            let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
            let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
            let (val, _overflowed, _ty) = self.overflowing_binary_op(
                BinOp::Sub, a_offset, b_offset,
            )?;
            let pointee_layout = self.layout_of(substs.type_at(0))?;
            let val = ImmTy::from_scalar(val, isize_layout);
            let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
            // The byte difference must be an exact multiple of the pointee
            // size; `exact_div` reports UB otherwise.
            self.exact_div(val, size, dest)?;
        }
        "transmute" => {
            self.copy_op_transmute(args[0], dest)?;
        }
        "simd_insert" => {
            let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
            let elem = args[2];
            let input = args[0];
            let (len, e_ty) = input.layout.ty.simd_size_and_type(self.tcx.tcx);
            assert!(
                index < len,
                "Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
                index, e_ty, len
            );
            assert_eq!(
                input.layout, dest.layout,
                "Return type `{}` must match vector type `{}`",
                dest.layout.ty, input.layout.ty
            );
            assert_eq!(
                elem.layout.ty, e_ty,
                "Scalar element type `{}` must match vector element type `{}`",
                elem.layout.ty, e_ty
            );
            // Copy the input vector element by element, substituting `elem`
            // at position `index`.
            for i in 0..len {
                let place = self.place_field(dest, i)?;
                let value = if i == index {
                    elem
                } else {
                    self.operand_field(input, i)?
                };
                self.copy_op(value, place)?;
            }
        }
        "simd_extract" => {
            let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
            let (len, e_ty) = args[0].layout.ty.simd_size_and_type(self.tcx.tcx);
            assert!(
                index < len,
                "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
                index, e_ty, len
            );
            assert_eq!(
                e_ty, dest.layout.ty,
                "Return type `{}` must match vector element type `{}`",
                dest.layout.ty, e_ty
            );
            self.copy_op(self.operand_field(args[0], index)?, dest)?;
        }
        _ => return Ok(false),
    }
    Ok(true)
}
/// "Intercept" a function call to a panic-related function
/// because we have something special to do for it.
/// Returns `true` if an intercept happened.
///
/// Both the `panic` and `begin_panic` lang items are handled; their argument
/// layouts differ (see the inline comments), but each path ends in
/// `throw_panic!` with the message and source location extracted from the
/// arguments.
pub fn hook_panic_fn(
    &mut self,
    instance: ty::Instance<'tcx>,
    args: &[OpTy<'tcx, M::PointerTag>],
    _dest: Option<PlaceTy<'tcx, M::PointerTag>>,
) -> InterpResult<'tcx, bool> {
    let def_id = instance.def_id();
    if Some(def_id) == self.tcx.lang_items().panic_fn() {
        // &'static str, &core::panic::Location { &'static str, u32, u32 }
        assert!(args.len() == 2);
        let msg_place = self.deref_operand(args[0])?;
        let msg = Symbol::intern(self.read_str(msg_place)?);
        let location = self.deref_operand(args[1])?;
        // Fields 0/1/2 of `Location`: file, line, column.
        let (file, line, col) = (
            self.mplace_field(location, 0)?,
            self.mplace_field(location, 1)?,
            self.mplace_field(location, 2)?,
        );
        let file_place = self.deref_operand(file.into())?;
        let file = Symbol::intern(self.read_str(file_place)?);
        let line = self.read_scalar(line.into())?.to_u32()?;
        let col = self.read_scalar(col.into())?.to_u32()?;
        throw_panic!(Panic { msg, file, line, col })
    } else if Some(def_id) == self.tcx.lang_items().begin_panic_fn() {
        assert!(args.len() == 2);
        // &'static str, &(&'static str, u32, u32)
        let msg = args[0];
        let place = self.deref_operand(args[1])?;
        // Tuple fields 0/1/2: file, line, column.
        let (file, line, col) = (
            self.mplace_field(place, 0)?,
            self.mplace_field(place, 1)?,
            self.mplace_field(place, 2)?,
        );
        let msg_place = self.deref_operand(msg.into())?;
        let msg = Symbol::intern(self.read_str(msg_place)?);
        let file_place = self.deref_operand(file.into())?;
        let file = Symbol::intern(self.read_str(file_place)?);
        let line = self.read_scalar(line.into())?.to_u32()?;
        let col = self.read_scalar(col.into())?.to_u32()?;
        throw_panic!(Panic { msg, file, line, col })
    } else {
        return Ok(false);
    }
}
/// Evaluates the `exact_div` intrinsic: `a / b`, writing the quotient to
/// `dest`, with UB reported for an inexact division or `MIN / -1`. A zero
/// divisor is caught when the remainder is evaluated.
pub fn exact_div(
    &mut self,
    a: ImmTy<'tcx, M::PointerTag>,
    b: ImmTy<'tcx, M::PointerTag>,
    dest: PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
    // Performs an exact division, resulting in undefined behavior where
    // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`.
    // First, check x % y != 0.
    if self.binary_op(BinOp::Rem, a, b)?.to_bits()? != 0 {
        // Then, check if `b` is -1, which is the "min_value / -1" case.
        let minus1 = Scalar::from_int(-1, dest.layout.size);
        let b = b.to_scalar().unwrap();
        if b == minus1 {
            throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
        } else {
            throw_ub_format!(
                "exact_div: {} cannot be divided by {} without remainder",
                a.to_scalar().unwrap(),
                b,
            )
        }
    }
    // The division itself can no longer fail for the checked reasons above.
    self.binop_ignore_overflow(BinOp::Div, a, b, dest)
}
}
| 43.47235 | 99 | 0.485822 |
87b5d5ca856cd0650c7bbb542ccb0ac000d30118
| 463 |
/// A one-shot countdown timer measured in seconds.
pub struct Timer {
    // Seconds left; never negative (clamped at construction and on tick).
    remaining: f32,
}

impl Timer {
    /// Creates a timer that finishes after `duration` seconds.
    ///
    /// A zero or negative duration yields a timer that is already done
    /// (previously a negative duration only read as done after the first
    /// `tick`, because the clamp happened there).
    pub fn new(duration: f32) -> Timer {
        Timer {
            remaining: duration.max(0.),
        }
    }

    /// Advances the timer by `dt` seconds, saturating at zero.
    pub fn tick(&mut self, dt: f32) {
        self.remaining = (self.remaining - dt).max(0.);
    }

    /// Advances the timer by the current frame's delta time from tetra.
    pub fn tetra_tick(&mut self, ctx: &tetra::Context) {
        self.tick(tetra::time::get_delta_time(ctx).as_secs_f32())
    }

    /// Returns `true` once the countdown has reached zero.
    pub fn done(&self) -> bool {
        // `<=` instead of exact float equality: defensive, in case `remaining`
        // is ever driven below zero by means other than `tick`'s clamp.
        self.remaining <= 0.
    }
}
| 19.291667 | 65 | 0.533477 |
e6f85c08d1246cf06ee0f88ad9935b49cbf83262
| 2,844 |
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for "weak linkage" to symbols on Unix
//!
//! Some I/O operations we do in libstd require newer versions of OSes but we
//! need to maintain binary compatibility with older releases for now. In order
//! to use the new functionality when available we use this module for
//! detection.
//!
//! One option to use here is weak linkage, but that is unfortunately only
//! really workable on Linux. Hence, use dlsym to get the symbol value at
//! runtime. This is also done for compatibility with older versions of glibc,
//! and to avoid creating dependencies on GLIBC_PRIVATE symbols. It assumes that
//! we've been dynamically linked to the library the symbol comes from, but that
//! is currently always the case for things like libpthread/libc.
//!
//! A long time ago this used weak linkage for the __pthread_get_minstack
//! symbol, but that caused Debian to detect an unnecessarily strict versioned
//! dependency on libc6 (#23628).
use libc;
use ffi::CString;
use marker;
use mem;
use sync::atomic::{AtomicUsize, Ordering};
// Declares a `static` `Weak<fn-type>` named after the symbol (the name is
// stringified for the dlsym lookup), so each symbol is resolved at most once
// per process.
macro_rules! weak {
    (fn $name:ident($($t:ty),*) -> $ret:ty) => (
        static $name: ::sys::weak::Weak<unsafe extern fn($($t),*) -> $ret> =
            ::sys::weak::Weak::new(stringify!($name));
    )
}
/// A lazily-resolved dynamic symbol of (function pointer) type `F`.
///
/// `addr` encodes three states: `1` = not looked up yet (the initial value),
/// `0` = symbol absent, any other value = the symbol's address.
pub struct Weak<F> {
    name: &'static str,
    addr: AtomicUsize,
    _marker: marker::PhantomData<F>,
}

impl<F> Weak<F> {
    pub const fn new(name: &'static str) -> Weak<F> {
        Weak {
            name: name,
            addr: AtomicUsize::new(1),
            _marker: marker::PhantomData,
        }
    }

    /// Returns a reference to the resolved function, or `None` if the symbol
    /// is not available.
    pub fn get(&self) -> Option<&F> {
        // `F` must be pointer-sized (i.e. a function pointer) for the
        // transmute below to be meaningful.
        assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
        unsafe {
            if self.addr.load(Ordering::SeqCst) == 1 {
                // First call: resolve via dlsym; `fetch` stores 0 when absent.
                self.addr.store(fetch(self.name), Ordering::SeqCst);
            }
            if self.addr.load(Ordering::SeqCst) == 0 {
                None
            } else {
                // Reinterpret the reference-to-storage as `Some(&F)`: the
                // returned `&F` points at `self.addr`'s cell, which holds the
                // function's address. A reference is never null, so this is
                // always `Some`; the zero check above guarantees the stored
                // value is a real symbol address before it can be called.
                mem::transmute::<&AtomicUsize, Option<&F>>(&self.addr)
            }
        }
    }
}
/// Looks up `name` with `dlsym`, returning the symbol's address, or 0 if the
/// name contains a NUL byte, the handle can't be obtained, or the symbol is
/// missing.
unsafe fn fetch(name: &str) -> usize {
    let name = match CString::new(name) {
        Ok(cstr) => cstr,
        Err(..) => return 0,
    };
    // A null filename asks for a handle to the global symbol scope of the
    // running process (per the dlopen specification).
    let lib = libc::dlopen(0 as *const _, libc::RTLD_LAZY);
    if lib.is_null() {
        return 0
    }
    let ret = libc::dlsym(lib, name.as_ptr()) as usize;
    libc::dlclose(lib);
    return ret
}
| 33.069767 | 80 | 0.627637 |
61aead6c620a316850d4ebf271215cc2e4f3e799
| 428 |
extern crate mpd;
mod helpers;
use helpers::connect;
#[test]
/// Creating a sticker and then getting that sticker returns the value that was set.
// NOTE(review): `connect()` comes from the `helpers` module; this is an
// integration test that presumably needs a running MPD server with an
// `empty.flac` song in its database — confirm against the test harness setup.
fn set_sticker() {
    let mut mpd = connect();
    static VALUE: &'static str = "value";
    mpd.set_sticker("song", "empty.flac", "test_sticker", VALUE).unwrap();
    // Read back through the independent getter, not a cached value.
    let sticker = mpd.sticker("song", "empty.flac", "test_sticker").unwrap();
    assert_eq!(sticker, VALUE);
}
| 23.777778 | 84 | 0.670561 |
bbca98e76961d0a90e4f23088008e54361204dcb
| 8,664 |
use super::GlobalTransform;
use bevy_ecs::{component::Component, reflect::ReflectComponent};
use bevy_math::{const_vec3, Mat3, Mat4, Quat, Vec3};
use bevy_reflect::prelude::*;
use bevy_reflect::Reflect;
use std::ops::Mul;
/// Describe the position of an entity. If the entity has a parent, the position is relative
/// to its parent position.
///
/// * To place or move an entity, you should set its [`Transform`].
/// * To get the global position of an entity, you should get its [`GlobalTransform`].
/// * To be displayed, an entity must have both a [`Transform`] and a [`GlobalTransform`].
/// * You may use the [`TransformBundle`](crate::TransformBundle) to guarantee this.
///
/// ## [`Transform`] and [`GlobalTransform`]
///
/// [`Transform`] is the position of an entity relative to its parent position, or the reference
/// frame if it doesn't have a [`Parent`](bevy_hierarchy::Parent).
///
/// [`GlobalTransform`] is the position of an entity relative to the reference frame.
///
/// [`GlobalTransform`] is updated from [`Transform`] in the system
/// [`transform_propagate_system`](crate::transform_propagate_system).
///
/// This system runs in stage [`CoreStage::PostUpdate`](crate::CoreStage::PostUpdate). If you
/// update the[`Transform`] of an entity in this stage or after, you will notice a 1 frame lag
/// before the [`GlobalTransform`] is updated.
#[derive(Component, Debug, PartialEq, Clone, Copy, Reflect)]
#[reflect(Component, Default, PartialEq)]
pub struct Transform {
    /// Position of the entity. In 2d, the last value of the `Vec3` is used for z-ordering.
    pub translation: Vec3,
    /// Rotation of the entity, as a quaternion.
    pub rotation: Quat,
    /// Scale of the entity, per axis (`Vec3::ONE` means unscaled).
    pub scale: Vec3,
}
impl Transform {
/// Creates a new [`Transform`] at the position `(x, y, z)`. In 2d, the `z` component
/// is used for z-ordering elements: higher `z`-value will be in front of lower
/// `z`-value.
#[inline]
pub const fn from_xyz(x: f32, y: f32, z: f32) -> Self {
    // `const_vec3!` keeps this usable in const contexts.
    Self::from_translation(const_vec3!([x, y, z]))
}
/// Creates a new identity [`Transform`], with no translation, rotation, and a scale of 1 on
/// all axes.
#[inline]
pub const fn identity() -> Self {
    Transform {
        translation: Vec3::ZERO,
        rotation: Quat::IDENTITY,
        scale: Vec3::ONE,
    }
}
/// Extracts the translation, rotation, and scale from `matrix`. It must be a 3d affine
/// transformation matrix.
#[inline]
pub fn from_matrix(matrix: Mat4) -> Self {
    // glam decomposes the affine matrix into its scale/rotation/translation parts.
    let (scale, rotation, translation) = matrix.to_scale_rotation_translation();
    Transform {
        translation,
        rotation,
        scale,
    }
}
/// Creates a new [`Transform`], with `translation`. Rotation will be 0 and scale 1 on
/// all axes.
#[inline]
pub const fn from_translation(translation: Vec3) -> Self {
    // All other components come from the identity transform.
    Transform {
        translation,
        ..Self::identity()
    }
}
/// Creates a new [`Transform`], with `rotation`. Translation will be 0 and scale 1 on
/// all axes.
#[inline]
pub const fn from_rotation(rotation: Quat) -> Self {
    Transform {
        rotation,
        ..Self::identity()
    }
}
/// Creates a new [`Transform`], with `scale`. Translation will be 0 and rotation 0 on
/// all axes.
#[inline]
pub const fn from_scale(scale: Vec3) -> Self {
    Transform {
        scale,
        ..Self::identity()
    }
}
/// Updates and returns this [`Transform`] by rotating it so that its unit vector in the
/// local z direction is toward `target` and its unit vector in the local y direction
/// is toward `up`. (Consuming, chainable variant of `look_at`.)
#[inline]
#[must_use]
pub fn looking_at(mut self, target: Vec3, up: Vec3) -> Self {
    self.look_at(target, up);
    self
}
/// Returns this [`Transform`] with a new translation.
#[inline]
#[must_use]
pub const fn with_translation(mut self, translation: Vec3) -> Self {
    self.translation = translation;
    self
}
/// Returns this [`Transform`] with a new rotation.
#[inline]
#[must_use]
pub const fn with_rotation(mut self, rotation: Quat) -> Self {
    self.rotation = rotation;
    self
}
/// Returns this [`Transform`] with a new scale.
#[inline]
#[must_use]
pub const fn with_scale(mut self, scale: Vec3) -> Self {
    self.scale = scale;
    self
}
/// Returns the 3d affine transformation matrix from this transforms translation,
/// rotation, and scale.
#[inline]
pub fn compute_matrix(&self) -> Mat4 {
    Mat4::from_scale_rotation_translation(self.scale, self.rotation, self.translation)
}
/// Get the unit vector in the local x direction.
#[inline]
pub fn local_x(&self) -> Vec3 {
    // Rotate the world x axis into this transform's frame.
    self.rotation * Vec3::X
}
/// Equivalent to [`-local_x()`][Transform::local_x()]
#[inline]
pub fn left(&self) -> Vec3 {
    -self.local_x()
}
/// Equivalent to [`local_x()`][Transform::local_x()]
#[inline]
pub fn right(&self) -> Vec3 {
    self.local_x()
}
/// Get the unit vector in the local y direction.
#[inline]
pub fn local_y(&self) -> Vec3 {
    self.rotation * Vec3::Y
}
/// Equivalent to [`local_y()`][Transform::local_y]
#[inline]
pub fn up(&self) -> Vec3 {
    self.local_y()
}
/// Equivalent to [`-local_y()`][Transform::local_y]
#[inline]
pub fn down(&self) -> Vec3 {
    -self.local_y()
}
/// Get the unit vector in the local z direction.
#[inline]
pub fn local_z(&self) -> Vec3 {
    self.rotation * Vec3::Z
}
/// Equivalent to [`-local_z()`][Transform::local_z]
/// (forward is the negative local z axis by convention here).
#[inline]
pub fn forward(&self) -> Vec3 {
    -self.local_z()
}
/// Equivalent to [`local_z()`][Transform::local_z]
#[inline]
pub fn back(&self) -> Vec3 {
    self.local_z()
}
/// Rotates the transform by the given rotation.
#[inline]
pub fn rotate(&mut self, rotation: Quat) {
    // Pre-multiplication: `rotation` is applied on top of the current
    // rotation (i.e. in the parent's frame, not the local one).
    self.rotation = rotation * self.rotation;
}
/// Rotates this [`Transform`] around a point in space.
/// If the point is a zero vector, this will rotate around the parent (if any) or the origin.
#[inline]
pub fn rotate_around(&mut self, point: Vec3, rotation: Quat) {
self.translation = point + rotation * (self.translation - point);
self.rotation *= rotation;
}
/// Multiplies `self` with `transform` component by component, returning the
/// resulting [`Transform`]
#[inline]
#[must_use]
pub fn mul_transform(&self, transform: Transform) -> Self {
let translation = self.mul_vec3(transform.translation);
let rotation = self.rotation * transform.rotation;
let scale = self.scale * transform.scale;
Transform {
translation,
rotation,
scale,
}
}
/// Returns a [`Vec3`] of this [`Transform`] applied to `value`.
#[inline]
pub fn mul_vec3(&self, mut value: Vec3) -> Vec3 {
value = self.scale * value;
value = self.rotation * value;
value += self.translation;
value
}
/// Changes the `scale` of this [`Transform`], multiplying the current `scale` by
/// `scale_factor`.
#[inline]
pub fn apply_non_uniform_scale(&mut self, scale_factor: Vec3) {
self.scale *= scale_factor;
}
/// Rotates this [`Transform`] so that its local z direction is toward
/// `target` and its local y direction is toward `up`.
#[inline]
pub fn look_at(&mut self, target: Vec3, up: Vec3) {
let forward = Vec3::normalize(self.translation - target);
let right = up.cross(forward).normalize();
let up = forward.cross(right);
self.rotation = Quat::from_mat3(&Mat3::from_cols(right, up, forward));
}
}
impl Default for Transform {
fn default() -> Self {
Self::identity()
}
}
impl From<GlobalTransform> for Transform {
fn from(transform: GlobalTransform) -> Self {
Self {
translation: transform.translation,
rotation: transform.rotation,
scale: transform.scale,
}
}
}
impl Mul<Transform> for Transform {
type Output = Transform;
fn mul(self, transform: Transform) -> Self::Output {
self.mul_transform(transform)
}
}
impl Mul<Vec3> for Transform {
type Output = Vec3;
fn mul(self, value: Vec3) -> Self::Output {
self.mul_vec3(value)
}
}
| 30.4 | 97 | 0.602378 |
231500b5045779a82ad2b547af871fcbb8897d5f
| 13,799 |
extern crate openssl;
extern crate openssl_probe;
use self::openssl::error::ErrorStack;
use self::openssl::hash::MessageDigest;
use self::openssl::nid::Nid;
use self::openssl::pkcs12::Pkcs12;
use self::openssl::pkey::PKey;
use self::openssl::ssl::{
self, MidHandshakeSslStream, SslAcceptor, SslConnector, SslContextBuilder, SslMethod,
SslVerifyMode,
};
use self::openssl::x509::{store::X509StoreBuilder, X509VerifyResult, X509};
use std::error;
use std::fmt;
use std::io;
use std::sync::Once;
use self::openssl::pkey::Private;
use {Protocol, TlsAcceptorBuilder, TlsConnectorBuilder};
#[cfg(have_min_max_version)]
fn supported_protocols(
    min: Option<Protocol>,
    max: Option<Protocol>,
    ctx: &mut SslContextBuilder,
) -> Result<(), ErrorStack> {
    use self::openssl::ssl::SslVersion;

    // Translate the crate's protocol enum into OpenSSL's version enum.
    let cvt = |p| match p {
        Protocol::Sslv3 => SslVersion::SSL3,
        Protocol::Tlsv10 => SslVersion::TLS1,
        Protocol::Tlsv11 => SslVersion::TLS1_1,
        Protocol::Tlsv12 => SslVersion::TLS1_2,
        Protocol::Tlsv13 => SslVersion::TLS1_3,
        Protocol::__NonExhaustive => unreachable!(),
    };

    // `None` clears the corresponding bound on the context.
    ctx.set_min_proto_version(min.map(cvt))?;
    ctx.set_max_proto_version(max.map(cvt))?;
    Ok(())
}
#[cfg(not(have_min_max_version))]
fn supported_protocols(
    min: Option<Protocol>,
    max: Option<Protocol>,
    ctx: &mut SslContextBuilder,
) -> Result<(), ErrorStack> {
    use self::openssl::ssl::SslOptions;
    // Fallback for OpenSSL builds without set_min/max_proto_version: emulate
    // the [min, max] bounds by disabling every protocol version outside them.
    let no_ssl_mask = SslOptions::NO_SSLV2
        | SslOptions::NO_SSLV3
        | SslOptions::NO_TLSV1
        | SslOptions::NO_TLSV1_1
        | SslOptions::NO_TLSV1_2
        | SslOptions::NO_TLSV1_3;
    // Start from a clean slate so previously-set version restrictions don't leak in.
    ctx.clear_options(no_ssl_mask);
    let mut options = SslOptions::empty();
    // Disable every version strictly older than `min` (None = no lower bound).
    options |= match min {
        None => SslOptions::empty(),
        Some(Protocol::Sslv3) => SslOptions::NO_SSLV2,
        Some(Protocol::Tlsv10) => SslOptions::NO_SSLV2 | SslOptions::NO_SSLV3,
        Some(Protocol::Tlsv11) => {
            SslOptions::NO_SSLV2 | SslOptions::NO_SSLV3 | SslOptions::NO_TLSV1
        }
        Some(Protocol::Tlsv12) => {
            SslOptions::NO_SSLV2
                | SslOptions::NO_SSLV3
                | SslOptions::NO_TLSV1
                | SslOptions::NO_TLSV1_1
        }
        Some(Protocol::Tlsv13) => {
            SslOptions::NO_SSLV2
                | SslOptions::NO_SSLV3
                | SslOptions::NO_TLSV1
                | SslOptions::NO_TLSV1_1
                | SslOptions::NO_TLSV1_2
        }
        Some(Protocol::__NonExhaustive) => unreachable!(),
    };
    // Disable every version strictly newer than `max` (None/TLS1.3 = no upper bound).
    options |= match max {
        None | Some(Protocol::Tlsv13) => SslOptions::empty(),
        Some(Protocol::Tlsv12) => SslOptions::NO_TLSV1_3,
        Some(Protocol::Tlsv11) => SslOptions::NO_TLSV1_2 | SslOptions::NO_TLSV1_3,
        Some(Protocol::Tlsv10) => {
            SslOptions::NO_TLSV1_1 | SslOptions::NO_TLSV1_2 | SslOptions::NO_TLSV1_3
        }
        Some(Protocol::Sslv3) => {
            SslOptions::NO_TLSV1 | SslOptions::NO_TLSV1_1 | SslOptions::NO_TLSV1_2 |
            SslOptions::NO_TLSV1_3
        }
        Some(Protocol::__NonExhaustive) => unreachable!(),
    };
    ctx.set_options(options);
    Ok(())
}
fn init_trust() {
    // Probe the system for CA certificate locations exactly once per process.
    static ONCE: Once = Once::new();
    ONCE.call_once(|| openssl_probe::init_ssl_cert_env_vars());
}
#[cfg(target_os = "android")]
fn load_android_root_certs(connector: &mut SslContextBuilder) -> Result<(), Error> {
    use std::fs;

    // Android exposes its system trust store as one PEM file per certificate.
    // Unreadable entries and unparsable files are silently skipped; only
    // failures to add an otherwise valid cert are logged.
    if let Ok(dir) = fs::read_dir("/system/etc/security/cacerts") {
        for entry in dir.flatten() {
            let pem = match fs::read(entry.path()) {
                Ok(bytes) => bytes,
                Err(_) => continue,
            };
            if let Ok(cert) = X509::from_pem(&pem) {
                if let Err(err) = connector.cert_store_mut().add_cert(cert) {
                    debug!("load_android_root_certs error: {:?}", err);
                }
            }
        }
    }
    Ok(())
}
/// Errors surfaced by the OpenSSL backend.
#[derive(Debug)]
pub enum Error {
    /// A raw OpenSSL error stack (setup or parameter failures).
    Normal(ErrorStack),
    /// An SSL-level failure, paired with the certificate verification result.
    Ssl(ssl::Error, X509VerifyResult),
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
Error::Normal(ref e) => error::Error::source(e),
Error::Ssl(ref e, _) => error::Error::source(e),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Normal(ref e) => fmt::Display::fmt(e, fmt),
Error::Ssl(ref e, X509VerifyResult::OK) => fmt::Display::fmt(e, fmt),
Error::Ssl(ref e, v) => write!(fmt, "{} ({})", e, v),
}
}
}
impl From<ErrorStack> for Error {
fn from(err: ErrorStack) -> Error {
Error::Normal(err)
}
}
/// A private key plus its leaf certificate and extra chain certificates.
#[derive(Clone)]
pub struct Identity {
    // Private key matching `cert`.
    pkey: PKey<Private>,
    // Leaf certificate presented to the peer.
    cert: X509,
    // Additional chain certificates from the PKCS#12 archive (may be empty).
    chain: Vec<X509>,
}
impl Identity {
    /// Parses a DER-encoded PKCS#12 archive, decrypting it with `pass`.
    pub fn from_pkcs12(buf: &[u8], pass: &str) -> Result<Identity, Error> {
        let parsed = Pkcs12::from_der(buf)?.parse(pass)?;
        // `parsed.chain` is an Option<Stack<X509>>; flatten it to a plain Vec.
        let chain = parsed.chain.into_iter().flatten().collect();
        Ok(Identity {
            pkey: parsed.pkey,
            cert: parsed.cert,
            chain,
        })
    }
}
/// An X509 certificate (thin wrapper over the OpenSSL type).
#[derive(Clone)]
pub struct Certificate(X509);
impl Certificate {
    /// Parses a DER-encoded certificate.
    pub fn from_der(buf: &[u8]) -> Result<Certificate, Error> {
        Ok(Certificate(X509::from_der(buf)?))
    }
    /// Parses a PEM-encoded certificate.
    pub fn from_pem(buf: &[u8]) -> Result<Certificate, Error> {
        Ok(Certificate(X509::from_pem(buf)?))
    }
    /// Serializes the certificate back to DER.
    pub fn to_der(&self) -> Result<Vec<u8>, Error> {
        Ok(self.0.to_der()?)
    }
}
/// A TLS stream whose handshake returned `WouldBlock` and can be resumed.
pub struct MidHandshakeTlsStream<S>(MidHandshakeSslStream<S>);
impl<S: fmt::Debug> fmt::Debug for MidHandshakeTlsStream<S> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Forward to the inner mid-handshake stream's Debug output.
        fmt::Debug::fmt(&self.0, fmt)
    }
}
impl<S> MidHandshakeTlsStream<S> {
    /// Returns a shared reference to the inner transport stream.
    pub fn get_ref(&self) -> &S {
        self.0.get_ref()
    }
    /// Returns a mutable reference to the inner transport stream.
    pub fn get_mut(&mut self) -> &mut S {
        self.0.get_mut()
    }
}
impl<S> MidHandshakeTlsStream<S>
where
S: io::Read + io::Write,
{
pub fn handshake(self) -> Result<TlsStream<S>, HandshakeError<S>> {
match self.0.handshake() {
Ok(s) => Ok(TlsStream(s)),
Err(e) => Err(e.into()),
}
}
}
/// Outcome of a handshake attempt that did not complete successfully.
pub enum HandshakeError<S> {
    /// The handshake failed permanently.
    Failure(Error),
    /// The transport would block; the handshake can be resumed later.
    WouldBlock(MidHandshakeTlsStream<S>),
}
impl<S> From<ssl::HandshakeError<S>> for HandshakeError<S> {
    fn from(e: ssl::HandshakeError<S>) -> HandshakeError<S> {
        match e {
            ssl::HandshakeError::SetupFailure(e) => HandshakeError::Failure(e.into()),
            ssl::HandshakeError::Failure(e) => {
                // Capture the certificate verification result before the
                // mid-handshake state is consumed into the error.
                let v = e.ssl().verify_result();
                HandshakeError::Failure(Error::Ssl(e.into_error(), v))
            }
            ssl::HandshakeError::WouldBlock(s) => {
                HandshakeError::WouldBlock(MidHandshakeTlsStream(s))
            }
        }
    }
}
impl<S> From<ErrorStack> for HandshakeError<S> {
fn from(e: ErrorStack) -> HandshakeError<S> {
HandshakeError::Failure(e.into())
}
}
/// Client-side connector plus the policy flags applied per connection.
#[derive(Clone)]
pub struct TlsConnector {
    connector: SslConnector,
    // Whether to send the SNI extension (see `connect`).
    use_sni: bool,
    // When set, hostname verification is disabled.
    accept_invalid_hostnames: bool,
    // When set, certificate verification is disabled entirely.
    accept_invalid_certs: bool,
}
impl TlsConnector {
    /// Builds a connector from the cross-platform builder settings:
    /// client identity, protocol bounds, root certificates, and (optionally)
    /// ALPN protocols.
    pub fn new(builder: &TlsConnectorBuilder) -> Result<TlsConnector, Error> {
        init_trust();
        let mut connector = SslConnector::builder(SslMethod::tls())?;
        if let Some(ref identity) = builder.identity {
            connector.set_certificate(&identity.0.cert)?;
            connector.set_private_key(&identity.0.pkey)?;
            // NOTE(review): chain certs are added in reverse builder order —
            // presumably to match the ordering OpenSSL expects; confirm.
            for cert in identity.0.chain.iter().rev() {
                connector.add_extra_chain_cert(cert.to_owned())?;
            }
        }
        supported_protocols(builder.min_protocol, builder.max_protocol, &mut connector)?;
        // Replacing the cert store with an empty one drops the system roots.
        if builder.disable_built_in_roots {
            connector.set_cert_store(X509StoreBuilder::new()?.build());
        }
        // Extra roots are best-effort: failures are logged, not fatal.
        for cert in &builder.root_certificates {
            if let Err(err) = connector.cert_store_mut().add_cert((cert.0).0.clone()) {
                debug!("add_cert error: {:?}", err);
            }
        }
        #[cfg(feature = "alpn")]
        {
            if !builder.alpn.is_empty() {
                // Wire format is each alpn preceded by its length as a byte.
                let mut alpn_wire_format = Vec::with_capacity(
                    builder
                        .alpn
                        .iter()
                        .map(|s| s.as_bytes().len())
                        .sum::<usize>()
                        + builder.alpn.len(),
                );
                for alpn in builder.alpn.iter().map(|s| s.as_bytes()) {
                    alpn_wire_format.push(alpn.len() as u8);
                    alpn_wire_format.extend(alpn);
                }
                connector.set_alpn_protos(&alpn_wire_format)?;
            }
        }
        #[cfg(target_os = "android")]
        load_android_root_certs(&mut connector)?;
        Ok(TlsConnector {
            connector: connector.build(),
            use_sni: builder.use_sni,
            accept_invalid_hostnames: builder.accept_invalid_hostnames,
            accept_invalid_certs: builder.accept_invalid_certs,
        })
    }
    /// Performs the client side of a TLS handshake over `stream`, verifying
    /// the peer against `domain` unless the connector's flags disable it.
    pub fn connect<S>(&self, domain: &str, stream: S) -> Result<TlsStream<S>, HandshakeError<S>>
    where
        S: io::Read + io::Write,
    {
        let mut ssl = self
            .connector
            .configure()?
            .use_server_name_indication(self.use_sni)
            .verify_hostname(!self.accept_invalid_hostnames);
        if self.accept_invalid_certs {
            ssl.set_verify(SslVerifyMode::NONE);
        }
        let s = ssl.connect(domain, stream)?;
        Ok(TlsStream(s))
    }
}
// Manual impl: only the policy flags are shown; the underlying SslConnector
// has no useful Debug output.
impl fmt::Debug for TlsConnector {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("TlsConnector")
            // n.b. SslConnector is a newtype on SslContext which implements a noop Debug so it's omitted
            .field("use_sni", &self.use_sni)
            .field("accept_invalid_hostnames", &self.accept_invalid_hostnames)
            .field("accept_invalid_certs", &self.accept_invalid_certs)
            .finish()
    }
}
/// Server-side acceptor wrapping a fully configured OpenSSL acceptor.
#[derive(Clone)]
pub struct TlsAcceptor(SslAcceptor);
impl TlsAcceptor {
    /// Builds an acceptor from the builder's identity and protocol bounds.
    pub fn new(builder: &TlsAcceptorBuilder) -> Result<TlsAcceptor, Error> {
        // Start from Mozilla's "intermediate" preset configuration.
        let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls())?;
        acceptor.set_private_key(&builder.identity.0.pkey)?;
        acceptor.set_certificate(&builder.identity.0.cert)?;
        // NOTE(review): chain certs added in reverse builder order —
        // presumably to match OpenSSL's expected ordering; confirm.
        for cert in builder.identity.0.chain.iter().rev() {
            acceptor.add_extra_chain_cert(cert.to_owned())?;
        }
        supported_protocols(builder.min_protocol, builder.max_protocol, &mut acceptor)?;
        Ok(TlsAcceptor(acceptor.build()))
    }
    /// Performs the server side of a TLS handshake over `stream`.
    pub fn accept<S>(&self, stream: S) -> Result<TlsStream<S>, HandshakeError<S>>
    where
        S: io::Read + io::Write,
    {
        let s = self.0.accept(stream)?;
        Ok(TlsStream(s))
    }
}
/// An established TLS session over transport stream `S`.
pub struct TlsStream<S>(ssl::SslStream<S>);
impl<S> fmt::Debug for TlsStream<S>
where
    S: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Delegate to the inner SslStream's Debug representation.
        fmt::Debug::fmt(&self.0, fmt)
    }
}
impl<S> TlsStream<S> {
    /// Returns a shared reference to the underlying transport stream.
    pub fn get_ref(&self) -> &S {
        self.0.get_ref()
    }
    /// Returns a mutable reference to the underlying transport stream.
    pub fn get_mut(&mut self) -> &mut S {
        self.0.get_mut()
    }
}
impl<S: io::Read + io::Write> TlsStream<S> {
    /// Number of already-decrypted bytes buffered inside OpenSSL.
    pub fn buffered_read_size(&self) -> Result<usize, Error> {
        Ok(self.0.ssl().pending())
    }
    /// The peer's leaf certificate, if one was presented.
    pub fn peer_certificate(&self) -> Result<Option<Certificate>, Error> {
        Ok(self.0.ssl().peer_certificate().map(Certificate))
    }
    /// The ALPN protocol selected during the handshake, if any.
    #[cfg(feature = "alpn")]
    pub fn negotiated_alpn(&self) -> Result<Option<Vec<u8>>, Error> {
        Ok(self
            .0
            .ssl()
            .selected_alpn_protocol()
            .map(|alpn| alpn.to_vec()))
    }
    /// Computes the `tls-server-end-point` channel binding: a digest of the
    /// server certificate using the cert's own signature hash.
    pub fn tls_server_end_point(&self) -> Result<Option<Vec<u8>>, Error> {
        // The binding hashes our own certificate on the server side and the
        // peer's certificate on the client side.
        let cert = if self.0.ssl().is_server() {
            self.0.ssl().certificate().map(|x| x.to_owned())
        } else {
            self.0.ssl().peer_certificate()
        };
        let cert = match cert {
            Some(cert) => cert,
            None => return Ok(None),
        };
        let algo_nid = cert.signature_algorithm().object().nid();
        let signature_algorithms = match algo_nid.signature_algorithms() {
            Some(algs) => algs,
            None => return Ok(None),
        };
        // MD5 and SHA-1 are upgraded to SHA-256 (cf. RFC 5929 §4.1).
        let md = match signature_algorithms.digest {
            Nid::MD5 | Nid::SHA1 => MessageDigest::sha256(),
            nid => match MessageDigest::from_nid(nid) {
                Some(md) => md,
                None => return Ok(None),
            },
        };
        let digest = cert.digest(md)?;
        Ok(Some(digest.to_vec()))
    }
    /// Sends a TLS close_notify; a peer that already closed is not an error.
    pub fn shutdown(&mut self) -> io::Result<()> {
        match self.0.shutdown() {
            Ok(_) => Ok(()),
            // ZERO_RETURN means the peer already closed the session cleanly.
            Err(ref e) if e.code() == ssl::ErrorCode::ZERO_RETURN => Ok(()),
            Err(e) => Err(e
                .into_io_error()
                .unwrap_or_else(|e| io::Error::new(io::ErrorKind::Other, e))),
        }
    }
}
impl<S: io::Read + io::Write> io::Read for TlsStream<S> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Reads decrypted plaintext through the underlying SslStream.
        io::Read::read(&mut self.0, buf)
    }
}
impl<S: io::Read + io::Write> io::Write for TlsStream<S> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Encrypts and writes through the underlying SslStream.
        io::Write::write(&mut self.0, buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        io::Write::flush(&mut self.0)
    }
}
| 29.485043 | 105 | 0.56794 |
2310f096bff5e1e9ec8aacd15016e41800f76718
| 1,985 |
use srt_tokio::SrtSocketBuilder;
use bytes::Bytes;
use futures::prelude::*;
use log::info;
use std::time::{Duration, Instant};
/// Send a single packet, with a large tsbpd, then close. Make sure it gets delivered with the delay.
#[tokio::test]
async fn single_packet_tsbpd() {
    let _ = env_logger::try_init();
    // NOTE(review): sender asks for 5s latency, receiver for 2s; the timing
    // assertion below expects ~5s, presumably the larger of the two — confirm.
    let sender = SrtSocketBuilder::new_connect("127.0.0.1:3000")
        .latency(Duration::from_secs(5))
        .connect();
    let recvr = SrtSocketBuilder::new_listen()
        .local_port(3000)
        .latency(Duration::from_secs(2))
        .connect();
    // init the connection
    let (mut recvr, mut sender) = futures::try_join!(sender, recvr).unwrap();
    let recvr_fut = async move {
        let start = Instant::now();
        let (time, packet) = recvr
            .try_next()
            .await
            .unwrap()
            .expect("The receiver should've yielded an object");
        info!("Pack recvd");
        // should be around 5s later
        let delay_ms = start.elapsed().as_millis();
        assert!(
            delay_ms < 5500 && delay_ms > 4900,
            "Was not around 5s later, was {}ms",
            delay_ms
        );
        assert_eq!(&packet, "Hello World!");
        // The timestamp attached to the packet should match the send time.
        let expected_displacement = Duration::from_millis(5);
        let displacement = if start > time {
            start - time
        } else {
            time - start
        };
        assert!(displacement < expected_displacement,
            "TsbPd time calculated for the packet should be close to `start` time\nExpected: < {:?}\nActual: {:?}\n",
            expected_displacement, displacement);
        // the recvr should return None now
        assert!(recvr.next().await.is_none());
    };
    let sendr_fut = async move {
        sender
            .send((Instant::now(), Bytes::from("Hello World!")))
            .await
            .unwrap();
        sender.close().await.unwrap();
    };
    futures::join!(recvr_fut, sendr_fut);
}
| 28.357143 | 117 | 0.571285 |
71edec48d856fb3edb6a53a22da177db09e752df
| 5,612 |
use instruction_def::*;
use test::run_test;
use Operand::*;
use Reg::*;
use RegScale::*;
use RegType::*;
use {BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
// VEX.128 load form: VMASKMOVPS xmm1, xmm7, xmmword [ebx+ebx*2], 32-bit mode.
#[test]
fn vmaskmovps_1() {
    run_test(
        &Instruction {
            mnemonic: Mnemonic::VMASKMOVPS,
            operand1: Some(Direct(XMM1)),
            operand2: Some(Direct(XMM7)),
            operand3: Some(IndirectScaledIndexed(
                EBX,
                EBX,
                Two,
                Some(OperandSize::Xmmword),
                None,
            )),
            operand4: None,
            lock: false,
            rounding_mode: None,
            merge_mode: None,
            sae: false,
            mask: None,
            broadcast: None,
        },
        &[196, 226, 65, 44, 12, 91],
        OperandSize::Dword,
    )
}
// VEX.128 load form: VMASKMOVPS xmm4, xmm4, xmmword [rsi+rcx*4], 64-bit mode.
#[test]
fn vmaskmovps_2() {
    run_test(
        &Instruction {
            mnemonic: Mnemonic::VMASKMOVPS,
            operand1: Some(Direct(XMM4)),
            operand2: Some(Direct(XMM4)),
            operand3: Some(IndirectScaledIndexed(
                RSI,
                RCX,
                Four,
                Some(OperandSize::Xmmword),
                None,
            )),
            operand4: None,
            lock: false,
            rounding_mode: None,
            merge_mode: None,
            sae: false,
            mask: None,
            broadcast: None,
        },
        &[196, 226, 89, 44, 36, 142],
        OperandSize::Qword,
    )
}
// VEX.256 load form with a 32-bit displacement: VMASKMOVPS ymm0, ymm6, ymmword [eax+disp32].
#[test]
fn vmaskmovps_3() {
    run_test(
        &Instruction {
            mnemonic: Mnemonic::VMASKMOVPS,
            operand1: Some(Direct(YMM0)),
            operand2: Some(Direct(YMM6)),
            operand3: Some(IndirectDisplaced(
                EAX,
                1140633179,
                Some(OperandSize::Ymmword),
                None,
            )),
            operand4: None,
            lock: false,
            rounding_mode: None,
            merge_mode: None,
            sae: false,
            mask: None,
            broadcast: None,
        },
        &[196, 226, 77, 44, 128, 91, 174, 252, 67],
        OperandSize::Dword,
    )
}
// VEX.256 load form: VMASKMOVPS ymm0, ymm0, ymmword [rbx+rdx*4], 64-bit mode.
#[test]
fn vmaskmovps_4() {
    run_test(
        &Instruction {
            mnemonic: Mnemonic::VMASKMOVPS,
            operand1: Some(Direct(YMM0)),
            operand2: Some(Direct(YMM0)),
            operand3: Some(IndirectScaledIndexed(
                RBX,
                RDX,
                Four,
                Some(OperandSize::Ymmword),
                None,
            )),
            operand4: None,
            lock: false,
            rounding_mode: None,
            merge_mode: None,
            sae: false,
            mask: None,
            broadcast: None,
        },
        &[196, 226, 125, 44, 4, 147],
        OperandSize::Qword,
    )
}
// VEX.128 store form (memory destination): VMASKMOVPS xmmword [esi+eax*4], xmm2, xmm7.
#[test]
fn vmaskmovps_5() {
    run_test(
        &Instruction {
            mnemonic: Mnemonic::VMASKMOVPS,
            operand1: Some(IndirectScaledIndexed(
                ESI,
                EAX,
                Four,
                Some(OperandSize::Xmmword),
                None,
            )),
            operand2: Some(Direct(XMM2)),
            operand3: Some(Direct(XMM7)),
            operand4: None,
            lock: false,
            rounding_mode: None,
            merge_mode: None,
            sae: false,
            mask: None,
            broadcast: None,
        },
        &[196, 226, 105, 46, 60, 134],
        OperandSize::Dword,
    )
}
// VEX.128 store form with SIB + disp32: VMASKMOVPS xmmword [rdi+rdi*4+disp32], xmm6, xmm6.
#[test]
fn vmaskmovps_6() {
    run_test(
        &Instruction {
            mnemonic: Mnemonic::VMASKMOVPS,
            operand1: Some(IndirectScaledIndexedDisplaced(
                RDI,
                RDI,
                Four,
                876814899,
                Some(OperandSize::Xmmword),
                None,
            )),
            operand2: Some(Direct(XMM6)),
            operand3: Some(Direct(XMM6)),
            operand4: None,
            lock: false,
            rounding_mode: None,
            merge_mode: None,
            sae: false,
            mask: None,
            broadcast: None,
        },
        &[196, 226, 73, 46, 180, 191, 51, 34, 67, 52],
        OperandSize::Qword,
    )
}
// VEX.256 store form with disp32: VMASKMOVPS ymmword [ecx+disp32], ymm6, ymm2.
#[test]
fn vmaskmovps_7() {
    run_test(
        &Instruction {
            mnemonic: Mnemonic::VMASKMOVPS,
            operand1: Some(IndirectDisplaced(
                ECX,
                259507747,
                Some(OperandSize::Ymmword),
                None,
            )),
            operand2: Some(Direct(YMM6)),
            operand3: Some(Direct(YMM2)),
            operand4: None,
            lock: false,
            rounding_mode: None,
            merge_mode: None,
            sae: false,
            mask: None,
            broadcast: None,
        },
        &[196, 226, 77, 46, 145, 35, 198, 119, 15],
        OperandSize::Dword,
    )
}
// VEX.256 store form, scaled index with no base: VMASKMOVPS ymmword [rdx*4+disp32], ymm0, ymm3.
#[test]
fn vmaskmovps_8() {
    run_test(
        &Instruction {
            mnemonic: Mnemonic::VMASKMOVPS,
            operand1: Some(IndirectScaledDisplaced(
                RDX,
                Four,
                1532890708,
                Some(OperandSize::Ymmword),
                None,
            )),
            operand2: Some(Direct(YMM0)),
            operand3: Some(Direct(YMM3)),
            operand4: None,
            lock: false,
            rounding_mode: None,
            merge_mode: None,
            sae: false,
            mask: None,
            broadcast: None,
        },
        &[196, 226, 125, 46, 28, 149, 84, 14, 94, 91],
        OperandSize::Qword,
    )
}
| 25.165919 | 95 | 0.4469 |
3853443f10e4b63dac61acaceb648611c0b9312e
| 150 |
use once_cell::sync::Lazy;
use swc_common::{sync::Lrc, SourceMap};
/// Process-wide shared `SourceMap`, created lazily on first access.
pub(crate) static SOURCE_MAP: Lazy<Lrc<SourceMap>> = Lazy::new(Default::default);
| 30 | 81 | 0.733333 |
901bb4144d7fc6f593f16d44a2c3e786be0dc2e9
| 13,718 |
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
// #[PerformanceCriticalPath]
use crate::storage::kv::WriteData;
use crate::storage::lock_manager::LockManager;
use crate::storage::mvcc::{LockType, MvccTxn, SnapshotReader, TimeStamp, TxnCommitRecord};
use crate::storage::txn::commands::ReaderWithStats;
use crate::storage::txn::{
actions::check_txn_status::{collapse_prev_rollback, make_rollback},
commands::{
Command, CommandExt, ReleasedLocks, ResponsePolicy, TypedCommand, WriteCommand,
WriteContext, WriteResult,
},
Result,
};
use crate::storage::types::SecondaryLocksStatus;
use crate::storage::{ProcessResult, Snapshot};
use txn_types::{Key, Lock, WriteType};
command! {
    /// Check secondary locks of an async commit transaction.
    ///
    /// If all prewritten locks exist, the lock information is returned.
    /// Otherwise, it returns the commit timestamp of the transaction.
    ///
    /// If the lock does not exist or is a pessimistic lock, to prevent the
    /// status being changed, a rollback may be written.
    CheckSecondaryLocks:
        // cmd_ty: the payload type carried back in `ProcessResult` for this command.
        cmd_ty => SecondaryLocksStatus,
        // display: Display formatting used in logs and diagnostics.
        display => "kv::command::CheckSecondaryLocks {} keys@{} | {:?}", (keys.len, start_ts, ctx),
        content => {
            /// The keys of secondary locks.
            keys: Vec<Key>,
            /// The start timestamp of the transaction.
            start_ts: txn_types::TimeStamp,
        }
}
impl CommandExt for CheckSecondaryLocks {
    // All trait items are macro-generated from the command's fields: the
    // request context, metrics tag, start timestamp, write-size estimate and
    // latches over `keys` (macro-defined elsewhere in this crate).
    ctx!();
    tag!(check_secondary_locks);
    ts!(start_ts);
    write_bytes!(keys: multiple);
    gen_lock!(keys: multiple);
}
/// Outcome of checking a single secondary key.
#[derive(Debug, PartialEq)]
enum SecondaryLockStatus {
    /// The secondary lock is still present.
    Locked(Lock),
    /// The transaction committed at this timestamp.
    Committed(TimeStamp),
    /// The key's lock was rolled back (or never written).
    RolledBack,
}
impl<S: Snapshot, L: LockManager> WriteCommand<S, L> for CheckSecondaryLocks {
    fn process_write(self, snapshot: S, context: WriteContext<'_, L>) -> Result<WriteResult> {
        // It is not allowed for commit to overwrite a protected rollback. So we update max_ts
        // to prevent this case from happening.
        context.concurrency_manager.update_max_ts(self.start_ts);
        let mut txn = MvccTxn::new(self.start_ts, context.concurrency_manager);
        let mut reader = ReaderWithStats::new(
            SnapshotReader::new_with_ctx(self.start_ts, snapshot, &self.ctx),
            context.statistics,
        );
        let mut released_locks = ReleasedLocks::new(self.start_ts, TimeStamp::zero());
        let mut result = SecondaryLocksStatus::Locked(Vec::new());
        // Check each key in turn; stop early once any key proves the whole
        // transaction either committed or rolled back.
        for key in self.keys {
            let mut released_lock = None;
            let mut mismatch_lock = None;
            // Checks whether the given secondary lock exists.
            let (status, need_rollback, rollback_overlapped_write) = match reader.load_lock(&key)? {
                // The lock exists, the lock information is returned.
                Some(lock) if lock.ts == self.start_ts => {
                    if lock.lock_type == LockType::Pessimistic {
                        // A pessimistic lock cannot belong to a prewritten
                        // async-commit transaction: release it and roll back.
                        released_lock = txn.unlock_key(key.clone(), true);
                        let overlapped_write = reader.get_txn_commit_record(&key)?.unwrap_none();
                        (SecondaryLockStatus::RolledBack, true, overlapped_write)
                    } else {
                        (SecondaryLockStatus::Locked(lock), false, None)
                    }
                }
                // Searches the write CF for the commit record of the lock and returns the commit timestamp
                // (0 if the lock is not committed).
                l => {
                    mismatch_lock = l;
                    match reader.get_txn_commit_record(&key)? {
                        TxnCommitRecord::SingleRecord { commit_ts, write } => {
                            let status = if write.write_type != WriteType::Rollback {
                                SecondaryLockStatus::Committed(commit_ts)
                            } else {
                                SecondaryLockStatus::RolledBack
                            };
                            // We needn't write a rollback once there is a write record for it:
                            // If it's a committed record, it cannot be changed.
                            // If it's a rollback record, it either comes from another check_secondary_lock
                            // (thus protected) or the client stops commit actively. So we don't need
                            // to make it protected again.
                            (status, false, None)
                        }
                        TxnCommitRecord::OverlappedRollback { .. } => {
                            (SecondaryLockStatus::RolledBack, false, None)
                        }
                        TxnCommitRecord::None { overlapped_write } => {
                            (SecondaryLockStatus::RolledBack, true, overlapped_write)
                        }
                    }
                }
            };
            // If the lock does not exist or is a pessimistic lock, to prevent the
            // status being changed, a rollback may be written and this rollback
            // needs to be protected.
            if need_rollback {
                if let Some(l) = mismatch_lock {
                    txn.mark_rollback_on_mismatching_lock(&key, l, true);
                }
                // We must protect this rollback in case this rollback is collapsed and a stale
                // acquire_pessimistic_lock and prewrite succeed again.
                if let Some(write) = make_rollback(self.start_ts, true, rollback_overlapped_write) {
                    txn.put_write(key.clone(), self.start_ts, write.as_ref().to_bytes());
                    collapse_prev_rollback(&mut txn, &mut reader, &key)?;
                }
            }
            released_locks.push(released_lock);
            match status {
                SecondaryLockStatus::Locked(lock) => {
                    result.push(lock.into_lock_info(key.to_raw()?));
                }
                SecondaryLockStatus::Committed(commit_ts) => {
                    result = SecondaryLocksStatus::Committed(commit_ts);
                    break;
                }
                SecondaryLockStatus::RolledBack => {
                    result = SecondaryLocksStatus::RolledBack;
                    break;
                }
            }
        }
        let mut rows = 0;
        if let SecondaryLocksStatus::RolledBack = &result {
            // Lock is only released when result is `RolledBack`.
            released_locks.wake_up(context.lock_mgr);
            // One row is mutated only when a secondary lock is rolled back.
            rows = 1;
        }
        let pr = ProcessResult::SecondaryLocksStatus { status: result };
        let mut write_data = WriteData::from_modifies(txn.into_modifies());
        write_data.set_allowed_on_disk_almost_full();
        Ok(WriteResult {
            ctx: self.ctx,
            to_be_write: write_data,
            rows,
            pr,
            lock_info: None,
            lock_guards: vec![],
            response_policy: ResponsePolicy::OnApplied,
        })
    }
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::storage::kv::TestEngineBuilder;
use crate::storage::lock_manager::DummyLockManager;
use crate::storage::mvcc::tests::*;
use crate::storage::txn::commands::WriteCommand;
use crate::storage::txn::scheduler::DEFAULT_EXECUTION_DURATION_LIMIT;
use crate::storage::txn::tests::*;
use crate::storage::Engine;
use concurrency_manager::ConcurrencyManager;
use kvproto::kvrpcpb::Context;
use tikv_util::deadline::Deadline;
    /// Runs a `CheckSecondaryLocks` command over `key` at `lock_ts`, applies
    /// the produced mutations, and asserts the status equals `expect_status`.
    pub fn must_success<E: Engine>(
        engine: &E,
        key: &[u8],
        lock_ts: impl Into<TimeStamp>,
        expect_status: SecondaryLocksStatus,
    ) {
        let ctx = Context::default();
        let snapshot = engine.snapshot(Default::default()).unwrap();
        let lock_ts = lock_ts.into();
        let cm = ConcurrencyManager::new(lock_ts);
        let command = crate::storage::txn::commands::CheckSecondaryLocks {
            ctx: ctx.clone(),
            keys: vec![Key::from_raw(key)],
            start_ts: lock_ts,
            deadline: Deadline::from_now(DEFAULT_EXECUTION_DURATION_LIMIT),
        };
        let result = command
            .process_write(
                snapshot,
                WriteContext {
                    lock_mgr: &DummyLockManager,
                    concurrency_manager: cm,
                    extra_op: Default::default(),
                    statistics: &mut Default::default(),
                    async_apply_prewrite: false,
                },
            )
            .unwrap();
        if let ProcessResult::SecondaryLocksStatus { status } = result.pr {
            assert_eq!(status, expect_status);
            write(engine, &ctx, result.to_be_write.modifies);
        } else {
            unreachable!();
        }
    }
    #[test]
    fn test_check_async_commit_secondary_locks() {
        let engine = TestEngineBuilder::new().build().unwrap();
        let ctx = Context::default();
        let cm = ConcurrencyManager::new(1.into());
        // Helper: run CheckSecondaryLocks over `key` at `ts`, apply any
        // produced mutations, and return the resulting status.
        let check_secondary = |key, ts| {
            let snapshot = engine.snapshot(Default::default()).unwrap();
            let key = Key::from_raw(key);
            let ts = TimeStamp::new(ts);
            let command = crate::storage::txn::commands::CheckSecondaryLocks {
                ctx: Default::default(),
                keys: vec![key],
                start_ts: ts,
                deadline: Deadline::from_now(DEFAULT_EXECUTION_DURATION_LIMIT),
            };
            let result = command
                .process_write(
                    snapshot,
                    WriteContext {
                        lock_mgr: &DummyLockManager,
                        concurrency_manager: cm.clone(),
                        extra_op: Default::default(),
                        statistics: &mut Default::default(),
                        async_apply_prewrite: false,
                    },
                )
                .unwrap();
            if !result.to_be_write.modifies.is_empty() {
                engine.write(&ctx, result.to_be_write).unwrap();
            }
            if let ProcessResult::SecondaryLocksStatus { status } = result.pr {
                status
            } else {
                unreachable!();
            }
        };
        must_prewrite_lock(&engine, b"k1", b"key", 1);
        must_commit(&engine, b"k1", 1, 3);
        must_rollback(&engine, b"k1", 5, false);
        must_prewrite_lock(&engine, b"k1", b"key", 7);
        must_commit(&engine, b"k1", 7, 9);
        // Lock CF has no lock
        //
        // LOCK CF       | WRITE CF
        // --------------+---------------------
        //               | 9: start_ts = 7
        //               | 5: rollback
        //               | 3: start_ts = 1
        assert_eq!(
            check_secondary(b"k1", 7),
            SecondaryLocksStatus::Committed(9.into())
        );
        must_get_commit_ts(&engine, b"k1", 7, 9);
        assert_eq!(check_secondary(b"k1", 5), SecondaryLocksStatus::RolledBack);
        must_get_rollback_ts(&engine, b"k1", 5);
        assert_eq!(
            check_secondary(b"k1", 1),
            SecondaryLocksStatus::Committed(3.into())
        );
        must_get_commit_ts(&engine, b"k1", 1, 3);
        assert_eq!(check_secondary(b"k1", 6), SecondaryLocksStatus::RolledBack);
        must_get_rollback_protected(&engine, b"k1", 6, true);
        // ----------------------------
        must_acquire_pessimistic_lock(&engine, b"k1", b"key", 11, 11);
        // Lock CF has a pessimistic lock
        //
        // LOCK CF       | WRITE CF
        // ------------------------------------
        // ts = 11 (pes) | 9: start_ts = 7
        //               | 5: rollback
        //               | 3: start_ts = 1
        let status = check_secondary(b"k1", 11);
        assert_eq!(status, SecondaryLocksStatus::RolledBack);
        must_get_rollback_protected(&engine, b"k1", 11, true);
        // ----------------------------
        must_prewrite_lock(&engine, b"k1", b"key", 13);
        // Lock CF has an optimistic lock
        //
        // LOCK CF       | WRITE CF
        // ------------------------------------
        // ts = 13 (opt) | 11: rollback
        //               | 9: start_ts = 7
        //               | 5: rollback
        //               | 3: start_ts = 1
        match check_secondary(b"k1", 13) {
            SecondaryLocksStatus::Locked(_) => {}
            res => panic!("unexpected lock status: {:?}", res),
        }
        must_locked(&engine, b"k1", 13);
        // ----------------------------
        must_commit(&engine, b"k1", 13, 15);
        // Lock CF has no lock (the optimistic lock was committed at ts 15)
        //
        // LOCK CF       | WRITE CF
        // ------------------------------------
        //               | 15: start_ts = 13
        //               | 11: rollback
        //               | 9: start_ts = 7
        //               | 5: rollback
        //               | 3: start_ts = 1
        match check_secondary(b"k1", 14) {
            SecondaryLocksStatus::RolledBack => {}
            res => panic!("unexpected lock status: {:?}", res),
        }
        must_get_rollback_protected(&engine, b"k1", 14, true);
        match check_secondary(b"k1", 15) {
            SecondaryLocksStatus::RolledBack => {}
            res => panic!("unexpected lock status: {:?}", res),
        }
        must_get_overlapped_rollback(&engine, b"k1", 15, 13, WriteType::Lock, Some(0));
    }
| 40.228739 | 107 | 0.526389 |
7613f437cdd5436fe0576a2ac1c86bd96e20d509
| 12,150 |
//! Module containing various types used across the various netlink
//! structures used in `neli`.
//!
//! # Design decisions
//! These structures are new types rather than type aliases in most
//! cases to allow the internal representation to change without
//! resulting in a breaking change.
use crate as neli;
use std::{
fmt::{self, Debug},
iter::FromIterator,
marker::PhantomData,
ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not},
slice::{Iter, IterMut},
};
use crate::{
attr::{AttrHandle, AttrHandleMut},
consts::{genl::NlAttrType, nl::NlType, rtnl::RtaType},
genl::Nlattr,
nl::Nlmsghdr,
rtnl::Rtattr,
FromBytes, FromBytesWithInput, Size, ToBytes, TypeSize,
};
/// A buffer of bytes.
// Deserialization requires an explicit length (`#[neli(input)]`), which is
// why the type derives `FromBytesWithInput` rather than `FromBytes`.
#[derive(PartialEq, Size, FromBytesWithInput, ToBytes)]
pub struct Buffer(#[neli(input)] Vec<u8>);
impl Debug for Buffer {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Intentionally omits the contents; raw bytes are rarely useful in logs.
        f.write_str("Buffer")
    }
}
impl AsRef<[u8]> for Buffer {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
impl AsMut<[u8]> for Buffer {
    fn as_mut(&mut self) -> &mut [u8] {
        &mut self.0
    }
}
impl<'a> From<&'a [u8]> for Buffer {
    /// Copies the slice into a new owned buffer.
    fn from(slice: &'a [u8]) -> Self {
        Buffer(slice.to_vec())
    }
}
// The lifetime parameter `'a` on the original impl was never used anywhere in
// the impl (clippy: extra_unused_lifetimes); it has been removed.
impl From<Vec<u8>> for Buffer {
    /// Wraps an owned byte vector without copying.
    fn from(vec: Vec<u8>) -> Self {
        Buffer(vec)
    }
}
impl Buffer {
/// Create a new general purpose byte buffer.
pub fn new() -> Self {
Buffer(Vec::new())
}
/// Extend the given buffer with the contents of another slice.
pub fn extend_from_slice(&mut self, slice: &[u8]) {
self.0.extend_from_slice(slice)
}
/// Get the current length of the buffer.
pub fn len(&self) -> usize {
self.0.len()
}
/// Check whether the buffer is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl Default for Buffer {
fn default() -> Self {
Self::new()
}
}
/// A buffer of netlink messages.
///
/// Thin new type around `Vec<Nlmsghdr<T, P>>` so the internal
/// representation can change without a breaking API change.
#[derive(Debug, PartialEq, Size, FromBytesWithInput, ToBytes)]
#[neli(from_bytes_bound = "T: NlType")]
#[neli(from_bytes_bound = "P: FromBytesWithInput<Input = usize>")]
pub struct NlBuffer<T, P>(#[neli(input)] Vec<Nlmsghdr<T, P>>);
impl<T, P> FromIterator<Nlmsghdr<T, P>> for NlBuffer<T, P> {
    fn from_iter<I>(i: I) -> Self
    where
        I: IntoIterator<Item = Nlmsghdr<T, P>>,
    {
        NlBuffer(Vec::from_iter(i))
    }
}
impl<T, P> AsRef<[Nlmsghdr<T, P>]> for NlBuffer<T, P> {
    fn as_ref(&self) -> &[Nlmsghdr<T, P>] {
        self.0.as_slice()
    }
}
impl<T, P> NlBuffer<T, P> {
    /// Create a new buffer of netlink messages.
    pub fn new() -> Self {
        NlBuffer(Vec::new())
    }
    /// Add a new netlink message to the end of the buffer.
    pub fn push(&mut self, msg: Nlmsghdr<T, P>) {
        self.0.push(msg);
    }
    /// Get a netlink message from the end of the buffer.
    pub fn pop(&mut self) -> Option<Nlmsghdr<T, P>> {
        self.0.pop()
    }
    /// Return an iterator over immutable references to the elements
    /// in the buffer.
    pub fn iter(&self) -> Iter<'_, Nlmsghdr<T, P>> {
        self.0.iter()
    }
    /// Return an iterator over mutable references to the elements
    /// in the buffer.
    pub fn iter_mut(&mut self) -> IterMut<'_, Nlmsghdr<T, P>> {
        self.0.iter_mut()
    }
    /// Returns the number of elements in the buffer.
    pub fn len(&self) -> usize {
        self.0.len()
    }
    /// Returns whether the number of elements in the buffer is 0.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
impl<T, P> IntoIterator for NlBuffer<T, P> {
    type Item = Nlmsghdr<T, P>;
    type IntoIter = <Vec<Nlmsghdr<T, P>> as IntoIterator>::IntoIter;
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
impl<T, P> Default for NlBuffer<T, P> {
    fn default() -> Self {
        Self::new()
    }
}
/// A buffer of generic netlink attributes.
#[derive(Debug, PartialEq, ToBytes, FromBytesWithInput)]
#[neli(to_bytes_bound = "T: NlAttrType")]
#[neli(from_bytes_bound = "T: NlAttrType")]
#[neli(from_bytes_bound = "P: FromBytesWithInput<Input = usize>")]
pub struct GenlBuffer<T, P>(#[neli(input)] Vec<Nlattr<T, P>>);
impl<T, P> neli::Size for GenlBuffer<T, P>
where
    T: Size,
    P: Size,
{
    // The buffer's unpadded size is the sum of the *padded* sizes of its
    // attributes: each attribute is aligned in the serialized stream.
    fn unpadded_size(&self) -> usize {
        self.0.iter().map(|attr| attr.padded_size()).sum()
    }
}
impl<T> GenlBuffer<T, Buffer> {
    /// Get a data structure with an immutable reference to the
    /// underlying [`Nlattr`]s.
    pub fn get_attr_handle(&self) -> AttrHandle<Self, Nlattr<T, Buffer>> {
        AttrHandle::new_borrowed(self.0.as_ref())
    }
    /// Get a data structure with a mutable reference to the
    /// underlying [`Nlattr`]s.
    pub fn get_attr_handle_mut(&mut self) -> AttrHandleMut<Self, Nlattr<T, Buffer>> {
        AttrHandleMut::new_borrowed(self.0.as_mut())
    }
}
impl<T, P> AsRef<[Nlattr<T, P>]> for GenlBuffer<T, P> {
    fn as_ref(&self) -> &[Nlattr<T, P>] {
        self.0.as_slice()
    }
}
impl<T, P> AsMut<[Nlattr<T, P>]> for GenlBuffer<T, P> {
    fn as_mut(&mut self) -> &mut [Nlattr<T, P>] {
        self.0.as_mut_slice()
    }
}
impl<T, P> FromIterator<Nlattr<T, P>> for GenlBuffer<T, P> {
    fn from_iter<I>(i: I) -> Self
    where
        I: IntoIterator<Item = Nlattr<T, P>>,
    {
        GenlBuffer(Vec::from_iter(i))
    }
}
impl<T, P> IntoIterator for GenlBuffer<T, P> {
    type Item = Nlattr<T, P>;
    type IntoIter = <Vec<Nlattr<T, P>> as IntoIterator>::IntoIter;
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
impl<T, P> GenlBuffer<T, P> {
    /// Create a new buffer of generic netlink attributes.
    pub fn new() -> Self {
        GenlBuffer(Vec::new())
    }
    /// Add a new generic netlink attribute to the end of the buffer.
    pub fn push(&mut self, attr: Nlattr<T, P>) {
        self.0.push(attr)
    }
    /// Get a generic netlink attribute from the end of the buffer.
    pub fn pop(&mut self) -> Option<Nlattr<T, P>> {
        self.0.pop()
    }
    /// Return an iterator over immutable references to the elements
    /// in the buffer.
    pub fn iter(&self) -> Iter<'_, Nlattr<T, P>> {
        self.0.iter()
    }
    /// Return an iterator over mutable references to the elements
    /// in the buffer.
    pub fn iter_mut(&mut self) -> IterMut<'_, Nlattr<T, P>> {
        self.0.iter_mut()
    }
    /// Returns the number of elements in the buffer.
    pub fn len(&self) -> usize {
        self.0.len()
    }
    /// Returns whether the number of elements in the buffer is 0.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
impl<T, P> Default for GenlBuffer<T, P> {
    fn default() -> Self {
        Self::new()
    }
}
/// A buffer of rtnetlink attributes.
#[derive(Debug, FromBytesWithInput, ToBytes)]
#[neli(from_bytes_bound = "T: RtaType")]
#[neli(from_bytes_bound = "P: FromBytesWithInput<Input = usize>")]
pub struct RtBuffer<T, P>(#[neli(input)] Vec<Rtattr<T, P>>);
impl<T, P> neli::Size for RtBuffer<T, P>
where
    T: Size,
    P: Size,
{
    // Mirrors GenlBuffer: sums the *padded* size of each attribute because
    // attributes are aligned in the serialized stream.
    fn unpadded_size(&self) -> usize {
        self.0.iter().map(|attr| attr.padded_size()).sum()
    }
}
impl<T> RtBuffer<T, Buffer> {
    /// Get a data structure with an immutable reference to the
    /// underlying [`Rtattr`]s.
    pub fn get_attr_handle(&self) -> AttrHandle<Self, Rtattr<T, Buffer>> {
        AttrHandle::new_borrowed(self.0.as_ref())
    }
    /// Get a data structure with a mutable reference to the
    /// underlying [`Rtattr`]s.
    pub fn get_attr_handle_mut(&mut self) -> AttrHandleMut<Self, Rtattr<T, Buffer>> {
        AttrHandleMut::new_borrowed(self.0.as_mut())
    }
}
impl<T, P> FromIterator<Rtattr<T, P>> for RtBuffer<T, P> {
    fn from_iter<I>(i: I) -> Self
    where
        I: IntoIterator<Item = Rtattr<T, P>>,
    {
        RtBuffer(Vec::from_iter(i))
    }
}
impl<T, P> IntoIterator for RtBuffer<T, P> {
    type Item = Rtattr<T, P>;
    type IntoIter = <Vec<Rtattr<T, P>> as IntoIterator>::IntoIter;
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
impl<T, P> AsRef<[Rtattr<T, P>]> for RtBuffer<T, P> {
    fn as_ref(&self) -> &[Rtattr<T, P>] {
        self.0.as_slice()
    }
}
impl<T, P> AsMut<[Rtattr<T, P>]> for RtBuffer<T, P> {
    fn as_mut(&mut self) -> &mut [Rtattr<T, P>] {
        self.0.as_mut_slice()
    }
}
impl<T, P> RtBuffer<T, P> {
    /// Create a new buffer of routing netlink attributes.
    pub fn new() -> Self {
        RtBuffer(Vec::new())
    }
    /// Add a new routing netlink attribute to the end of the buffer.
    pub fn push(&mut self, attr: Rtattr<T, P>) {
        self.0.push(attr)
    }
    /// Get a routing netlink attribute from the end of the buffer.
    pub fn pop(&mut self) -> Option<Rtattr<T, P>> {
        self.0.pop()
    }
    /// Return an iterator over immutable references to the elements
    /// in the buffer.
    pub fn iter(&self) -> Iter<'_, Rtattr<T, P>> {
        self.0.iter()
    }
    /// Return an iterator over mutable references to the elements
    /// in the buffer.
    pub fn iter_mut(&mut self) -> IterMut<'_, Rtattr<T, P>> {
        self.0.iter_mut()
    }
    /// Returns the number of elements in the buffer.
    pub fn len(&self) -> usize {
        self.0.len()
    }
    /// Returns whether the number of elements in the buffer is 0.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
impl<T, P> Default for RtBuffer<T, P> {
    fn default() -> Self {
        Self::new()
    }
}
/// A buffer of flag constants.
///
/// `B` is the raw bitmask representation; `T` is the flag constant type
/// (held only as [`PhantomData`]).
// FIXME: Fix the debug implementation for flags to actually display which flags
// have been set.
#[derive(Debug, PartialEq, Size, ToBytes, FromBytes)]
#[neli(from_bytes_bound = "B: FromBytes + TypeSize + Debug")]
pub struct FlagBuffer<B, T>(B, PhantomData<T>);
impl<'a, B, T> From<&'a [T]> for FlagBuffer<B, T>
where
    B: Default + BitOr<B, Output = B> + From<&'a T>,
{
    // OR all of the given flags together into one bitmask.
    fn from(slice: &'a [T]) -> Self {
        FlagBuffer(
            slice
                .iter()
                .fold(B::default(), |inner, flag| inner | B::from(flag)),
            PhantomData,
        )
    }
}
impl<B, T> TypeSize for FlagBuffer<B, T>
where
    B: TypeSize,
{
    fn type_size() -> usize {
        B::type_size()
    }
}
impl<'a, B, T> FlagBuffer<B, T>
where
    B: Default
        + BitAnd<B, Output = B>
        + BitAndAssign<B>
        + BitOr<B, Output = B>
        + BitOrAssign<B>
        + Not<Output = B>
        + From<&'a T>
        + PartialEq
        + Copy,
    T: 'a,
{
    /// Create an empty set of flags.
    pub fn empty() -> Self {
        FlagBuffer(B::default(), PhantomData)
    }
    /// Check whether the set of flags contains the given flag.
    // True only if *all* bits of the flag are set in the mask.
    pub fn contains(&self, elem: &'a T) -> bool {
        (self.0 & elem.into()) == elem.into()
    }
    /// Add a flag to the set of flags.
    pub fn set(&mut self, flag: &'a T) {
        self.0 |= B::from(flag)
    }
    /// Remove a flag from the set of flags.
    pub fn unset(&mut self, flag: &'a T) {
        self.0 &= !B::from(flag)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::consts::{genl::Index, rtnl::Ifa};
    #[test]
    fn test_genlbuffer_align() {
        // 3 attributes of 24 bytes total — presumably 8 bytes each
        // (attribute header plus a 1-byte payload padded for alignment);
        // confirms unpadded_size() sums the padded per-attribute sizes.
        assert_eq!(
            vec![
                Nlattr::new(false, false, Index::from(0), 0u8,).unwrap(),
                Nlattr::new(false, false, Index::from(1), 1u8,).unwrap(),
                Nlattr::new(false, false, Index::from(2), 2u8,).unwrap(),
            ]
            .into_iter()
            .collect::<GenlBuffer<Index, Buffer>>()
            .unpadded_size(),
            24
        )
    }
    #[test]
    fn test_rtbuffer_align() {
        // Same check for the rtnetlink attribute buffer.
        assert_eq!(
            vec![
                Rtattr::new(None, Ifa::Unspec, 0u8,).unwrap(),
                Rtattr::new(None, Ifa::Address, 1u8,).unwrap(),
                Rtattr::new(None, Ifa::Local, 2u8,).unwrap(),
            ]
            .into_iter()
            .collect::<RtBuffer<Ifa, Buffer>>()
            .unpadded_size(),
            24
        )
    }
}
| 25.52521 | 85 | 0.572099 |
905947fed52ec3c042b9297f311298b5b02dc77d
| 487 |
//
// Copyright 2020 bplist Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//
mod de;
mod document;
mod error;
pub mod object;
pub use object::Object;
pub use de::{from_slice, Deserializer};
pub use error::{Error, Result};
| 25.631579 | 77 | 0.728953 |
18909ac0b69b3df69b78556e06a4e038ab35581b
| 13,745 |
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use std::fmt::{self, Display, Formatter};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::{future, Async, Future, Poll, Stream};
use futures_cpupool::{Builder as CpuPoolBuilder, CpuPool};
use grpcio::{
ChannelBuilder, ClientStreamingSink, Environment, RequestStream, RpcStatus, RpcStatusCode,
WriteFlags,
};
use kvproto::raft_serverpb::RaftMessage;
use kvproto::raft_serverpb::{Done, SnapshotChunk};
use kvproto::tikvpb_grpc::TikvClient;
use crate::raftstore::store::{SnapEntry, SnapKey, SnapManager, Snapshot};
use tikv_util::security::SecurityManager;
use tikv_util::worker::Runnable;
use tikv_util::DeferContext;
use super::metrics::*;
use super::transport::RaftStoreRouter;
use super::{Config, Error, Result};
// Invoked with the final result once a snapshot-send task completes.
pub type Callback = Box<dyn FnOnce(Result<()>) + Send>;
// Number of worker threads in the Runner's CpuPool.
const DEFAULT_POOL_SIZE: usize = 4;
/// A task for either receiving Snapshot or sending Snapshot
pub enum Task {
    Recv {
        // Incoming gRPC stream of snapshot chunks and the sink used to ack it.
        stream: RequestStream<SnapshotChunk>,
        sink: ClientStreamingSink<Done>,
    },
    Send {
        // Target peer address, the raft message carrying snapshot metadata,
        // and the callback reporting the send result.
        addr: String,
        msg: RaftMessage,
        cb: Callback,
    },
}
impl Display for Task {
    /// Short human-readable description of the task, used in logs.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            Task::Recv { .. } => f.write_str("Recv"),
            Task::Send { addr, msg, .. } => {
                write!(f, "Send Snap[to: {}, snap: {:?}]", addr, msg)
            }
        }
    }
}
// Stream adapter that yields a snapshot as a sequence of gRPC chunks.
struct SnapChunk {
    // Head chunk carrying the raft message metadata; taken on the first poll.
    first: Option<SnapshotChunk>,
    // Snapshot file being streamed.
    snap: Box<dyn Snapshot>,
    // Bytes of snapshot data still to be read and sent.
    remain_bytes: usize,
}
// Each data chunk carries at most 1 MiB.
const SNAP_CHUNK_LEN: usize = 1024 * 1024;
impl Stream for SnapChunk {
    type Item = (SnapshotChunk, WriteFlags);
    type Error = Error;
    // Yields the metadata chunk first, then fixed-size data chunks until
    // `remain_bytes` is exhausted. Every chunk sets `buffer_hint` so gRPC
    // may batch writes.
    fn poll(&mut self) -> Poll<Option<Self::Item>, Error> {
        if let Some(t) = self.first.take() {
            let write_flags = WriteFlags::default().buffer_hint(true);
            return Ok(Async::Ready(Some((t, write_flags))));
        }
        // Size the next read: full chunk, the final partial chunk, or done.
        let mut buf = match self.remain_bytes {
            0 => return Ok(Async::Ready(None)),
            n if n > SNAP_CHUNK_LEN => vec![0; SNAP_CHUNK_LEN],
            n => vec![0; n],
        };
        let result = self.snap.read_exact(buf.as_mut_slice());
        match result {
            Ok(_) => {
                self.remain_bytes -= buf.len();
                let mut chunk = SnapshotChunk::new();
                chunk.set_data(buf);
                Ok(Async::Ready(Some((
                    chunk,
                    WriteFlags::default().buffer_hint(true),
                ))))
            }
            Err(e) => Err(box_err!("failed to read snapshot chunk: {}", e)),
        }
    }
}
// Statistics reported after a snapshot has been sent successfully.
struct SendStat {
    key: SnapKey,
    // Total size of the snapshot file in bytes.
    total_size: u64,
    // Wall-clock time the whole send took.
    elapsed: Duration,
}
/// Send the snapshot to specified address.
///
/// It will first send the normal raft snapshot message and then send the snapshot file.
fn send_snap(
    env: Arc<Environment>,
    mgr: SnapManager,
    security_mgr: Arc<SecurityManager>,
    cfg: &Config,
    addr: &str,
    msg: RaftMessage,
) -> Result<impl Future<Item = SendStat, Error = Error>> {
    assert!(msg.get_message().has_snapshot());
    let timer = Instant::now();
    let send_timer = SEND_SNAP_HISTOGRAM.start_coarse_timer();
    let key = {
        let snap = msg.get_message().get_snapshot();
        SnapKey::from_snap(snap)?
    };
    // Mark the snapshot as being sent; the DeferContext guarantees
    // deregistration even if the returned future errors out.
    mgr.register(key.clone(), SnapEntry::Sending);
    let deregister = {
        let (mgr, key) = (mgr.clone(), key.clone());
        DeferContext::new(move || mgr.deregister(&key, &SnapEntry::Sending))
    };
    let s = box_try!(mgr.get_snapshot_for_sending(&key));
    if !s.exists() {
        return Err(box_err!("missing snap file: {:?}", s.path()));
    }
    let total_size = s.total_size()?;
    // Build the chunk stream: head chunk carries the raft message, the rest
    // carries the file data (see SnapChunk's Stream impl).
    let chunks = {
        let mut first_chunk = SnapshotChunk::new();
        first_chunk.set_message(msg);
        SnapChunk {
            first: Some(first_chunk),
            snap: s,
            remain_bytes: total_size as usize,
        }
    };
    let cb = ChannelBuilder::new(env)
        .stream_initial_window_size(cfg.grpc_stream_initial_window_size.0 as i32)
        .keepalive_time(cfg.grpc_keepalive_time.0)
        .keepalive_timeout(cfg.grpc_keepalive_timeout.0)
        .default_compression_algorithm(cfg.grpc_compression_algorithm());
    let channel = security_mgr.connect(cb, addr);
    let client = TikvClient::new(channel);
    let (sink, receiver) = client.snapshot()?;
    // Forward all chunks, then wait for the receiver's acknowledgement.
    let send = chunks.forward(sink).map_err(Error::from);
    let send = send
        .and_then(|(s, _)| receiver.map_err(Error::from).map(|_| s))
        .then(move |result| {
            send_timer.observe_duration();
            drop(deregister);
            drop(client);
            result.map(|s| {
                fail_point!("snapshot_delete_after_send");
                // The local copy is no longer needed once the peer has it.
                s.snap.delete();
                // TODO: improve it after rustc resolves the bug.
                // Call `info` in the closure directly will cause rustc
                // panic with `Cannot create local mono-item for DefId`.
                SendStat {
                    key,
                    total_size,
                    elapsed: timer.elapsed(),
                }
            })
        });
    Ok(send)
}
// State for one in-progress snapshot reception, built from the head chunk.
struct RecvSnapContext {
    key: SnapKey,
    // None when the snapshot file already exists locally (nothing to write).
    file: Option<Box<dyn Snapshot>>,
    // The raft message from the head chunk, forwarded to raftstore on finish.
    raft_msg: RaftMessage,
}
impl RecvSnapContext {
    // Parse the head chunk: extract the raft message, derive the snapshot
    // key, and open (or skip) the local snapshot file.
    fn new(head_chunk: Option<SnapshotChunk>, snap_mgr: &SnapManager) -> Result<Self> {
        // head_chunk is None means the stream is empty.
        let mut head = head_chunk.ok_or_else(|| Error::Other("empty gRPC stream".into()))?;
        if !head.has_message() {
            return Err(box_err!("no raft message in the first chunk"));
        }
        let meta = head.take_message();
        let key = match SnapKey::from_snap(meta.get_message().get_snapshot()) {
            Ok(k) => k,
            Err(e) => return Err(box_err!("failed to create snap key: {:?}", e)),
        };
        let snap = {
            let data = meta.get_message().get_snapshot().get_data();
            let s = match snap_mgr.get_snapshot_for_receiving(&key, data) {
                Ok(s) => s,
                Err(e) => return Err(box_err!("{} failed to create snapshot file: {:?}", key, e)),
            };
            if s.exists() {
                let p = s.path();
                info!("snapshot file already exists, skip receiving"; "snap_key" => %key, "file" => p);
                None
            } else {
                Some(s)
            }
        };
        Ok(RecvSnapContext {
            key,
            file: snap,
            raft_msg: meta,
        })
    }
    // Persist the received file (if any) and hand the raft message over to
    // the raftstore router.
    fn finish<R: RaftStoreRouter>(self, raft_router: R) -> Result<()> {
        let key = self.key;
        if let Some(mut file) = self.file {
            info!("saving snapshot file"; "snap_key" => %key, "file" => file.path());
            if let Err(e) = file.save() {
                let path = file.path();
                let e = box_err!("{} failed to save snapshot file {}: {:?}", key, path, e);
                return Err(e);
            }
        }
        if let Err(e) = raft_router.send_raft_msg(self.raft_msg) {
            return Err(box_err!("{} failed to send snapshot to raft: {}", key, e));
        }
        Ok(())
    }
}
// Receive one snapshot over a client-streaming gRPC call: the head chunk
// carries metadata, the remaining chunks carry file data. On success the
// Done message is sent through `sink`; on failure an Unknown status.
fn recv_snap<R: RaftStoreRouter + 'static>(
    stream: RequestStream<SnapshotChunk>,
    sink: ClientStreamingSink<Done>,
    snap_mgr: SnapManager,
    raft_router: R,
) -> impl Future<Item = (), Error = Error> {
    let stream = stream.map_err(Error::from);
    let f = stream.into_future().map_err(|(e, _)| e).and_then(
        move |(head, chunks)| -> Box<dyn Future<Item = (), Error = Error> + Send> {
            let context = match RecvSnapContext::new(head, &snap_mgr) {
                Ok(context) => context,
                Err(e) => return Box::new(future::err(e)),
            };
            // File already present locally: just forward the raft message.
            if context.file.is_none() {
                return Box::new(future::result(context.finish(raft_router)));
            }
            let context_key = context.key.clone();
            // Registered for the duration of the transfer; deregistered in
            // the final `then` regardless of outcome.
            snap_mgr.register(context.key.clone(), SnapEntry::Receiving);
            // Append every remaining chunk to the snapshot file.
            let recv_chunks = chunks.fold(context, |mut context, mut chunk| -> Result<_> {
                let data = chunk.take_data();
                if data.is_empty() {
                    return Err(box_err!("{} receive chunk with empty data", context.key));
                }
                if let Err(e) = context.file.as_mut().unwrap().write_all(&data) {
                    let key = &context.key;
                    let path = context.file.as_mut().unwrap().path();
                    let e = box_err!("{} failed to write snapshot file {}: {}", key, path, e);
                    return Err(e);
                }
                Ok(context)
            });
            Box::new(
                recv_chunks
                    .and_then(move |context| context.finish(raft_router))
                    .then(move |r| {
                        snap_mgr.deregister(&context_key, &SnapEntry::Receiving);
                        r
                    }),
            )
        },
    );
    f.then(move |res| match res {
        Ok(()) => sink.success(Done::new()),
        Err(e) => {
            let status = RpcStatus::new(RpcStatusCode::Unknown, Some(format!("{:?}", e)));
            sink.fail(status)
        }
    })
    .map_err(Error::from)
}
/// Worker that executes snapshot send/receive [`Task`]s on a thread pool.
pub struct Runner<R: RaftStoreRouter + 'static> {
    env: Arc<Environment>,
    snap_mgr: SnapManager,
    pool: CpuPool,
    raft_router: R,
    security_mgr: Arc<SecurityManager>,
    cfg: Arc<Config>,
    // In-flight task counters, checked against the configured concurrency
    // limits before accepting new work.
    sending_count: Arc<AtomicUsize>,
    recving_count: Arc<AtomicUsize>,
}
impl<R: RaftStoreRouter + 'static> Runner<R> {
    /// Create a runner with a `DEFAULT_POOL_SIZE`-thread pool and zeroed
    /// in-flight counters.
    pub fn new(
        env: Arc<Environment>,
        snap_mgr: SnapManager,
        r: R,
        security_mgr: Arc<SecurityManager>,
        cfg: Arc<Config>,
    ) -> Runner<R> {
        Runner {
            env,
            snap_mgr,
            pool: CpuPoolBuilder::new()
                .name_prefix(thd_name!("snap-sender"))
                .pool_size(DEFAULT_POOL_SIZE)
                .create(),
            raft_router: r,
            security_mgr,
            cfg,
            sending_count: Arc::new(AtomicUsize::new(0)),
            recving_count: Arc::new(AtomicUsize::new(0)),
        }
    }
}
impl<R: RaftStoreRouter + 'static> Runnable<Task> for Runner<R> {
    // Dispatch a task to the pool, rejecting it up front when the
    // per-direction concurrency limit is already reached.
    fn run(&mut self, task: Task) {
        match task {
            Task::Recv { stream, sink } => {
                if self.recving_count.load(Ordering::SeqCst) >= self.cfg.concurrent_recv_snap_limit
                {
                    // Over the limit: refuse with ResourceExhausted so the
                    // sender can retry later.
                    warn!("too many recving snapshot tasks, ignore");
                    let status = RpcStatus::new(RpcStatusCode::ResourceExhausted, None);
                    self.pool.spawn(sink.fail(status)).forget();
                    return;
                }
                SNAP_TASK_COUNTER.with_label_values(&["recv"]).inc();
                let snap_mgr = self.snap_mgr.clone();
                let raft_router = self.raft_router.clone();
                let recving_count = Arc::clone(&self.recving_count);
                recving_count.fetch_add(1, Ordering::SeqCst);
                // Counter is decremented when the future resolves, success
                // or failure.
                let f = recv_snap(stream, sink, snap_mgr, raft_router).then(move |result| {
                    recving_count.fetch_sub(1, Ordering::SeqCst);
                    if let Err(e) = result {
                        error!("failed to recv snapshot"; "err" => %e);
                    }
                    future::ok::<_, ()>(())
                });
                self.pool.spawn(f).forget();
            }
            Task::Send { addr, msg, cb } => {
                if self.sending_count.load(Ordering::SeqCst) >= self.cfg.concurrent_send_snap_limit
                {
                    warn!(
                        "too many sending snapshot tasks, drop Send Snap[to: {}, snap: {:?}]",
                        addr, msg
                    );
                    cb(Err(Error::Other("Too many sending snapshot tasks".into())));
                    return;
                }
                SNAP_TASK_COUNTER.with_label_values(&["send"]).inc();
                let env = Arc::clone(&self.env);
                let mgr = self.snap_mgr.clone();
                let security_mgr = Arc::clone(&self.security_mgr);
                let sending_count = Arc::clone(&self.sending_count);
                sending_count.fetch_add(1, Ordering::SeqCst);
                // send_snap may fail synchronously; future::result + flatten
                // funnels both paths into one future.
                let f = future::result(send_snap(env, mgr, security_mgr, &self.cfg, &addr, msg))
                    .flatten()
                    .then(move |res| {
                        match res {
                            Ok(stat) => {
                                info!(
                                    "sent snapshot";
                                    "region_id" => stat.key.region_id,
                                    "snap_key" => %stat.key,
                                    "size" => stat.total_size,
                                    "duration" => ?stat.elapsed
                                );
                                cb(Ok(()));
                            }
                            Err(e) => {
                                error!("failed to send snap"; "to_addr" => addr, "err" => ?e);
                                cb(Err(e));
                            }
                        };
                        sending_count.fetch_sub(1, Ordering::SeqCst);
                        future::ok::<_, ()>(())
                    });
                self.pool.spawn(f).forget();
            }
        }
    }
}
| 34.3625 | 103 | 0.513423 |
f7abc3320b890297d71c3e738739a10194cd77fc
| 793 |
use prelude::read_i64;
/// By considering the terms in the Fibonacci sequence whose
/// values do not exceed MAX, find the sum of the even-valued terms.
struct Fib {
    curr: i64,
    next: i64,
}
impl Fib {
    /// Seed the generator; the sequence it yields is 1, 2, 3, 5, 8, ...
    pub fn new() -> Fib {
        Fib { curr: 1, next: 1 }
    }
    /// Sum every even term of the sequence that does not exceed `max`.
    pub fn even_sum_till(&mut self, max: i64) -> i64 {
        let mut total = 0;
        for term in self.filter(|x| x % 2 == 0) {
            if term > max {
                break;
            }
            total += term;
        }
        total
    }
}
impl Iterator for Fib {
    type Item = i64;
    /// Advance one step and yield the new current term.
    fn next(&mut self) -> Option<i64> {
        let sum = self.curr + self.next;
        self.curr = std::mem::replace(&mut self.next, sum);
        Some(self.curr)
    }
}
fn main() {
    // First input is the number of test cases; each case then supplies
    // its own MAX value.
    let num = read_i64();
    for _ in 0..num {
        println!("{}", Fib::new().even_sum_till(read_i64()));
    }
}
| 18.44186 | 68 | 0.522068 |
ac6427815f63752d469b5cee4db01f75d971e221
| 8,176 |
/**
* MIT License
*
* termusic - Copyright (c) 2021 Larry Hao
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use super::{
MailEntryNewOrRead, TermailActivity, COMPONENT_TABLE_MAILLIST, COMPONENT_TEXTAREA_MAIL,
COMPONENT_TREEVIEW_MAILBOXES,
};
// use std::path::Path;
// use tui_realm_treeview::{Node, Tree};
// use tuirealm::{Payload, PropPayload, PropValue, PropsBuilder, Value};
use anyhow::{anyhow, Result};
use chrono::prelude::DateTime;
use chrono::Local;
use maildir::Maildir;
use mailparse::{MailHeaderMap, ParsedMail};
// use std::io::Write;
use std::thread;
use std::time::{Duration, UNIX_EPOCH};
use tui_realm_stdlib::TablePropsBuilder;
use tui_realm_stdlib::TextareaPropsBuilder;
use tui_realm_treeview::TreeViewPropsBuilder;
use tuirealm::props::{TableBuilder, TextSpan};
use tuirealm::tui::style::Color;
use tuirealm::PropsBuilder;
impl TermailActivity {
    /// Load all mail (unread and read) from the maildir at `node_id`.
    ///
    /// The directory is listed on a background thread; the collected
    /// entries are sorted (unread first, newest first inside each group)
    /// and delivered through `self.sender_mail_items`.
    pub fn load_mailbox(&mut self, node_id: &str) {
        let mail_dir = Maildir::from(node_id);
        self.current_maildir = mail_dir;
        // Separate handle for the worker thread.
        let mail_dir = Maildir::from(node_id);
        let mut mail_items = Vec::new();
        let tx = self.sender_mail_items.clone();
        thread::spawn(move || {
            let mail_new_entries = mail_dir.list_new();
            let mail_cur_entries = mail_dir.list_cur();
            // Add new (unread) items; unreadable entries are skipped.
            for record in mail_new_entries {
                if record.is_err() {
                    continue;
                }
                let mut record = record.unwrap();
                mail_items.push(MailEntryNewOrRead {
                    date: record.date().unwrap_or(0),
                    item: record,
                    new: true,
                });
            }
            // Add already-read items.
            for record in mail_cur_entries {
                if record.is_err() {
                    continue;
                }
                let mut record = record.unwrap();
                mail_items.push(MailEntryNewOrRead {
                    date: record.date().unwrap_or(0),
                    item: record,
                    new: false,
                });
            }
            // Newest first; the second (stable) sort then floats unread
            // mail to the top while preserving date order in each group.
            mail_items.sort_by(|b, a| a.date.cmp(&b.date));
            mail_items.sort_by(|b, a| a.new.cmp(&b.new));
            tx.send(mail_items).ok();
        });
    }
    /// Rebuild the mail-list table from `self.mail_items` and push it to
    /// the view, making the list the active component.
    pub fn sync_maillist(&mut self) {
        let mut table: TableBuilder = TableBuilder::default();
        for (idx, record) in self.mail_items.iter_mut().enumerate() {
            if idx > 0 {
                table.add_row();
            }
            let date = record.date;
            let header = record.item.headers().unwrap();
            let sender = header
                .get_first_value("From")
                .unwrap_or_else(|| "No Sender".to_string());
            let subject = header
                .get_first_value("Subject")
                .unwrap_or_else(|| "No Subject".to_string());
            // Clamp negative (pre-epoch or unparsable) dates to the epoch.
            let date_u64 = if date.is_negative() {
                0
            } else {
                date.unsigned_abs()
            };
            let d = UNIX_EPOCH + Duration::from_secs(date_u64);
            let datetime = DateTime::<Local>::from(d);
            let timestamp_str = datetime.format("%y-%m-%d %H:%M").to_string();
            table
                .add_col(TextSpan::new(idx.to_string()))
                .add_col(TextSpan::new(timestamp_str).fg(Color::LightYellow));
            // Unread mail is highlighted in bold green.
            if record.new {
                table
                    .add_col(TextSpan::new(sender).bold().fg(Color::Green))
                    .add_col(TextSpan::new(subject).bold().fg(Color::Green));
            } else {
                table
                    .add_col(TextSpan::new(sender))
                    .add_col(TextSpan::new(subject));
            }
        }
        let table = table.build();
        if let Some(props) = self.view.get_props(COMPONENT_TABLE_MAILLIST) {
            let props = TablePropsBuilder::from(props).with_table(table).build();
            let msg = self.view.update(COMPONENT_TABLE_MAILLIST, props);
            self.update(&msg);
            self.view.active(COMPONENT_TABLE_MAILLIST);
        }
    }
    /// Display the mail at `index` in the text area and, if it was unread,
    /// mark it read (move new -> cur) and refresh the list and mailbox tree.
    ///
    /// # Errors
    /// Fails when the index is out of range, the mail cannot be parsed, the
    /// view props are missing, or the maildir move fails.
    pub fn load_mail(&mut self, index: usize) -> Result<()> {
        let mail_item = self
            .mail_items
            .get_mut(index)
            .ok_or_else(|| anyhow!("error get mail_item"))?;
        let parsed_mail = mail_item.item.parsed()?;
        let content = Self::get_body_recursive(&parsed_mail)?;
        // Keep only non-empty trimmed lines for display.
        let mut vec_lines: Vec<TextSpan> = vec![];
        for line in content.split('\n') {
            let trimed = line.trim();
            if !trimed.is_empty() {
                vec_lines.push(TextSpan::from(trimed));
            }
        }
        // Push the body into the mail text area.
        let props = self
            .view
            .get_props(COMPONENT_TEXTAREA_MAIL)
            .ok_or_else(|| anyhow!("error get props"))?;
        let props = TextareaPropsBuilder::from(props)
            .with_texts(vec_lines)
            .build();
        self.view.update(COMPONENT_TEXTAREA_MAIL, props);
        if mail_item.new {
            // Mark as read on disk, then refresh the mail list.
            self.current_maildir.move_new_to_cur(mail_item.item.id())?;
            mail_item.new = false;
            self.sync_maillist();
            // Refresh the mailbox tree view (unread counts changed).
            let path = self.path.clone();
            self.scan_dir(&path);
            if let Some(props) = self.view.get_props(COMPONENT_TREEVIEW_MAILBOXES) {
                let props = TreeViewPropsBuilder::from(props)
                    .with_tree_and_depth(self.tree.root(), 2)
                    .build();
                self.view.update(COMPONENT_TREEVIEW_MAILBOXES, props);
            }
        }
        Ok(())
    }
    /// Recursively extract displayable text from `mail`.
    ///
    /// Leaf parts contribute their text: `text/plain` bodies verbatim,
    /// `text/html` bodies with markup stripped; other MIME types are
    /// ignored. Multipart messages concatenate the text of all subparts.
    fn get_body_recursive(mail: &ParsedMail) -> Result<String> {
        let mut content = String::new();
        if mail.subparts.is_empty() {
            if mail.ctype.mimetype.starts_with("text/plain") {
                content = mail.get_body()?;
            } else if mail.ctype.mimetype.starts_with("text/html") {
                // Strip HTML by collecting only the text nodes.
                let frag = scraper::Html::parse_fragment(&mail.get_body()?);
                for node in frag.tree {
                    if let scraper::node::Node::Text(text) = node {
                        content.push_str(&text.text);
                    }
                }
            }
        } else {
            // Fix: the previous loop ran over `0..parts_quantity - 1`,
            // which skipped the final subpart and produced no output at
            // all for a multipart message with a single subpart.
            for part in &mail.subparts {
                content.push_str(&Self::get_body_recursive(part)?);
            }
        }
        Ok(content)
    }
}
| 38.384977 | 91 | 0.566292 |
e5e79139b2ef29f0f24b8cd6c9c31cce14d270b3
| 1,266 |
use crate::config_reader::I3Binding;
use prettytable::Table;
use std::collections::HashMap;
/// Build a pretty-printable table of i3 key bindings grouped by category.
///
/// Categories are sorted alphabetically for deterministic output; empty
/// categories are skipped. Each row holds the category name, a nested
/// table with the binding details (type, key, command), and a nested
/// table with each binding's run type.
///
/// Note: the map is only read, so the parameter no longer needs `mut`
/// and the iteration uses `iter()` instead of the previous `iter_mut()`.
pub fn build_table_from_bindings(bindings_map: HashMap<String, Vec<I3Binding>>) -> Table {
    let mut main_table = Table::new();
    main_table.set_titles(row!["Category", "Actual Binding", "Binding mode"]);
    // Sort categories by name for stable display order.
    let mut sorted_vec: Vec<_> = bindings_map.iter().collect();
    sorted_vec.sort_by(|a, b| a.0.cmp(b.0));
    for (category, bindings_for_category) in sorted_vec {
        if bindings_for_category.is_empty() {
            continue;
        }
        // Build both nested tables in a single pass over the bindings.
        let mut category_table = Table::new();
        let mut type_table = Table::new();
        for binding in bindings_for_category {
            category_table.add_row(row![binding.binding_type, binding.binding, binding.command]);
            type_table.add_row(row![binding.runtype]);
        }
        category_table.set_format(*prettytable::format::consts::FORMAT_CLEAN);
        type_table.set_format(*prettytable::format::consts::FORMAT_CLEAN);
        main_table.add_row(row![category, category_table, type_table]);
    }
    main_table
}
| 32.461538 | 94 | 0.604265 |
db3dd1f751a6fb637c41e1b61cbbb45fd02aedae
| 9,910 |
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
pub mod pricings {
use crate::models::*;
    /// List Security Center pricing configurations for the subscription.
    /// Generated by AutoRust; issues `GET .../providers/Microsoft.Security/pricings`.
    pub async fn list(operation_config: &crate::OperationConfig, subscription_id: &str) -> std::result::Result<PricingList, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Security/pricings",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        // 200 deserializes the payload; anything else is surfaced as a
        // CloudError wrapped in DefaultResponse.
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: PricingList =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    // Error namespace for the `list` operation (AutoRust-generated pattern:
    // one module with a dedicated Error enum per operation).
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::CloudError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Get a single Security Center pricing configuration by name.
    /// Generated by AutoRust; issues `GET .../pricings/{pricing_name}`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        pricing_name: &str,
    ) -> std::result::Result<Pricing, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Security/pricings/{}",
            operation_config.base_path(),
            subscription_id,
            pricing_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        // 200 deserializes the payload; anything else is surfaced as a
        // CloudError wrapped in DefaultResponse.
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Pricing =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: CloudError =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
pub mod get {
    use crate::{models, models::*};
    /// Errors produced by the `get` operation.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// The service answered with a non-success status code; the body was
        /// deserialized as a `CloudError`.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        /// Response body did not deserialize; the raw bytes are kept for diagnosis.
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Create or replace the Security Center pricing configuration named
/// `pricing_name` for `subscription_id` (PUT on a
/// `Microsoft.Security/pricings` resource) with the given `pricing` body.
///
/// On HTTP 200 the body is deserialized as [`Pricing`]; any other status
/// is deserialized as a `CloudError` and returned as
/// `update::Error::DefaultResponse`.
pub async fn update(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    pricing_name: &str,
    pricing: &Pricing,
) -> std::result::Result<Pricing, update::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/providers/Microsoft.Security/pricings/{}",
        operation_config.base_path(),
        subscription_id,
        pricing_name
    );
    let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::PUT);
    // Attach a bearer token only when a credential is configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(update::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    // PUT carries the pricing resource as a JSON payload.
    req_builder = req_builder.header("content-type", "application/json");
    let req_body = azure_core::to_json(pricing).map_err(update::Error::SerializeError)?;
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: Pricing =
                serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        // Every non-200 status is treated as a service-reported error.
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: CloudError =
                serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(update::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod update {
    use crate::{models, models::*};
    /// Errors produced by the `update` operation.
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        /// The service answered with a non-success status code; the body was
        /// deserialized as a `CloudError`.
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::CloudError,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        /// Response body did not deserialize; the raw bytes are kept for diagnosis.
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
}
| 47.190476 | 138 | 0.57558 |
e601cb2060ae26b3e5e6bd664a7b13e00a8f71f0
| 57,331 |
use crate::common;
use crate::errors::*;
use crate::help::*;
use crate::self_update;
use crate::term2;
use crate::term2::Terminal;
use crate::topical_doc;
use clap::{App, AppSettings, Arg, ArgGroup, ArgMatches, Shell, SubCommand};
use rustup::dist::dist::{PartialTargetTriple, PartialToolchainDesc, Profile, TargetTriple};
use rustup::dist::manifest::Component;
use rustup::toolchain::{CustomToolchain, DistributableToolchain};
use rustup::utils::utils::{self, ExitCode};
use rustup::Notification;
use rustup::{command, Cfg, ComponentStatus, Toolchain};
use std::error::Error;
use std::fmt;
use std::io::Write;
use std::iter;
use std::path::{Path, PathBuf};
use std::process::{self, Command};
use std::str::FromStr;
fn handle_epipe(res: Result<()>) -> Result<()> {
match res {
Err(Error(ErrorKind::Io(ref err), _)) if err.kind() == std::io::ErrorKind::BrokenPipe => {
Ok(())
}
res => res,
}
}
/// Run `callee` after emitting verbose-level notices that the invoked
/// command spelling is deprecated and `rustup <instead>` should be used.
///
/// `matches` is passed through untouched; `B` is whatever argument type
/// the wrapped handler expects (typically `&ArgMatches`).
fn deprecated<F, B>(instead: &str, cfg: &mut Cfg, matches: B, callee: F) -> Result<()>
where
    F: FnOnce(&mut Cfg, B) -> Result<()>,
{
    (cfg.notify_handler)(Notification::PlainVerboseMessage(
        "Use of (currently) unmaintained command line interface.",
    ));
    (cfg.notify_handler)(Notification::PlainVerboseMessage(
        "The exact API of this command may change without warning",
    ));
    (cfg.notify_handler)(Notification::PlainVerboseMessage(
        "Eventually this command will be a true alias. Until then:",
    ));
    (cfg.notify_handler)(Notification::PlainVerboseMessage(&format!(
        "    Please use `rustup {}` instead",
        instead
    )));
    // Delegate to the real handler after the deprecation notices.
    callee(cfg, matches)
}
/// Entry point for the `rustup` CLI: parses arguments, applies any
/// leading `+toolchain` override, handles data-format upgrades, then
/// dispatches to the handler for the selected subcommand.
pub fn main() -> Result<()> {
    crate::self_update::cleanup_self_updater()?;
    let matches = cli().get_matches();
    let verbose = matches.is_present("verbose");
    let quiet = matches.is_present("quiet");
    let cfg = &mut common::set_globals(verbose, quiet)?;
    // A leading `+toolchain` argument overrides the toolchain for this
    // invocation only; strip the '+' prefix before recording it.
    if let Some(t) = matches.value_of("+toolchain") {
        cfg.set_toolchain_override(&t[1..]);
    }
    // `rustup self upgrade-data` is serviced before the normal dispatch
    // (and before the metadata-version check below).
    if maybe_upgrade_data(cfg, &matches)? {
        return Ok(());
    }
    cfg.check_metadata_version()?;
    // Listing/printing subcommands are wrapped in handle_epipe so piping
    // their output into e.g. `head` does not surface a broken-pipe error.
    match matches.subcommand() {
        ("dump-testament", _) => common::dump_testament(),
        ("show", Some(c)) => match c.subcommand() {
            ("active-toolchain", Some(_)) => handle_epipe(show_active_toolchain(cfg))?,
            ("home", Some(_)) => handle_epipe(show_rustup_home(cfg))?,
            ("profile", Some(_)) => handle_epipe(show_profile(cfg))?,
            ("keys", Some(_)) => handle_epipe(show_keys(cfg))?,
            (_, _) => handle_epipe(show(cfg))?,
        },
        // Top-level `install`/`uninstall` are deprecated synonyms for the
        // `toolchain` subcommands.
        ("install", Some(m)) => deprecated("toolchain install", cfg, m, update)?,
        ("update", Some(m)) => update(cfg, m)?,
        ("check", Some(_)) => check_updates(cfg)?,
        ("uninstall", Some(m)) => deprecated("toolchain uninstall", cfg, m, toolchain_remove)?,
        ("default", Some(m)) => default_(cfg, m)?,
        ("toolchain", Some(c)) => match c.subcommand() {
            ("install", Some(m)) => update(cfg, m)?,
            ("list", Some(m)) => handle_epipe(toolchain_list(cfg, m))?,
            ("link", Some(m)) => toolchain_link(cfg, m)?,
            ("uninstall", Some(m)) => toolchain_remove(cfg, m)?,
            (_, _) => unreachable!(),
        },
        ("target", Some(c)) => match c.subcommand() {
            ("list", Some(m)) => handle_epipe(target_list(cfg, m))?,
            ("add", Some(m)) => target_add(cfg, m)?,
            ("remove", Some(m)) => target_remove(cfg, m)?,
            (_, _) => unreachable!(),
        },
        ("component", Some(c)) => match c.subcommand() {
            ("list", Some(m)) => handle_epipe(component_list(cfg, m))?,
            ("add", Some(m)) => component_add(cfg, m)?,
            ("remove", Some(m)) => component_remove(cfg, m)?,
            (_, _) => unreachable!(),
        },
        ("override", Some(c)) => match c.subcommand() {
            ("list", Some(_)) => handle_epipe(common::list_overrides(cfg))?,
            ("set", Some(m)) => override_add(cfg, m)?,
            ("unset", Some(m)) => override_remove(cfg, m)?,
            (_, _) => unreachable!(),
        },
        ("run", Some(m)) => run(cfg, m)?,
        ("which", Some(m)) => which(cfg, m)?,
        ("doc", Some(m)) => doc(cfg, m)?,
        ("man", Some(m)) => man(cfg, m)?,
        ("self", Some(c)) => match c.subcommand() {
            ("update", Some(_)) => self_update::update(cfg)?,
            ("uninstall", Some(m)) => self_uninstall(m)?,
            (_, _) => unreachable!(),
        },
        ("set", Some(c)) => match c.subcommand() {
            ("default-host", Some(m)) => set_default_host_triple(cfg, m)?,
            ("profile", Some(m)) => set_profile(cfg, m)?,
            (_, _) => unreachable!(),
        },
        ("completions", Some(c)) => {
            // Shell defaults are wired up in cli(); the completion target
            // command falls back to completing `rustup` itself.
            if let Some(shell) = c.value_of("shell") {
                output_completion_script(
                    shell.parse::<Shell>().unwrap(),
                    c.value_of("command")
                        .and_then(|cmd| cmd.parse::<CompletionCommand>().ok())
                        .unwrap_or(CompletionCommand::Rustup),
                )?;
            }
        }
        (_, _) => unreachable!(),
    }
    Ok(())
}
/// Build the clap `App` describing the complete rustup command-line
/// interface: global flags, the `+toolchain` override argument, and all
/// subcommands. Platform differences (the `man` subcommand) are applied
/// conditionally.
pub fn cli() -> App<'static, 'static> {
    let mut app = App::new("rustup")
        .version(common::version())
        .about("The Rust toolchain installer")
        .after_help(RUSTUP_HELP)
        .setting(AppSettings::VersionlessSubcommands)
        .setting(AppSettings::DeriveDisplayOrder)
        .setting(AppSettings::SubcommandRequiredElseHelp)
        .arg(
            Arg::with_name("verbose")
                .help("Enable verbose output")
                .short("v")
                .long("verbose"),
        )
        .arg(
            Arg::with_name("quiet")
                .conflicts_with("verbose")
                .help("Disable progress output")
                .short("q")
                .long("quiet"),
        )
        // Positional `+toolchain` one-shot override, e.g. `rustup +nightly …`.
        .arg(
            Arg::with_name("+toolchain")
                .help("release channel (e.g. +stable) or custom toolchain to set override")
                .validator(|s| {
                    if s.starts_with('+') {
                        Ok(())
                    } else {
                        Err("Toolchain overrides must begin with '+'".into())
                    }
                }),
        )
        .subcommand(
            SubCommand::with_name("dump-testament")
                .about("Dump information about the build")
                .setting(AppSettings::Hidden), // Not for users, only CI
        )
        .subcommand(
            SubCommand::with_name("show")
                .about("Show the active and installed toolchains or profiles")
                .after_help(SHOW_HELP)
                .setting(AppSettings::VersionlessSubcommands)
                .setting(AppSettings::DeriveDisplayOrder)
                .subcommand(
                    SubCommand::with_name("active-toolchain")
                        .about("Show the active toolchain")
                        .after_help(SHOW_ACTIVE_TOOLCHAIN_HELP),
                )
                .subcommand(
                    SubCommand::with_name("home")
                        .about("Display the computed value of RUSTUP_HOME"),
                )
                .subcommand(SubCommand::with_name("profile").about("Show the current profile"))
                .subcommand(SubCommand::with_name("keys").about("Display the known PGP keys")),
        )
        .subcommand(
            SubCommand::with_name("install")
                .about("Update Rust toolchains")
                .after_help(INSTALL_HELP)
                .setting(AppSettings::Hidden) // synonym for 'toolchain install'
                .arg(
                    Arg::with_name("toolchain")
                        .help(TOOLCHAIN_ARG_HELP)
                        .required(true)
                        .multiple(true),
                )
                .arg(
                    Arg::with_name("profile")
                        .long("profile")
                        .takes_value(true)
                        .possible_values(Profile::names())
                        .required(false),
                )
                .arg(
                    Arg::with_name("no-self-update")
                        .help("Don't perform self-update when running the `rustup install` command")
                        .long("no-self-update")
                        .takes_value(false),
                )
                .arg(
                    Arg::with_name("force")
                        .help("Force an update, even if some components are missing")
                        .long("force")
                        .takes_value(false),
                ),
        )
        .subcommand(
            SubCommand::with_name("uninstall")
                .about("Uninstall Rust toolchains")
                .setting(AppSettings::Hidden) // synonym for 'toolchain uninstall'
                .arg(
                    Arg::with_name("toolchain")
                        .help(TOOLCHAIN_ARG_HELP)
                        .required(true)
                        .multiple(true),
                ),
        )
        .subcommand(
            SubCommand::with_name("update")
                .about("Update Rust toolchains and rustup")
                .after_help(UPDATE_HELP)
                .arg(
                    Arg::with_name("toolchain")
                        .help(TOOLCHAIN_ARG_HELP)
                        .required(false)
                        .multiple(true),
                )
                .arg(
                    Arg::with_name("no-self-update")
                        .help("Don't perform self update when running the `rustup update` command")
                        .long("no-self-update")
                        .takes_value(false),
                )
                .arg(
                    Arg::with_name("force")
                        .help("Force an update, even if some components are missing")
                        .long("force")
                        .takes_value(false),
                ),
        )
        .subcommand(SubCommand::with_name("check").about("Check for updates to Rust toolchains"))
        .subcommand(
            SubCommand::with_name("default")
                .about("Set the default toolchain")
                .after_help(DEFAULT_HELP)
                .arg(
                    Arg::with_name("toolchain")
                        .help(TOOLCHAIN_ARG_HELP)
                        .required(false),
                ),
        )
        .subcommand(
            SubCommand::with_name("toolchain")
                .about("Modify or query the installed toolchains")
                .after_help(TOOLCHAIN_HELP)
                .setting(AppSettings::VersionlessSubcommands)
                .setting(AppSettings::DeriveDisplayOrder)
                .setting(AppSettings::SubcommandRequiredElseHelp)
                .subcommand(
                    SubCommand::with_name("list")
                        .about("List installed toolchains")
                        .arg(
                            Arg::with_name("verbose")
                                .help("Enable verbose output with toolchain information")
                                .takes_value(false)
                                .short("v")
                                .long("verbose"),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("install")
                        .about("Install or update a given toolchain")
                        .aliases(&["update", "add"])
                        .arg(
                            Arg::with_name("toolchain")
                                .help(TOOLCHAIN_ARG_HELP)
                                .required(true)
                                .multiple(true),
                        )
                        .arg(
                            Arg::with_name("profile")
                                .long("profile")
                                .takes_value(true)
                                .possible_values(Profile::names())
                                .required(false),
                        )
                        .arg(
                            Arg::with_name("no-self-update")
                                .help(
                                    "Don't perform self update when running the\
                                     `rustup toolchain install` command",
                                )
                                .long("no-self-update")
                                .takes_value(false),
                        )
                        .arg(
                            Arg::with_name("components")
                                .help("Add specific components on installation")
                                .long("component")
                                .short("c")
                                .takes_value(true)
                                .multiple(true)
                                .use_delimiter(true),
                        )
                        .arg(
                            Arg::with_name("targets")
                                .help("Add specific targets on installation")
                                .long("target")
                                .short("t")
                                .takes_value(true)
                                .multiple(true)
                                .use_delimiter(true),
                        )
                        .arg(
                            Arg::with_name("force")
                                .help("Force an update, even if some components are missing")
                                .long("force")
                                .takes_value(false),
                        )
                        .arg(
                            Arg::with_name("allow-downgrade")
                                .help("Allow rustup to downgrade the toolchain to satisfy your component choice")
                                .long("allow-downgrade")
                                .takes_value(false),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("uninstall")
                        .about("Uninstall a toolchain")
                        .alias("remove")
                        .arg(
                            Arg::with_name("toolchain")
                                .help(TOOLCHAIN_ARG_HELP)
                                .required(true)
                                .multiple(true),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("link")
                        .about("Create a custom toolchain by symlinking to a directory")
                        .after_help(TOOLCHAIN_LINK_HELP)
                        .arg(
                            Arg::with_name("toolchain")
                                .help("Custom toolchain name")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("path")
                                .help("Path to the directory")
                                .required(true),
                        ),
                ),
        )
        .subcommand(
            SubCommand::with_name("target")
                .about("Modify a toolchain's supported targets")
                .setting(AppSettings::VersionlessSubcommands)
                .setting(AppSettings::DeriveDisplayOrder)
                .setting(AppSettings::SubcommandRequiredElseHelp)
                .subcommand(
                    SubCommand::with_name("list")
                        .about("List installed and available targets")
                        .arg(
                            Arg::with_name("installed")
                                .long("--installed")
                                .help("List only installed targets"),
                        )
                        .arg(
                            Arg::with_name("toolchain")
                                .help(TOOLCHAIN_ARG_HELP)
                                .long("toolchain")
                                .takes_value(true),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("add")
                        .about("Add a target to a Rust toolchain")
                        .alias("install")
                        .arg(Arg::with_name("target").required(true).multiple(true).help(
                            "List of targets to install; \
                             \"all\" installs all available targets",
                        ))
                        .arg(
                            Arg::with_name("toolchain")
                                .help(TOOLCHAIN_ARG_HELP)
                                .long("toolchain")
                                .takes_value(true),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("remove")
                        .about("Remove a target from a Rust toolchain")
                        .alias("uninstall")
                        .arg(Arg::with_name("target").required(true).multiple(true))
                        .arg(
                            Arg::with_name("toolchain")
                                .help(TOOLCHAIN_ARG_HELP)
                                .long("toolchain")
                                .takes_value(true),
                        ),
                ),
        )
        .subcommand(
            SubCommand::with_name("component")
                .about("Modify a toolchain's installed components")
                .setting(AppSettings::VersionlessSubcommands)
                .setting(AppSettings::DeriveDisplayOrder)
                .setting(AppSettings::SubcommandRequiredElseHelp)
                .subcommand(
                    SubCommand::with_name("list")
                        .about("List installed and available components")
                        .arg(
                            Arg::with_name("installed")
                                .long("--installed")
                                .help("List only installed components"),
                        )
                        .arg(
                            Arg::with_name("toolchain")
                                .help(TOOLCHAIN_ARG_HELP)
                                .long("toolchain")
                                .takes_value(true),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("add")
                        .about("Add a component to a Rust toolchain")
                        .arg(Arg::with_name("component").required(true).multiple(true))
                        .arg(
                            Arg::with_name("toolchain")
                                .help(TOOLCHAIN_ARG_HELP)
                                .long("toolchain")
                                .takes_value(true),
                        )
                        .arg(Arg::with_name("target").long("target").takes_value(true)),
                )
                .subcommand(
                    SubCommand::with_name("remove")
                        .about("Remove a component from a Rust toolchain")
                        .arg(Arg::with_name("component").required(true).multiple(true))
                        .arg(
                            Arg::with_name("toolchain")
                                .help(TOOLCHAIN_ARG_HELP)
                                .long("toolchain")
                                .takes_value(true),
                        )
                        .arg(Arg::with_name("target").long("target").takes_value(true)),
                ),
        )
        .subcommand(
            SubCommand::with_name("override")
                .about("Modify directory toolchain overrides")
                .after_help(OVERRIDE_HELP)
                .setting(AppSettings::VersionlessSubcommands)
                .setting(AppSettings::DeriveDisplayOrder)
                .setting(AppSettings::SubcommandRequiredElseHelp)
                .subcommand(
                    SubCommand::with_name("list").about("List directory toolchain overrides"),
                )
                .subcommand(
                    SubCommand::with_name("set")
                        .about("Set the override toolchain for a directory")
                        .alias("add")
                        .arg(
                            Arg::with_name("toolchain")
                                .help(TOOLCHAIN_ARG_HELP)
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("path")
                                .long("path")
                                .takes_value(true)
                                .help("Path to the directory"),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("unset")
                        .about("Remove the override toolchain for a directory")
                        .after_help(OVERRIDE_UNSET_HELP)
                        .alias("remove")
                        .arg(
                            Arg::with_name("path")
                                .long("path")
                                .takes_value(true)
                                .help("Path to the directory"),
                        )
                        .arg(
                            Arg::with_name("nonexistent")
                                .long("nonexistent")
                                .takes_value(false)
                                .help("Remove override toolchain for all nonexistent directories"),
                        ),
                ),
        )
        .subcommand(
            SubCommand::with_name("run")
                .about("Run a command with an environment configured for a given toolchain")
                .after_help(RUN_HELP)
                .setting(AppSettings::TrailingVarArg)
                .arg(
                    Arg::with_name("install")
                        .help("Install the requested toolchain if needed")
                        .long("install"),
                )
                .arg(
                    Arg::with_name("toolchain")
                        .help(TOOLCHAIN_ARG_HELP)
                        .required(true),
                )
                .arg(
                    Arg::with_name("command")
                        .required(true)
                        .multiple(true)
                        .use_delimiter(false),
                ),
        )
        .subcommand(
            SubCommand::with_name("which")
                .about("Display which binary will be run for a given command")
                .arg(Arg::with_name("command").required(true))
                .arg(
                    Arg::with_name("toolchain")
                        .help(TOOLCHAIN_ARG_HELP)
                        .long("toolchain")
                        .takes_value(true),
                ),
        )
        .subcommand(
            SubCommand::with_name("doc")
                .alias("docs")
                .about("Open the documentation for the current toolchain")
                .after_help(DOC_HELP)
                .arg(
                    Arg::with_name("path")
                        .long("path")
                        .help("Only print the path to the documentation"),
                )
                // One flag per documented book/page, generated from DOCS_DATA.
                .args(
                    &DOCS_DATA
                        .iter()
                        .map(|(name, help_msg, _)| Arg::with_name(name).long(name).help(help_msg))
                        .collect::<Vec<_>>(),
                )
                .arg(
                    Arg::with_name("toolchain")
                        .help(TOOLCHAIN_ARG_HELP)
                        .long("toolchain")
                        .takes_value(true),
                )
                // The DOCS_DATA flags are mutually exclusive.
                .group(
                    ArgGroup::with_name("page").args(
                        &DOCS_DATA
                            .iter()
                            .map(|(name, _, _)| *name)
                            .collect::<Vec<_>>(),
                    ),
                )
                .arg(Arg::with_name("topic").help(TOPIC_ARG_HELP)),
        );
    // `man` relies on a Unix man(1); it is omitted on Windows.
    if cfg!(not(target_os = "windows")) {
        app = app.subcommand(
            SubCommand::with_name("man")
                .about("View the man page for a given command")
                .arg(Arg::with_name("command").required(true))
                .arg(
                    Arg::with_name("toolchain")
                        .help(TOOLCHAIN_ARG_HELP)
                        .long("toolchain")
                        .takes_value(true),
                ),
        );
    }
    app = app
        .subcommand(
            SubCommand::with_name("self")
                .about("Modify the rustup installation")
                .setting(AppSettings::VersionlessSubcommands)
                .setting(AppSettings::DeriveDisplayOrder)
                .setting(AppSettings::SubcommandRequiredElseHelp)
                .subcommand(
                    SubCommand::with_name("update").about("Download and install updates to rustup"),
                )
                .subcommand(
                    SubCommand::with_name("uninstall")
                        .about("Uninstall rustup.")
                        .arg(Arg::with_name("no-prompt").short("y")),
                )
                .subcommand(
                    SubCommand::with_name("upgrade-data")
                        .about("Upgrade the internal data format."),
                ),
        )
        .subcommand(
            SubCommand::with_name("set")
                .about("Alter rustup settings")
                .setting(AppSettings::SubcommandRequiredElseHelp)
                .subcommand(
                    SubCommand::with_name("default-host")
                        .about("The triple used to identify toolchains when not specified")
                        .arg(Arg::with_name("host_triple").required(true)),
                )
                .subcommand(
                    SubCommand::with_name("profile")
                        .about("The default components installed")
                        .arg(
                            Arg::with_name("profile-name")
                                .required(true)
                                .possible_values(Profile::names())
                                .default_value(Profile::default_name()),
                        ),
                ),
        );
    // Clap provides no good way to say that help should be printed in all
    // cases where an argument without a default is not provided. The following
    // creates lists out all the conditions where the "shell" argument are
    // provided and give the default of "rustup". This way if "shell" is not
    // provided then the help will still be printed.
    let completion_defaults = Shell::variants()
        .iter()
        .map(|&shell| ("shell", Some(shell), "rustup"))
        .collect::<Vec<_>>();
    app.subcommand(
        SubCommand::with_name("completions")
            .about("Generate tab-completion scripts for your shell")
            .after_help(COMPLETIONS_HELP)
            .setting(AppSettings::ArgRequiredElseHelp)
            .arg(Arg::with_name("shell").possible_values(&Shell::variants()))
            .arg(
                Arg::with_name("command")
                    .possible_values(&CompletionCommand::variants())
                    .default_value_ifs(&completion_defaults[..]),
            ),
    )
}
/// Service `rustup self upgrade-data` ahead of normal dispatch.
///
/// Returns `Ok(true)` when the upgrade was performed (the caller should
/// exit), `Ok(false)` when some other subcommand was requested.
fn maybe_upgrade_data(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<bool> {
    if let ("self", Some(c)) = m.subcommand() {
        if let ("upgrade-data", Some(_)) = c.subcommand() {
            cfg.upgrade_data()?;
            return Ok(true);
        }
    }
    Ok(false)
}
/// Detect when the user passed a (partial) target triple where a
/// toolchain name was expected on `rustup update`.
///
/// If `name` parses as a partial target triple, print the installed
/// non-default toolchains whose triple matches and return a
/// `ToolchainNotInstalled` error; otherwise return `Ok(())` so the
/// caller proceeds normally.
fn update_bare_triple_check(cfg: &Cfg, name: &str) -> Result<()> {
    if let Some(triple) = PartialTargetTriple::new(name) {
        warn!("(partial) target triple specified instead of toolchain name");
        let installed_toolchains = cfg.list_toolchains()?;
        let default = cfg.find_default()?;
        let default_name = default.map(|t| t.name().to_string()).unwrap_or_default();
        let mut candidates = vec![];
        for t in installed_toolchains {
            // The default toolchain is not offered as a candidate.
            if t == default_name {
                continue;
            }
            if let Ok(desc) = PartialToolchainDesc::from_str(&t) {
                // A component of the given triple matches when it equals the
                // candidate's component; an omitted component matches anything.
                fn triple_comp_eq(given: &str, from_desc: Option<&String>) -> bool {
                    from_desc.map_or(false, |s| *s == *given)
                }
                let triple_matches = triple
                    .arch
                    .as_ref()
                    .map_or(true, |s| triple_comp_eq(s, desc.target.arch.as_ref()))
                    && triple
                        .os
                        .as_ref()
                        .map_or(true, |s| triple_comp_eq(s, desc.target.os.as_ref()))
                    && triple
                        .env
                        .as_ref()
                        .map_or(true, |s| triple_comp_eq(s, desc.target.env.as_ref()));
                if triple_matches {
                    candidates.push(t);
                }
            }
        }
        // Suggest whatever matched before failing with ToolchainNotInstalled.
        match candidates.len() {
            0 => err!("no candidate toolchains found"),
            1 => println!("\nyou may use the following toolchain: {}\n", candidates[0]),
            _ => {
                println!("\nyou may use one of the following toolchains:");
                for n in &candidates {
                    println!("{}", n);
                }
                println!();
            }
        }
        return Err(ErrorKind::ToolchainNotInstalled(name.to_string()).into());
    }
    Ok(())
}
/// Detect when the user passed a (partial) target triple where a
/// toolchain name was expected on `rustup default`.
///
/// If `name` parses as a partial triple, combine it with the current
/// default toolchain's channel to suggest the toolchain the user likely
/// meant, then return a `ToolchainNotInstalled` error; otherwise `Ok(())`.
fn default_bare_triple_check(cfg: &Cfg, name: &str) -> Result<()> {
    if let Some(triple) = PartialTargetTriple::new(name) {
        warn!("(partial) target triple specified instead of toolchain name");
        let default = cfg.find_default()?;
        let default_name = default.map(|t| t.name().to_string()).unwrap_or_default();
        if let Ok(mut desc) = PartialToolchainDesc::from_str(&default_name) {
            // Substitute the user-provided triple into the default channel.
            desc.target = triple;
            let maybe_toolchain = format!("{}", desc);
            let toolchain = cfg.get_toolchain(maybe_toolchain.as_ref(), false)?;
            if toolchain.name() == default_name {
                warn!(
                    "(partial) triple '{}' resolves to a toolchain that is already default",
                    name
                );
            } else {
                println!(
                    "\nyou may use the following toolchain: {}\n",
                    toolchain.name()
                );
            }
            return Err(ErrorKind::ToolchainNotInstalled(name.to_string()).into());
        }
    }
    Ok(())
}
/// Handle `rustup default [toolchain]`.
///
/// With a toolchain argument: install it if needed (distributable
/// toolchains only) and make it the default, warning if a directory
/// override currently shadows it. Without an argument: print the
/// current default or error if none is configured.
fn default_(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    if m.is_present("toolchain") {
        let toolchain = m.value_of("toolchain").unwrap();
        // Catch `rustup default <triple>` mistakes before resolving.
        default_bare_triple_check(cfg, toolchain)?;
        let toolchain = cfg.get_toolchain(toolchain, false)?;
        let status = if !toolchain.is_custom() {
            let distributable = DistributableToolchain::new(&toolchain)?;
            Some(distributable.install_from_dist_if_not_installed()?)
        } else if !toolchain.exists() {
            // Custom toolchains cannot be installed from dist; they must
            // already exist on disk.
            return Err(ErrorKind::ToolchainNotInstalled(toolchain.name().to_string()).into());
        } else {
            None
        };
        toolchain.make_default()?;
        if let Some(status) = status {
            println!();
            common::show_channel_update(cfg, toolchain.name(), Ok(status))?;
        }
        // Warn when a directory override means the new default is not
        // actually what will run here.
        let cwd = utils::current_dir()?;
        if let Some((toolchain, reason)) = cfg.find_override(&cwd)? {
            info!(
                "note that the toolchain '{}' is currently in use ({})",
                toolchain.name(),
                reason
            );
        }
    } else {
        let default_toolchain: Result<String> = cfg
            .get_default()?
            .ok_or_else(|| "no default toolchain configured".into());
        println!("{} (default)", default_toolchain?);
    }
    Ok(())
}
/// Handle `rustup check`: for every installed channel, print whether an
/// update is available by comparing the installed version with the
/// latest dist version, using colored terminal output.
fn check_updates(cfg: &Cfg) -> Result<()> {
    let mut t = term2::stdout();
    let channels = cfg.list_channels()?;
    for channel in channels {
        match channel {
            (ref name, Ok(ref toolchain)) => {
                let distributable = DistributableToolchain::new(&toolchain)?;
                let current_version = distributable.show_version()?;
                let dist_version = distributable.show_dist_version()?;
                // Terminal attribute/color failures are deliberately ignored
                // (output still works on dumb terminals / pipes).
                let _ = t.attr(term2::Attr::Bold);
                write!(t, "{} - ", name)?;
                match (current_version, dist_version) {
                    (None, None) => {
                        let _ = t.fg(term2::color::RED);
                        writeln!(t, "Cannot identify installed or update versions")?;
                    }
                    (Some(cv), None) => {
                        let _ = t.fg(term2::color::GREEN);
                        write!(t, "Up to date")?;
                        let _ = t.reset();
                        writeln!(t, " : {}", cv)?;
                    }
                    (Some(cv), Some(dv)) => {
                        let _ = t.fg(term2::color::YELLOW);
                        write!(t, "Update available")?;
                        let _ = t.reset();
                        writeln!(t, " : {} -> {}", cv, dv)?;
                    }
                    (None, Some(dv)) => {
                        let _ = t.fg(term2::color::YELLOW);
                        write!(t, "Update available")?;
                        let _ = t.reset();
                        writeln!(t, " : (Unknown version) -> {}", dv)?;
                    }
                }
            }
            // A channel that failed to resolve aborts the whole check.
            (_, Err(err)) => return Err(err.into()),
        }
    }
    Ok(())
}
/// Handle `rustup update` / `rustup toolchain install`.
///
/// With explicit toolchain names: install or update each (distributable
/// only), honoring `--profile`, `--component`, `--target`, `--force` and
/// `--allow-downgrade`, and make the first successful install the
/// default if none is set. Without names: update all channels, then
/// clean download/temp directories. Self-update runs unless disabled by
/// flag or build configuration.
fn update(cfg: &mut Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let self_update = !m.is_present("no-self-update") && !self_update::NEVER_SELF_UPDATE;
    if let Some(p) = m.value_of("profile") {
        let p = Profile::from_str(p)?;
        cfg.set_profile_override(p);
    }
    // Freeze cfg after the profile override is applied.
    let cfg = &cfg;
    if cfg.get_profile()? == Profile::Complete {
        warn!("{}", common::WARN_COMPLETE_PROFILE);
    }
    if let Some(names) = m.values_of("toolchain") {
        for name in names {
            // Catch `rustup update <triple>` mistakes before resolving.
            update_bare_triple_check(cfg, name)?;
            let toolchain = cfg.get_toolchain(name, false)?;
            let status = if !toolchain.is_custom() {
                let components: Vec<_> = m
                    .values_of("components")
                    .map(|v| v.collect())
                    .unwrap_or_else(Vec::new);
                let targets: Vec<_> = m
                    .values_of("targets")
                    .map(|v| v.collect())
                    .unwrap_or_else(Vec::new);
                let distributable = DistributableToolchain::new(&toolchain)?;
                Some(distributable.install_from_dist(
                    m.is_present("force"),
                    m.is_present("allow-downgrade"),
                    &components,
                    &targets,
                )?)
            } else if !toolchain.exists() {
                // Custom toolchains cannot be fetched; they must already exist.
                return Err(ErrorKind::InvalidToolchainName(toolchain.name().to_string()).into());
            } else {
                None
            };
            if let Some(status) = status.clone() {
                println!();
                common::show_channel_update(cfg, toolchain.name(), Ok(status))?;
            }
            // First fresh install becomes the default when none is configured.
            if cfg.get_default()?.is_none() {
                use rustup::UpdateStatus;
                if let Some(UpdateStatus::Installed) = status {
                    toolchain.make_default()?;
                }
            }
        }
        if self_update {
            common::self_update(|| Ok(()))?;
        }
    } else {
        common::update_all_channels(cfg, self_update, m.is_present("force"))?;
        info!("cleaning up downloads & tmp directories");
        utils::delete_dir_contents(&cfg.download_dir);
        cfg.temp_cfg.clean();
    }
    Ok(())
}
/// Handle `rustup run <toolchain> <command…>`: execute the command with
/// the environment configured for the given toolchain, then exit with
/// the child's exit code (this function does not return on success).
fn run(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    // Both args are declared required, so unwrap cannot fail here.
    let toolchain = m.value_of("toolchain").unwrap();
    let argv: Vec<_> = m.values_of("command").unwrap().collect();
    let cmd = cfg.create_command_for_toolchain(toolchain, m.is_present("install"), argv[0])?;
    let ExitCode(code) = command::run_command_for_dir(cmd, argv[0], &argv[1..])?;
    process::exit(code)
}
/// Handle `rustup which <command>`: print the path of the binary that
/// would run, either for an explicitly named toolchain or for the
/// toolchain active in the current directory.
fn which(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let binary = m.value_of("command").unwrap();
    let resolved = match m.value_of("toolchain") {
        Some(toolchain) => cfg
            .which_binary_by_toolchain(toolchain, binary)?
            .expect("binary not found"),
        None => cfg
            .which_binary(&utils::current_dir()?, binary)?
            .expect("binary not found"),
    };
    utils::assert_is_file(&resolved)?;
    println!("{}", resolved.display());
    Ok(())
}
/// Handle `rustup show`: print the default host triple, the rustup home
/// directory, and then up to three sections — installed toolchains,
/// installed targets for the active toolchain, and the active toolchain
/// itself. Section headers are printed only when more than one section
/// is shown.
fn show(cfg: &Cfg) -> Result<()> {
    // Print host triple
    {
        let mut t = term2::stdout();
        t.attr(term2::Attr::Bold)?;
        write!(t, "Default host: ")?;
        t.reset()?;
        writeln!(t, "{}", cfg.get_default_host_triple()?)?;
    }
    // Print rustup home directory
    {
        let mut t = term2::stdout();
        t.attr(term2::Attr::Bold)?;
        write!(t, "rustup home:  ")?;
        t.reset()?;
        writeln!(t, "{}", cfg.rustup_dir.display())?;
        writeln!(t)?;
    }
    let cwd = utils::current_dir()?;
    let installed_toolchains = cfg.list_toolchains()?;
    // XXX: we may want a find_without_install capability for show.
    let active_toolchain = cfg.find_or_install_override_toolchain_or_default(&cwd);
    // active_toolchain will carry the reason we don't have one in its detail.
    // Collect the installed rust-std targets of the active toolchain
    // (empty for custom toolchains or when no toolchain is active).
    let active_targets = if let Ok(ref at) = active_toolchain {
        if let Ok(distributable) = DistributableToolchain::new(&at.0) {
            match distributable.list_components() {
                Ok(cs_vec) => cs_vec
                    .into_iter()
                    .filter(|c| c.component.short_name_in_manifest() == "rust-std")
                    .filter(|c| c.installed)
                    .collect(),
                Err(_) => vec![],
            }
        } else {
            // These three vec![] could perhaps be reduced with and_then on active_toolchain.
            vec![]
        }
    } else {
        vec![]
    };
    let show_installed_toolchains = installed_toolchains.len() > 1;
    let show_active_targets = active_targets.len() > 1;
    let show_active_toolchain = true;
    // Only need to display headers if we have multiple sections
    let show_headers = [
        show_installed_toolchains,
        show_active_targets,
        show_active_toolchain,
    ]
    .iter()
    .filter(|x| **x)
    .count()
        > 1;
    if show_installed_toolchains {
        let mut t = term2::stdout();
        if show_headers {
            print_header(&mut t, "installed toolchains")?;
        }
        let default_name: Result<String> = cfg
            .get_default()?
            .ok_or_else(|| "no default toolchain configured".into());
        let default_name = default_name?;
        for it in installed_toolchains {
            if default_name == it {
                writeln!(t, "{} (default)", it)?;
            } else {
                writeln!(t, "{}", it)?;
            }
        }
        if show_headers {
            writeln!(t)?
        };
    }
    if show_active_targets {
        let mut t = term2::stdout();
        if show_headers {
            print_header(&mut t, "installed targets for active toolchain")?;
        }
        for at in active_targets {
            writeln!(
                t,
                "{}",
                at.component
                    .target
                    .as_ref()
                    .expect("rust-std should have a target")
            )?;
        }
        if show_headers {
            writeln!(t)?;
        };
    }
    if show_active_toolchain {
        let mut t = term2::stdout();
        if show_headers {
            print_header(&mut t, "active toolchain")?;
        }
        match active_toolchain {
            Ok(atc) => match atc {
                (ref toolchain, Some(ref reason)) => {
                    writeln!(t, "{} ({})", toolchain.name(), reason)?;
                    writeln!(t, "{}", toolchain.rustc_version())?;
                }
                (ref toolchain, None) => {
                    writeln!(t, "{} (default)", toolchain.name())?;
                    writeln!(t, "{}", toolchain.rustc_version())?;
                }
            },
            // "No toolchain selected" is an expected state, not an error.
            Err(rustup::Error(rustup::ErrorKind::ToolchainNotSelected, _)) => {
                writeln!(t, "no active toolchain")?;
            }
            Err(err) => {
                if let Some(cause) = err.source() {
                    writeln!(t, "(error: {}, {})", err, cause)?;
                } else {
                    writeln!(t, "(error: {})", err)?;
                }
            }
        }
        if show_headers {
            writeln!(t)?
        }
    }
    // Print a bold, dash-underlined section header followed by a blank line.
    fn print_header(t: &mut term::StdoutTerminal, s: &str) -> Result<()> {
        t.attr(term2::Attr::Bold)?;
        writeln!(t, "{}", s)?;
        writeln!(t, "{}", iter::repeat("-").take(s.len()).collect::<String>())?;
        writeln!(t)?;
        t.reset()?;
        Ok(())
    }
    Ok(())
}
/// Handle `rustup show active-toolchain`: print the toolchain active in
/// the current directory (with its override reason, if any). Prints
/// nothing when no toolchain is selected.
fn show_active_toolchain(cfg: &Cfg) -> Result<()> {
    let cwd = utils::current_dir()?;
    match cfg.find_or_install_override_toolchain_or_default(&cwd) {
        Ok((toolchain, Some(reason))) => println!("{} ({})", toolchain.name(), reason),
        Ok((toolchain, None)) => println!("{} (default)", toolchain.name()),
        // No selection is a valid state, not a failure.
        Err(rustup::Error(rustup::ErrorKind::ToolchainNotSelected, _)) => {}
        Err(e) => return Err(e.into()),
    }
    Ok(())
}
/// Handle `rustup show home`: print the computed RUSTUP_HOME directory.
fn show_rustup_home(cfg: &Cfg) -> Result<()> {
    let home = cfg.rustup_dir.display();
    println!("{}", home);
    Ok(())
}
/// Handle `rustup target list`: show either only installed targets
/// (`--installed`) or all available targets for the selected toolchain.
fn target_list(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let toolchain = explicit_or_dir_toolchain(cfg, m)?;
    match m.is_present("installed") {
        true => common::list_installed_targets(&toolchain),
        false => common::list_targets(&toolchain),
    }
}
/// Handle `rustup target add`: install one or more `rust-std` targets
/// into the selected toolchain. The special target name "all" (which
/// must be the only target given) expands to every available but not
/// yet installed target.
///
/// Errors with `ComponentsUnsupported` for custom toolchains, which
/// have no component manifest.
fn target_add(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let toolchain = explicit_or_dir_toolchain(cfg, m)?;
    // XXX: long term move this error to cli ? the normal .into doesn't work
    // because Result here is the wrong sort and expression type ascription
    // isn't a feature yet.
    // list_components *and* add_component would both be inappropriate for
    // custom toolchains.
    if toolchain.is_custom() {
        return Err(rustup::Error(
            rustup::ErrorKind::ComponentsUnsupported(toolchain.name().to_string()),
            error_chain::State::default(),
        )
        .into());
    }
    // The guard above ensures this is a distributable toolchain; build the
    // handle once instead of re-creating it on every loop iteration.
    let distributable = DistributableToolchain::new(&toolchain)?;
    let mut targets: Vec<String> = m
        .values_of("target")
        .unwrap()
        .map(ToString::to_string)
        .collect();
    if targets.contains(&"all".to_string()) {
        // "all" cannot be combined with explicit targets.
        if targets.len() != 1 {
            return Err(ErrorKind::TargetAllSpecifiedWithTargets(targets).into());
        }
        targets.clear();
        for component in distributable.list_components()? {
            if component.component.short_name_in_manifest() == "rust-std"
                && component.available
                && !component.installed
            {
                let target = component
                    .component
                    .target
                    .as_ref()
                    .expect("rust-std should have a target");
                targets.push(target.to_string());
            }
        }
    }
    for target in &targets {
        let new_component = Component::new(
            "rust-std".to_string(),
            Some(TargetTriple::new(target)),
            false,
        );
        distributable.add_component(new_component)?;
    }
    Ok(())
}
/// `rustup target remove`: uninstall `rust-std` for each requested target.
fn target_remove(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let toolchain = explicit_or_dir_toolchain(cfg, m)?;
    // The distributable view is loop-invariant; build it once rather than on
    // every loop iteration. `values_of("target")` is a required arg, so the
    // error surfaces at the same point it did before.
    let distributable = DistributableToolchain::new(&toolchain)
        .chain_err(|| rustup::ErrorKind::ComponentsUnsupported(toolchain.name().to_string()))?;
    for target in m.values_of("target").unwrap() {
        let new_component = Component::new(
            "rust-std".to_string(),
            Some(TargetTriple::new(target)),
            false,
        );
        distributable.remove_component(new_component)?;
    }
    Ok(())
}
/// `rustup component list`: list components for the selected toolchain.
/// With `--installed`, restrict the listing to installed components.
fn component_list(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let toolchain = explicit_or_dir_toolchain(cfg, m)?;
    let installed_only = m.is_present("installed");
    if installed_only {
        common::list_installed_components(&toolchain)
    } else {
        common::list_components(&toolchain)
    }
}
/// `rustup component add`: install each named component into the toolchain.
fn component_add(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let toolchain = explicit_or_dir_toolchain(cfg, m)?;
    let distributable = DistributableToolchain::new(&toolchain)?;
    // An explicit --target wins; otherwise fall back to the toolchain's own
    // target triple from its description.
    let target = match m.value_of("target") {
        Some(t) => Some(TargetTriple::new(t)),
        None => distributable.desc().as_ref().ok().map(|d| d.target.clone()),
    };
    for component in m.values_of("component").unwrap() {
        let new_component = Component::new_with_target(component, false)
            .unwrap_or_else(|| Component::new(component.to_string(), target.clone(), true));
        distributable.add_component(new_component)?;
    }
    Ok(())
}
/// `rustup component remove`: uninstall each named component.
fn component_remove(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let toolchain = explicit_or_dir_toolchain(cfg, m)?;
    let distributable = DistributableToolchain::new(&toolchain)
        .chain_err(|| rustup::ErrorKind::ComponentsUnsupported(toolchain.name().to_string()))?;
    // An explicit --target wins; otherwise fall back to the toolchain's own
    // target triple from its description.
    let target = match m.value_of("target") {
        Some(t) => Some(TargetTriple::new(t)),
        None => distributable.desc().as_ref().ok().map(|d| d.target.clone()),
    };
    for component in m.values_of("component").unwrap() {
        let new_component = Component::new_with_target(component, false)
            .unwrap_or_else(|| Component::new(component.to_string(), target.clone(), true));
        distributable.remove_component(new_component)?;
    }
    Ok(())
}
/// Resolve the toolchain to operate on: the explicit `--toolchain` argument
/// when given, otherwise whichever toolchain applies to the current directory.
fn explicit_or_dir_toolchain<'a>(cfg: &'a Cfg, m: &ArgMatches<'_>) -> Result<Toolchain<'a>> {
    match m.value_of("toolchain") {
        Some(name) => {
            let toolchain = cfg.get_toolchain(name, false)?;
            Ok(toolchain)
        }
        None => {
            let cwd = utils::current_dir()?;
            let (toolchain, _) = cfg.toolchain_for_dir(&cwd)?;
            Ok(toolchain)
        }
    }
}
/// `rustup toolchain list`: print installed toolchains.
fn toolchain_list(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let verbose = m.is_present("verbose");
    common::list_toolchains(cfg, verbose)
}
/// `rustup toolchain link`: register a local directory as a custom toolchain.
/// Fails when the requested name is not valid for a custom toolchain.
fn toolchain_link(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let name = m.value_of("toolchain").unwrap();
    let path = m.value_of("path").unwrap();
    let toolchain = cfg.get_toolchain(name, true)?;
    match CustomToolchain::new(&toolchain) {
        Ok(custom) => custom
            .install_from_dir(Path::new(path), true)
            .map_err(Into::into),
        Err(_) => Err(ErrorKind::InvalidCustomToolchainName(toolchain.name().to_string()).into()),
    }
}
/// `rustup toolchain uninstall`: remove each named toolchain from disk.
fn toolchain_remove(cfg: &mut Cfg, m: &ArgMatches<'_>) -> Result<()> {
    for name in m.values_of("toolchain").unwrap() {
        cfg.get_toolchain(name, false)?.remove()?;
    }
    Ok(())
}
/// `rustup override set`: pin a toolchain to a directory (the `--path`
/// argument, or the current directory), installing the toolchain first if it
/// is a distributable one that isn't installed yet.
fn override_add(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let toolchain = m.value_of("toolchain").unwrap();
    let toolchain = cfg.get_toolchain(toolchain, false)?;
    // Distributable toolchains can be installed on demand; custom toolchains
    // must already exist on disk.
    let status = if !toolchain.is_custom() {
        let distributable = DistributableToolchain::new(&toolchain)?;
        Some(distributable.install_from_dist_if_not_installed()?)
    } else if !toolchain.exists() {
        return Err(ErrorKind::ToolchainNotInstalled(toolchain.name().to_string()).into());
    } else {
        None
    };
    let path = if let Some(path) = m.value_of("path") {
        PathBuf::from(path)
    } else {
        utils::current_dir()?
    };
    toolchain.make_override(&path)?;
    // If an install/update happened above, report what changed.
    if let Some(status) = status {
        println!();
        common::show_channel_update(cfg, toolchain.name(), Ok(status))?;
    }
    Ok(())
}
/// `rustup override unset`: remove directory overrides.
///
/// With `--nonexistent`, drop every override whose directory no longer
/// exists; with `--path`, the given directory; otherwise the current
/// directory.
fn override_remove(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let paths = if m.is_present("nonexistent") {
        // Collect the override keys whose directory has disappeared.
        let list: Vec<_> = cfg.settings_file.with(|s| {
            Ok(s.overrides
                .iter()
                .filter_map(|(k, _)| {
                    if Path::new(k).is_dir() {
                        None
                    } else {
                        Some(k.clone())
                    }
                })
                .collect())
        })?;
        if list.is_empty() {
            info!("no nonexistent paths detected");
        }
        list
    } else if m.is_present("path") {
        vec![m.value_of("path").unwrap().to_string()]
    } else {
        vec![utils::current_dir()?.to_str().unwrap().to_string()]
    };
    for path in paths {
        if cfg
            .settings_file
            .with_mut(|s| Ok(s.remove_override(&Path::new(&path), cfg.notify_handler.as_ref())))?
        {
            info!("override toolchain for '{}' removed", path);
        } else {
            info!("no override toolchain for '{}'", path);
            // Only hint at --path when the user used the implicit
            // current-directory form.
            if !m.is_present("path") && !m.is_present("nonexistent") {
                info!(
                    "you may use `--path <path>` option to remove override toolchain \
                     for a specific path"
                );
            }
        }
    }
    Ok(())
}
// Table driving `rustup doc`'s document-selection flags.
// flags can be used to open specific documents, e.g. `rustup doc --nomicon`
// tuple elements: document name used as flag, help message, document index path
// (the path is relative to the toolchain's documentation root)
const DOCS_DATA: &[(&str, &str, &str,)] = &[
    ("alloc", "The Rust core allocation and collections library", "alloc/index.html"),
    ("book", "The Rust Programming Language book", "book/index.html"),
    ("cargo", "The Cargo Book", "cargo/index.html"),
    ("core", "The Rust Core Library", "core/index.html"),
    ("edition-guide", "The Rust Edition Guide", "edition-guide/index.html"),
    ("nomicon", "The Dark Arts of Advanced and Unsafe Rust Programming", "nomicon/index.html"),
    ("proc_macro", "A support library for macro authors when defining new macros", "proc_macro/index.html"),
    ("reference", "The Rust Reference", "reference/index.html"),
    ("rust-by-example", "A collection of runnable examples that illustrate various Rust concepts and standard libraries", "rust-by-example/index.html"),
    ("rustc", "The compiler for the Rust programming language", "rustc/index.html"),
    ("rustdoc", "Generate documentation for Rust projects", "rustdoc/index.html"),
    ("std", "Standard library API documentation", "std/index.html"),
    ("test", "Support code for rustc's built in unit-test and micro-benchmarking framework", "test/index.html"),
    ("unstable-book", "The Unstable Book", "unstable-book/index.html"),
    ("embedded-book", "The Embedded Rust Book", "embedded-book/index.html"),
];
/// `rustup doc`: open (or, with `--path`, print the location of) locally
/// installed documentation. A document flag from `DOCS_DATA`, a free-form
/// `topic`, or the doc index is resolved to a path under the toolchain's
/// documentation directory.
fn doc(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let toolchain = explicit_or_dir_toolchain(cfg, m)?;
    // Bail out with a helpful hint when `rust-docs` is available but not
    // installed for this (distributable) toolchain.
    if let Ok(distributable) = DistributableToolchain::new(&toolchain) {
        let components = distributable.list_components()?;
        if let [_] = components
            .into_iter()
            .filter(|cstatus| {
                cstatus.component.short_name_in_manifest() == "rust-docs" && !cstatus.installed
            })
            .take(1)
            .collect::<Vec<ComponentStatus>>()
            .as_slice()
        {
            info!(
                "`rust-docs` not installed in toolchain `{}`",
                toolchain.name()
            );
            info!(
                "To install, try `rustup component add --toolchain {} rust-docs`",
                toolchain.name()
            );
            return Err("unable to view documentation which is not installed".into());
        }
    }
    // Owned storage for the topic path so `doc_url` can borrow from it.
    let topical_path: PathBuf;
    let doc_url = if let Some(topic) = m.value_of("topic") {
        topical_path = topical_doc::local_path(&toolchain.doc_path("").unwrap(), topic)?;
        topical_path.to_str().unwrap()
    } else if let Some((_, _, path)) = DOCS_DATA.iter().find(|(name, _, _)| m.is_present(name)) {
        path
    } else {
        "index.html"
    };
    if m.is_present("path") {
        // --path: print the location instead of opening a browser.
        let doc_path = toolchain.doc_path(doc_url)?;
        println!("{}", doc_path.display());
        Ok(())
    } else {
        toolchain.open_docs(doc_url).map_err(Into::into)
    }
}
/// `rustup man`: open a man page shipped with the selected toolchain by
/// prepending the toolchain's `share/man` directory to `MANPATH` so its
/// pages take precedence over the system ones.
fn man(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let command = m.value_of("command").unwrap();
    let toolchain = explicit_or_dir_toolchain(cfg, m)?;
    let mut toolchain = toolchain.path().to_path_buf();
    toolchain.push("share");
    toolchain.push("man");
    utils::assert_is_directory(&toolchain)?;
    let mut manpaths = std::ffi::OsString::from(toolchain);
    manpaths.push(":"); // prepend to the default MANPATH list
    if let Some(path) = std::env::var_os("MANPATH") {
        manpaths.push(path);
    }
    // NOTE(review): spawning `man` panics on failure via `expect`, and a
    // non-zero exit status is ignored; consider propagating errors instead —
    // TODO confirm intended behavior.
    Command::new("man")
        .env("MANPATH", manpaths)
        .arg(command)
        .status()
        .expect("failed to open man page");
    Ok(())
}
/// `rustup self uninstall`: remove rustup itself, prompting unless
/// `--no-prompt` (or `-y`) was passed.
fn self_uninstall(m: &ArgMatches<'_>) -> Result<()> {
    self_update::uninstall(m.is_present("no-prompt"))
}
/// `rustup set default-host`: persist the default host triple in settings.
fn set_default_host_triple(cfg: &Cfg, m: &ArgMatches<'_>) -> Result<()> {
    let triple = m.value_of("host_triple").unwrap();
    cfg.set_default_host_triple(triple)?;
    Ok(())
}
/// `rustup set profile`: persist the install profile name in settings.
fn set_profile(cfg: &mut Cfg, m: &ArgMatches) -> Result<()> {
    // `value_of(..).unwrap()` already yields `&str`; the previous extra `&`
    // produced a needless `&&str` borrow (clippy::needless_borrow).
    cfg.set_profile(m.value_of("profile-name").unwrap())?;
    Ok(())
}
/// `rustup show profile`: print the currently configured install profile.
fn show_profile(cfg: &Cfg) -> Result<()> {
    let profile = cfg.get_profile()?;
    println!("{}", profile);
    Ok(())
}
/// `rustup show keys`: print every configured PGP key, one info line per
/// line of the key's textual representation.
fn show_keys(cfg: &Cfg) -> Result<()> {
    for key in cfg.get_pgp_keys() {
        for line in key.show_key()? {
            info!("{}", line);
        }
    }
    Ok(())
}
/// The command whose shell-completion script `rustup completions` should emit.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum CompletionCommand {
    Rustup,
    Cargo,
}
/// Mapping between user-facing command names and `CompletionCommand`
/// variants; used for parsing, display, and listing valid values.
static COMPLETIONS: &[(&str, CompletionCommand)] = &[
    ("rustup", CompletionCommand::Rustup),
    ("cargo", CompletionCommand::Cargo),
];
impl CompletionCommand {
    /// Names of all supported completion targets, in declaration order.
    fn variants() -> Vec<&'static str> {
        COMPLETIONS.iter().map(|&(name, _)| name).collect()
    }
}
impl FromStr for CompletionCommand {
    type Err = String;

    /// Parse a completion target name (case-insensitively). On failure the
    /// error message lists the valid values.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        match COMPLETIONS
            .iter()
            .find(|&(val, _)| val.eq_ignore_ascii_case(s))
        {
            Some(&(_, cmd)) => Ok(cmd),
            None => {
                // Build the "valid values" hint with a simple join instead of
                // the old quadratic fold-into-format! plus trim of the
                // trailing ", "; the resulting string is identical.
                let completion_options = COMPLETIONS
                    .iter()
                    .map(|&(v, _)| v)
                    .collect::<Vec<_>>()
                    .join(", ");
                Err(format!("[valid values: {}]", completion_options))
            }
        }
    }
}
impl fmt::Display for CompletionCommand {
    /// Write the user-facing name of the command (the inverse of `FromStr`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (val, _) = COMPLETIONS
            .iter()
            .find(|&(_, cmd)| cmd == self)
            .expect("COMPLETIONS covers every CompletionCommand variant");
        write!(f, "{}", val)
    }
}
/// `rustup completions`: write a shell-completion script for `command` to
/// stdout. Rustup's own completions are generated by clap; cargo's are
/// shipped with rustc, so for cargo we emit a stub that sources the script
/// installed in the active sysroot.
fn output_completion_script(shell: Shell, command: CompletionCommand) -> Result<()> {
    match command {
        CompletionCommand::Rustup => {
            cli().gen_completions_to("rustup", shell, &mut term2::stdout());
        }
        CompletionCommand::Cargo => {
            // Zsh requires the #compdef header before the sourced script.
            if let Shell::Zsh = shell {
                writeln!(&mut term2::stdout(), "#compdef cargo")?;
            }
            let script = match shell {
                Shell::Bash => "/etc/bash_completion.d/cargo",
                Shell::Zsh => "/share/zsh/site-functions/_cargo",
                _ => return Err(ErrorKind::UnsupportedCompletionShell(shell, command).into()),
            };
            writeln!(
                &mut term2::stdout(),
                "source $(rustc --print sysroot){}",
                script,
            )?;
        }
    }
    Ok(())
}
| 37.6682 | 152 | 0.47824 |
dba19dfd0238444fa326fcdc079b6fab2b1d9523
| 1,197 |
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Test casts for alignment issues
#![feature(rustc_private)]
extern crate libc;
#[warn(clippy::cast_ptr_alignment)]
#[allow(clippy::no_effect, clippy::unnecessary_operation, clippy::cast_lossless)]
fn main() {
    /* These should be warned against */
    // cast to more-strictly-aligned type
    (&1u8 as *const u8) as *const u16; // u16 requires 2-byte alignment; u8 only guarantees 1
    (&mut 1u8 as *mut u8) as *mut u16; // same alignment hazard for the mutable pointer
    /* These should be okay */
    // not a pointer type
    1u8 as u16;
    // cast to less-strictly-aligned type
    (&1u16 as *const u16) as *const u8;
    (&mut 1u16 as *mut u16) as *mut u8;
    // For c_void, we should trust the user. See #2677
    (&1u32 as *const u32 as *const std::os::raw::c_void) as *const u32;
    (&1u32 as *const u32 as *const libc::c_void) as *const u32;
}
| 34.2 | 81 | 0.684211 |
abd46b60ab194498d83f429979c427695166735c
| 1,287 |
// Regression test for #57979. This situation is meant to be an error.
// As noted in the issue thread, we decided to forbid nested impl
// trait of this kind:
//
// ```rust
// fn foo() -> impl Foo<impl Bar> { .. }
// ```
//
// Basically there are two hidden variables here, let's call them `X`
// and `Y`, and we must prove that:
//
// ```
// X: Foo<Y>
// Y: Bar
// ```
//
// However, the user is only giving us the return type `X`. It's true
// that in some cases, we can infer `Y` from `X`, because `X` only
// implements `Foo` for one type (and indeed the compiler does
// inference of this kind), but I do recall that we intended to forbid
// this -- in part because such inference is fragile, and there is not
// necessarily a way for the user to be more explicit should the
// inference fail (so you could get stuck with no way to port your
// code forward if, for example, more impls are added to an existing
// type).
//
// The same seems to apply in this situation. Here there are three impl traits, so we have
//
// ```
// X: IntoIterator<Item = Y>
// Y: Borrow<Data<Z>>
// Z: AsRef<[u8]>
// ```
use std::borrow::Borrow;
pub struct Data<TBody>(TBody); // wrapper so `impl Borrow<Data<..>>` below nests a third impl Trait
pub fn collect(_: impl IntoIterator<Item = impl Borrow<Data<impl AsRef<[u8]>>>>) { // three nested impl Traits: rejected
    //~^ ERROR
    unimplemented!() // never reached; compilation is expected to fail on the signature above
}
| 29.930233 | 90 | 0.658897 |
c1500e3eb82f4e7589948e30b30cbaa8205de1f4
| 1,883 |
use crate::client::types::ClientTypes;
use crate::common::http_sink::HttpSink;
use crate::common::types::Types;
use crate::error;
use bytes::Bytes;
use crate::marshall::Marshaller;
use crate::or_static::arc::ArcOrStatic;
use crate::proto::grpc_frame::write_grpc_frame_to_vec;
use crate::result;
use crate::server::types::ServerTypes;
use futures::task::Context;
use httpbis;
use std::task::Poll;
/// Errors that can occur while sending a gRPC message.
pub enum SendError {
    /// The underlying HTTP/2 layer failed to send.
    Http(httpbis::SendError),
    /// Reserved for marshalling failures; the leading underscore marks it as
    /// currently unconstructed in this module.
    _Marshall(error::Error),
}
impl From<httpbis::SendError> for SendError {
    /// Wrap a low-level HTTP/2 send failure.
    fn from(e: httpbis::SendError) -> Self {
        Self::Http(e)
    }
}
/// Type-erased sink half of a streaming call: readiness polling plus raw
/// (already marshalled) message writes.
pub(crate) trait SinkUntyped {
    /// Poll until the underlying stream can accept more data.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), httpbis::StreamDead>>;
    /// Send one marshalled message body.
    fn send_data(&mut self, message: Bytes) -> result::Result<()>;
}
/// State shared by untyped sinks: the raw HTTP/2 sink for this call.
pub(crate) struct SinkCommonUntyped<T: Types> {
    pub(crate) http: T::HttpSink,
}
impl<T: Types> SinkCommonUntyped<T> {
    /// Wrap an already marshalled message in a gRPC frame and hand it to the
    /// HTTP/2 sink.
    pub fn send_data(&mut self, message: Bytes) -> result::Result<()> {
        // TODO: allocation
        self.http
            .send_data(Bytes::from(write_grpc_frame_to_vec(&message)))?;
        Ok(())
    }
}
/// Typed sink: marshalls messages of type `M` before delegating to the
/// type-erased sink.
pub(crate) struct SinkCommon<M: 'static, T: Types> {
    pub marshaller: ArcOrStatic<dyn Marshaller<M>>,
    pub sink: T::SinkUntyped,
}
impl<M: 'static, T: Types> SinkCommon<M, T> {
    /// Poll the underlying sink for readiness.
    pub fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), httpbis::StreamDead>> {
        self.sink.poll(cx)
    }
    /// Marshall `message` and forward the resulting bytes to the sink.
    pub fn send_data(&mut self, message: M) -> result::Result<()> {
        let mut bytes = Vec::new();
        self.marshaller.write(&message, &mut bytes)?;
        // TODO: extra allocation
        self.sink.send_data(Bytes::from(bytes))?;
        Ok(())
    }
}
// Compile-time assertion that sinks are `Send` for both the client and the
// server type families; this function is never called at runtime.
fn _assert_types() {
    crate::assert_types::assert_send::<SinkCommon<String, ClientTypes>>();
    crate::assert_types::assert_send::<SinkCommon<String, ServerTypes>>();
}
| 27.691176 | 91 | 0.647371 |
22851193ae8f65ffd9ba48a55b69018987cf5a81
| 1,806 |
use std::boxed::Box;
use std::ptr::Shared;
use std::option::Option;
/// One link of the queue: a value plus an owning pointer to its successor.
struct Node {
    elem: i32,
    next: Option<Box<Node>>
}
impl Node {
    /// Build a detached node holding `e` with no successor.
    fn new(e: i32) -> Node {
        Node {
            elem: e,
            next: None
        }
    }
}
/// Singly linked FIFO queue of `i32`s.
///
/// NOTE(review): `tail` uses `std::ptr::Shared`, an unstable API that was
/// later removed in favor of `std::ptr::NonNull`; this code only builds on
/// an old nightly toolchain — confirm before reuse.
#[derive(Default)]
pub struct Queue {
    size: usize,               // cached element count
    head: Option<Box<Node>>,   // owning pointer to the first node
    tail: Option<Shared<Node>> // non-owning cursor to the last node
}
#[allow(boxed_local)]
impl Queue {
    /// Create an empty queue.
    pub fn new() -> Queue {
        Queue {
            size: 0,
            head: None,
            tail: None
        }
    }
    /// `true` when the queue holds no elements.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Number of elements currently in the queue.
    #[inline]
    pub fn len(&self) -> usize {
        self.size
    }
    /// Append `e` at the tail in O(1).
    pub fn enqueue(&mut self, e: i32) {
        self.size += 1;
        let mut node = Box::new(Node::new(e));
        // Raw pointer to the node so a tail cursor survives after the box is
        // moved into the list below.
        let raw: *mut _ = &mut *node;
        match self.tail {
            // SAFETY (review): relies on `tail` always pointing at the live
            // last node owned via `head` — presumed upheld by enqueue/dequeue;
            // TODO confirm the deref semantics of the old `Shared` API here.
            Some(share) => unsafe {
                (**share).next = Some(node);
            },
            None => self.head = Some(node),
        }
        unsafe {
            self.tail = Some(Shared::new(raw));
        }
    }
    /// Linear scan: `true` if `e` is present in the queue.
    pub fn contains(&self, e: i32) -> bool {
        match self.head {
            Some(ref head) => {
                let mut node = head;
                // Walk until a match or the last node is reached.
                while (*node).elem != e && (*node).next.is_some() {
                    node = (*node).next.as_ref().unwrap();
                }
                (*node).elem == e
            },
            None => false,
        }
    }
    /// Remove and return the head element, or `None` when empty.
    pub fn dequeue(&mut self) -> Option<i32> {
        self.head.take().map(
            |head| {
                let h = *head;
                self.size -= 1;
                self.head = h.next;
                // Removing the last node also invalidates the tail cursor.
                if self.head.is_none() {
                    self.tail = None;
                }
                h.elem
            }
        )
    }
}
| 20.292135 | 67 | 0.403654 |
5610f6e49eac31e90fbdc14e654980671b1ef849
| 229,374 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Map a failed `CreateBotVersion` HTTP response onto the operation's modeled
/// error variants, falling back to a generic error for unknown codes.
/// Generated by smithy-rs — regenerate rather than hand-editing.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_bot_version_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateBotVersionOutput, crate::error::CreateBotVersionError>
{
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::CreateBotVersionError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::CreateBotVersionError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::CreateBotVersionError {
            meta: generic,
            kind: crate::error::CreateBotVersionErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateBotVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::CreateBotVersionError {
                meta: generic,
                kind: crate::error::CreateBotVersionErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateBotVersionError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalFailureException" => crate::error::CreateBotVersionError {
            meta: generic,
            kind: crate::error::CreateBotVersionErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateBotVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::CreateBotVersionError {
            meta: generic,
            kind: crate::error::CreateBotVersionErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateBotVersionError::unhandled)?;
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_create_bot_version_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::CreateBotVersionError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::CreateBotVersionError {
            meta: generic,
            kind: crate::error::CreateBotVersionErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateBotVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PreconditionFailedException" => crate::error::CreateBotVersionError {
            meta: generic,
            kind: crate::error::CreateBotVersionErrorKind::PreconditionFailedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::precondition_failed_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_precondition_failed_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateBotVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::CreateBotVersionError::generic(generic),
    })
}
/// Deserialize a successful `CreateBotVersion` response body into the
/// operation output. Generated by smithy-rs — regenerate rather than
/// hand-editing.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_bot_version_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateBotVersionOutput, crate::error::CreateBotVersionError>
{
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::create_bot_version_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_crate_operation_create_bot_version(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::CreateBotVersionError::unhandled)?;
        output.build()
    })
}
/// Map a failed `CreateIntentVersion` HTTP response onto the operation's
/// modeled error variants, falling back to a generic error for unknown codes.
/// Generated by smithy-rs — regenerate rather than hand-editing.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_intent_version_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::CreateIntentVersionOutput,
    crate::error::CreateIntentVersionError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::CreateIntentVersionError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::CreateIntentVersionError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::CreateIntentVersionError {
            meta: generic,
            kind: crate::error::CreateIntentVersionErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateIntentVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::CreateIntentVersionError {
                meta: generic,
                kind: crate::error::CreateIntentVersionErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateIntentVersionError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalFailureException" => crate::error::CreateIntentVersionError {
            meta: generic,
            kind: crate::error::CreateIntentVersionErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateIntentVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::CreateIntentVersionError {
            meta: generic,
            kind: crate::error::CreateIntentVersionErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateIntentVersionError::unhandled)?;
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_create_intent_version_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::CreateIntentVersionError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::CreateIntentVersionError {
            meta: generic,
            kind: crate::error::CreateIntentVersionErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateIntentVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PreconditionFailedException" => crate::error::CreateIntentVersionError {
            meta: generic,
            kind: crate::error::CreateIntentVersionErrorKind::PreconditionFailedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::precondition_failed_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_precondition_failed_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateIntentVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::CreateIntentVersionError::generic(generic),
    })
}
/// Deserialize a successful `CreateIntentVersion` response body into the
/// operation output. Generated by smithy-rs — regenerate rather than
/// hand-editing.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_intent_version_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::CreateIntentVersionOutput,
    crate::error::CreateIntentVersionError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::create_intent_version_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_crate_operation_create_intent_version(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::CreateIntentVersionError::unhandled)?;
        output.build()
    })
}
/// Map a failed `CreateSlotTypeVersion` HTTP response onto the operation's
/// modeled error variants, falling back to a generic error for unknown codes.
/// Generated by smithy-rs — regenerate rather than hand-editing.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_slot_type_version_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::CreateSlotTypeVersionOutput,
    crate::error::CreateSlotTypeVersionError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::CreateSlotTypeVersionError::unhandled)?;
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::CreateSlotTypeVersionError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::CreateSlotTypeVersionError {
            meta: generic,
            kind: crate::error::CreateSlotTypeVersionErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateSlotTypeVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::CreateSlotTypeVersionError {
                meta: generic,
                kind: crate::error::CreateSlotTypeVersionErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateSlotTypeVersionError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalFailureException" => crate::error::CreateSlotTypeVersionError {
            meta: generic,
            kind: crate::error::CreateSlotTypeVersionErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateSlotTypeVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::CreateSlotTypeVersionError {
            meta: generic,
            kind: crate::error::CreateSlotTypeVersionErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateSlotTypeVersionError::unhandled)?;
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_create_slot_type_version_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::CreateSlotTypeVersionError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::CreateSlotTypeVersionError {
            meta: generic,
            kind: crate::error::CreateSlotTypeVersionErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateSlotTypeVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PreconditionFailedException" => crate::error::CreateSlotTypeVersionError {
            meta: generic,
            kind: crate::error::CreateSlotTypeVersionErrorKind::PreconditionFailedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::precondition_failed_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_precondition_failed_exception_json_err(response.body().as_ref(), output).map_err(crate::error::CreateSlotTypeVersionError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::CreateSlotTypeVersionError::generic(generic),
    })
}
/// Deserialize a successful `CreateSlotTypeVersion` response body into the
/// operation output. Generated by smithy-rs — regenerate rather than
/// hand-editing.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_slot_type_version_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::CreateSlotTypeVersionOutput,
    crate::error::CreateSlotTypeVersionError,
> {
    Ok({
        #[allow(unused_mut)]
        let mut output = crate::output::create_slot_type_version_output::Builder::default();
        let _ = response;
        output = crate::json_deser::deser_operation_crate_operation_create_slot_type_version(
            response.body().as_ref(),
            output,
        )
        .map_err(crate::error::CreateSlotTypeVersionError::unhandled)?;
        output.build()
    })
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_bot_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteBotOutput, crate::error::DeleteBotError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DeleteBotError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteBotError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::DeleteBotError {
meta: generic,
kind: crate::error::DeleteBotErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => {
crate::error::DeleteBotError {
meta: generic,
kind: crate::error::DeleteBotErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalFailureException" => crate::error::DeleteBotError {
meta: generic,
kind: crate::error::DeleteBotErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteBotError {
meta: generic,
kind: crate::error::DeleteBotErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_delete_bot_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::DeleteBotError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::DeleteBotError {
meta: generic,
kind: crate::error::DeleteBotErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceInUseException" => crate::error::DeleteBotError {
meta: generic,
kind: crate::error::DeleteBotErrorKind::ResourceInUseException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_in_use_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_in_use_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteBotError::generic(generic),
})
}
/// Builds the (empty) modeled output for a successful `DeleteBot` call.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_bot_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteBotOutput, crate::error::DeleteBotError> {
    // `DeleteBot` has no modeled response payload; the body is ignored.
    let _ = response;
    Ok(crate::output::delete_bot_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_bot_alias_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteBotAliasOutput, crate::error::DeleteBotAliasError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DeleteBotAliasError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteBotAliasError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::DeleteBotAliasError {
meta: generic,
kind: crate::error::DeleteBotAliasErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotAliasError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => {
crate::error::DeleteBotAliasError {
meta: generic,
kind: crate::error::DeleteBotAliasErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotAliasError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalFailureException" => crate::error::DeleteBotAliasError {
meta: generic,
kind: crate::error::DeleteBotAliasErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotAliasError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteBotAliasError {
meta: generic,
kind: crate::error::DeleteBotAliasErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotAliasError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_delete_bot_alias_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::DeleteBotAliasError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::DeleteBotAliasError {
meta: generic,
kind: crate::error::DeleteBotAliasErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotAliasError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceInUseException" => crate::error::DeleteBotAliasError {
meta: generic,
kind: crate::error::DeleteBotAliasErrorKind::ResourceInUseException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_in_use_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_in_use_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotAliasError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteBotAliasError::generic(generic),
})
}
/// Builds the (empty) modeled output for a successful `DeleteBotAlias` call.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_bot_alias_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteBotAliasOutput, crate::error::DeleteBotAliasError> {
    // `DeleteBotAlias` has no modeled response payload; the body is ignored.
    let _ = response;
    Ok(crate::output::delete_bot_alias_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_bot_channel_association_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DeleteBotChannelAssociationOutput,
crate::error::DeleteBotChannelAssociationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DeleteBotChannelAssociationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DeleteBotChannelAssociationError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::DeleteBotChannelAssociationError {
meta: generic,
kind: crate::error::DeleteBotChannelAssociationErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotChannelAssociationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => {
crate::error::DeleteBotChannelAssociationError {
meta: generic,
kind: crate::error::DeleteBotChannelAssociationErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotChannelAssociationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalFailureException" => crate::error::DeleteBotChannelAssociationError {
meta: generic,
kind: crate::error::DeleteBotChannelAssociationErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotChannelAssociationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteBotChannelAssociationError {
meta: generic,
kind: crate::error::DeleteBotChannelAssociationErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotChannelAssociationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_delete_bot_channel_association_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::DeleteBotChannelAssociationError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::DeleteBotChannelAssociationError {
meta: generic,
kind: crate::error::DeleteBotChannelAssociationErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotChannelAssociationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteBotChannelAssociationError::generic(generic),
})
}
/// Builds the (empty) modeled output for a successful
/// `DeleteBotChannelAssociation` call.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_bot_channel_association_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DeleteBotChannelAssociationOutput,
    crate::error::DeleteBotChannelAssociationError,
> {
    // No modeled response payload for this operation; the body is ignored.
    let _ = response;
    Ok(crate::output::delete_bot_channel_association_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_bot_version_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteBotVersionOutput, crate::error::DeleteBotVersionError>
{
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DeleteBotVersionError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteBotVersionError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::DeleteBotVersionError {
meta: generic,
kind: crate::error::DeleteBotVersionErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => {
crate::error::DeleteBotVersionError {
meta: generic,
kind: crate::error::DeleteBotVersionErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalFailureException" => crate::error::DeleteBotVersionError {
meta: generic,
kind: crate::error::DeleteBotVersionErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteBotVersionError {
meta: generic,
kind: crate::error::DeleteBotVersionErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotVersionError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_delete_bot_version_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::DeleteBotVersionError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::DeleteBotVersionError {
meta: generic,
kind: crate::error::DeleteBotVersionErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceInUseException" => crate::error::DeleteBotVersionError {
meta: generic,
kind: crate::error::DeleteBotVersionErrorKind::ResourceInUseException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_in_use_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_in_use_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteBotVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteBotVersionError::generic(generic),
})
}
/// Builds the (empty) modeled output for a successful `DeleteBotVersion` call.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_bot_version_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteBotVersionOutput, crate::error::DeleteBotVersionError>
{
    // `DeleteBotVersion` has no modeled response payload; the body is ignored.
    let _ = response;
    Ok(crate::output::delete_bot_version_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_intent_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteIntentOutput, crate::error::DeleteIntentError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DeleteIntentError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteIntentError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::DeleteIntentError {
meta: generic,
kind: crate::error::DeleteIntentErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => {
crate::error::DeleteIntentError {
meta: generic,
kind: crate::error::DeleteIntentErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalFailureException" => crate::error::DeleteIntentError {
meta: generic,
kind: crate::error::DeleteIntentErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteIntentError {
meta: generic,
kind: crate::error::DeleteIntentErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_delete_intent_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::DeleteIntentError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::DeleteIntentError {
meta: generic,
kind: crate::error::DeleteIntentErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceInUseException" => crate::error::DeleteIntentError {
meta: generic,
kind: crate::error::DeleteIntentErrorKind::ResourceInUseException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_in_use_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_in_use_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteIntentError::generic(generic),
})
}
/// Builds the (empty) modeled output for a successful `DeleteIntent` call.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_intent_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteIntentOutput, crate::error::DeleteIntentError> {
    // `DeleteIntent` has no modeled response payload; the body is ignored.
    let _ = response;
    Ok(crate::output::delete_intent_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_intent_version_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DeleteIntentVersionOutput,
crate::error::DeleteIntentVersionError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DeleteIntentVersionError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteIntentVersionError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::DeleteIntentVersionError {
meta: generic,
kind: crate::error::DeleteIntentVersionErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => {
crate::error::DeleteIntentVersionError {
meta: generic,
kind: crate::error::DeleteIntentVersionErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalFailureException" => crate::error::DeleteIntentVersionError {
meta: generic,
kind: crate::error::DeleteIntentVersionErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteIntentVersionError {
meta: generic,
kind: crate::error::DeleteIntentVersionErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentVersionError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_delete_intent_version_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::DeleteIntentVersionError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::DeleteIntentVersionError {
meta: generic,
kind: crate::error::DeleteIntentVersionErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceInUseException" => crate::error::DeleteIntentVersionError {
meta: generic,
kind: crate::error::DeleteIntentVersionErrorKind::ResourceInUseException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_in_use_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_in_use_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteIntentVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteIntentVersionError::generic(generic),
})
}
/// Builds the (empty) modeled output for a successful `DeleteIntentVersion` call.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_intent_version_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DeleteIntentVersionOutput,
    crate::error::DeleteIntentVersionError,
> {
    // No modeled response payload for this operation; the body is ignored.
    let _ = response;
    Ok(crate::output::delete_intent_version_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_slot_type_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteSlotTypeOutput, crate::error::DeleteSlotTypeError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DeleteSlotTypeError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteSlotTypeError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::DeleteSlotTypeError {
meta: generic,
kind: crate::error::DeleteSlotTypeErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => {
crate::error::DeleteSlotTypeError {
meta: generic,
kind: crate::error::DeleteSlotTypeErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalFailureException" => crate::error::DeleteSlotTypeError {
meta: generic,
kind: crate::error::DeleteSlotTypeErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteSlotTypeError {
meta: generic,
kind: crate::error::DeleteSlotTypeErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_delete_slot_type_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::DeleteSlotTypeError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::DeleteSlotTypeError {
meta: generic,
kind: crate::error::DeleteSlotTypeErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceInUseException" => crate::error::DeleteSlotTypeError {
meta: generic,
kind: crate::error::DeleteSlotTypeErrorKind::ResourceInUseException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_in_use_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_in_use_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteSlotTypeError::generic(generic),
})
}
/// Builds the (empty) modeled output for a successful `DeleteSlotType` call.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_slot_type_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteSlotTypeOutput, crate::error::DeleteSlotTypeError> {
    // `DeleteSlotType` has no modeled response payload; the body is ignored.
    let _ = response;
    Ok(crate::output::delete_slot_type_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_slot_type_version_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DeleteSlotTypeVersionOutput,
crate::error::DeleteSlotTypeVersionError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DeleteSlotTypeVersionError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteSlotTypeVersionError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::DeleteSlotTypeVersionError {
meta: generic,
kind: crate::error::DeleteSlotTypeVersionErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => {
crate::error::DeleteSlotTypeVersionError {
meta: generic,
kind: crate::error::DeleteSlotTypeVersionErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"InternalFailureException" => crate::error::DeleteSlotTypeVersionError {
meta: generic,
kind: crate::error::DeleteSlotTypeVersionErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteSlotTypeVersionError {
meta: generic,
kind: crate::error::DeleteSlotTypeVersionErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeVersionError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_delete_slot_type_version_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::DeleteSlotTypeVersionError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::DeleteSlotTypeVersionError {
meta: generic,
kind: crate::error::DeleteSlotTypeVersionErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceInUseException" => crate::error::DeleteSlotTypeVersionError {
meta: generic,
kind: crate::error::DeleteSlotTypeVersionErrorKind::ResourceInUseException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_in_use_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_resource_in_use_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteSlotTypeVersionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteSlotTypeVersionError::generic(generic),
})
}
/// Builds a [`crate::output::DeleteSlotTypeVersionOutput`] from a successful
/// `DeleteSlotTypeVersion` response; the operation returns no body fields.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_slot_type_version_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::DeleteSlotTypeVersionOutput,
    crate::error::DeleteSlotTypeVersionError,
> {
    // The response payload is ignored; only a default output is produced.
    let _ = response;
    Ok(crate::output::delete_slot_type_version_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_utterances_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteUtterancesOutput, crate::error::DeleteUtterancesError>
{
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DeleteUtterancesError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteUtterancesError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::DeleteUtterancesError {
meta: generic,
kind: crate::error::DeleteUtterancesErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteUtterancesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::DeleteUtterancesError {
meta: generic,
kind: crate::error::DeleteUtterancesErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteUtterancesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteUtterancesError {
meta: generic,
kind: crate::error::DeleteUtterancesErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteUtterancesError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_delete_utterances_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::DeleteUtterancesError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::DeleteUtterancesError {
meta: generic,
kind: crate::error::DeleteUtterancesErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::DeleteUtterancesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DeleteUtterancesError::generic(generic),
})
}
/// Builds a [`crate::output::DeleteUtterancesOutput`] from a successful
/// `DeleteUtterances` response; the operation returns no body fields.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_utterances_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteUtterancesOutput, crate::error::DeleteUtterancesError>
{
    // The response payload is ignored; only a default output is produced.
    let _ = response;
    Ok(crate::output::delete_utterances_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotOutput, crate::error::GetBotError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBotError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetBotError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBotError {
meta: generic,
kind: crate::error::GetBotErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBotError {
meta: generic,
kind: crate::error::GetBotErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBotError {
meta: generic,
kind: crate::error::GetBotErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_bot_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBotError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetBotError {
meta: generic,
kind: crate::error::GetBotErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBotError::generic(generic),
})
}
/// Deserializes a successful `GetBot` response body into
/// [`crate::output::GetBotOutput`].
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotOutput, crate::error::GetBotError> {
    let builder = crate::output::get_bot_output::Builder::default();
    // Populate the builder from the JSON payload; deserialization failures
    // surface as "unhandled" errors.
    let builder =
        crate::json_deser::deser_operation_crate_operation_get_bot(response.body().as_ref(), builder)
            .map_err(crate::error::GetBotError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_alias_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotAliasOutput, crate::error::GetBotAliasError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBotAliasError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetBotAliasError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBotAliasError {
meta: generic,
kind: crate::error::GetBotAliasErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotAliasError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBotAliasError {
meta: generic,
kind: crate::error::GetBotAliasErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotAliasError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBotAliasError {
meta: generic,
kind: crate::error::GetBotAliasErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotAliasError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_bot_alias_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBotAliasError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetBotAliasError {
meta: generic,
kind: crate::error::GetBotAliasErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotAliasError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBotAliasError::generic(generic),
})
}
/// Deserializes a successful `GetBotAlias` response body into
/// [`crate::output::GetBotAliasOutput`].
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_alias_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotAliasOutput, crate::error::GetBotAliasError> {
    let builder = crate::output::get_bot_alias_output::Builder::default();
    // Populate the builder from the JSON payload; deserialization failures
    // surface as "unhandled" errors.
    let builder = crate::json_deser::deser_operation_crate_operation_get_bot_alias(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetBotAliasError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_aliases_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotAliasesOutput, crate::error::GetBotAliasesError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBotAliasesError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetBotAliasesError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBotAliasesError {
meta: generic,
kind: crate::error::GetBotAliasesErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotAliasesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBotAliasesError {
meta: generic,
kind: crate::error::GetBotAliasesErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotAliasesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBotAliasesError {
meta: generic,
kind: crate::error::GetBotAliasesErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotAliasesError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_bot_aliases_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBotAliasesError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBotAliasesError::generic(generic),
})
}
/// Deserializes a successful `GetBotAliases` response body into
/// [`crate::output::GetBotAliasesOutput`].
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_aliases_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotAliasesOutput, crate::error::GetBotAliasesError> {
    let builder = crate::output::get_bot_aliases_output::Builder::default();
    // Populate the builder from the JSON payload; deserialization failures
    // surface as "unhandled" errors.
    let builder = crate::json_deser::deser_operation_crate_operation_get_bot_aliases(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetBotAliasesError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_channel_association_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetBotChannelAssociationOutput,
crate::error::GetBotChannelAssociationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBotChannelAssociationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::GetBotChannelAssociationError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBotChannelAssociationError {
meta: generic,
kind: crate::error::GetBotChannelAssociationErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotChannelAssociationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBotChannelAssociationError {
meta: generic,
kind: crate::error::GetBotChannelAssociationErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotChannelAssociationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBotChannelAssociationError {
meta: generic,
kind: crate::error::GetBotChannelAssociationErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotChannelAssociationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_bot_channel_association_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBotChannelAssociationError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetBotChannelAssociationError {
meta: generic,
kind: crate::error::GetBotChannelAssociationErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotChannelAssociationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBotChannelAssociationError::generic(generic),
})
}
/// Deserializes a successful `GetBotChannelAssociation` response body into
/// [`crate::output::GetBotChannelAssociationOutput`].
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_channel_association_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetBotChannelAssociationOutput,
    crate::error::GetBotChannelAssociationError,
> {
    let builder = crate::output::get_bot_channel_association_output::Builder::default();
    // Populate the builder from the JSON payload; deserialization failures
    // surface as "unhandled" errors.
    let builder = crate::json_deser::deser_operation_crate_operation_get_bot_channel_association(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetBotChannelAssociationError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_channel_associations_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetBotChannelAssociationsOutput,
crate::error::GetBotChannelAssociationsError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBotChannelAssociationsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::GetBotChannelAssociationsError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBotChannelAssociationsError {
meta: generic,
kind: crate::error::GetBotChannelAssociationsErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotChannelAssociationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBotChannelAssociationsError {
meta: generic,
kind: crate::error::GetBotChannelAssociationsErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotChannelAssociationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBotChannelAssociationsError {
meta: generic,
kind: crate::error::GetBotChannelAssociationsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotChannelAssociationsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_bot_channel_associations_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBotChannelAssociationsError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBotChannelAssociationsError::generic(generic),
})
}
/// Deserializes a successful `GetBotChannelAssociations` response body into
/// [`crate::output::GetBotChannelAssociationsOutput`].
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_channel_associations_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetBotChannelAssociationsOutput,
    crate::error::GetBotChannelAssociationsError,
> {
    let builder = crate::output::get_bot_channel_associations_output::Builder::default();
    // Populate the builder from the JSON payload; deserialization failures
    // surface as "unhandled" errors.
    let builder = crate::json_deser::deser_operation_crate_operation_get_bot_channel_associations(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetBotChannelAssociationsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bots_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotsOutput, crate::error::GetBotsError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBotsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetBotsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBotsError {
meta: generic,
kind: crate::error::GetBotsErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBotsError {
meta: generic,
kind: crate::error::GetBotsErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBotsError {
meta: generic,
kind: crate::error::GetBotsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_bots_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBotsError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetBotsError {
meta: generic,
kind: crate::error::GetBotsErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBotsError::generic(generic),
})
}
/// Deserializes a successful `GetBots` response body into
/// [`crate::output::GetBotsOutput`].
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bots_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotsOutput, crate::error::GetBotsError> {
    let builder = crate::output::get_bots_output::Builder::default();
    // Populate the builder from the JSON payload; deserialization failures
    // surface as "unhandled" errors.
    let builder = crate::json_deser::deser_operation_crate_operation_get_bots(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetBotsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_versions_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotVersionsOutput, crate::error::GetBotVersionsError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBotVersionsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetBotVersionsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBotVersionsError {
meta: generic,
kind: crate::error::GetBotVersionsErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotVersionsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBotVersionsError {
meta: generic,
kind: crate::error::GetBotVersionsErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotVersionsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBotVersionsError {
meta: generic,
kind: crate::error::GetBotVersionsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotVersionsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_bot_versions_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBotVersionsError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetBotVersionsError {
meta: generic,
kind: crate::error::GetBotVersionsErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBotVersionsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBotVersionsError::generic(generic),
})
}
/// Builds a `GetBotVersionsOutput` from the body of a successful
/// `GetBotVersions` response.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_bot_versions_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBotVersionsOutput, crate::error::GetBotVersionsError> {
    // Start from an empty builder and let the JSON deserializer populate it.
    let builder = crate::output::get_bot_versions_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_bot_versions(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetBotVersionsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_builtin_intent_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBuiltinIntentOutput, crate::error::GetBuiltinIntentError>
{
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBuiltinIntentError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetBuiltinIntentError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBuiltinIntentError {
meta: generic,
kind: crate::error::GetBuiltinIntentErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBuiltinIntentError {
meta: generic,
kind: crate::error::GetBuiltinIntentErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBuiltinIntentError {
meta: generic,
kind: crate::error::GetBuiltinIntentErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinIntentError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_builtin_intent_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBuiltinIntentError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetBuiltinIntentError {
meta: generic,
kind: crate::error::GetBuiltinIntentErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBuiltinIntentError::generic(generic),
})
}
/// Builds a `GetBuiltinIntentOutput` from the body of a successful
/// `GetBuiltinIntent` response.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_builtin_intent_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBuiltinIntentOutput, crate::error::GetBuiltinIntentError>
{
    // Start from an empty builder and let the JSON deserializer populate it.
    let builder = crate::output::get_builtin_intent_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_builtin_intent(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetBuiltinIntentError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_builtin_intents_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBuiltinIntentsOutput, crate::error::GetBuiltinIntentsError>
{
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBuiltinIntentsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetBuiltinIntentsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBuiltinIntentsError {
meta: generic,
kind: crate::error::GetBuiltinIntentsErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinIntentsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBuiltinIntentsError {
meta: generic,
kind: crate::error::GetBuiltinIntentsErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinIntentsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBuiltinIntentsError {
meta: generic,
kind: crate::error::GetBuiltinIntentsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinIntentsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_builtin_intents_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBuiltinIntentsError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBuiltinIntentsError::generic(generic),
})
}
/// Builds a `GetBuiltinIntentsOutput` from the body of a successful
/// `GetBuiltinIntents` response.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_builtin_intents_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetBuiltinIntentsOutput, crate::error::GetBuiltinIntentsError>
{
    // Start from an empty builder and let the JSON deserializer populate it.
    let builder = crate::output::get_builtin_intents_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_builtin_intents(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetBuiltinIntentsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_builtin_slot_types_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetBuiltinSlotTypesOutput,
crate::error::GetBuiltinSlotTypesError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetBuiltinSlotTypesError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetBuiltinSlotTypesError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetBuiltinSlotTypesError {
meta: generic,
kind: crate::error::GetBuiltinSlotTypesErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinSlotTypesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetBuiltinSlotTypesError {
meta: generic,
kind: crate::error::GetBuiltinSlotTypesErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinSlotTypesError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetBuiltinSlotTypesError {
meta: generic,
kind: crate::error::GetBuiltinSlotTypesErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetBuiltinSlotTypesError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_builtin_slot_types_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetBuiltinSlotTypesError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetBuiltinSlotTypesError::generic(generic),
})
}
/// Builds a `GetBuiltinSlotTypesOutput` from the body of a successful
/// `GetBuiltinSlotTypes` response.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_builtin_slot_types_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetBuiltinSlotTypesOutput,
    crate::error::GetBuiltinSlotTypesError,
> {
    // Start from an empty builder and let the JSON deserializer populate it.
    let builder = crate::output::get_builtin_slot_types_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_builtin_slot_types(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetBuiltinSlotTypesError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_export_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetExportOutput, crate::error::GetExportError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetExportError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetExportError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetExportError {
meta: generic,
kind: crate::error::GetExportErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetExportError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetExportError {
meta: generic,
kind: crate::error::GetExportErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetExportError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetExportError {
meta: generic,
kind: crate::error::GetExportErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetExportError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_export_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetExportError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetExportError {
meta: generic,
kind: crate::error::GetExportErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetExportError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetExportError::generic(generic),
})
}
/// Builds a `GetExportOutput` from the body of a successful `GetExport`
/// response.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_export_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetExportOutput, crate::error::GetExportError> {
    // Start from an empty builder and let the JSON deserializer populate it.
    let builder = crate::output::get_export_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_export(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetExportError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_import_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetImportOutput, crate::error::GetImportError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetImportError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetImportError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetImportError {
meta: generic,
kind: crate::error::GetImportErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetImportError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetImportError {
meta: generic,
kind: crate::error::GetImportErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetImportError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetImportError {
meta: generic,
kind: crate::error::GetImportErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetImportError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_import_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetImportError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetImportError {
meta: generic,
kind: crate::error::GetImportErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetImportError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetImportError::generic(generic),
})
}
/// Builds a `GetImportOutput` from the body of a successful `GetImport`
/// response.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_import_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetImportOutput, crate::error::GetImportError> {
    // Start from an empty builder and let the JSON deserializer populate it.
    let builder = crate::output::get_import_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_import(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetImportError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_intent_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetIntentOutput, crate::error::GetIntentError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetIntentError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetIntentError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetIntentError {
meta: generic,
kind: crate::error::GetIntentErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetIntentError {
meta: generic,
kind: crate::error::GetIntentErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetIntentError {
meta: generic,
kind: crate::error::GetIntentErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_intent_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetIntentError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetIntentError {
meta: generic,
kind: crate::error::GetIntentErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetIntentError::generic(generic),
})
}
/// Builds a `GetIntentOutput` from the body of a successful `GetIntent`
/// response.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_intent_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetIntentOutput, crate::error::GetIntentError> {
    // Start from an empty builder and let the JSON deserializer populate it.
    let builder = crate::output::get_intent_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_intent(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetIntentError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_intents_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetIntentsOutput, crate::error::GetIntentsError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetIntentsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetIntentsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetIntentsError {
meta: generic,
kind: crate::error::GetIntentsErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetIntentsError {
meta: generic,
kind: crate::error::GetIntentsErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetIntentsError {
meta: generic,
kind: crate::error::GetIntentsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_intents_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetIntentsError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetIntentsError {
meta: generic,
kind: crate::error::GetIntentsErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetIntentsError::generic(generic),
})
}
/// Builds a `GetIntentsOutput` from the body of a successful `GetIntents`
/// response.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_intents_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetIntentsOutput, crate::error::GetIntentsError> {
    // Start from an empty builder and let the JSON deserializer populate it.
    let builder = crate::output::get_intents_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_intents(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetIntentsError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_intent_versions_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetIntentVersionsOutput, crate::error::GetIntentVersionsError>
{
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetIntentVersionsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetIntentVersionsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"BadRequestException" => crate::error::GetIntentVersionsError {
meta: generic,
kind: crate::error::GetIntentVersionsErrorKind::BadRequestException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::bad_request_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentVersionsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalFailureException" => crate::error::GetIntentVersionsError {
meta: generic,
kind: crate::error::GetIntentVersionsErrorKind::InternalFailureException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_failure_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentVersionsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetIntentVersionsError {
meta: generic,
kind: crate::error::GetIntentVersionsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentVersionsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_intent_versions_limit_exceeded_exception_retry_after_seconds(response.headers())
.map_err(|_|crate::error::GetIntentVersionsError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"NotFoundException" => crate::error::GetIntentVersionsError {
meta: generic,
kind: crate::error::GetIntentVersionsErrorKind::NotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetIntentVersionsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetIntentVersionsError::generic(generic),
})
}
/// Builds a `GetIntentVersionsOutput` from the body of a successful
/// `GetIntentVersions` response.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_intent_versions_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetIntentVersionsOutput, crate::error::GetIntentVersionsError>
{
    // Start from an empty builder and let the JSON deserializer populate it.
    let builder = crate::output::get_intent_versions_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_get_intent_versions(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetIntentVersionsError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetMigration` HTTP response into a typed
/// [`crate::error::GetMigrationError`].
///
/// The generic JSON error envelope is parsed first; the error code it carries
/// selects which concrete exception variant to deserialize from the body.
/// Codes not matched below are preserved as a generic error. This function is
/// only invoked for non-success responses and always returns `Err`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_migration_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetMigrationOutput, crate::error::GetMigrationError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetMigrationError::unhandled)?;
    // Without an error code the failure cannot be classified.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetMigrationError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::GetMigrationError {
            meta: generic,
            kind: crate::error::GetMigrationErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetMigrationError::unhandled)?;
                    output.build()
                };
                // Backfill from the generic envelope when the exception body
                // did not carry its own message.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalFailureException" => crate::error::GetMigrationError {
            meta: generic,
            kind: crate::error::GetMigrationErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetMigrationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::GetMigrationError {
            meta: generic,
            kind: crate::error::GetMigrationErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetMigrationError::unhandled)?;
                    // Throttling responses carry a `Retry-After` header that is
                    // surfaced on the exception as `retry_after_seconds`.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_get_migration_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::GetMigrationError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::GetMigrationError {
            meta: generic,
            kind: crate::error::GetMigrationErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetMigrationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::GetMigrationError::generic(generic),
    })
}
/// Builds a [`crate::output::GetMigrationOutput`] from a successful
/// `GetMigration` HTTP response by deserializing the JSON body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_migration_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetMigrationOutput, crate::error::GetMigrationError> {
    #[allow(unused_mut)]
    let mut builder = crate::output::get_migration_output::Builder::default();
    // Populate the builder from the response body; any JSON error is
    // surfaced as an "unhandled" operation error.
    builder = crate::json_deser::deser_operation_crate_operation_get_migration(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetMigrationError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetMigrations` HTTP response into a typed
/// [`crate::error::GetMigrationsError`].
///
/// The generic JSON error envelope is parsed first; the error code it carries
/// selects which concrete exception variant to deserialize from the body.
/// Codes not matched below are preserved as a generic error. This function is
/// only invoked for non-success responses and always returns `Err`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_migrations_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetMigrationsOutput, crate::error::GetMigrationsError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetMigrationsError::unhandled)?;
    // Without an error code the failure cannot be classified.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetMigrationsError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::GetMigrationsError {
            meta: generic,
            kind: crate::error::GetMigrationsErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetMigrationsError::unhandled)?;
                    output.build()
                };
                // Backfill from the generic envelope when the exception body
                // did not carry its own message.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalFailureException" => crate::error::GetMigrationsError {
            meta: generic,
            kind: crate::error::GetMigrationsErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetMigrationsError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::GetMigrationsError {
            meta: generic,
            kind: crate::error::GetMigrationsErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetMigrationsError::unhandled)?;
                    // Throttling responses carry a `Retry-After` header that is
                    // surfaced on the exception as `retry_after_seconds`.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_get_migrations_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::GetMigrationsError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::GetMigrationsError::generic(generic),
    })
}
/// Builds a [`crate::output::GetMigrationsOutput`] from a successful
/// `GetMigrations` HTTP response by deserializing the JSON body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_migrations_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetMigrationsOutput, crate::error::GetMigrationsError> {
    #[allow(unused_mut)]
    let mut builder = crate::output::get_migrations_output::Builder::default();
    // Populate the builder from the response body; any JSON error is
    // surfaced as an "unhandled" operation error.
    builder = crate::json_deser::deser_operation_crate_operation_get_migrations(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetMigrationsError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetSlotType` HTTP response into a typed
/// [`crate::error::GetSlotTypeError`].
///
/// The generic JSON error envelope is parsed first; the error code it carries
/// selects which concrete exception variant to deserialize from the body.
/// Codes not matched below are preserved as a generic error. This function is
/// only invoked for non-success responses and always returns `Err`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_slot_type_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetSlotTypeOutput, crate::error::GetSlotTypeError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetSlotTypeError::unhandled)?;
    // Without an error code the failure cannot be classified.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetSlotTypeError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::GetSlotTypeError {
            meta: generic,
            kind: crate::error::GetSlotTypeErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypeError::unhandled)?;
                    output.build()
                };
                // Backfill from the generic envelope when the exception body
                // did not carry its own message.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalFailureException" => crate::error::GetSlotTypeError {
            meta: generic,
            kind: crate::error::GetSlotTypeErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypeError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::GetSlotTypeError {
            meta: generic,
            kind: crate::error::GetSlotTypeErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypeError::unhandled)?;
                    // Throttling responses carry a `Retry-After` header that is
                    // surfaced on the exception as `retry_after_seconds`.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_get_slot_type_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::GetSlotTypeError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::GetSlotTypeError {
            meta: generic,
            kind: crate::error::GetSlotTypeErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypeError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::GetSlotTypeError::generic(generic),
    })
}
/// Builds a [`crate::output::GetSlotTypeOutput`] from a successful
/// `GetSlotType` HTTP response by deserializing the JSON body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_slot_type_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetSlotTypeOutput, crate::error::GetSlotTypeError> {
    #[allow(unused_mut)]
    let mut builder = crate::output::get_slot_type_output::Builder::default();
    // Populate the builder from the response body; any JSON error is
    // surfaced as an "unhandled" operation error.
    builder = crate::json_deser::deser_operation_crate_operation_get_slot_type(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetSlotTypeError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetSlotTypes` HTTP response into a typed
/// [`crate::error::GetSlotTypesError`].
///
/// The generic JSON error envelope is parsed first; the error code it carries
/// selects which concrete exception variant to deserialize from the body.
/// Codes not matched below are preserved as a generic error. This function is
/// only invoked for non-success responses and always returns `Err`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_slot_types_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetSlotTypesOutput, crate::error::GetSlotTypesError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetSlotTypesError::unhandled)?;
    // Without an error code the failure cannot be classified.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetSlotTypesError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::GetSlotTypesError {
            meta: generic,
            kind: crate::error::GetSlotTypesErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypesError::unhandled)?;
                    output.build()
                };
                // Backfill from the generic envelope when the exception body
                // did not carry its own message.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalFailureException" => crate::error::GetSlotTypesError {
            meta: generic,
            kind: crate::error::GetSlotTypesErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypesError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::GetSlotTypesError {
            meta: generic,
            kind: crate::error::GetSlotTypesErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypesError::unhandled)?;
                    // Throttling responses carry a `Retry-After` header that is
                    // surfaced on the exception as `retry_after_seconds`.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_get_slot_types_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::GetSlotTypesError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::GetSlotTypesError {
            meta: generic,
            kind: crate::error::GetSlotTypesErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypesError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::GetSlotTypesError::generic(generic),
    })
}
/// Builds a [`crate::output::GetSlotTypesOutput`] from a successful
/// `GetSlotTypes` HTTP response by deserializing the JSON body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_slot_types_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetSlotTypesOutput, crate::error::GetSlotTypesError> {
    #[allow(unused_mut)]
    let mut builder = crate::output::get_slot_types_output::Builder::default();
    // Populate the builder from the response body; any JSON error is
    // surfaced as an "unhandled" operation error.
    builder = crate::json_deser::deser_operation_crate_operation_get_slot_types(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetSlotTypesError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetSlotTypeVersions` HTTP response into a typed
/// [`crate::error::GetSlotTypeVersionsError`].
///
/// The generic JSON error envelope is parsed first; the error code it carries
/// selects which concrete exception variant to deserialize from the body.
/// Codes not matched below are preserved as a generic error. This function is
/// only invoked for non-success responses and always returns `Err`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_slot_type_versions_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetSlotTypeVersionsOutput,
    crate::error::GetSlotTypeVersionsError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetSlotTypeVersionsError::unhandled)?;
    // Without an error code the failure cannot be classified.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetSlotTypeVersionsError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::GetSlotTypeVersionsError {
            meta: generic,
            kind: crate::error::GetSlotTypeVersionsErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypeVersionsError::unhandled)?;
                    output.build()
                };
                // Backfill from the generic envelope when the exception body
                // did not carry its own message.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalFailureException" => crate::error::GetSlotTypeVersionsError {
            meta: generic,
            kind: crate::error::GetSlotTypeVersionsErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypeVersionsError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::GetSlotTypeVersionsError {
            meta: generic,
            kind: crate::error::GetSlotTypeVersionsErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypeVersionsError::unhandled)?;
                    // Throttling responses carry a `Retry-After` header that is
                    // surfaced on the exception as `retry_after_seconds`.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_get_slot_type_versions_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::GetSlotTypeVersionsError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::GetSlotTypeVersionsError {
            meta: generic,
            kind: crate::error::GetSlotTypeVersionsErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetSlotTypeVersionsError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::GetSlotTypeVersionsError::generic(generic),
    })
}
/// Builds a [`crate::output::GetSlotTypeVersionsOutput`] from a successful
/// `GetSlotTypeVersions` HTTP response by deserializing the JSON body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_slot_type_versions_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::GetSlotTypeVersionsOutput,
    crate::error::GetSlotTypeVersionsError,
> {
    #[allow(unused_mut)]
    let mut builder = crate::output::get_slot_type_versions_output::Builder::default();
    // Populate the builder from the response body; any JSON error is
    // surfaced as an "unhandled" operation error.
    builder = crate::json_deser::deser_operation_crate_operation_get_slot_type_versions(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetSlotTypeVersionsError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `GetUtterancesView` HTTP response into a typed
/// [`crate::error::GetUtterancesViewError`].
///
/// The generic JSON error envelope is parsed first; the error code it carries
/// selects which concrete exception variant to deserialize from the body.
/// Codes not matched below are preserved as a generic error. This function is
/// only invoked for non-success responses and always returns `Err`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_utterances_view_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetUtterancesViewOutput, crate::error::GetUtterancesViewError>
{
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::GetUtterancesViewError::unhandled)?;
    // Without an error code the failure cannot be classified.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::GetUtterancesViewError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::GetUtterancesViewError {
            meta: generic,
            kind: crate::error::GetUtterancesViewErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetUtterancesViewError::unhandled)?;
                    output.build()
                };
                // Backfill from the generic envelope when the exception body
                // did not carry its own message.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalFailureException" => crate::error::GetUtterancesViewError {
            meta: generic,
            kind: crate::error::GetUtterancesViewErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetUtterancesViewError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::GetUtterancesViewError {
            meta: generic,
            kind: crate::error::GetUtterancesViewErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::GetUtterancesViewError::unhandled)?;
                    // Throttling responses carry a `Retry-After` header that is
                    // surfaced on the exception as `retry_after_seconds`.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_get_utterances_view_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::GetUtterancesViewError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::GetUtterancesViewError::generic(generic),
    })
}
/// Builds a [`crate::output::GetUtterancesViewOutput`] from a successful
/// `GetUtterancesView` HTTP response by deserializing the JSON body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_utterances_view_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetUtterancesViewOutput, crate::error::GetUtterancesViewError>
{
    #[allow(unused_mut)]
    let mut builder = crate::output::get_utterances_view_output::Builder::default();
    // Populate the builder from the response body; any JSON error is
    // surfaced as an "unhandled" operation error.
    builder = crate::json_deser::deser_operation_crate_operation_get_utterances_view(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::GetUtterancesViewError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `ListTagsForResource` HTTP response into a typed
/// [`crate::error::ListTagsForResourceError`].
///
/// The generic JSON error envelope is parsed first; the error code it carries
/// selects which concrete exception variant to deserialize from the body.
/// Codes not matched below are preserved as a generic error. This function is
/// only invoked for non-success responses and always returns `Err`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListTagsForResourceOutput,
    crate::error::ListTagsForResourceError,
> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::ListTagsForResourceError::unhandled)?;
    // Without an error code the failure cannot be classified.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::ListTagsForResourceError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
                    output.build()
                };
                // Backfill from the generic envelope when the exception body
                // did not carry its own message.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalFailureException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
                    // Throttling responses carry a `Retry-After` header that is
                    // surfaced on the exception as `retry_after_seconds`.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_list_tags_for_resource_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::ListTagsForResourceError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::ListTagsForResourceError {
            meta: generic,
            kind: crate::error::ListTagsForResourceErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::ListTagsForResourceError::generic(generic),
    })
}
/// Builds a [`crate::output::ListTagsForResourceOutput`] from a successful
/// `ListTagsForResource` HTTP response by deserializing the JSON body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
    crate::output::ListTagsForResourceOutput,
    crate::error::ListTagsForResourceError,
> {
    #[allow(unused_mut)]
    let mut builder = crate::output::list_tags_for_resource_output::Builder::default();
    // Populate the builder from the response body; any JSON error is
    // surfaced as an "unhandled" operation error.
    builder = crate::json_deser::deser_operation_crate_operation_list_tags_for_resource(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::ListTagsForResourceError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `PutBot` HTTP response into a typed
/// [`crate::error::PutBotError`].
///
/// The generic JSON error envelope is parsed first; the error code it carries
/// selects which concrete exception variant to deserialize from the body.
/// Codes not matched below are preserved as a generic error. This function is
/// only invoked for non-success responses and always returns `Err`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_bot_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutBotOutput, crate::error::PutBotError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::PutBotError::unhandled)?;
    // Without an error code the failure cannot be classified.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::PutBotError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::PutBotError {
            meta: generic,
            kind: crate::error::PutBotErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotError::unhandled)?;
                    output.build()
                };
                // Backfill from the generic envelope when the exception body
                // did not carry its own message.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::PutBotError {
                meta: generic,
                kind: crate::error::PutBotErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalFailureException" => crate::error::PutBotError {
            meta: generic,
            kind: crate::error::PutBotErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::PutBotError {
            meta: generic,
            kind: crate::error::PutBotErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotError::unhandled)?;
                    // Throttling responses carry a `Retry-After` header that is
                    // surfaced on the exception as `retry_after_seconds`.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_put_bot_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::PutBotError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PreconditionFailedException" => crate::error::PutBotError {
            meta: generic,
            kind: crate::error::PutBotErrorKind::PreconditionFailedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::precondition_failed_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_precondition_failed_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::PutBotError::generic(generic),
    })
}
/// Builds a [`crate::output::PutBotOutput`] from a successful
/// `PutBot` HTTP response by deserializing the JSON body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_bot_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutBotOutput, crate::error::PutBotError> {
    #[allow(unused_mut)]
    let mut builder = crate::output::put_bot_output::Builder::default();
    // Populate the builder from the response body; any JSON error is
    // surfaced as an "unhandled" operation error.
    builder = crate::json_deser::deser_operation_crate_operation_put_bot(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::PutBotError::unhandled)?;
    Ok(builder.build())
}
/// Converts a failed `PutBotAlias` HTTP response into a typed
/// [`crate::error::PutBotAliasError`].
///
/// The generic JSON error envelope is parsed first; the error code it carries
/// selects which concrete exception variant to deserialize from the body.
/// Codes not matched below are preserved as a generic error. This function is
/// only invoked for non-success responses and always returns `Err`.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_bot_alias_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutBotAliasOutput, crate::error::PutBotAliasError> {
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::PutBotAliasError::unhandled)?;
    // Without an error code the failure cannot be classified.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::PutBotAliasError::unhandled(generic)),
    };
    let _error_message = generic.message().map(|msg| msg.to_owned());
    Err(match error_code {
        "BadRequestException" => crate::error::PutBotAliasError {
            meta: generic,
            kind: crate::error::PutBotAliasErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotAliasError::unhandled)?;
                    output.build()
                };
                // Backfill from the generic envelope when the exception body
                // did not carry its own message.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::PutBotAliasError {
                meta: generic,
                kind: crate::error::PutBotAliasErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotAliasError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalFailureException" => crate::error::PutBotAliasError {
            meta: generic,
            kind: crate::error::PutBotAliasErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotAliasError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::PutBotAliasError {
            meta: generic,
            kind: crate::error::PutBotAliasErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotAliasError::unhandled)?;
                    // Throttling responses carry a `Retry-After` header that is
                    // surfaced on the exception as `retry_after_seconds`.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_put_bot_alias_limit_exceeded_exception_retry_after_seconds(response.headers())
                                                .map_err(|_|crate::error::PutBotAliasError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PreconditionFailedException" => crate::error::PutBotAliasError {
            meta: generic,
            kind: crate::error::PutBotAliasErrorKind::PreconditionFailedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::precondition_failed_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_precondition_failed_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutBotAliasError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        _ => crate::error::PutBotAliasError::generic(generic),
    })
}
/// Builds a [`crate::output::PutBotAliasOutput`] from a successful
/// `PutBotAlias` HTTP response by deserializing the JSON body.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_bot_alias_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutBotAliasOutput, crate::error::PutBotAliasError> {
    #[allow(unused_mut)]
    let mut builder = crate::output::put_bot_alias_output::Builder::default();
    // Populate the builder from the response body; any JSON error is
    // surfaced as an "unhandled" operation error.
    builder = crate::json_deser::deser_operation_crate_operation_put_bot_alias(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::PutBotAliasError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
/// Maps a failed `PutIntent` HTTP response onto the modeled
/// `PutIntentError` variant matching the error code reported by the service.
/// Codes without a modeled variant are preserved as a generic error.
pub fn parse_put_intent_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutIntentOutput, crate::error::PutIntentError> {
    // Parse the protocol-level error envelope (error code + optional message).
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::PutIntentError::unhandled)?;
    // Without an error code we cannot pick a modeled variant.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::PutIntentError::unhandled(generic)),
    };
    // Fallback message used when the modeled error body lacks one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    // Dispatch on the service error code to build the modeled variant.
    Err(match error_code {
        "BadRequestException" => crate::error::PutIntentError {
            meta: generic,
            kind: crate::error::PutIntentErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutIntentError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body had none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::PutIntentError {
                meta: generic,
                kind: crate::error::PutIntentErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutIntentError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalFailureException" => crate::error::PutIntentError {
            meta: generic,
            kind: crate::error::PutIntentErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutIntentError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::PutIntentError {
            meta: generic,
            kind: crate::error::PutIntentErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutIntentError::unhandled)?;
                    // The throttling back-off hint travels in the Retry-After header,
                    // not in the JSON body.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_put_intent_limit_exceeded_exception_retry_after_seconds(response.headers())
                            .map_err(|_|crate::error::PutIntentError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PreconditionFailedException" => crate::error::PutIntentError {
            meta: generic,
            kind: crate::error::PutIntentErrorKind::PreconditionFailedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::precondition_failed_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_precondition_failed_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutIntentError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes keep the raw envelope as a generic error.
        _ => crate::error::PutIntentError::generic(generic),
    })
}
#[allow(clippy::unnecessary_wraps)]
/// Deserializes a successful `PutIntent` HTTP response body into a
/// `PutIntentOutput`. Any deserialization failure is surfaced as an
/// "unhandled" `PutIntentError`.
pub fn parse_put_intent_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutIntentOutput, crate::error::PutIntentError> {
    // Start from an empty builder and let the JSON deserializer populate it
    // from the response body.
    let builder = crate::output::put_intent_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_put_intent(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::PutIntentError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
/// Maps a failed `PutSlotType` HTTP response onto the modeled
/// `PutSlotTypeError` variant matching the error code reported by the
/// service. Codes without a modeled variant are preserved as a generic error.
pub fn parse_put_slot_type_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutSlotTypeOutput, crate::error::PutSlotTypeError> {
    // Parse the protocol-level error envelope (error code + optional message).
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::PutSlotTypeError::unhandled)?;
    // Without an error code we cannot pick a modeled variant.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::PutSlotTypeError::unhandled(generic)),
    };
    // Fallback message used when the modeled error body lacks one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    // Dispatch on the service error code to build the modeled variant.
    Err(match error_code {
        "BadRequestException" => crate::error::PutSlotTypeError {
            meta: generic,
            kind: crate::error::PutSlotTypeErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutSlotTypeError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body had none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::PutSlotTypeError {
                meta: generic,
                kind: crate::error::PutSlotTypeErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutSlotTypeError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalFailureException" => crate::error::PutSlotTypeError {
            meta: generic,
            kind: crate::error::PutSlotTypeErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutSlotTypeError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::PutSlotTypeError {
            meta: generic,
            kind: crate::error::PutSlotTypeErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutSlotTypeError::unhandled)?;
                    // The throttling back-off hint travels in the Retry-After header,
                    // not in the JSON body.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_put_slot_type_limit_exceeded_exception_retry_after_seconds(response.headers())
                            .map_err(|_|crate::error::PutSlotTypeError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "PreconditionFailedException" => crate::error::PutSlotTypeError {
            meta: generic,
            kind: crate::error::PutSlotTypeErrorKind::PreconditionFailedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output =
                        crate::error::precondition_failed_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_precondition_failed_exception_json_err(response.body().as_ref(), output).map_err(crate::error::PutSlotTypeError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes keep the raw envelope as a generic error.
        _ => crate::error::PutSlotTypeError::generic(generic),
    })
}
#[allow(clippy::unnecessary_wraps)]
/// Deserializes a successful `PutSlotType` HTTP response body into a
/// `PutSlotTypeOutput`. Any deserialization failure is surfaced as an
/// "unhandled" `PutSlotTypeError`.
pub fn parse_put_slot_type_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutSlotTypeOutput, crate::error::PutSlotTypeError> {
    // Start from an empty builder and let the JSON deserializer populate it
    // from the response body.
    let builder = crate::output::put_slot_type_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_put_slot_type(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::PutSlotTypeError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
/// Maps a failed `StartImport` HTTP response onto the modeled
/// `StartImportError` variant matching the error code reported by the
/// service. Codes without a modeled variant are preserved as a generic error.
pub fn parse_start_import_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::StartImportOutput, crate::error::StartImportError> {
    // Parse the protocol-level error envelope (error code + optional message).
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::StartImportError::unhandled)?;
    // Without an error code we cannot pick a modeled variant.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::StartImportError::unhandled(generic)),
    };
    // Fallback message used when the modeled error body lacks one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    // Dispatch on the service error code to build the modeled variant.
    Err(match error_code {
        "BadRequestException" => crate::error::StartImportError {
            meta: generic,
            kind: crate::error::StartImportErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StartImportError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body had none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalFailureException" => crate::error::StartImportError {
            meta: generic,
            kind: crate::error::StartImportErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StartImportError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::StartImportError {
            meta: generic,
            kind: crate::error::StartImportErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StartImportError::unhandled)?;
                    // The throttling back-off hint travels in the Retry-After header,
                    // not in the JSON body.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_start_import_limit_exceeded_exception_retry_after_seconds(response.headers())
                            .map_err(|_|crate::error::StartImportError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes keep the raw envelope as a generic error.
        _ => crate::error::StartImportError::generic(generic),
    })
}
#[allow(clippy::unnecessary_wraps)]
/// Deserializes a successful `StartImport` HTTP response body into a
/// `StartImportOutput`. Any deserialization failure is surfaced as an
/// "unhandled" `StartImportError`.
pub fn parse_start_import_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::StartImportOutput, crate::error::StartImportError> {
    // Start from an empty builder and let the JSON deserializer populate it
    // from the response body.
    let builder = crate::output::start_import_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_start_import(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::StartImportError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
/// Maps a failed `StartMigration` HTTP response onto the modeled
/// `StartMigrationError` variant matching the error code reported by the
/// service. Codes without a modeled variant are preserved as a generic error.
pub fn parse_start_migration_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::StartMigrationOutput, crate::error::StartMigrationError> {
    // Parse the protocol-level error envelope (error code + optional message).
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::StartMigrationError::unhandled)?;
    // Without an error code we cannot pick a modeled variant.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::StartMigrationError::unhandled(generic)),
    };
    // Fallback message used when the modeled error body lacks one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    // Dispatch on the service error code to build the modeled variant.
    Err(match error_code {
        "AccessDeniedException" => crate::error::StartMigrationError {
            meta: generic,
            kind: crate::error::StartMigrationErrorKind::AccessDeniedException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::access_denied_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_access_denied_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StartMigrationError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body had none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "BadRequestException" => crate::error::StartMigrationError {
            meta: generic,
            kind: crate::error::StartMigrationErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StartMigrationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "InternalFailureException" => crate::error::StartMigrationError {
            meta: generic,
            kind: crate::error::StartMigrationErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StartMigrationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::StartMigrationError {
            meta: generic,
            kind: crate::error::StartMigrationErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StartMigrationError::unhandled)?;
                    // The throttling back-off hint travels in the Retry-After header,
                    // not in the JSON body.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_start_migration_limit_exceeded_exception_retry_after_seconds(response.headers())
                            .map_err(|_|crate::error::StartMigrationError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::StartMigrationError {
            meta: generic,
            kind: crate::error::StartMigrationErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::StartMigrationError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes keep the raw envelope as a generic error.
        _ => crate::error::StartMigrationError::generic(generic),
    })
}
#[allow(clippy::unnecessary_wraps)]
/// Deserializes a successful `StartMigration` HTTP response body into a
/// `StartMigrationOutput`. Any deserialization failure is surfaced as an
/// "unhandled" `StartMigrationError`.
pub fn parse_start_migration_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::StartMigrationOutput, crate::error::StartMigrationError> {
    // Start from an empty builder and let the JSON deserializer populate it
    // from the response body.
    let builder = crate::output::start_migration_output::Builder::default();
    let builder = crate::json_deser::deser_operation_crate_operation_start_migration(
        response.body().as_ref(),
        builder,
    )
    .map_err(crate::error::StartMigrationError::unhandled)?;
    Ok(builder.build())
}
#[allow(clippy::unnecessary_wraps)]
/// Maps a failed `TagResource` HTTP response onto the modeled
/// `TagResourceError` variant matching the error code reported by the
/// service. Codes without a modeled variant are preserved as a generic error.
pub fn parse_tag_resource_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
    // Parse the protocol-level error envelope (error code + optional message).
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::TagResourceError::unhandled)?;
    // Without an error code we cannot pick a modeled variant.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::TagResourceError::unhandled(generic)),
    };
    // Fallback message used when the modeled error body lacks one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    // Dispatch on the service error code to build the modeled variant.
    Err(match error_code {
        "BadRequestException" => crate::error::TagResourceError {
            meta: generic,
            kind: crate::error::TagResourceErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body had none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::TagResourceError {
                meta: generic,
                kind: crate::error::TagResourceErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalFailureException" => crate::error::TagResourceError {
            meta: generic,
            kind: crate::error::TagResourceErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::TagResourceError {
            meta: generic,
            kind: crate::error::TagResourceErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
                    // The throttling back-off hint travels in the Retry-After header,
                    // not in the JSON body.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_tag_resource_limit_exceeded_exception_retry_after_seconds(response.headers())
                            .map_err(|_|crate::error::TagResourceError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::TagResourceError {
            meta: generic,
            kind: crate::error::TagResourceErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes keep the raw envelope as a generic error.
        _ => crate::error::TagResourceError::generic(generic),
    })
}
#[allow(clippy::unnecessary_wraps)]
/// Builds a `TagResourceOutput` for a successful `TagResource` call.
/// The operation has no modeled response fields, so nothing is read from
/// the response body.
pub fn parse_tag_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
    // The response carries no modeled data; the parameter is intentionally unused.
    let _ = response;
    Ok(crate::output::tag_resource_output::Builder::default().build())
}
#[allow(clippy::unnecessary_wraps)]
/// Maps a failed `UntagResource` HTTP response onto the modeled
/// `UntagResourceError` variant matching the error code reported by the
/// service. Codes without a modeled variant are preserved as a generic error.
pub fn parse_untag_resource_error(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
    // Parse the protocol-level error envelope (error code + optional message).
    let generic = crate::json_deser::parse_http_generic_error(response)
        .map_err(crate::error::UntagResourceError::unhandled)?;
    // Without an error code we cannot pick a modeled variant.
    let error_code = match generic.code() {
        Some(code) => code,
        None => return Err(crate::error::UntagResourceError::unhandled(generic)),
    };
    // Fallback message used when the modeled error body lacks one.
    let _error_message = generic.message().map(|msg| msg.to_owned());
    // Dispatch on the service error code to build the modeled variant.
    Err(match error_code {
        "BadRequestException" => crate::error::UntagResourceError {
            meta: generic,
            kind: crate::error::UntagResourceErrorKind::BadRequestException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::bad_request_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_bad_request_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
                    output.build()
                };
                // Fall back to the envelope message when the body had none.
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "ConflictException" => {
            crate::error::UntagResourceError {
                meta: generic,
                kind: crate::error::UntagResourceErrorKind::ConflictException({
                    #[allow(unused_mut)]
                    let mut tmp = {
                        #[allow(unused_mut)]
                        let mut output = crate::error::conflict_exception::Builder::default();
                        let _ = response;
                        output = crate::json_deser::deser_structure_crate_error_conflict_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
                        output.build()
                    };
                    if (&tmp.message).is_none() {
                        tmp.message = _error_message;
                    }
                    tmp
                }),
            }
        }
        "InternalFailureException" => crate::error::UntagResourceError {
            meta: generic,
            kind: crate::error::UntagResourceErrorKind::InternalFailureException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::internal_failure_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_internal_failure_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "LimitExceededException" => crate::error::UntagResourceError {
            meta: generic,
            kind: crate::error::UntagResourceErrorKind::LimitExceededException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::limit_exceeded_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_limit_exceeded_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
                    // The throttling back-off hint travels in the Retry-After header,
                    // not in the JSON body.
                    output = output.set_retry_after_seconds(
                        crate::http_serde::deser_header_untag_resource_limit_exceeded_exception_retry_after_seconds(response.headers())
                            .map_err(|_|crate::error::UntagResourceError::unhandled("Failed to parse retryAfterSeconds from header `Retry-After"))?
                    );
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        "NotFoundException" => crate::error::UntagResourceError {
            meta: generic,
            kind: crate::error::UntagResourceErrorKind::NotFoundException({
                #[allow(unused_mut)]
                let mut tmp = {
                    #[allow(unused_mut)]
                    let mut output = crate::error::not_found_exception::Builder::default();
                    let _ = response;
                    output = crate::json_deser::deser_structure_crate_error_not_found_exception_json_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
                    output.build()
                };
                if (&tmp.message).is_none() {
                    tmp.message = _error_message;
                }
                tmp
            }),
        },
        // Unrecognized codes keep the raw envelope as a generic error.
        _ => crate::error::UntagResourceError::generic(generic),
    })
}
#[allow(clippy::unnecessary_wraps)]
/// Builds an `UntagResourceOutput` for a successful `UntagResource` call.
/// The operation has no modeled response fields, so nothing is read from
/// the response body.
pub fn parse_untag_resource_response(
    response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
    // The response carries no modeled data; the parameter is intentionally unused.
    let _ = response;
    Ok(crate::output::untag_resource_output::Builder::default().build())
}
| 46.983613 | 214 | 0.557796 |
ed148fe7ed7272897fad8e8f3524282cfdb11fe9
| 13,409 |
use super::{candid_path, idl_hash};
use proc_macro2::TokenStream;
use quote::quote;
use std::collections::BTreeSet;
use syn::ext::IdentExt;
use syn::punctuated::Punctuated;
use syn::{Data, DeriveInput, GenericParam, Generics, Token};
/// Derives the `CandidType` trait implementation for a struct or enum.
///
/// Emits `_ty`, `id` and `idl_serialize` based on the shape of the input
/// item; unions are rejected with `unimplemented!`. `custom_candid_path`
/// lets callers reference the candid crate through a re-exported path.
pub(crate) fn derive_idl_type(
    input: DeriveInput,
    custom_candid_path: &Option<TokenStream>,
) -> TokenStream {
    let candid = candid_path(custom_candid_path);
    let name = input.ident;
    // Every generic type parameter gets an extra `CandidType` bound so the
    // generated impl is well-formed for generic containers.
    let generics = add_trait_bounds(input.generics, custom_candid_path);
    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
    let (ty_body, ser_body) = match input.data {
        Data::Enum(ref data) => enum_from_ast(&name, &data.variants, custom_candid_path),
        Data::Struct(ref data) => {
            let (ty, idents, is_bytes) = struct_from_ast(&data.fields, custom_candid_path);
            (ty, serialize_struct(&idents, &is_bytes, custom_candid_path))
        }
        Data::Union(_) => unimplemented!("doesn't derive union type"),
    };
    // Return the generated impl directly; the old debug binding and the
    // commented-out `panic!(gen.to_string())` dump have been removed.
    quote! {
        impl #impl_generics #candid::types::CandidType for #name #ty_generics #where_clause {
            fn _ty() -> #candid::types::Type {
                #ty_body
            }
            fn id() -> #candid::types::TypeId { #candid::types::TypeId::of::<#name #ty_generics>() }
            fn idl_serialize<__S>(&self, __serializer: __S) -> ::std::result::Result<(), __S::Error>
            where
                __S: #candid::types::Serializer,
            {
                #ser_body
            }
        }
    }
}
/// An enum variant collected from the AST, carrying everything needed to
/// emit both its candid type description and its serialization match arm.
struct Variant {
    // Identifier exactly as written in the source enum.
    real_ident: syn::Ident,
    // Identifier after applying `#[serde(rename = "...")]`, if present.
    renamed_ident: syn::Ident,
    // Candid hash of the renamed identifier; variants are sorted by it.
    hash: u32,
    // Token stream producing the candid `Type` of the variant payload.
    ty: TokenStream,
    // Field identifiers of the payload (empty for unit variants).
    members: Vec<Ident>,
    // True when the variant is tagged `#[serde(with = "serde_bytes")]`.
    with_bytes: bool,
}
/// Shape of a variant's payload, mirroring Rust's three field styles.
enum Style {
    /// Named fields: `V { a: T }`.
    Struct,
    /// Positional fields: `V(T)`.
    Tuple,
    /// No payload: `V`.
    Unit,
}
impl Variant {
fn style(&self) -> Style {
if self.members.is_empty() {
return Style::Unit;
};
match self.members[0] {
Ident::Named(_) => Style::Struct,
Ident::Unnamed(_) => Style::Tuple,
}
}
fn to_pattern(&self) -> (TokenStream, Vec<TokenStream>) {
match self.style() {
Style::Unit => (quote! {}, Vec::new()),
Style::Struct => {
let id: Vec<_> = self.members.iter().map(|ident| ident.to_token()).collect();
(
quote! {
{#(ref #id),*}
},
id,
)
}
Style::Tuple => {
let id: Vec<_> = self
.members
.iter()
.map(|ident| {
let ident = ident.to_string();
let var = format!("__field{}", ident);
syn::parse_str(&var).unwrap()
})
.collect();
(
quote! {
(#(ref #id),*)
},
id,
)
}
}
}
}
/// Builds the candid `Variant` type tokens and the `idl_serialize` match
/// body for an enum. Returns `(type_tokens, serializer_tokens)`.
fn enum_from_ast(
    name: &syn::Ident,
    variants: &Punctuated<syn::Variant, Token![,]>,
    custom_candid_path: &Option<TokenStream>,
) -> (TokenStream, TokenStream) {
    // Collect every variant with its (possibly renamed) label and hash.
    let mut fs: Vec<_> = variants
        .iter()
        .map(|variant| {
            let id = variant.ident.clone();
            let attrs = get_attrs(&variant.attrs);
            let (renamed_ident, hash) = match attrs.rename {
                Some(ref rename) => (
                    proc_macro2::Ident::new(rename, proc_macro2::Span::call_site()),
                    idl_hash(rename),
                ),
                None => (id.clone(), idl_hash(&id.unraw().to_string())),
            };
            let (ty, idents, _) = struct_from_ast(&variant.fields, custom_candid_path);
            Variant {
                real_ident: id,
                renamed_ident,
                hash,
                ty,
                members: idents,
                with_bytes: attrs.with_bytes,
            }
        })
        .collect();
    // Reject hash collisions between variant labels, then order variants by
    // hash — serialization indices below depend on this order.
    let unique: BTreeSet<_> = fs.iter().map(|Variant { hash, .. }| hash).collect();
    assert_eq!(unique.len(), fs.len());
    fs.sort_unstable_by_key(|Variant { hash, .. }| *hash);
    let id = fs
        .iter()
        .map(|Variant { renamed_ident, .. }| renamed_ident.unraw().to_string());
    let ty = fs.iter().map(|Variant { ty, .. }| ty);
    let candid = candid_path(custom_candid_path);
    // Tokens evaluating to the candid `Type::Variant` description.
    let ty_gen = quote! {
        #candid::types::Variant(
            vec![
                #(#candid::types::Field {
                    id: #candid::types::Label::Named(#id.to_owned()),
                    ty: #ty }
                ),*
            ]
        )
    };
    // Fully qualified paths (`Enum::Variant`) for the match arms below.
    let id = fs.iter().map(|Variant { real_ident, .. }| {
        syn::parse_str::<TokenStream>(&format!("{}::{}", name, real_ident)).unwrap()
    });
    let index = 0..fs.len() as u64;
    // One (pattern, member-serialization) pair per variant; `with_bytes`
    // variants use the blob fast path.
    let (pattern, members): (Vec<_>, Vec<_>) = fs
        .iter()
        .map(|f| {
            let (pattern, id) = f.to_pattern();
            (
                pattern,
                if f.with_bytes {
                    quote! {
                        #(#candid::types::Compound::serialize_blob(&mut ser, #id.as_ref())?;)*
                    }
                } else {
                    quote! {
                        #(#candid::types::Compound::serialize_element(&mut ser, #id)?;)*
                    }
                },
            )
        })
        .unzip();
    let variant_gen = quote! {
        match *self {
            #(#id #pattern => {
                let mut ser = __serializer.serialize_variant(#index)?;
                #members
            }),*
        };
        Ok(())
    };
    (ty_gen, variant_gen)
}
/// Emits the `idl_serialize` body for a record: each field is serialized in
/// the order given by `idents`, with `serde_bytes` fields going through the
/// blob fast path instead of element-wise serialization.
fn serialize_struct(
    idents: &[Ident],
    is_bytes: &[bool],
    custom_candid_path: &Option<TokenStream>,
) -> TokenStream {
    let candid = candid_path(custom_candid_path);
    let mut ser_elem = Vec::with_capacity(idents.len());
    for (ident, bytes) in idents.iter().zip(is_bytes) {
        let id = ident.to_token();
        let stmt = if *bytes {
            quote! { #candid::types::Compound::serialize_blob(&mut ser, self.#id.as_ref())?; }
        } else {
            quote! { #candid::types::Compound::serialize_element(&mut ser, &self.#id)?; }
        };
        ser_elem.push(stmt);
    }
    quote! {
        let mut ser = __serializer.serialize_struct()?;
        #(#ser_elem)*
        Ok(())
    }
}
/// Builds the candid type tokens for a struct body (or a variant payload),
/// returning `(type_tokens, member_idents, with_bytes_flags)`.
fn struct_from_ast(
    fields: &syn::Fields,
    custom_candid_path: &Option<TokenStream>,
) -> (TokenStream, Vec<Ident>, Vec<bool>) {
    let candid = candid_path(custom_candid_path);
    match *fields {
        // Named fields map to a candid record.
        syn::Fields::Named(ref fields) => {
            let (fs, idents, is_bytes) = fields_from_ast(&fields.named, custom_candid_path);
            (
                quote! { #candid::types::Type::Record(#fs) },
                idents,
                is_bytes,
            )
        }
        syn::Fields::Unnamed(ref fields) => {
            let (fs, idents, is_bytes) = fields_from_ast(&fields.unnamed, custom_candid_path);
            // A single-field tuple struct is treated as a transparent
            // newtype: its candid type is the inner field's type.
            if idents.len() == 1 {
                let newtype = derive_type(&fields.unnamed[0].ty, custom_candid_path);
                (quote! { #newtype }, idents, is_bytes)
            } else {
                (
                    quote! { #candid::types::Type::Record(#fs) },
                    idents,
                    is_bytes,
                )
            }
        }
        // Fieldless bodies (unit structs / unit variants) map to `null`.
        syn::Fields::Unit => (
            quote! { #candid::types::Type::Null },
            Vec::new(),
            Vec::new(),
        ),
    }
}
/// A struct/variant member identifier: either a named field or a tuple
/// index.
#[derive(Clone)]
enum Ident {
    Named(syn::Ident),
    Unnamed(u32),
}
impl Ident {
    /// Renders the identifier as tokens usable in a field access position
    /// (`self.name` for named fields, `self.0` for tuple indices).
    fn to_token(&self) -> TokenStream {
        match self {
            Ident::Named(ident) => quote! { #ident },
            Ident::Unnamed(i) => syn::parse_str::<TokenStream>(&i.to_string()).unwrap(),
        }
    }
}
impl std::fmt::Display for Ident {
    /// Formats named identifiers via `syn::Ident`'s own `Display` and tuple
    /// indices as their decimal value. Writes directly into the formatter
    /// instead of allocating an intermediate `String` per call, as the
    /// original `format_args!("{}", x.to_string())` did.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Ident::Named(ident) => write!(f, "{}", ident),
            Ident::Unnamed(i) => write!(f, "{}", i),
        }
    }
}
/// A record field collected from the AST, with its candid label data.
struct Field {
    // Identifier (or tuple index) exactly as written in the source.
    real_ident: Ident,
    // Identifier after `#[serde(rename = "...")]`; used for the candid label.
    renamed_ident: Ident,
    // Candid hash of the (renamed) name, or the tuple index for unnamed
    // fields; fields are sorted by this value.
    hash: u32,
    // Tokens evaluating the field's candid `Type`.
    ty: TokenStream,
    // True when the field is tagged `#[serde(with = "serde_bytes")]`.
    with_bytes: bool,
}
/// Extracts the nested items of a `#[serde(...)]` attribute.
///
/// Non-`serde` attributes yield an empty list; a `serde` attribute that is
/// not a parenthesized list (or fails to parse) yields `Err(())`.
fn get_serde_meta_items(attr: &syn::Attribute) -> Result<Vec<syn::NestedMeta>, ()> {
    if !attr.path.is_ident("serde") {
        return Ok(Vec::new());
    }
    if let Ok(syn::Meta::List(meta)) = attr.parse_meta() {
        Ok(meta.nested.into_iter().collect())
    } else {
        Err(())
    }
}
/// The subset of `#[serde(...)]` attributes this derive recognizes.
struct Attributes {
    // From `#[serde(rename = "...")]` or `#[serde(rename(serialize = "..."))]`.
    rename: Option<String>,
    // From `#[serde(with = "serde_bytes")]`.
    with_bytes: bool,
}
/// Scans an item's attributes for the supported `#[serde(...)]` options.
/// Unknown serde options (and unparsable serde attributes) are silently
/// ignored.
fn get_attrs(attrs: &[syn::Attribute]) -> Attributes {
    use syn::Meta::{List, NameValue};
    use syn::NestedMeta::Meta;
    let mut res = Attributes {
        rename: None,
        with_bytes: false,
    };
    for item in attrs.iter().flat_map(get_serde_meta_items).flatten() {
        match &item {
            // #[serde(rename = "foo")]
            Meta(NameValue(m)) if m.path.is_ident("rename") => {
                if let syn::Lit::Str(lit) = &m.lit {
                    res.rename = Some(lit.value());
                }
            }
            // #[serde(rename(serialize = "foo"))]
            Meta(List(metas)) if metas.path.is_ident("rename") => {
                for item in metas.nested.iter() {
                    match item {
                        Meta(NameValue(m)) if m.path.is_ident("serialize") => {
                            if let syn::Lit::Str(lit) = &m.lit {
                                res.rename = Some(lit.value());
                            }
                        }
                        _ => continue,
                    }
                }
            }
            // #[serde(with = "serde_bytes")]
            Meta(NameValue(m)) if m.path.is_ident("with") => {
                if let syn::Lit::Str(lit) = &m.lit {
                    if lit.value() == "serde_bytes" {
                        res.with_bytes = true;
                    }
                }
            }
            _ => continue,
        }
    }
    res
}
/// Builds the candid field-list tokens for a set of named or unnamed fields,
/// returning `(field_vec_tokens, member_idents, with_bytes_flags)`.
fn fields_from_ast(
    fields: &Punctuated<syn::Field, syn::Token![,]>,
    custom_candid_path: &Option<TokenStream>,
) -> (TokenStream, Vec<Ident>, Vec<bool>) {
    let candid = candid_path(custom_candid_path);
    let mut fs: Vec<_> = fields
        .iter()
        .enumerate()
        .map(|(i, field)| {
            let attrs = get_attrs(&field.attrs);
            // Named fields hash their (possibly renamed) name; unnamed
            // fields use the tuple index directly as the candid id.
            let (real_ident, renamed_ident, hash) = match field.ident {
                Some(ref ident) => {
                    let real_ident = Ident::Named(ident.clone());
                    match attrs.rename {
                        Some(ref renamed) => {
                            let ident =
                                proc_macro2::Ident::new(renamed, proc_macro2::Span::call_site());
                            let renamed_ident = Ident::Named(ident);
                            (real_ident, renamed_ident, idl_hash(renamed))
                        }
                        None => (
                            real_ident.clone(),
                            real_ident,
                            idl_hash(&ident.unraw().to_string()),
                        ),
                    }
                }
                None => (Ident::Unnamed(i as u32), Ident::Unnamed(i as u32), i as u32),
            };
            Field {
                real_ident,
                renamed_ident,
                hash,
                ty: derive_type(&field.ty, custom_candid_path),
                with_bytes: attrs.with_bytes,
            }
        })
        .collect();
    // Reject hash collisions between field labels, then order fields by
    // hash — the serializer emits them in this order.
    let unique: BTreeSet<_> = fs.iter().map(|Field { hash, .. }| hash).collect();
    assert_eq!(unique.len(), fs.len());
    fs.sort_unstable_by_key(|Field { hash, .. }| *hash);
    let id = fs
        .iter()
        .map(|Field { renamed_ident, .. }| match renamed_ident {
            Ident::Named(ref id) => {
                let name = id.unraw().to_string();
                quote! { #candid::types::Label::Named(#name.to_string()) }
            }
            Ident::Unnamed(ref i) => quote! { #candid::types::Label::Id(#i) },
        });
    let ty = fs.iter().map(|Field { ty, .. }| ty);
    let ty_gen = quote! {
        vec![
            #(#candid::types::Field {
                id: #id,
                ty: #ty }
            ),*
        ]
    };
    let idents: Vec<Ident> = fs
        .iter()
        .map(|Field { real_ident, .. }| real_ident.clone())
        .collect();
    let is_bytes: Vec<_> = fs.iter().map(|f| f.with_bytes).collect();
    (ty_gen, idents, is_bytes)
}
/// Emits `<T as CandidType>::ty()` for a field type, so the IDL type of `T`
/// is resolved at the expansion site rather than computed here.
fn derive_type(t: &syn::Type, custom_candid_path: &Option<TokenStream>) -> TokenStream {
    let candid = candid_path(custom_candid_path);
    quote! {
        <#t as #candid::types::CandidType>::ty()
    }
}
/// Adds a `CandidType` bound to every type parameter of the derived impl,
/// so the generated code can call `<T as CandidType>::ty()` on each one.
/// Lifetime and const parameters are left untouched.
fn add_trait_bounds(mut generics: Generics, custom_candid_path: &Option<TokenStream>) -> Generics {
    // The candid crate path is loop-invariant; resolve it once instead of
    // per parameter (the quote interpolation below only borrows it).
    let candid = candid_path(custom_candid_path);
    for param in &mut generics.params {
        if let GenericParam::Type(ref mut type_param) = *param {
            let bound = syn::parse_quote! { #candid::types::CandidType };
            type_param.bounds.push(bound);
        }
    }
    generics
}
| 32.704878 | 100 | 0.476322 |
6a78805cb776f8084389be7c71697d18af8fd3b8
| 1,353 |
use super::{wasm_externtype_t, wasm_name_t};
use wasmer::ExportType;
#[allow(non_camel_case_types)]
#[derive(Clone)]
/// C-API view of a module export: an owned name plus its extern type.
/// Both are boxed so the accessor functions below can hand out references
/// that stay valid while the export type itself is alive.
pub struct wasm_exporttype_t {
    name: Box<wasm_name_t>,
    extern_type: Box<wasm_externtype_t>,
}
wasm_declare_boxed_vec!(exporttype);
/// Creates a new export type from a name and an extern type.
///
/// Returns `None` (a null pointer across the FFI boundary) when either
/// argument is null; otherwise takes ownership of both.
#[no_mangle]
pub extern "C" fn wasm_exporttype_new(
    name: Option<Box<wasm_name_t>>,
    extern_type: Option<Box<wasm_externtype_t>>,
) -> Option<Box<wasm_exporttype_t>> {
    match (name, extern_type) {
        (Some(name), Some(extern_type)) => {
            Some(Box::new(wasm_exporttype_t { name, extern_type }))
        }
        _ => None,
    }
}
/// Returns a borrowed pointer to the export's name.
#[no_mangle]
pub extern "C" fn wasm_exporttype_name(export_type: &wasm_exporttype_t) -> &wasm_name_t {
    export_type.name.as_ref()
}
/// Returns a borrowed pointer to the export's extern type.
#[no_mangle]
pub extern "C" fn wasm_exporttype_type(export_type: &wasm_exporttype_t) -> &wasm_externtype_t {
    export_type.extern_type.as_ref()
}
/// Frees an export type. Takes ownership of the box and drops it;
/// a null pointer maps to `None` and is a harmless no-op.
#[no_mangle]
pub extern "C" fn wasm_exporttype_delete(_export_type: Option<Box<wasm_exporttype_t>>) {}
impl From<ExportType> for wasm_exporttype_t {
    // Delegates to the `&ExportType` conversion below; the owned value is
    // only borrowed, since the conversion copies the name/type anyway.
    fn from(other: ExportType) -> Self {
        (&other).into()
    }
}
impl From<&ExportType> for wasm_exporttype_t {
    /// Copies a Wasmer [`ExportType`] into its C-API representation,
    /// boxing the owned name and extern type.
    fn from(other: &ExportType) -> Self {
        wasm_exporttype_t {
            name: Box::new(other.name().to_string().into()),
            extern_type: Box::new(other.ty().into()),
        }
    }
}
| 26.529412 | 95 | 0.698448 |
0ea1634c0b4c5e9c38bc63155b0e6924e14e83a0
| 1,752 |
//! Infrastructure for compiler plugins.
//!
//! Plugins are Rust libraries which extend the behavior of `rustc`
//! in various ways.
//!
//! Plugin authors will use the `Registry` type re-exported by
//! this module, along with its methods. The rest of the module
//! is for use by `rustc` itself.
//!
//! To define a plugin, build a dylib crate with a
//! `#[plugin_registrar]` function:
//!
//! ```no_run
//! #![crate_name = "myplugin"]
//! #![crate_type = "dylib"]
//! #![feature(plugin_registrar)]
//! #![feature(rustc_private)]
//!
//! extern crate rustc_plugin;
//! extern crate syntax;
//! extern crate syntax_pos;
//!
//! use rustc_plugin::Registry;
//! use syntax::ext::base::{ExtCtxt, MacResult};
//! use syntax_pos::Span;
//! use syntax::tokenstream::TokenTree;
//!
//! #[plugin_registrar]
//! pub fn plugin_registrar(reg: &mut Registry) {
//! reg.register_macro("mymacro", expand_mymacro);
//! }
//!
//! fn expand_mymacro(cx: &mut ExtCtxt, span: Span, tt: &[TokenTree]) -> Box<MacResult> {
//! unimplemented!()
//! }
//!
//! # fn main() {}
//! ```
//!
//! WARNING: We currently don't check that the registrar function
//! has the appropriate type!
//!
//! To use a plugin while compiling another crate:
//!
//! ```rust
//! #![feature(plugin)]
//! #![plugin(myplugin)]
//! ```
//!
//! See the [`plugin` feature](../unstable-book/language-features/plugin.html) of
//! the Unstable Book for more examples.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(nll)]
#![feature(rustc_diagnostic_macros)]
#![recursion_limit="256"]
#![deny(rust_2018_idioms)]
pub use registry::Registry;
mod diagnostics;
pub mod registry;
pub mod load;
pub mod build;
__build_diagnostic_array! { librustc_plugin, DIAGNOSTICS }
| 25.028571 | 89 | 0.658676 |
cc6fe14736cc72ddb854ce5765dd8f2629950b91
| 4,177 |
extern crate rand;
use std::env;
use std::fmt::Debug;
use std::iter::FromIterator;
use self::rand::{thread_rng, seq, Rng};
// Default number of Monte-Carlo simulations per probability estimate.
const MAX_SIMS: u32 = 100_000;

/// The `Bag` struct. The main collection type for holding populations of things.
pub struct Bag<T: Clone> {
    /// The population of items that random picks are drawn from.
    pub items: Vec<T>,
    /// Number of simulations run per probability estimate.
    pub max_sims: u32
}

/// Reads the default simulation count from the `MENDEL_MAX_SIMS` environment
/// variable, falling back to `MAX_SIMS` when the variable is unset *or* does
/// not parse as a `u32`.
///
/// Previously a set-but-malformed value caused a panic via `.unwrap()`;
/// now it degrades gracefully to the built-in default.
fn get_default_max_sims() -> u32 {
    env::var("MENDEL_MAX_SIMS")
        .ok()
        .and_then(|val| val.parse::<u32>().ok())
        .unwrap_or(MAX_SIMS)
}
impl<T: Clone> Bag<T> {
    /// Constructs a new `Bag<T>` from range.
    ///
    /// [`min`, `max`)
    ///
    /// # Examples
    ///
    /// Generate a new `Bag<i32>` with numbers 1 through 10:
    ///
    /// ```
    /// use mendel::Bag;
    ///
    /// let my_number_bag = Bag::from_range(1, 11);
    /// ```
    pub fn from_range(min: i32, max: i32) -> Self where
        Vec<T>: FromIterator<i32> {
        // TODO: Add shuffle option
        let items: Vec<T> = (min..max).collect();
        Bag { items, max_sims: get_default_max_sims() }
    }

    /// Constructs a new `Bag<T>` from a vector of items.
    ///
    /// # Examples
    ///
    /// Generate a new `Bag<&str>`:
    ///
    /// ```
    /// use mendel::Bag;
    ///
    /// let animals = vec!["spider", "fish", "tiger", "pigeon"];
    /// let animal_bag = Bag::from_vec(animals);
    /// ```
    pub fn from_vec(v: Vec<T>) -> Self {
        // `v` is owned by this constructor, so it can be moved in directly;
        // the previous implementation cloned it needlessly.
        Bag { items: v, max_sims: get_default_max_sims() }
    }

    /// Predicts probability of criteria being met for the first random item grabbed from the bag.
    ///
    /// # Panics
    ///
    /// Panics if the bag is empty (there is nothing to draw from).
    ///
    /// # Examples
    ///
    /// Odds of selecting an even number from 1 - 10. Assert factors in +/- 1%:
    ///
    /// ```
    /// use mendel::Bag;
    ///
    /// let my_bag = Bag::from_range(1, 11);
    /// let odds_of_even = my_bag.one(|v| v % 2 == 0);
    /// assert!(0.49 < odds_of_even && odds_of_even < 0.51);
    /// ```
    pub fn one<F>(&self, f: F) -> f64 where
        F: Fn(&T) -> bool {
        let mut picks_in_favor: u32 = 0;
        // Fetch the thread-local RNG once instead of on every iteration.
        let mut rng = thread_rng();
        for _ in 0..self.max_sims {
            let idx = rng.gen_range(0, self.items.len());
            if f(&self.items[idx]) {
                picks_in_favor += 1;
            }
        }
        // BUG FIX: divide by the number of simulations actually run
        // (`self.max_sims`), not the compile-time default `MAX_SIMS` —
        // otherwise `set_max_sims` skewed every estimate.
        picks_in_favor as f64 / self.max_sims as f64
    }

    /// Predicts probability of criteria being met for the first `sample_size` random items grabbed from the bag.
    ///
    /// # Examples
    ///
    /// Odds of getting a 2 in your first 3 picks from a list of numbers 1 - 10:
    ///
    /// ```
    /// use mendel::Bag;
    ///
    /// let my_bag = Bag::from_range(1, 11);
    /// let odds_of_two = my_bag.sample(3, |values| {
    ///     for v in values {
    ///         if *v == 2 {
    ///             return true;
    ///         }
    ///     }
    ///     false
    /// });
    /// assert!(0.29 < odds_of_two && odds_of_two < 0.31);
    /// ```
    pub fn sample<F>(&self, sample_size: usize, f: F) -> f64 where
        T: Debug,
        F: Fn(Vec<&T>) -> bool {
        let mut picks_in_favor: u32 = 0;
        let mut rng = thread_rng();
        for _ in 0..self.max_sims {
            // Sampling only borrows the population; cloning the whole
            // vector per call (as before) was unnecessary.
            let sample = seq::sample_iter(&mut rng, &self.items, sample_size).unwrap();
            if f(sample) {
                picks_in_favor += 1;
            }
        }
        // BUG FIX: same divisor correction as in `one` above.
        picks_in_favor as f64 / self.max_sims as f64
    }

    /// Set the Bag's maximum amount of simulations to run when generating probabilities.
    ///
    /// The default `max_sims` is set by either the MENDEL_MAX_SIMS environment variable value,
    /// or if that env var doesn't exist than it defaults to 100,000. In the future this may
    /// be a field that can be set during initialization of the `Bag` struct.
    ///
    /// # Examples
    ///
    /// Set the max simulations to 10 thousand:
    ///
    /// ```
    /// use mendel::Bag;
    ///
    /// let mut my_bag = Bag::from_range(1, 11);
    /// my_bag.set_max_sims(10_000);
    /// assert!(my_bag.max_sims == 10_000);
    /// ```
    pub fn set_max_sims(&mut self, max_sims: u32) -> () {
        self.max_sims = max_sims;
    }
}
| 29.006944 | 113 | 0.526454 |
088fc5ae0e8ccb3925dfe953c9adc3df3e91630a
| 1,361 |
use serde_json::Value;
use crate::{
registry::MetaSchemaRef,
types::{ParseError, ParseFromJSON, ParseFromParameter, ParseResult, ToJSON, Type, TypeName},
};
/// A password type.
///
/// NOTE: Its type is `string` and the format is `password`, and it does not
/// protect the data in the memory.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct Password(pub String);
impl AsRef<str> for Password {
    /// Borrows the inner password string.
    fn as_ref(&self) -> &str {
        &self.0
    }
}
impl Type for Password {
    /// Advertised in the OpenAPI spec as `type: string, format: password`.
    const NAME: TypeName = TypeName::Normal {
        ty: "string",
        format: Some("password"),
    };
    fn schema_ref() -> MetaSchemaRef {
        // Inline schema built directly from NAME; a primitive string type
        // needs no entry in the shared schema registry.
        MetaSchemaRef::Inline(Box::new(Self::NAME.into()))
    }
    // Generates the remaining `Type` items — macro defined elsewhere in
    // this crate; presumably the value/raw-value plumbing (TODO confirm).
    impl_value_type!();
}
impl ParseFromJSON for Password {
fn parse_from_json(value: Value) -> ParseResult<Self> {
if let Value::String(value) = value {
Ok(Self(value))
} else {
Err(ParseError::expected_type(value))
}
}
}
impl ParseFromParameter for Password {
fn parse_from_parameter(value: Option<&str>) -> ParseResult<Self> {
match value {
Some(value) => Ok(Self(value.to_string())),
None => Err(ParseError::expected_input()),
}
}
}
impl ToJSON for Password {
    /// Serializes as a plain JSON string — note the raw password text is
    /// emitted; the `password` format only affects documentation/UI hints.
    fn to_json(&self) -> Value {
        Value::String(self.0.clone())
    }
}
| 23.465517 | 96 | 0.603968 |
8a34065e69298794afcbaeadb20f60d7c046170b
| 386 |
//! Test.
//! テスト。
//!
//! `cargo run --example inline_table`
extern crate tomboy_toml_dom;
// use tomboy_toml_dom::Toml;
fn main() {
    // Read a Toml file.
    // let doc = Toml::from_file("./resource/inline-table.toml");
    // TODO: re-enable once inline-table support exists; the block below
    // documents the expected Debug output for `inline_table_3`.
    /*
    assert_eq!(
        &format!("{:?}", doc.get_val_by_key("inline_table_3")),
        "{ name = \"b\", weight = 93.5 }"
    );
    */
}
| 17.545455 | 65 | 0.536269 |
dee9e856f9562fe08953c9c2e382fabe694ffe1d
| 3,590 |
/*!
This is an example `interface crate`,
where all publically available modules(structs of function pointers) and types are declared,
These crate test a few of the errors that are returned when loading dynamic libraries
*/
use abi_stable::{
StableAbi,
package_version_strings,
library::RootModule,
sabi_types::VersionStrings,
};
/// Declares `TestingMod_Ref` as the root module of the
/// `testing_1_loading_errors` dynamic library.
impl RootModule for TestingMod_Ref {
    abi_stable::declare_root_module_statics!{TestingMod_Ref}
    const BASE_NAME: &'static str = "testing_1_loading_errors";
    const NAME: &'static str = "testing_1_loading_errors";
    const VERSION_STRINGS: VersionStrings = package_version_strings!();
}
#[repr(C)]
#[derive(StableAbi)]
#[sabi(kind(Prefix(prefix_ref="TestingMod_Ref")))]
#[sabi(missing_field(panic))]
/// Root module layout for the happy-path test library.
/// Fields after `last_prefix_field` may be absent in older library
/// versions; `missing_field(panic)` makes such access panic.
pub struct TestingMod {
    #[sabi(last_prefix_field)]
    pub a: u32,
    pub b: u32,
    pub c: u32,
}
////////////////////////////////////////////////////////////////////////////////
/// This type is used to test that errors from types with an incompatible ABI can be printed.
///
/// The reason for the incompatibility needs to be printed in the load error so
/// that users can diagnose why the library failed to load.
#[repr(C)]
#[derive(StableAbi)]
#[sabi(kind(Prefix(prefix_ref="WithIncompatibleLayout_Ref")))]
pub struct WithIncompatibleLayout {
    // NOTE(review): presumably this field's layout deliberately disagrees
    // with what the loaded library exports, so the load fails with an
    // ABI-incompatibility error — confirm against the test harness.
    #[sabi(last_prefix_field)]
    pub __foo: u64,
}
impl RootModule for WithIncompatibleLayout_Ref {
    abi_stable::declare_root_module_statics!{WithIncompatibleLayout_Ref}
    // NOTE(review): BASE_NAME/NAME intentionally point at the
    // `testing_1_loading_errors` library (same as TestingMod_Ref) so this
    // incompatible layout is checked against it — confirm.
    const BASE_NAME: &'static str = "testing_1_loading_errors";
    const NAME: &'static str = "testing_1_loading_errors";
    const VERSION_STRINGS: VersionStrings = package_version_strings!();
}
////////////////////////////////////////////////////////////////////////////////
/// This type is used to test that errors from types with an incompatible ABI can be printed.
///
/// The reason for the incompatibility needs to be printed in the load error so
/// that users can diagnose why the library failed to load.
#[repr(C)]
#[derive(StableAbi)]
#[sabi(kind(Prefix(prefix_ref="NonAbiStableLib_Ref")))]
pub struct NonAbiStableLib {
    // Placeholder field; the library named below is expected not to be an
    // abi_stable library at all, so this layout is never actually matched.
    #[sabi(last_prefix_field)]
    pub __foo: u64,
}
impl RootModule for NonAbiStableLib_Ref {
    abi_stable::declare_root_module_statics!{NonAbiStableLib_Ref}
    // Points at `non_abi_stable_lib`, used to exercise the error path for
    // loading a library that does not export abi_stable metadata.
    const BASE_NAME: &'static str = "non_abi_stable_lib";
    const NAME: &'static str = "non_abi_stable_lib";
    const VERSION_STRINGS: VersionStrings = package_version_strings!();
}
////////////////////////////////////////////////////////////////////////////////
/// Parameters for the program passed through environment variables.
///
/// The reason that env vars are used instead of command line arguments is because
/// both the dynamic library and the executable can see the env vars.
#[derive(Debug)]
pub struct EnvVars{
    /// Whether the dynamic library returns an error.
    /// Parsed from the `RETURN` env var by `get_env_vars`.
    pub return_what: ReturnWhat,
}
/// What the dynamic library is asked to do when loaded.
#[derive(Debug)]
pub enum ReturnWhat{
    /// Load successfully.
    Ok,
    /// Return an error from the loading function.
    Error,
    /// Panic inside the loading function.
    Panic,
}

/// Error returned when a string does not name a `ReturnWhat` variant;
/// carries the offending input.
#[derive(Debug)]
pub struct ParseReturnWhatError(String);

impl std::str::FromStr for ReturnWhat {
    type Err = ParseReturnWhatError;

    /// Matches on a *prefix* of the trimmed input, so "ok", "okay" and
    /// "ok whatever" all parse as `ReturnWhat::Ok` (likewise for the
    /// other variants).
    fn from_str(input: &str) -> Result<Self, ParseReturnWhatError> {
        match input.trim() {
            t if t.starts_with("ok") => Ok(ReturnWhat::Ok),
            t if t.starts_with("error") => Ok(ReturnWhat::Error),
            t if t.starts_with("panic") => Ok(ReturnWhat::Panic),
            _ => Err(ParseReturnWhatError(input.to_string())),
        }
    }
}
/// Returns the parameters passed through environment variables
///
/// # Panics
///
/// Panics if the `RETURN` env var is unset or does not parse as a
/// `ReturnWhat` — this is test scaffolding, so failing loudly is intended.
pub fn get_env_vars() -> EnvVars {
    EnvVars{
        return_what: std::env::var("RETURN").unwrap().parse().unwrap(),
    }
}
| 26.791045 | 92 | 0.654596 |
f933e078bfb5f2dabbe8a484886cdc889a4a7cba
| 99,341 |
// Copyright (c) 2019, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use crate::expression_parser::ExpressionParser;
use crate::lexer::Lexer;
use crate::parser_env::ParserEnv;
use crate::parser_trait::{Context, ExpectedTokens, ParserTrait, SeparatedListKind};
use crate::smart_constructors::{NodeType, SmartConstructors, Token};
use crate::statement_parser::StatementParser;
use crate::type_parser::TypeParser;
use parser_core_types::lexable_token::LexableToken;
use parser_core_types::syntax_error::{self as Errors, SyntaxError};
use parser_core_types::token_kind::TokenKind;
use parser_core_types::trivia_kind::TriviaKind;
/// Parser for top-level Hack declarations (classes, enums, namespaces, …).
/// Delegates to the type/statement/expression parsers for nested constructs.
pub struct DeclarationParser<'a, S>
where
    S: SmartConstructors,
    S::R: NodeType,
{
    // Token source / current position in the input.
    lexer: Lexer<'a, S::TF>,
    // Parser configuration flags.
    env: ParserEnv,
    // Expected-token stack and skipped-token buffer for error recovery.
    context: Context<'a, Token<S>>,
    // Syntax errors accumulated so far.
    errors: Vec<SyntaxError>,
    // Smart-constructor state used to build syntax nodes (via the S! macro).
    sc: S,
}
// Manual Clone impl: presumably written by hand (rather than derived) so the
// bounds stay exactly `S: SmartConstructors, S::R: NodeType` — a derive would
// add its own `S: Clone` style bounds. TODO confirm.
impl<'a, S> Clone for DeclarationParser<'a, S>
where
    S: SmartConstructors,
    S::R: NodeType,
{
    fn clone(&self) -> Self {
        Self {
            lexer: self.lexer.clone(),
            env: self.env.clone(),
            context: self.context.clone(),
            errors: self.errors.clone(),
            sc: self.sc.clone(),
        }
    }
}
// Plumbing that lets DeclarationParser participate in the shared parser
// machinery: construction/deconstruction, state accessors, and adoption of
// a sub-parser's final state.
impl<'a, S> ParserTrait<'a, S> for DeclarationParser<'a, S>
where
    S: SmartConstructors,
    S::R: NodeType,
{
    fn make(
        lexer: Lexer<'a, S::TF>,
        env: ParserEnv,
        context: Context<'a, Token<S>>,
        errors: Vec<SyntaxError>,
        sc: S,
    ) -> Self {
        Self {
            lexer,
            env,
            context,
            errors,
            sc,
        }
    }
    fn into_parts(self) -> (Lexer<'a, S::TF>, Context<'a, Token<S>>, Vec<SyntaxError>, S) {
        (self.lexer, self.context, self.errors, self.sc)
    }
    fn lexer(&self) -> &Lexer<'a, S::TF> {
        &self.lexer
    }
    fn lexer_mut(&mut self) -> &mut Lexer<'a, S::TF> {
        &mut self.lexer
    }
    // Adopt the position, context, errors and smart-constructor state of a
    // sub-parser that has finished (or a speculative clone we committed to).
    fn continue_from<P: ParserTrait<'a, S>>(&mut self, other: P) {
        let (lexer, context, errors, sc) = other.into_parts();
        self.lexer = lexer;
        self.context = context;
        self.errors = errors;
        self.sc = sc;
    }
    fn add_error(&mut self, error: SyntaxError) {
        self.errors.push(error)
    }
    fn env(&self) -> &ParserEnv {
        &self.env
    }
    fn sc_mut(&mut self) -> &mut S {
        &mut self.sc
    }
    fn drain_skipped_tokens(&mut self) -> std::vec::Drain<Token<S>> {
        self.context.skipped_tokens.drain(..)
    }
    fn skipped_tokens(&self) -> &[Token<S>] {
        &self.context.skipped_tokens
    }
    fn context_mut(&mut self) -> &mut Context<'a, Token<S>> {
        &mut self.context
    }
    fn context(&self) -> &Context<'a, Token<S>> {
        &self.context
    }
}
impl<'a, S> DeclarationParser<'a, S>
where
S: SmartConstructors,
S::R: NodeType,
{
    /// Runs `f` against a type-parser view of the current parser state, then
    /// adopts whatever state (position, context, errors, sc) it ended in.
    fn with_type_parser<F, U>(&mut self, f: F) -> U
    where
        F: Fn(&mut TypeParser<'a, S>) -> U,
    {
        let mut type_parser: TypeParser<S> = TypeParser::make(
            self.lexer.clone(),
            self.env.clone(),
            self.context.clone(),
            self.errors.clone(),
            self.sc.clone(),
        );
        let res = f(&mut type_parser);
        self.continue_from(type_parser);
        res
    }
    fn parse_type_specifier(&mut self, allow_var: bool, allow_attr: bool) -> S::R {
        self.with_type_parser(|p: &mut TypeParser<'a, S>| {
            p.parse_type_specifier(allow_var, allow_attr)
        })
    }
    /// Same hand-off pattern as `with_type_parser`, but for statements.
    fn with_statement_parser<F, U>(&mut self, f: F) -> U
    where
        F: Fn(&mut StatementParser<'a, S>) -> U,
    {
        let mut statement_parser: StatementParser<S> = StatementParser::make(
            self.lexer.clone(),
            self.env.clone(),
            self.context.clone(),
            self.errors.clone(),
            self.sc.clone(),
        );
        let res = f(&mut statement_parser);
        self.continue_from(statement_parser);
        res
    }
    fn parse_simple_type_or_type_constant(&mut self) -> S::R {
        self.with_type_parser(|x: &mut TypeParser<'a, S>| x.parse_simple_type_or_type_constant())
    }
    fn parse_simple_type_or_generic(&mut self) -> S::R {
        self.with_type_parser(|p: &mut TypeParser<'a, S>| p.parse_simple_type_or_generic())
    }
    /// Same hand-off pattern as `with_type_parser`, but for expressions.
    fn with_expression_parser<F, U>(&mut self, f: F) -> U
    where
        F: Fn(&mut ExpressionParser<'a, S>) -> U,
    {
        let mut expression_parser: ExpressionParser<S> = ExpressionParser::make(
            self.lexer.clone(),
            self.env.clone(),
            self.context.clone(),
            self.errors.clone(),
            self.sc.clone(),
        );
        let res = f(&mut expression_parser);
        self.continue_from(expression_parser);
        res
    }
    fn parse_expression(&mut self) -> S::R {
        self.with_expression_parser(|p: &mut ExpressionParser<'a, S>| p.parse_expression())
    }
    fn parse_compound_statement(&mut self) -> S::R {
        self.with_statement_parser(|p: &mut StatementParser<'a, S>| p.parse_compound_statement())
    }
    /// Parses enum members up to (but not consuming) the closing brace.
    fn parse_enumerator_list_opt(&mut self) -> S::R {
        // SPEC
        // enumerator-list:
        //   enumerator
        //   enumerator-list   enumerator
        //
        self.parse_terminated_list(|x: &mut Self| x.parse_enumerator(), TokenKind::RightBrace)
    }
    /// Enum-class variant of the above.
    fn parse_enum_class_enumerator_list_opt(&mut self) -> S::R {
        self.parse_terminated_list(
            |x: &mut Self| x.parse_enum_class_enumerator(),
            TokenKind::RightBrace,
        )
    }
    /// Parses `enum Name : base as constraint includes ... { ... }`.
    /// The leading `enum` token must already be known to be present
    /// (`assert_token` panics otherwise).
    fn parse_enum_declaration(&mut self, attrs: S::R) -> S::R {
        // enum-name-list-nonempty:
        //   : enum-name enum-name-list
        // includes-declaration-opt:
        //   :
        //   : includes enum-name-list-nonempty
        // enum-declaration:
        //   attribute-specification-opt enum name enum-base type-constraint-opt includes-declaration-opt /
        //   { enumerator-list-opt }
        // enum-base:
        //   : int
        //   : string
        //
        // TODO: SPEC ERROR: The spec states that the only legal enum types
        // are "int" and "string", but Hack allows any type, and apparently
        // some of those are meaningful and desired. Figure out what types
        // are actually legal and illegal as enum base types; put them in the
        // spec, and add an error pass that says when they are wrong.
        let enum_ = self.assert_token(TokenKind::Enum);
        let name = self.require_name();
        let colon = self.require_colon();
        let base =
            self.parse_type_specifier(false /* allow_var */, true /* allow_attr */);
        let enum_type = self.parse_type_constraint_opt();
        let (classish_includes, classish_includes_list) = self.parse_classish_includes_opt();
        let (left_brace, enumerators, right_brace) =
            self.parse_braced_list(|x: &mut Self| x.parse_enumerator_list_opt());
        S!(
            make_enum_declaration,
            self,
            attrs,
            enum_,
            name,
            colon,
            base,
            enum_type,
            classish_includes,
            classish_includes_list,
            left_brace,
            enumerators,
            right_brace,
        )
    }
    /// Parses `enum class Name : base extends ... { ... }`.
    fn parse_enum_class_declaration(&mut self, attrs: S::R) -> S::R {
        // enum-class-declaration:
        //   attribute-specification-opt enum class name : base { enum-class-enumerator-list-opt }
        let enum_kw = self.assert_token(TokenKind::Enum);
        let class_kw = self.assert_token(TokenKind::Class);
        let name = self.require_class_name();
        let colon = self.require_colon();
        let base =
            self.parse_type_specifier(false /* allow_var */, false /* allow_attr */);
        let (classish_extends, classish_extends_list) = self.parse_extends_opt();
        let left_brace = self.require_left_brace();
        let enumerators = self.parse_enum_class_enumerator_list_opt();
        let right_brace = self.require_right_brace();
        S!(
            make_enum_class_declaration,
            self,
            attrs,
            enum_kw,
            class_kw,
            name,
            colon,
            base,
            classish_extends,
            classish_extends_list,
            left_brace,
            enumerators,
            right_brace
        )
    }
    // One token of lookahead past `enum` decides between the two forms.
    fn parse_enum_or_enum_class_declaration(&mut self, attrs: S::R) -> S::R {
        match self.peek_token_kind_with_lookahead(1) {
            TokenKind::Class => self.parse_enum_class_declaration(attrs),
            _ => self.parse_enum_declaration(attrs),
        }
    }
    /// Parses one record field: `type name (= expression)? ;`.
    fn parse_record_field(&mut self) -> S::R {
        // SPEC
        // record_field:
        //   type-specifier name (= expression)? ;
        let field_type = self.parse_type_specifier(false, true);
        let name = self.require_name_allow_non_reserved();
        let init = self.parse_simple_initializer_opt();
        let semi = self.require_semicolon();
        S!(make_record_field, self, field_type, name, init, semi)
    }
    /// Parses record fields until the closing brace.
    fn parse_record_fields(&mut self) -> S::R {
        // SPEC
        // record-list:
        //   record-field
        //   record-list record-field
        self.parse_terminated_list(|x| x.parse_record_field(), TokenKind::RightBrace)
    }
    /// Parses an optional `extends <type>` clause with a single base type.
    /// Returns (extends-token, extends-type); both missing when absent.
    fn parse_single_extends_opt(&mut self) -> (S::R, S::R) {
        let token_kind = self.peek_token_kind();
        if token_kind != TokenKind::Extends {
            let missing1 = S!(make_missing, self, self.pos());
            let missing2 = S!(make_missing, self, self.pos());
            (missing1, missing2)
        } else {
            let token = self.next_token();
            let extends_token = S!(make_token, self, token);
            let extends_type = self.parse_type_specifier(false, false);
            (extends_token, extends_type)
        }
    }
    /// Parses `abstract? record name extends-single? { record-list }`.
    fn parse_record_declaration(&mut self, attrs: S::R) -> S::R {
        // record-declaration:
        //   abstract? record name extends-single? { record-list }
        let abstract_ = self.optional_token(TokenKind::Abstract);
        let record = self.require_token(TokenKind::RecordDec, Errors::error1037);
        let name = self.require_name();
        let (record_extends, record_extends_type) = self.parse_single_extends_opt();
        let (left_brace, record_fields, right_brace) =
            self.parse_braced_list(|x| x.parse_record_fields());
        S!(
            make_record_declaration,
            self,
            attrs,
            abstract_,
            record,
            name,
            record_extends,
            record_extends_type,
            left_brace,
            record_fields,
            right_brace
        )
    }
    /// Tries to parse the leading `<?hh` (or hashbang) header of the file.
    /// Returns `Some(markup)` and commits the speculative parse when a header
    /// (or an acceptable hashbang for .hack/.hackpartial files) was found;
    /// otherwise returns `None` without advancing.
    pub fn parse_leading_markup_section(&mut self) -> Option<S::R> {
        let mut parser1 = self.clone();
        let (markup_section, has_suffix) =
            parser1.with_statement_parser(|p: &mut StatementParser<'a, S>| p.parse_header());
        // proceed successfully if we've consumed <?..., or dont need it
        // We purposefully ignore leading trivia before the <?hh, and handle
        // the error on a later pass
        let file_path = self.lexer().source().file_path();
        if has_suffix {
            self.continue_from(parser1);
            Some(markup_section)
        } else {
            if self.lexer().source().length() > 0 {
                if file_path.has_extension("php") {
                    self.with_error(Errors::error1001);
                    None
                } else if file_path.has_extension("hack") || file_path.has_extension("hackpartial")
                {
                    // a .hack or .hackpartial file with a hashbang
                    // parse the hashbang correctly and continue
                    self.continue_from(parser1);
                    Some(markup_section)
                } else {
                    // Otherwise it's a non .php file with no <?
                    None
                }
            } else {
                None
            }
        }
    }
    /// Parses the body of a namespace: either `;` (empty body) or a braced
    /// declaration list. On anything else, recovers with an all-missing body
    /// without consuming the offending token.
    fn parse_namespace_body(&mut self) -> S::R {
        match self.peek_token_kind() {
            TokenKind::Semicolon => {
                let token = self.fetch_token();
                S!(make_namespace_empty_body, self, token)
            }
            TokenKind::LeftBrace => {
                let left = self.fetch_token();
                let body = self.parse_terminated_list(
                    |x: &mut Self| x.parse_declaration(),
                    TokenKind::RightBrace,
                );
                let right = self.require_right_brace();
                S!(make_namespace_body, self, left, body, right)
            }
            _ => {
                // ERROR RECOVERY: return an inert namespace (one with all of its
                // components 'missing'), and recover--without advancing the parser--
                // back to the level that the namespace was declared in.
                self.with_error(Errors::error1038);
                let missing1 = S!(make_missing, self, self.pos());
                let missing2 = S!(make_missing, self, self.pos());
                let missing3 = S!(make_missing, self, self.pos());
                S!(make_namespace_body, self, missing1, missing2, missing3)
            }
        }
    }
    /// Speculatively scans (on a clone, never advancing `self`) to decide
    /// whether a `use` statement is a *group* use clause.
    fn is_group_use(&self) -> bool {
        let mut parser = self.clone();
        // We want a heuristic to determine whether to parse the use clause as
        // a group use or normal use clause. We distinguish the two by (1) whether
        // there is a namespace prefix -- in this case it is definitely a group use
        // clause -- or, if there is a name followed by a curly. That's illegal, but
        // we should give an informative error message about that.
        parser.assert_token(TokenKind::Use);
        parser.parse_namespace_use_kind_opt();
        let token = parser.next_token();
        match token.kind() {
            TokenKind::Backslash => {
                let missing = S!(make_missing, parser, parser.pos());
                let backslash = S!(make_token, parser, token);
                let (_name, is_backslash) = parser.scan_qualified_name_extended(missing, backslash);
                is_backslash || parser.peek_token_kind() == TokenKind::LeftBrace
            }
            TokenKind::Name => {
                let token = S!(make_token, parser, token);
                let roken_ref = &token as *const _;
                let (name, is_backslash) = parser.scan_remaining_qualified_name_extended(token);
                // Here we rely on the implementation details of
                // scan_remaining_qualified_name_extended. It's returning
                // *exactly* token if there is nothing except it in the name.
                is_backslash && (&name as *const _ == roken_ref)
                    || parser.peek_token_kind() == TokenKind::LeftBrace
            }
            _ => false,
        }
    }
    /// Parses an optional namespace-use kind keyword; missing when the next
    /// token is none of `type`/`namespace`/`function`/`const`.
    fn parse_namespace_use_kind_opt(&mut self) -> S::R {
        // SPEC
        // namespace-use-kind:
        //   namespace
        //   function
        //   const
        let mut parser1 = self.clone();
        let token = parser1.next_token();
        match token.kind() {
            TokenKind::Type | TokenKind::Namespace | TokenKind::Function | TokenKind::Const => {
                self.continue_from(parser1);
                S!(make_token, self, token)
            }
            _ => S!(make_missing, self, self.pos()),
        }
    }
    /// Parses `use kind? Prefix\{ clause, ... };` — only called after
    /// `is_group_use` has decided this is the group form.
    fn parse_group_use(&mut self) -> S::R {
        // See below for grammar.
        let use_token = self.assert_token(TokenKind::Use);
        let use_kind = self.parse_namespace_use_kind_opt();
        // We already know that this is a name, qualified name, or prefix.
        // If this is not a prefix, it will be detected as an error in a later pass
        let prefix = self.scan_name_or_qualified_name();
        let (left, clauses, right) =
            self.parse_braced_comma_list_opt_allow_trailing(|x: &mut Self| {
                x.parse_namespace_use_clause()
            });
        let semi = self.require_semicolon();
        S!(
            make_namespace_group_use_declaration,
            self,
            use_token,
            use_kind,
            prefix,
            left,
            clauses,
            right,
            semi,
        )
    }
    /// Parses one use clause: `kind? qualified-name (as alias)?`.
    fn parse_namespace_use_clause(&mut self) -> S::R {
        // SPEC
        // namespace-use-clause:
        //   qualified-name namespace-aliasing-clauseopt
        // namespace-use-kind-clause:
        //   namespace-use-kind-opt qualified-name namespace-aliasing-clauseopt
        // namespace-aliasing-clause:
        //   as name
        //
        let use_kind = self.parse_namespace_use_kind_opt();
        let name = self.require_qualified_name();
        let (as_token, alias) = if self.peek_token_kind() == TokenKind::As {
            let as_token = self.next_token();
            let as_token = S!(make_token, self, as_token);
            let alias = self.require_xhp_class_name_or_name();
            (as_token, alias)
        } else {
            let missing1 = S!(make_missing, self, self.pos());
            let missing2 = S!(make_missing, self, self.pos());
            (missing1, missing2)
        };
        S!(
            make_namespace_use_clause,
            self,
            use_kind,
            name,
            as_token,
            alias
        )
    }
    /// Dispatches between the group and simple forms of a `use` declaration,
    /// based on the `is_group_use` lookahead.
    fn parse_namespace_use_declaration(&mut self) -> S::R {
        // SPEC
        // namespace-use-declaration:
        //   use namespace-use-kind-opt namespace-use-clauses ;
        //   use namespace-use-kind namespace-name-as-a-prefix
        //     { namespace-use-clauses } ;
        //   use namespace-name-as-a-prefix { namespace-use-kind-clauses } ;
        //
        // TODO: Add the grammar for the namespace-use-clauses; ensure that it
        // indicates that trailing commas are allowed in the list.
        //
        // ERROR RECOVERY
        // In the "simple" format, the kind may only be specified up front.
        //
        // The grammar in the specification says that in the "group"
        // format, if the kind is specified up front then it may not
        // be specified in each clause. However, HHVM's parser disallows
        // the kind in each clause regardless of whether it is specified up front.
        // We will fix the specification to match HHVM.
        //
        // The grammar in the specification also says that in the "simple" format,
        // the kind may only be specified up front. But HHVM allows the kind to
        // be specified in each clause. Again, we will fix the specification to match
        // HHVM.
        //
        // TODO: Update the grammar comment above when the specification is fixed.
        // (This work is being tracked by spec work items 102, 103 and 104.)
        //
        // We do not enforce these rules here. Rather, we allow the kind to be anywhere,
        // and detect the errors in a later pass.
        if self.is_group_use() {
            self.parse_group_use()
        } else {
            let use_token = self.assert_token(TokenKind::Use);
            let use_kind = self.parse_namespace_use_kind_opt();
            let (clauses, _) = self.parse_comma_list_allow_trailing(
                TokenKind::Semicolon,
                Errors::error1004,
                |x: &mut Self| x.parse_namespace_use_clause(),
            );
            let semi = self.require_semicolon();
            S!(
                make_namespace_use_declaration,
                self,
                use_token,
                use_kind,
                clauses,
                semi
            )
        }
    }
    /// Parses `namespace name;` or `namespace name? { ... }`, recovering
    /// with a missing name when the declaration is malformed.
    fn parse_namespace_declaration(&mut self) -> S::R {
        // SPEC
        // namespace-definition:
        //   namespace namespace-name ;
        //   namespace namespace-name-opt { declaration-list }
        //
        // TODO: An error case not caught by the parser that should be caught
        // in a later pass:
        // Qualified names are a superset of legal namespace names.
        let namespace_token = self.assert_token(TokenKind::Namespace);
        let name = match self.peek_token_kind() {
            TokenKind::Name => {
                let token = self.next_token();
                let token = S!(make_token, self, token);
                self.scan_remaining_qualified_name(token)
            }
            TokenKind::LeftBrace => S!(make_missing, self, self.pos()),
            TokenKind::Semicolon => {
                // ERROR RECOVERY Plainly the name is missing.
                self.with_error(Errors::error1004);
                S!(make_missing, self, self.pos())
            }
            _ =>
            // TODO: Death to PHPisms; keywords as namespace names
            {
                self.require_name_allow_non_reserved()
            }
        };
        let header = S!(
            make_namespace_declaration_header,
            self,
            namespace_token,
            name
        );
        let body = self.parse_namespace_body();
        S!(make_namespace_declaration, self, header, body)
    }
    /// Parses a class/trait/interface declaration (modifiers, optional `xhp`
    /// keyword, name, generics, extends/implements lists, where clause, body).
    pub fn parse_classish_declaration(&mut self, attribute_spec: S::R) -> S::R {
        let modifiers = self.parse_classish_modifiers();
        let (xhp, is_xhp_class) = self.parse_xhp_keyword();
        // Error on the XHP token unless it's been explicitly enabled
        if is_xhp_class && !self.env.enable_xhp_class_modifier {
            self.with_error(Errors::error1057("XHP"));
        }
        let token = self.parse_classish_token();
        let name = if is_xhp_class {
            self.require_xhp_class_name()
        } else {
            self.require_class_name()
        };
        let generic_type_parameter_list = self.with_type_parser(|p: &mut TypeParser<'a, S>| {
            p.parse_generic_type_parameter_list_opt()
        });
        let (classish_extends, classish_extends_list) = self.parse_extends_opt();
        let (classish_implements, classish_implements_list) = self.parse_classish_implements_opt();
        let classish_where_clause = self.parse_classish_where_clause_opt();
        let body = self.parse_classish_body();
        S!(
            make_classish_declaration,
            self,
            attribute_spec,
            modifiers,
            xhp,
            token,
            name,
            generic_type_parameter_list,
            classish_extends,
            classish_extends_list,
            classish_implements,
            classish_implements_list,
            classish_where_clause,
            body,
        )
    }
    // Optional `where` clause on a classish declaration.
    fn parse_classish_where_clause_opt(&mut self) -> S::R {
        if self.peek_token_kind() == TokenKind::Where {
            self.parse_where_clause()
        } else {
            S!(make_missing, self, self.pos())
        }
    }
    /// Optional `implements <type-list>`; both parts missing when absent.
    fn parse_classish_implements_opt(&mut self) -> (S::R, S::R) {
        if self.peek_token_kind() != TokenKind::Implements {
            let missing1 = S!(make_missing, self, self.pos());
            let missing2 = S!(make_missing, self, self.pos());
            (missing1, missing2)
        } else {
            let implements_token = self.next_token();
            let implements_token = S!(make_token, self, implements_token);
            let implements_list = self.parse_special_type_list();
            (implements_token, implements_list)
        }
    }
    /// Optional `includes <type-list>` (used by enum declarations);
    /// both parts missing when absent.
    fn parse_classish_includes_opt(&mut self) -> (S::R, S::R) {
        if self.peek_token_kind() != TokenKind::Includes {
            let missing1 = S!(make_missing, self, self.pos());
            let missing2 = S!(make_missing, self, self.pos());
            (missing1, missing2)
        } else {
            let includes_token = self.next_token();
            let includes_token = S!(make_token, self, includes_token);
            let includes_list = self.parse_special_type_list();
            (includes_token, includes_list)
        }
    }
    // Returns the (possibly missing) `xhp` token and whether it was present.
    fn parse_xhp_keyword(&mut self) -> (S::R, bool) {
        let xhp = self.optional_token(TokenKind::XHP);
        let is_missing = xhp.is_missing();
        (xhp, !is_missing)
    }
    /// Collects any run of `abstract`/`final` modifiers into a list node.
    fn parse_classish_modifiers(&mut self) -> S::R {
        let mut acc = vec![];
        loop {
            match self.peek_token_kind() {
                TokenKind::Abstract | TokenKind::Final => {
                    // TODO(T25649779)
                    let token = self.next_token();
                    let token = S!(make_token, self, token);
                    acc.push(token);
                }
                _ => return S!(make_list, self, acc, self.pos()),
            }
        }
    }
    /// Consumes the `class`/`trait`/`interface` keyword. A misspelled name
    /// close to one of those keywords is skipped with a spellcheck error;
    /// anything else produces error1035 and a missing token.
    fn parse_classish_token(&mut self) -> S::R {
        let spellcheck_tokens = vec![TokenKind::Class, TokenKind::Trait, TokenKind::Interface];
        let token_str = &self.current_token_text();
        let token_kind = self.peek_token_kind();
        match token_kind {
            TokenKind::Class | TokenKind::Trait | TokenKind::Interface => {
                let token = self.next_token();
                S!(make_token, self, token)
            }
            // Spellcheck case
            TokenKind::Name if Self::is_misspelled_from(&spellcheck_tokens, token_str) => {
                // Default won't be used, since we already checked is_misspelled_from
                let suggested_kind = Self::suggested_kind_from(&spellcheck_tokens, token_str)
                    .unwrap_or(TokenKind::Name);
                self.skip_and_log_misspelled_token(suggested_kind);
                S!(make_missing, self, self.pos())
            }
            _ => {
                self.with_error(Errors::error1035);
                S!(make_missing, self, self.pos())
            }
        }
    }
    /// Parses one element of an extends / implements / includes list. The
    /// returned flag is true when the caller should stop looping: either the
    /// item was not followed by a comma, or error recovery could not make
    /// progress.
    fn parse_special_type(&mut self) -> (S::R, bool) {
        // Speculatively read one token on a clone so that the error cases can
        // decide whether or not to consume it from `self`.
        let mut parser1 = self.clone();
        let token = parser1.next_xhp_class_name_or_other_token();
        match token.kind() {
            TokenKind::Comma => {
                // ERROR RECOVERY. We expected a type but we got a comma.
                // Give the error that we expected a type, not a name, even though
                // not every type is legal here.
                self.continue_from(parser1);
                self.with_error(Errors::error1007);
                let comma = S!(make_token, self, token);
                let missing = S!(make_missing, self, self.pos());
                let list_item = S!(make_list_item, self, missing, comma);
                // The comma was consumed, so the caller may keep looping.
                (list_item, false)
            }
            TokenKind::Backslash
            | TokenKind::Namespace
            | TokenKind::Name
            | TokenKind::XHPClassName => {
                // Normal case: a (possibly qualified or XHP) name; re-parse
                // it from `self` as a full type specifier.
                let item = self
                    .parse_type_specifier(false /* allow_var */, true /* allow_attr */);
                let comma = self.optional_token(TokenKind::Comma);
                let is_missing = comma.is_missing();
                let list_item = S!(make_list_item, self, item, comma);
                (list_item, is_missing)
            }
            TokenKind::Parent
            | TokenKind::Enum
            | TokenKind::RecordDec
            | TokenKind::Shape
            | TokenKind::SelfToken
                if self.env.hhvm_compat_mode =>
            {
                // HHVM allows these keywords here for some reason
                let item = self.parse_simple_type_or_type_constant();
                let comma = self.optional_token(TokenKind::Comma);
                let is_missing = comma.is_missing();
                let list_item = S!(make_list_item, self, item, comma);
                (list_item, is_missing)
            }
            _ => {
                // ERROR RECOVERY: We are expecting a type; give an error as above.
                // Don't eat the offending token.
                self.with_error(Errors::error1007);
                let missing1 = S!(make_missing, self, self.pos());
                let missing2 = S!(make_missing, self, self.pos());
                let list_item = S!(make_list_item, self, missing1, missing2);
                // Returning true stops the caller's loop so we don't spin on
                // the unconsumed token.
                (list_item, true)
            }
        }
    }
fn parse_special_type_list(&mut self) -> S::R {
// An extends / implements / includes list is a comma-separated list of types,
// but very special types; we want the types to consist of a name and an
// optional generic type argument list.
//
// TODO: Can the type name be of the form "foo::bar"? Those do not
// necessarily start with names. Investigate this.
//
// Normally we'd use one of the separated list helpers, but there is no
// specific end token we could use to detect the end of the list, and we
// want to bail out if we get something that is not a type of the right form.
// So we have custom logic here.
//
// TODO: This is one of the rare cases in Hack where a comma-separated list
// may not have a trailing comma. Is that desirable, or was that an
// oversight when the trailing comma rules were added? If possible we
// should keep the rule as-is, and disallow the trailing comma; it makes
// parsing and error recovery easier.
let mut items = vec![];
loop {
let (item, is_missing) = self.parse_special_type();
items.push(item);
if is_missing {
break;
}
}
S!(make_list, self, items, self.pos())
}
fn parse_extends_opt(&mut self) -> (S::R, S::R) {
let token_kind = self.peek_token_kind();
if token_kind != TokenKind::Extends {
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
(missing1, missing2)
} else {
let token = self.next_token();
let extends_token = S!(make_token, self, token);
let extends_list = self.parse_special_type_list();
(extends_token, extends_list)
}
}
fn parse_classish_body(&mut self) -> S::R {
let left_brace_token = self.require_left_brace();
let classish_element_list = self.parse_classish_element_list_opt();
let right_brace_token = self.require_right_brace();
S!(
make_classish_body,
self,
left_brace_token,
classish_element_list,
right_brace_token
)
}
fn parse_classish_element_list_opt(&mut self) -> S::R {
// TODO: ERROR RECOVERY: consider bailing if the token cannot possibly
// start a classish element.
// ERROR RECOVERY: we're in the body of a classish, so we add visibility
// modifiers to our context.
self.expect_in_new_scope(ExpectedTokens::Visibility);
let element_list = self.parse_terminated_list(
|x: &mut Self| x.parse_classish_element(),
TokenKind::RightBrace,
);
self.pop_scope(ExpectedTokens::Visibility);
element_list
}
fn parse_xhp_children_paren(&mut self) -> S::R {
// SPEC (Draft)
// ( xhp-children-expressions )
//
// xhp-children-expressions:
// xhp-children-expression
// xhp-children-expressions , xhp-children-expression
//
// TODO: The parenthesized list of children expressions is NOT allowed
// to be comma-terminated. Is this intentional? It is inconsistent with
// practice throughout the rest of Hack. There is no syntactic difficulty
// in allowing a comma before the close paren. Consider allowing it.
let (left, exprs, right) =
self.parse_parenthesized_comma_list(|x: &mut Self| x.parse_xhp_children_expression());
S!(
make_xhp_children_parenthesized_list,
self,
left,
exprs,
right
)
}
fn parse_xhp_children_term(&mut self) -> S::R {
// SPEC (Draft)
// xhp-children-term:
// ( xhp-children-expressions ) trailing-opt
// name trailing-opt
// xhp-class-name trailing-opt
// xhp-category-name trailing-opt
// trailing: * ? +
//
// name should be 'pcdata', 'any', or 'empty', however any name is
// currently permitted
//
// Note that there may be only zero or one trailing unary operator.
// "foo*?" is not a legal xhp child expression.
//
let mut parser1 = self.clone();
let token = parser1.next_xhp_children_name_or_other();
let kind = token.kind();
let name = S!(make_token, parser1, token);
match kind {
TokenKind::Name => {
self.continue_from(parser1);
self.parse_xhp_children_trailing(name)
}
TokenKind::XHPClassName | TokenKind::XHPCategoryName => {
self.continue_from(parser1);
self.parse_xhp_children_trailing(name)
}
TokenKind::LeftParen => {
let term = self.parse_xhp_children_paren();
self.parse_xhp_children_trailing(term)
}
_ => {
// ERROR RECOVERY: Eat the offending token, keep going.
self.with_error(Errors::error1053);
name
}
}
}
fn parse_xhp_children_trailing(&mut self, term: S::R) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Star | TokenKind::Plus | TokenKind::Question => {
let token = self.next_token();
let token = S!(make_token, self, token);
S!(make_postfix_unary_expression, self, term, token)
}
_ => term,
}
}
fn parse_xhp_children_bar(&mut self, left: S::R) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Bar => {
let token = self.next_token();
let token = S!(make_token, self, token);
let right = self.parse_xhp_children_term();
let result = S!(make_binary_expression, self, left, token, right);
self.parse_xhp_children_bar(result)
}
_ => left,
}
}
fn parse_xhp_children_expression(&mut self) -> S::R {
// SPEC (Draft)
// xhp-children-expression:
// xhp-children-term
// xhp-children-expression | xhp-children-term
//
// Note that the bar operator is left-associative. (Not that it matters
// semantically.
let term = self.parse_xhp_children_term();
self.parse_xhp_children_bar(term)
}
fn parse_xhp_children_declaration(&mut self) -> S::R {
// SPEC (Draft)
// xhp-children-declaration:
// children empty ;
// children xhp-children-expression ;
let children = self.assert_token(TokenKind::Children);
if self.env.disable_xhp_children_declarations {
self.with_error(Errors::error1064);
}
let token_kind = self.peek_token_kind();
let expr = match token_kind {
TokenKind::Empty => {
let token = self.next_token();
S!(make_token, self, token)
}
_ => self.parse_xhp_children_expression(),
};
let semi = self.require_semicolon();
S!(make_xhp_children_declaration, self, children, expr, semi)
}
fn parse_xhp_category(&mut self) -> S::R {
let token = self.next_xhp_category_name();
let token_kind = token.kind();
let category = S!(make_token, self, token);
match token_kind {
TokenKind::XHPCategoryName => category,
_ => {
self.with_error(Errors::error1052);
category
}
}
}
    /// Parses the type of an XHP attribute: either an inline
    /// `enum { value, ... }` of literal values or an ordinary type specifier.
    fn parse_xhp_type_specifier(&mut self) -> S::R {
        // SPEC (Draft)
        // xhp-type-specifier:
        //   enum { xhp-attribute-enum-list ,-opt }
        //   type-specifier
        //
        // The list of enum values must have at least one value and can be
        // comma-terminated.
        //
        // xhp-enum-list:
        //   xhp-attribute-enum-value
        //   xhp-enum-list , xhp-attribute-enum-value
        //
        // xhp-attribute-enum-value:
        //   any integer literal
        //   any single-quoted-string literal
        //   any double-quoted-string literal
        //
        // TODO: What are the semantics of encapsulated expressions in double-quoted
        // string literals here?
        // ERROR RECOVERY: We parse any expressions here;
        // TODO: give an error in a later pass if the expressions are not literals.
        // (This work is tracked by task T21175355)
        //
        // An empty list is illegal, but we allow it here and give an error in
        // a later pass.
        //
        // Speculatively read one token on a clone to decide between the enum
        // form and a plain type specifier; `self` only advances (via
        // continue_from) in the enum case.
        let mut parser1 = self.clone();
        let token = parser1.next_token();
        match token.kind() {
            TokenKind::Enum => {
                self.continue_from(parser1);
                let enum_token = S!(make_token, self, token);
                let (left_brace, values, right_brace) = self
                    .parse_braced_comma_list_opt_allow_trailing(|x: &mut Self| {
                        x.parse_expression()
                    });
                S!(
                    make_xhp_enum_type,
                    self,
                    enum_token,
                    left_brace,
                    values,
                    right_brace
                )
            }
            _ => self.parse_type_specifier(true, true),
        }
    }
fn parse_xhp_required_opt(&mut self) -> S::R {
// SPEC (Draft)
// xhp-required :
// @ (required | lateinit)
//
// Note that these are two tokens. They can have whitespace between them.
if self.peek_token_kind() == TokenKind::At {
let at = self.assert_token(TokenKind::At);
let req_kind = self.next_token();
let kind = req_kind.kind();
let req = S!(make_token, self, req_kind);
match kind {
TokenKind::Required => S!(make_xhp_required, self, at, req),
TokenKind::Lateinit => S!(make_xhp_lateinit, self, at, req),
_ => {
self.with_error(Errors::error1051);
S!(make_missing, self, self.pos())
}
}
} else {
S!(make_missing, self, self.pos())
}
}
fn parse_xhp_class_attribute_typed(&mut self) -> S::R {
// xhp-type-specifier xhp-name initializer-opt xhp-required-opt
let ty = self.parse_xhp_type_specifier();
let name = self.require_xhp_name();
let init = self.parse_simple_initializer_opt();
let req = self.parse_xhp_required_opt();
S!(make_xhp_class_attribute, self, ty, name, init, req)
}
fn parse_xhp_category_declaration(&mut self) -> S::R {
// SPEC (Draft)
// xhp-category-declaration:
// category xhp-category-list ,-opt ;
//
// xhp-category-list:
// xhp-category-name
// xhp-category-list , xhp-category-name
let category = self.assert_token(TokenKind::Category);
let (items, _) = self.parse_comma_list_allow_trailing(
TokenKind::Semicolon,
Errors::error1052,
|x: &mut Self| x.parse_xhp_category(),
);
let semi = self.require_semicolon();
S!(make_xhp_category_declaration, self, category, items, semi)
}
    /// Parses one XHP attribute declaration: either a bare XHP class name or
    /// a typed attribute (`type name initializer-opt required-opt`).
    fn parse_xhp_class_attribute(&mut self) -> S::R {
        // SPEC (Draft)
        // xhp-attribute-declaration:
        //   xhp-class-name
        //   xhp-type-specifier xhp-name initializer-opt xhp-required-opt
        //
        // ERROR RECOVERY:
        // The xhp type specifier could be an xhp class name. To disambiguate we peek
        // ahead a token; if it's a comma or semi, we're done. If not, then we assume
        // that we are in the more complex case.
        if self.is_next_xhp_class_name() {
            // Speculatively parse the class name on a clone; commit to `self`
            // (continue_from) only when the lookahead confirms the simple form.
            let mut parser1 = self.clone();
            let class_name = parser1.require_class_name();
            match parser1.peek_token_kind() {
                TokenKind::Comma | TokenKind::Semicolon => {
                    self.continue_from(parser1);
                    let type_specifier = S!(make_simple_type_specifier, self, class_name);
                    S!(make_xhp_simple_class_attribute, self, type_specifier)
                }
                // Not followed by `,`/`;` — re-parse from `self` as a typed
                // attribute (the speculative clone is discarded).
                _ => self.parse_xhp_class_attribute_typed(),
            }
        } else {
            self.parse_xhp_class_attribute_typed()
        }
    }
fn parse_xhp_class_attribute_declaration(&mut self) -> S::R {
// SPEC: (Draft)
// xhp-class-attribute-declaration :
// attribute xhp-attribute-declaration-list ;
//
// xhp-attribute-declaration-list:
// xhp-attribute-declaration
// xhp-attribute-declaration-list , xhp-attribute-declaration
//
// TODO: The list of attributes may NOT be terminated with a trailing comma
// before the semicolon. This is inconsistent with the rest of Hack.
// Allowing a comma before the semi does not introduce any syntactic
// difficulty; consider allowing it.
let attr_token = self.assert_token(TokenKind::Attribute);
// TODO: Better error message.
let attrs =
self.parse_comma_list(TokenKind::Semicolon, Errors::error1004, |x: &mut Self| {
x.parse_xhp_class_attribute()
});
let semi = self.require_semicolon();
S!(
make_xhp_class_attribute_declaration,
self,
attr_token,
attrs,
semi
)
}
fn parse_qualified_name_type(&mut self) -> S::R {
// Here we're parsing a name followed by an optional generic type
// argument list; if we don't have a name, give an error.
match self.peek_token_kind() {
TokenKind::Backslash | TokenKind::Name => self.parse_simple_type_or_generic(),
_ => self.require_qualified_name(),
}
}
fn parse_qualified_name_type_opt(&mut self) -> S::R {
// Here we're parsing a name followed by an optional generic type
// argument list; if we don't have a name, give an error.
match self.peek_token_kind() {
TokenKind::Backslash | TokenKind::Construct | TokenKind::Name => {
self.parse_simple_type_or_generic()
}
_ => S!(make_missing, self, self.pos()),
}
}
fn parse_require_clause(&mut self) -> S::R {
// SPEC
// require-extends-clause:
// require extends qualified-name ;
//
// require-implements-clause:
// require implements qualified-name ;
//
// We must also parse "require extends :foo;"
// TODO: What about "require extends :foo<int>;" ?
// TODO: The spec is incomplete; we need to be able to parse
// require extends Foo<int>;
// (This work is being tracked by spec issue 105.)
// TODO: Check whether we also need to handle
// require extends foo::bar
// and so on.
//
// ERROR RECOVERY: Detect if the implements/extends, name and semi are
// missing.
let req = self.assert_token(TokenKind::Require);
let token_kind = self.peek_token_kind();
let req_kind = match token_kind {
TokenKind::Implements | TokenKind::Extends => {
let req_kind_token = self.next_token();
S!(make_token, self, req_kind_token)
}
_ => {
self.with_error(Errors::error1045);
S!(make_missing, self, self.pos())
}
};
let name = if self.is_next_xhp_class_name() {
self.parse_simple_type_or_generic()
} else {
self.parse_qualified_name_type()
};
let semi = self.require_semicolon();
S!(make_require_clause, self, req, req_kind, name, semi)
}
    // This duplicates work from parse_methodish_or_const_or_type_const,
    // but this function is only invoked after an attribute spec, while
    // parse_methodish_or_const_or_type_const is called after a modifier.
    // Having this function prevents constants from having attributes as
    // this cannot be checked in parser_errors as there is no field in constant
    // declaration to store 'attributes'.
    fn parse_methodish_or_property_or_type_constant(&mut self, attribute_spec: S::R) -> S::R {
        // Look ahead past the modifiers on a clone: only `const type` and
        // `const ctx` are special-cased here. Everything else falls through
        // to parse_methodish_or_property, which re-parses the modifiers from
        // `self` (the clone is discarded in that case).
        let mut parser1 = self.clone();
        let modifiers = parser1.parse_modifiers();
        let current_token_kind = parser1.peek_token_kind();
        let next_token = parser1.peek_token_with_lookahead(1);
        let next_token_kind = next_token.kind();
        match (current_token_kind, next_token_kind) {
            (TokenKind::Const, TokenKind::Type) => {
                self.continue_from(parser1);
                let const_ = self.assert_token(TokenKind::Const);
                self.parse_type_const_declaration(attribute_spec, modifiers, const_)
            }
            (TokenKind::Const, TokenKind::Ctx) => {
                self.continue_from(parser1);
                let const_ = self.assert_token(TokenKind::Const);
                // NOTE(review): attribute_spec is not passed on this path —
                // the context-const node takes no attributes argument; confirm
                // this is intentional.
                self.parse_context_const_declaration(modifiers, const_)
            }
            _ => self.parse_methodish_or_property(attribute_spec),
        }
    }
    /// Parses a classish member that is either a method or a property,
    /// starting at the modifiers (the attribute spec has already been
    /// consumed by the caller).
    fn parse_methodish_or_property(&mut self, attribute_spec: S::R) -> S::R {
        let modifiers = self.parse_modifiers();
        // ERROR RECOVERY: match against two tokens, because if one token is
        // in error but the next isn't, then it's likely that the user is
        // simply still typing. Throw an error on what's being typed, then eat
        // it and keep going.
        let current_token_kind = self.peek_token_kind();
        let next_token = self.peek_token_with_lookahead(1);
        let next_token_kind = next_token.kind();
        match (current_token_kind, next_token_kind) {
            // Detected the usual start to a method, so continue parsing as method.
            (TokenKind::Async, _) | (TokenKind::Function, _) => {
                self.parse_methodish(attribute_spec, modifiers)
            }
            // A left paren here is handled by the property path.
            (TokenKind::LeftParen, _) => self.parse_property_declaration(attribute_spec, modifiers),
            // We encountered one unexpected token, but the next still indicates that
            // we should be parsing a methodish. Throw an error, process the token
            // as an extra, and keep going — but only when the `async`/`function`
            // keyword is on the same line as the bad token (no leading newline).
            (_, TokenKind::Async) | (_, TokenKind::Function)
                if !(next_token.has_leading_trivia_kind(TriviaKind::EndOfLine)) =>
            {
                self.with_error_on_whole_token(Errors::error1056);
                self.skip_and_log_unexpected_token(false);
                self.parse_methodish(attribute_spec, modifiers)
            }
            // Otherwise, continue parsing as a property (which might be a lambda).
            _ => self.parse_property_declaration(attribute_spec, modifiers),
        }
    }
fn parse_trait_use_precedence_item(&mut self, name: S::R) -> S::R {
let keyword = self.assert_token(TokenKind::Insteadof);
let removed_names = self.parse_trait_name_list(|x: TokenKind| x == TokenKind::Semicolon);
S!(
make_trait_use_precedence_item,
self,
name,
keyword,
removed_names
)
}
fn parse_trait_use_alias_item(&mut self, aliasing_name: S::R) -> S::R {
let keyword = self.require_token(TokenKind::As, Errors::expected_as_or_insteadof);
let modifiers = self.parse_modifiers();
let aliased_name = self.parse_qualified_name_type_opt();
S!(
make_trait_use_alias_item,
self,
aliasing_name,
keyword,
modifiers,
aliased_name
)
}
fn parse_trait_use_conflict_resolution_item(&mut self) -> S::R {
let qualifier = self.parse_qualified_name_type();
let name = if self.peek_token_kind() == TokenKind::ColonColon {
// scope resolution expression case
let cc_token = self.require_coloncolon();
let name = self
.require_token_one_of(&[TokenKind::Name, TokenKind::Construct], Errors::error1004);
S!(
make_scope_resolution_expression,
self,
qualifier,
cc_token,
name
)
} else {
// plain qualified name case
qualifier
};
match self.peek_token_kind() {
TokenKind::Insteadof => self.parse_trait_use_precedence_item(name),
TokenKind::As | _ => self.parse_trait_use_alias_item(name),
}
}
// SPEC:
// trait-use-conflict-resolution:
// use trait-name-list { trait-use-conflict-resolution-list }
//
// trait-use-conflict-resolution-list:
// trait-use-conflict-resolution-item
// trait-use-conflict-resolution-item trait-use-conflict-resolution-list
//
// trait-use-conflict-resolution-item:
// trait-use-alias-item
// trait-use-precedence-item
//
// trait-use-alias-item:
// trait-use-conflict-resolution-item-name as name;
// trait-use-conflict-resolution-item-name as visibility-modifier name;
// trait-use-conflict-resolution-item-name as visibility-modifier;
//
// trait-use-precedence-item:
// scope-resolution-expression insteadof trait-name-list
//
// trait-use-conflict-resolution-item-name:
// qualified-name
// scope-resolution-expression
fn parse_trait_use_conflict_resolution(
&mut self,
use_token: S::R,
trait_name_list: S::R,
) -> S::R {
let left_brace = self.assert_token(TokenKind::LeftBrace);
let clauses = self.parse_separated_list_opt(
TokenKind::Semicolon,
SeparatedListKind::TrailingAllowed,
TokenKind::RightBrace,
Errors::error1004,
|x: &mut Self| x.parse_trait_use_conflict_resolution_item(),
);
let right_brace = self.require_token(TokenKind::RightBrace, Errors::error1006);
S!(
make_trait_use_conflict_resolution,
self,
use_token,
trait_name_list,
left_brace,
clauses,
right_brace,
)
}
// SPEC:
// trait-use-clause:
// use trait-name-list ;
//
// trait-name-list:
// qualified-name generic-type-parameter-listopt
// trait-name-list , qualified-name generic-type-parameter-listopt
fn parse_trait_name_list<P>(&mut self, predicate: P) -> S::R
where
P: Fn(TokenKind) -> bool,
{
let (items, _, _) = self.parse_separated_list_predicate(
|x| x == TokenKind::Comma,
SeparatedListKind::NoTrailing,
predicate,
Errors::error1004,
|x: &mut Self| x.parse_qualified_name_type(),
);
items
}
fn parse_trait_use(&mut self) -> S::R {
let use_token = self.assert_token(TokenKind::Use);
let trait_name_list =
self.parse_trait_name_list(|x| x == TokenKind::Semicolon || x == TokenKind::LeftBrace);
if self.peek_token_kind() == TokenKind::LeftBrace {
self.parse_trait_use_conflict_resolution(use_token, trait_name_list)
} else {
let semi = self.require_semicolon();
S!(make_trait_use, self, use_token, trait_name_list, semi)
}
}
fn parse_property_declaration(&mut self, attribute_spec: S::R, modifiers: S::R) -> S::R {
// SPEC:
// property-declaration:
// attribute-spec-opt property-modifier type-specifier
// property-declarator-list ;
//
// property-declarator-list:
// property-declarator
// property-declarator-list , property-declarator
//
// The type specifier is optional in non-strict mode and required in
// strict mode. We give an error in a later pass.
let prop_type = match self.peek_token_kind() {
TokenKind::Variable => S!(make_missing, self, self.pos()),
_ => self.parse_type_specifier(false /* allow_var */, false /* allow_attr */),
};
let decls =
self.parse_comma_list(TokenKind::Semicolon, Errors::error1008, |x: &mut Self| {
x.parse_property_declarator()
});
let semi = self.require_semicolon();
S!(
make_property_declaration,
self,
attribute_spec,
modifiers,
prop_type,
decls,
semi
)
}
fn parse_property_declarator(&mut self) -> S::R {
// SPEC:
// property-declarator:
// variable-name property-initializer-opt
// property-initializer:
// = expression
let name = self.require_variable();
let simple_init = self.parse_simple_initializer_opt();
S!(make_property_declarator, self, name, simple_init)
}
fn is_type_in_const(&self) -> bool {
let mut parser1 = self.clone();
let _ = parser1.parse_type_specifier(false, true);
let _ = parser1.require_name_allow_all_keywords();
self.errors.len() == parser1.errors.len()
}
// SPEC:
// const-declaration:
// abstract_opt const type-specifier_opt constant-declarator-list ;
// visibility const type-specifier_opt constant-declarator-list ;
// constant-declarator-list:
// constant-declarator
// constant-declarator-list , constant-declarator
// constant-declarator:
// name constant-initializer_opt
// constant-initializer:
// = const-expression
fn parse_const_declaration(&mut self, modifiers: S::R, const_: S::R) -> S::R {
let type_spec = if self.is_type_in_const() {
self.parse_type_specifier(/* allow_var = */ false, /* allow_attr = */ true)
} else {
S!(make_missing, self, self.pos())
};
let const_list =
self.parse_comma_list(TokenKind::Semicolon, Errors::error1004, |x: &mut Self| {
x.parse_constant_declarator()
});
let semi = self.require_semicolon();
S!(
make_const_declaration,
self,
modifiers,
const_,
type_spec,
const_list,
semi
)
}
fn parse_constant_declarator(&mut self) -> S::R {
// TODO: We allow const names to be keywords here; in particular we
// require that const string TRUE = "true"; be legal. Likely this
// should be more strict. What are the rules for which keywords are
// legal constant names and which are not?
// Note that if this logic is changed, it should be changed in
// is_type_in_const above as well.
//
// This permits abstract variables to have an initializer, and vice-versa.
// This is deliberate, and those errors will be detected after the syntax
// tree is created.
let const_name = self.require_name_allow_all_keywords();
let initializer_ = self.parse_simple_initializer_opt();
S!(make_constant_declarator, self, const_name, initializer_)
}
// SPEC:
// type-constant-declaration:
// abstract-type-constant-declaration
// concrete-type-constant-declaration
// abstract-type-constant-declaration:
// abstract const type name type-constraintopt ;
// concrete-type-constant-declaration:
// const type name type-constraintopt = type-specifier ;
//
// ERROR RECOVERY:
//
// An abstract type constant may only occur in an interface or an abstract
// class. We allow that to be parsed here, and the type checker detects the
// error.
// CONSIDER: We could detect this error in a post-parse pass; it is entirely
// syntactic. Consider moving the error detection out of the type checker.
//
// An interface may not contain a non-abstract type constant that has a
// type constraint. We allow that to be parsed here, and the type checker
// detects the error.
// CONSIDER: We could detect this error in a post-parse pass; it is entirely
// syntactic. Consider moving the error detection out of the type checker.
fn parse_type_const_declaration(
&mut self,
attributes: S::R,
modifiers: S::R,
const_: S::R,
) -> S::R {
let type_token = self.assert_token(TokenKind::Type);
let name = self.require_name_allow_non_reserved();
let generic_type_parameter_list = self.with_type_parser(|p: &mut TypeParser<'a, S>| {
p.parse_generic_type_parameter_list_opt()
});
let type_constraint = self.parse_type_constraint_opt();
let (equal_token, type_specifier) = if self.peek_token_kind() == TokenKind::Equal {
let equal_token = self.assert_token(TokenKind::Equal);
let type_spec = self
.parse_type_specifier(/* allow_var = */ false, /* allow_attr = */ true);
(equal_token, type_spec)
} else {
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
(missing1, missing2)
};
let semicolon = self.require_semicolon();
S!(
make_type_const_declaration,
self,
attributes,
modifiers,
const_,
type_token,
name,
generic_type_parameter_list,
type_constraint,
equal_token,
type_specifier,
semicolon,
)
}
fn parse_context_const_declaration(&mut self, modifiers: S::R, const_: S::R) -> S::R {
// SPEC
// context-constant-declaration:
// abstract-context-constant-declaration
// concrete-context-constant-declaration
// abstract-context-constant-declaration:
// abstract const ctx name context-constraint* ;
// concrete-context-constant-declaration:
// const ctx name context-constraint* = context-list ;
let ctx_keyword = self.assert_token(TokenKind::Ctx);
let name = self.require_name_allow_non_reserved();
let (tparam_list, ctx_constraints) = self.with_type_parser(|p| {
(
p.parse_generic_type_parameter_list_opt(),
p.parse_list_until_none(|p| p.parse_context_constraint_opt()),
)
});
let (equal, ctx_list) = if self.peek_token_kind() == TokenKind::Equal {
let equal = self.assert_token(TokenKind::Equal);
let ctx_list = self.with_type_parser(|p| p.parse_contexts());
(equal, ctx_list)
} else {
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
(missing1, missing2)
};
let semicolon = self.require_semicolon();
S!(
make_context_const_declaration,
self,
modifiers,
const_,
ctx_keyword,
name,
tparam_list,
ctx_constraints,
equal,
ctx_list,
semicolon,
)
}
// SPEC:
// attribute_specification :=
// attribute_list
// old_attribute_specification
// attribute_list :=
// attribute
// attribute_list attribute
// attribute := @ attribute_name attribute_value_list_opt
// old_attribute_specification := << old_attribute_list >>
// old_attribute_list :=
// old_attribute
// old_attribute_list , old_attribute
// old_attribute := attribute_name attribute_value_list_opt
// attribute_name := name
// attribute_value_list := ( attribute_values_opt )
// attribute_values :=
// attribute_value
// attribute_values , attribute_value
// attribute_value := expression
//
// TODO: The list of attrs can have a trailing comma. Update the spec.
// TODO: The list of values can have a trailing comma. Update the spec.
// (Both these work items are tracked by spec issue 106.)
pub fn parse_old_attribute_specification_opt(&mut self) -> S::R {
if self.peek_token_kind() == TokenKind::LessThanLessThan {
let (left, items, right) =
self.parse_double_angled_comma_list_allow_trailing(|x: &mut Self| {
x.parse_old_attribute()
});
S!(make_old_attribute_specification, self, left, items, right)
} else {
S!(make_missing, self, self.pos())
}
}
fn parse_file_attribute_specification_opt(&mut self) -> S::R {
if self.peek_token_kind() == TokenKind::LessThanLessThan {
let left = self.assert_token(TokenKind::LessThanLessThan);
let keyword = self.assert_token(TokenKind::File);
let colon = self.require_colon();
let (items, _) = self.parse_comma_list_allow_trailing(
TokenKind::GreaterThanGreaterThan,
Errors::expected_user_attribute,
|x: &mut Self| x.parse_old_attribute(),
);
let right = self.require_token(TokenKind::GreaterThanGreaterThan, Errors::error1029);
S!(
make_file_attribute_specification,
self,
left,
keyword,
colon,
items,
right
)
} else {
S!(make_missing, self, self.pos())
}
}
fn parse_return_type_hint_opt(&mut self) -> (S::R, S::R) {
let token_kind = self.peek_token_kind();
if token_kind == TokenKind::Colon {
let token = self.next_token();
let colon_token = S!(make_token, self, token);
let return_type =
self.with_type_parser(|p: &mut TypeParser<'a, S>| p.parse_return_type());
(colon_token, return_type)
} else {
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
(missing1, missing2)
}
}
    /// Parses a parenthesized, possibly empty, comma-separated parameter
    /// list (trailing comma allowed), returning the left paren, the list,
    /// and the right paren.
    pub fn parse_parameter_list_opt(&mut self) -> (S::R, S::R, S::R) {
        // SPEC
        // TODO: The specification is wrong in several respects concerning
        // variadic parameters. Variadic parameters are permitted to have a
        // type and name but this is not mentioned in the spec. And variadic
        // parameters are not mentioned at all in the grammar for constructor
        // parameter lists. (This is tracked by spec issue 107.)
        //
        // parameter-list:
        //   variadic-parameter
        //   parameter-declaration-list
        //   parameter-declaration-list ,
        //   parameter-declaration-list , variadic-parameter
        //
        // parameter-declaration-list:
        //   parameter-declaration
        //   parameter-declaration-list , parameter-declaration
        //
        // variadic-parameter:
        //   ...
        //   attribute-specification-opt visiblity-modifier-opt type-specifier \
        //     ... variable-name
        //
        // This function parses the parens as well.
        // ERROR RECOVERY: We allow variadic parameters in all positions; a later
        // pass gives an error if a variadic parameter is in an incorrect position
        // or followed by a trailing comma, or if the parameter has a
        // default value.
        self.parse_parenthesized_comma_list_opt_allow_trailing(|x: &mut Self| x.parse_parameter())
    }
    /// Parses a single parameter. A bare `...` (not followed by a variable)
    /// becomes a variadic-parameter node; everything else, including
    /// `...$args`, goes through parse_parameter_declaration.
    fn parse_parameter(&mut self) -> S::R {
        // Speculatively consume one token on a clone to look past a `...`.
        let mut parser1 = self.clone();
        let token = parser1.next_token();
        match token.kind() {
            TokenKind::DotDotDot => {
                let next_kind = parser1.peek_token_kind();
                if next_kind == TokenKind::Variable {
                    // `...$x`: re-parse from `self`, which has not consumed
                    // the `...` token.
                    self.parse_parameter_declaration()
                } else {
                    // Bare `...`: the missing nodes are made on parser1 (which
                    // has consumed the token) at `self`'s pre-consumption
                    // position, before committing with continue_from.
                    let missing1 = S!(make_missing, parser1, self.pos());
                    let missing2 = S!(make_missing, parser1, self.pos());
                    self.continue_from(parser1);
                    let token = S!(make_token, self, token);
                    S!(make_variadic_parameter, self, missing1, missing2, token)
                }
            }
            _ => self.parse_parameter_declaration(),
        }
    }
    // Parses a full parameter declaration: attributes, visibility, call
    // convention, type specifier, (possibly decorated) name, and default
    // initializer — each component optional where the grammar allows it.
    fn parse_parameter_declaration(&mut self) -> S::R {
        // SPEC
        //
        // TODO: Add call-convention-opt to the specification.
        // (This work is tracked by task T22582676.)
        //
        // TODO: Update grammar for inout parameters.
        // (This work is tracked by task T22582715.)
        //
        // parameter-declaration:
        //   attribute-specification-opt \
        //   call-convention-opt \
        //   type-specifier  variable-name \
        //   default-argument-specifier-opt
        //
        // ERROR RECOVERY
        // In strict mode, we require a type specifier. This error is not caught
        // at parse time but rather by a later pass.
        // Visibility modifiers are only legal in constructor parameter
        // lists; we give an error in a later pass.
        // Variadic params cannot be declared inout; we permit that here but
        // give an error in a later pass.
        // Variadic params and inout params cannot have default values; these
        // errors are also reported in a later pass.
        let attrs = self.parse_attribute_specification_opt();
        let visibility = self.parse_visibility_modifier_opt();
        let callconv = self.parse_call_convention_opt();
        let token = self.peek_token();
        // If the variable name (or `...`) comes immediately, the type was
        // omitted; represent it as a missing node rather than consuming input.
        let type_specifier = match token.kind() {
            TokenKind::Variable | TokenKind::DotDotDot => S!(make_missing, self, self.pos()),
            _ => {
                self.parse_type_specifier(/* allow_var = */ false, /* allow_attr */ false)
            }
        };
        let name = self.parse_decorated_variable_opt();
        let default = self.parse_simple_initializer_opt();
        S!(
            make_parameter_declaration,
            self,
            attrs,
            visibility,
            callconv,
            type_specifier,
            name,
            default
        )
    }
fn parse_decorated_variable_opt(&mut self) -> S::R {
match self.peek_token_kind() {
TokenKind::DotDotDot => self.parse_decorated_variable(),
_ => self.require_variable(),
}
}
    // TODO: This is wrong. The variable here is not an *expression* that has
    // an optional decoration on it. It's a declaration. We shouldn't be using the
    // same data structure for a decorated expression as a declaration; one
    // is a *use* and the other is a *definition*.
    // Parses a `...`-decorated variable. Recurses on further `...` tokens, so
    // multiple decorators are accepted here; the excess is reported later.
    fn parse_decorated_variable(&mut self) -> S::R {
        // ERROR RECOVERY
        // Detection of (variadic, byRef) inout params happens in post-parsing.
        // Although a parameter can have at most one variadic/reference decorator,
        // we deliberately allow multiple decorators in the initial parse and produce
        // an error in a later pass.
        let decorator = self.fetch_token();
        let variable = match self.peek_token_kind() {
            TokenKind::DotDotDot => self.parse_decorated_variable(),
            _ => self.require_variable(),
        };
        S!(make_decorated_expression, self, decorator, variable)
    }
fn parse_visibility_modifier_opt(&mut self) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Public | TokenKind::Protected | TokenKind::Private => {
let token = self.next_token();
S!(make_token, self, token)
}
_ => S!(make_missing, self, self.pos()),
}
}
// SPEC
//
// TODO: Add this to the specification.
// (This work is tracked by task T22582676.)
//
// call-convention:
// inout
fn parse_call_convention_opt(&mut self) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Inout => {
let token = self.next_token();
S!(make_token, self, token)
}
_ => S!(make_missing, self, self.pos()),
}
}
// SPEC
// default-argument-specifier:
// = const-expression
//
// constant-initializer:
// = const-expression
fn parse_simple_initializer_opt(&mut self) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Equal => {
let token = self.next_token();
// TODO: Detect if expression is not const
let token = S!(make_token, self, token);
let default_value = self.parse_expression();
S!(make_simple_initializer, self, token, default_value)
}
_ => S!(make_missing, self, self.pos()),
}
}
    // Parses a function declaration: modifiers, the declaration header, and a
    // compound-statement body, combined with the caller-supplied attributes.
    pub fn parse_function_declaration(&mut self, attribute_specification: S::R) -> S::R {
        let modifiers = self.parse_modifiers();
        let header =
            self.parse_function_declaration_header(modifiers, /* is_methodish =*/ false);
        let body = self.parse_compound_statement();
        S!(
            make_function_declaration,
            self,
            attribute_specification,
            header,
            body
        )
    }
fn parse_constraint_operator(&mut self) -> S::R {
// TODO: Put this in the specification
// (This work is tracked by spec issue 100.)
// constraint-operator:
// =
// as
// super
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Equal | TokenKind::As | TokenKind::Super => {
let token = self.next_token();
S!(make_token, self, token)
}
_ =>
// ERROR RECOVERY: don't eat the offending token.
// TODO: Give parse error
{
S!(make_missing, self, self.pos())
}
}
}
fn parse_where_constraint(&mut self) -> S::R {
// TODO: Put this in the specification
// (This work is tracked by spec issue 100.)
// constraint:
// type-specifier constraint-operator type-specifier
let left =
self.parse_type_specifier(/* allow_var = */ false, /* allow_attr = */ true);
let op = self.parse_constraint_operator();
let right =
self.parse_type_specifier(/* allow_var = */ false, /* allow_attr = */ true);
S!(make_where_constraint, self, left, op, right)
}
fn parse_where_constraint_list_item(&mut self) -> Option<S::R> {
match self.peek_token_kind() {
TokenKind::Semicolon | TokenKind::LeftBrace => None,
_ => {
let where_constraint = self.parse_where_constraint();
let comma = self.optional_token(TokenKind::Comma);
let result = S!(make_list_item, self, where_constraint, comma);
Some(result)
}
}
}
fn parse_where_clause(&mut self) -> S::R {
// TODO: Add this to the specification
// (This work is tracked by spec issue 100.)
// where-clause:
// where constraint-list
//
// constraint-list:
// constraint
// constraint-list , constraint
let keyword = self.assert_token(TokenKind::Where);
let constraints =
self.parse_list_until_none(|x: &mut Self| x.parse_where_constraint_list_item());
S!(make_where_clause, self, keyword, constraints)
}
fn parse_where_clause_opt(&mut self) -> S::R {
if self.peek_token_kind() != TokenKind::Where {
S!(make_missing, self, self.pos())
} else {
self.parse_where_clause()
}
}
    // Parses everything of a function/method up to (but excluding) the body:
    // `function name<T>(params)[ctx]: return-type where-clause`. `modifiers`
    // were parsed by the caller; `is_methodish` loosens name parsing.
    fn parse_function_declaration_header(&mut self, modifiers: S::R, is_methodish: bool) -> S::R {
        // SPEC
        // function-definition-header:
        //   attribute-specification-opt async-opt function name /
        //   generic-type-parameter-list-opt ( parameter-list-opt ) : /
        //   return-type where-clause-opt
        // TODO: The spec does not specify "where" clauses. Add them.
        // (This work is tracked by spec issue 100.)
        //
        // In strict mode, we require a type specifier. This error is not caught
        // at parse time but rather by a later pass.
        let function_token = self.require_function();
        let label = self.parse_function_label_opt(is_methodish);
        // Generics and contexts are handled by the type parser.
        let generic_type_parameter_list = self.with_type_parser(|p: &mut TypeParser<'a, S>| {
            p.parse_generic_type_parameter_list_opt()
        });
        let (left_paren_token, parameter_list, right_paren_token) = self.parse_parameter_list_opt();
        let contexts = self.with_type_parser(|p: &mut TypeParser<'a, S>| p.parse_contexts());
        let (colon_token, return_type) = self.parse_return_type_hint_opt();
        let where_clause = self.parse_where_clause_opt();
        S!(
            make_function_declaration_header,
            self,
            modifiers,
            function_token,
            label,
            generic_type_parameter_list,
            left_paren_token,
            parameter_list,
            right_paren_token,
            contexts,
            colon_token,
            return_type,
            where_clause,
        )
    }
// A function label is either a function name or a __construct label.
    // Parses the function's name (or `__construct`); a missing node when the
    // next token is `(` (an anonymous function). `is_methodish` allows
    // reserved words as method names.
    fn parse_function_label_opt(&mut self, is_methodish: bool) -> S::R {
        // Reports error1044 and wraps the offending token in an error node.
        let report_error = |x: &mut Self, token: Token<S>| {
            x.with_error(Errors::error1044);
            let token = S!(make_token, x, token);
            S!(make_error, x, token)
        };
        let token_kind = self.peek_token_kind();
        match token_kind {
            TokenKind::Name | TokenKind::Construct => {
                let token = self.next_token();
                S!(make_token, self, token)
            }
            TokenKind::LeftParen => {
                // It turns out, it was just a verbose lambda; YOLO PHP
                S!(make_missing, self, self.pos())
            }
            TokenKind::Isset | TokenKind::Unset | TokenKind::Empty => {
                // We need to parse those as names as they are defined in hhi
                let token = self.next_token_as_name();
                S!(make_token, self, token)
            }
            _ => {
                // Methods may use any keyword as a name; top-level functions
                // may only use non-reserved keywords.
                let token = if is_methodish {
                    self.next_token_as_name()
                } else {
                    self.next_token_non_reserved_as_name()
                };
                if token.kind() == TokenKind::Name {
                    S!(make_token, self, token)
                } else {
                    // ERROR RECOVERY: Eat the offending token.
                    report_error(self, token)
                }
            }
        }
    }
    // Parses a single old-style (`<<...>>`) attribute, which has the shape of
    // a constructor call, via the expression parser.
    fn parse_old_attribute(&mut self) -> S::R {
        self.with_expression_parser(|p: &mut ExpressionParser<'a, S>| p.parse_constructor_call())
    }
pub fn parse_attribute_specification_opt(&mut self) -> S::R {
match self.peek_token_kind() {
TokenKind::At if self.env.allow_new_attribute_syntax => {
self.parse_new_attribute_specification_opt()
}
TokenKind::LessThanLessThan => self.parse_old_attribute_specification_opt(),
_ => S!(make_missing, self, self.pos()),
}
}
fn parse_new_attribute_specification_opt(&mut self) -> S::R {
let attributes = self.parse_list_while(
|p: &mut Self| p.parse_new_attribute(),
|p: &Self| p.peek_token_kind() == TokenKind::At,
);
S!(make_attribute_specification, self, attributes)
}
fn parse_new_attribute(&mut self) -> S::R {
let at = self.assert_token(TokenKind::At);
let token = self.peek_token();
let constructor_call = match token.kind() {
TokenKind::Name => self.with_expression_parser(|p: &mut ExpressionParser<'a, S>| {
p.parse_constructor_call()
}),
_ => {
self.with_error(Errors::expected_user_attribute);
S!(make_missing, self, self.pos())
}
};
S!(make_attribute, self, at, constructor_call)
}
// Parses modifiers and passes them into the parse methods for the
// respective class body element.
    fn parse_methodish_or_property_or_const_or_type_const(&mut self) -> S::R {
        // Parse modifiers on a clone and disambiguate with three tokens of
        // lookahead. Some arms commit the clone via `continue_from`; others
        // deliberately re-parse the modifiers on `self`.
        let mut parser1 = self.clone();
        let modifiers = parser1.parse_modifiers();
        let kind0 = parser1.peek_token_kind_with_lookahead(0);
        let kind1 = parser1.peek_token_kind_with_lookahead(1);
        let kind2 = parser1.peek_token_kind_with_lookahead(2);
        match (kind0, kind1, kind2) {
            // `const type ;` — a constant whose name is the keyword `type`.
            (TokenKind::Const, TokenKind::Type, TokenKind::Semicolon) => {
                self.continue_from(parser1);
                let const_ = self.assert_token(TokenKind::Const);
                self.parse_const_declaration(modifiers, const_)
            }
            // `const type T ...` (not `const type = ...`) — a type constant.
            (TokenKind::Const, TokenKind::Type, _) if kind2 != TokenKind::Equal => {
                let attributes = S!(make_missing, self, self.pos());
                let modifiers = self.parse_modifiers();
                let const_ = self.assert_token(TokenKind::Const);
                self.parse_type_const_declaration(attributes, modifiers, const_)
            }
            // `const ctx ...` (not `const ctx = ...`) — a context constant.
            (TokenKind::Const, TokenKind::Ctx, _) if kind2 != TokenKind::Equal => {
                let modifiers = self.parse_modifiers();
                let const_ = self.assert_token(TokenKind::Const);
                self.parse_context_const_declaration(modifiers, const_)
            }
            // Any other `const ...` — an ordinary class constant.
            (TokenKind::Const, _, _) => {
                self.continue_from(parser1);
                let const_ = self.assert_token(TokenKind::Const);
                self.parse_const_declaration(modifiers, const_)
            }
            // Otherwise it's a method or a property (no attributes here).
            _ => {
                let missing = S!(make_missing, self, self.pos());
                self.parse_methodish_or_property(missing)
            }
        }
    }
// SPEC
// method-declaration:
// attribute-spec-opt method-modifiers function-definition
// attribute-spec-opt method-modifiers function-definition-header ;
// method-modifiers:
// method-modifier
// method-modifiers method-modifier
// method-modifier:
// visibility-modifier (i.e. private, public, protected)
// static
// abstract
// final
    // Parses a method after its attributes and modifiers: the header, then
    // either a `{...}` body, a bodyless `;` form, a trait-resolution
    // `= Qualifier::name;` form, or error recovery on anything else.
    fn parse_methodish(&mut self, attribute_spec: S::R, modifiers: S::R) -> S::R {
        let header =
            self.parse_function_declaration_header(modifiers, /* is_methodish:*/ true);
        let token_kind = self.peek_token_kind();
        match token_kind {
            // Concrete method: a compound-statement body, no semicolon.
            TokenKind::LeftBrace => {
                let body = self.parse_compound_statement();
                let missing = S!(make_missing, self, self.pos());
                S!(
                    make_methodish_declaration,
                    self,
                    attribute_spec,
                    header,
                    body,
                    missing
                )
            }
            // Bodyless method (e.g. abstract): missing body, real semicolon.
            TokenKind::Semicolon => {
                let pos = self.pos();
                let token = self.next_token();
                let missing = S!(make_missing, self, pos);
                let semicolon = S!(make_token, self, token);
                S!(
                    make_methodish_declaration,
                    self,
                    attribute_spec,
                    header,
                    missing,
                    semicolon
                )
            }
            // Trait resolution: `... = Qualifier::name;`.
            TokenKind::Equal => {
                let equal = self.assert_token(TokenKind::Equal);
                let qualifier = self.parse_qualified_name_type();
                let cc_token = self.require_coloncolon();
                let name = self.require_token_one_of(
                    &[TokenKind::Name, TokenKind::Construct],
                    Errors::error1004,
                );
                let name = S!(
                    make_scope_resolution_expression,
                    self,
                    qualifier,
                    cc_token,
                    name
                );
                let semi = self.require_semicolon();
                S!(
                    make_methodish_trait_resolution,
                    self,
                    attribute_spec,
                    header,
                    equal,
                    name,
                    semi
                )
            }
            _ => {
                // ERROR RECOVERY: We expected either a block or a semicolon; we got
                // neither. Use the offending token as the body of the method.
                // TODO: Is this the right error recovery?
                let pos = self.pos();
                let token = self.next_token();
                self.with_error(Errors::error1041);
                let token = S!(make_token, self, token);
                let error = S!(make_error, self, token);
                let missing = S!(make_missing, self, pos);
                S!(
                    make_methodish_declaration,
                    self,
                    attribute_spec,
                    header,
                    error,
                    missing
                )
            }
        }
    }
fn parse_modifiers(&mut self) -> S::R {
let mut items = vec![];
loop {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Abstract
| TokenKind::Static
| TokenKind::Public
| TokenKind::Protected
| TokenKind::Private
| TokenKind::Async
| TokenKind::Final => {
let token = self.next_token();
let item = S!(make_token, self, token);
items.push(item)
}
_ => break,
}
}
S!(make_list, self, items, self.pos())
}
    // Parses a top-level declaration that may carry attributes: enum, alias,
    // function, or classish (interface/trait/class), dispatching on the token
    // following the optional attribute specification.
    fn parse_enum_or_classish_or_function_declaration(&mut self) -> S::R {
        // An enum, type alias, function, interface, trait or class may all
        // begin with an attribute.
        let attribute_specification = match self.peek_token_kind() {
            TokenKind::At | TokenKind::LessThanLessThan => self.parse_attribute_specification_opt(),
            _ => S!(make_missing, self, self.pos()),
        };
        match self.peek_token_kind() {
            TokenKind::Enum => self.parse_enum_or_enum_class_declaration(attribute_specification),
            TokenKind::Type | TokenKind::Newtype => {
                self.parse_alias_declaration(attribute_specification)
            }
            TokenKind::Async | TokenKind::Function => {
                if attribute_specification.is_missing() {
                    // if attribute section is missing - it might be either
                    // function declaration or expression statement containing
                    // anonymous function - use statement parser to determine in which case
                    // we are currently in
                    self.with_statement_parser(|p: &mut StatementParser<'a, S>| {
                        p.parse_possible_php_function(/* toplevel=*/ true)
                    })
                } else {
                    self.parse_function_declaration(attribute_specification)
                }
            }
            TokenKind::Abstract
            | TokenKind::Final
            | TokenKind::Interface
            | TokenKind::Trait
            | TokenKind::XHP
            | TokenKind::Class => self.parse_classish_declaration(attribute_specification),
            _ => {
                // ERROR RECOVERY: we encountered an unexpected token, raise an error and continue
                // TODO: This is wrong; we have lost the attribute specification
                // from the tree.
                let token = self.next_token();
                self.with_error(Errors::error1057(self.token_text(&token)));
                let token = S!(make_token, self, token);
                S!(make_error, self, token)
            }
        }
    }
    // Parses one element of a class/trait/interface body, dispatching on the
    // leading token; see the grammar sketch below for the possibilities.
    fn parse_classish_element(&mut self) -> S::R {
        // We need to identify an element of a class, trait, etc. Possibilities
        // are:
        //
        // // constant-declaration:
        // const T $x = v ;
        // abstract const T $x ;
        // public const T $x = v ; // PHP7 only
        //
        // // type-constant-declaration
        // const type T = X;
        // abstract const type T;
        //
        // // property-declaration:
        // public/private/protected/static T $x;
        // TODO: We may wish to parse "T $x" and give an error indicating
        // TODO: that we were expecting either const or public.
        // Note that a visibility modifier is required; static is optional;
        // any order is allowed.
        //
        // // method-declaration
        // <<attr>> public/private/protected/abstract/final/static async function
        // Note that a modifier is required, the attr and async are optional.
        // TODO: Hack requires a visibility modifier, unless "static" is supplied,
        // TODO: in which case the method is considered to be public. Is this
        // TODO: desired? Resolve this disagreement with the spec.
        //
        // // constructor-declaration
        // <<attr>> public/private/protected/abstract/final function __construct
        // Note that we allow static constructors in this parser; we produce an
        // error in the post-parse error detection pass.
        //
        // // trait clauses
        // require extends qualified-name
        // require implements qualified-name
        //
        // // XHP class attribute declaration
        // attribute ... ;
        //
        // // XHP category declaration
        // category ... ;
        //
        // // XHP children declaration
        // children ... ;
        match self.peek_token_kind() {
            TokenKind::Children => self.parse_xhp_children_declaration(),
            TokenKind::Category => self.parse_xhp_category_declaration(),
            TokenKind::Use => self.parse_trait_use(),
            TokenKind::Const
            | TokenKind::Abstract
            | TokenKind::Public
            | TokenKind::Protected
            | TokenKind::Private
            | TokenKind::Static => self.parse_methodish_or_property_or_const_or_type_const(),
            TokenKind::Async | TokenKind::Final | TokenKind::LessThanLessThan => {
                // Parse methods, constructors, properties, type constants,
                let attr = self.parse_attribute_specification_opt();
                self.parse_methodish_or_property_or_type_constant(attr)
            }
            TokenKind::At if self.env.allow_new_attribute_syntax => {
                let attr = self.parse_attribute_specification_opt();
                self.parse_methodish_or_property_or_type_constant(attr)
            }
            TokenKind::Require => {
                // We give an error if these are found where they should not be,
                // in a later pass.
                self.parse_require_clause()
            }
            TokenKind::Attribute => self.parse_xhp_class_attribute_declaration(),
            TokenKind::Function => {
                // ERROR RECOVERY
                // Hack requires that a function inside a class be marked
                // with a visibility modifier, but PHP does not have this requirement.
                // We accept the lack of a modifier here, and produce an error in
                // a later pass.
                let missing1 = S!(make_missing, self, self.pos());
                let missing2 = S!(make_missing, self, self.pos());
                self.parse_methodish(missing1, missing2)
            }
            // A token the enclosing construct expects (e.g. its terminator):
            // yield a missing element so the caller can end the body cleanly.
            kind if self.expects(kind) => S!(make_missing, self, self.pos()),
            _ => {
                // If this is a property declaration which is missing its visibility
                // modifier, accept it here, but emit an error in a later pass.
                // Try on a clone; commit only if no new errors were produced.
                let mut parser1 = self.clone();
                let missing1 = S!(make_missing, parser1, self.pos());
                let missing2 = S!(make_missing, parser1, self.pos());
                let property = parser1.parse_property_declaration(missing1, missing2);
                if self.errors.len() == parser1.errors.len() {
                    self.continue_from(parser1);
                    property
                } else {
                    // TODO ERROR RECOVERY could be improved here.
                    let token = self.fetch_token();
                    self.with_error(Errors::error1033);
                    S!(make_error, self, token)
                    // Parser does not detect the error where non-static instance variables
                    // or methods are within abstract final classes in its first pass, but
                    // instead detects it in its second pass.
                }
            }
        }
    }
    // Delegates to the type parser for an optional type constraint.
    fn parse_type_constraint_opt(&mut self) -> S::R {
        self.with_type_parser(|p: &mut TypeParser<'a, S>| p.parse_type_constraint_opt())
    }
    // Parses a `type`/`newtype` alias declaration following the optional
    // attributes supplied by the caller.
    fn parse_alias_declaration(&mut self, attr: S::R) -> S::R {
        // SPEC
        // alias-declaration:
        //   attribute-spec-opt type name
        //     generic-type-parameter-list-opt = type-specifier ;
        //   attribute-spec-opt newtype name
        //     generic-type-parameter-list-opt type-constraint-opt
        //       = type-specifier ;
        // The `type` or `newtype` keyword itself (already verified by the
        // caller's dispatch).
        let token = self.fetch_token();
        // Not `require_name` but `require_name_allow_non_reserved`, because the parser
        // must allow keywords in the place of identifiers; at least to parse .hhi
        // files.
        let name = self.require_name_allow_non_reserved();
        let generic = self.with_type_parser(|p: &mut TypeParser<'a, S>| {
            p.parse_generic_type_parameter_list_opt()
        });
        let constr = self.parse_type_constraint_opt();
        let equal = self.require_equal();
        let ty = self.parse_type_specifier(false /* allow_var */, true /* allow_attr */);
        let semi = self.require_semicolon();
        S!(
            make_alias_declaration,
            self,
            attr,
            token,
            name,
            generic,
            constr,
            equal,
            ty,
            semi
        )
    }
fn parse_enumerator(&mut self) -> S::R {
// SPEC
// enumerator:
// enumerator-constant = constant-expression ;
// enumerator-constant:
// name
//
// TODO: Add an error to a later pass that determines the value is
// a constant.
// TODO: We must allow TRUE to be a legal enum member name; here we allow
// any keyword. Consider making this more strict.
let name = self.require_name_allow_all_keywords();
let equal = self.require_equal();
let value = self.parse_expression();
let semicolon = self.require_semicolon();
S!(make_enumerator, self, name, equal, value, semicolon)
}
fn parse_enum_class_enumerator(&mut self) -> S::R {
// SPEC
// enum-class-enumerator:
// type-specifier name = expression ;
// Taken from parse_enumerator:
// TODO: We must allow TRUE to be a legal enum member name; here we allow
// any keyword. Consider making this more strict.
let ty = self.parse_type_specifier(/*allow_var*/ false, /*allow_attr*/ true);
let name = self.require_name_allow_all_keywords();
let equal_ = self.require_equal();
let initial_value = self.parse_expression();
let semicolon = self.require_semicolon();
S!(
make_enum_class_enumerator,
self,
ty,
name,
equal_,
initial_value,
semicolon
)
}
fn parse_inclusion_directive(&mut self) -> S::R {
// SPEC:
// inclusion-directive:
// require-multiple-directive
// require-once-directive
//
// require-multiple-directive:
// require include-filename ;
//
// include-filename:
// expression
//
// require-once-directive:
// require_once include-filename ;
//
// In non-strict mode we allow an inclusion directive (without semi) to be
// used as an expression. It is therefore easier to actually parse this as:
//
// inclusion-directive:
// inclusion-expression ;
//
// inclusion-expression:
// require include-filename
// require_once include-filename
let expr = self.parse_expression();
let semi = self.require_semicolon();
S!(make_inclusion_directive, self, expr, semi)
}
    // Parses one top-level declaration, dispatching on the first token.
    // Lookahead happens on a clone; `self` is advanced by the chosen
    // sub-parser, except the `const` arm, which commits the clone.
    fn parse_declaration(&mut self) -> S::R {
        // Push an expected-token scope for error recovery; popped on exit.
        self.expect_in_new_scope(ExpectedTokens::Classish);
        let mut parser1 = self.clone();
        let token = parser1.next_token();
        let result = match token.kind() {
            TokenKind::Include
            | TokenKind::Include_once
            | TokenKind::Require
            | TokenKind::Require_once => self.parse_inclusion_directive(),
            // `type`/`newtype` starts an alias only when followed by a name;
            // otherwise it falls through to the statement parser below.
            TokenKind::Type | TokenKind::Newtype
                if {
                    let kind = parser1.peek_token_kind();
                    kind == TokenKind::Name || kind == TokenKind::Classname
                } =>
            {
                let missing = S!(make_missing, self, self.pos());
                self.parse_alias_declaration(missing)
            }
            TokenKind::Enum => {
                let missing = S!(make_missing, self, self.pos());
                self.parse_enum_or_enum_class_declaration(missing)
            }
            TokenKind::RecordDec => {
                let missing = S!(make_missing, self, self.pos());
                self.parse_record_declaration(missing)
            }
            // The keyword namespace before a name should be parsed as
            // "the current namespace we are in", essentially a no op.
            // example:
            // namespace\f1(); should be parsed as a call to the function f1 in
            // the current namespace.
            TokenKind::Namespace if parser1.peek_token_kind() == TokenKind::Backslash => {
                self.with_statement_parser(|p: &mut StatementParser<'a, S>| p.parse_statement())
            }
            TokenKind::Namespace => self.parse_namespace_declaration(),
            TokenKind::Use => self.parse_namespace_use_declaration(),
            TokenKind::Trait | TokenKind::Interface | TokenKind::Class => {
                let missing = S!(make_missing, self, self.pos());
                self.parse_classish_declaration(missing)
            }
            TokenKind::Abstract | TokenKind::Final | TokenKind::XHP => {
                let missing = S!(make_missing, self, self.pos());
                match parser1.peek_token_kind() {
                    TokenKind::RecordDec => self.parse_record_declaration(missing),
                    _ => self.parse_classish_declaration(missing),
                }
            }
            TokenKind::Async | TokenKind::Function => {
                self.with_statement_parser(|p: &mut StatementParser<'a, S>| {
                    p.parse_possible_php_function(true)
                })
            }
            TokenKind::At if self.env.allow_new_attribute_syntax => {
                self.parse_enum_or_classish_or_function_declaration()
            }
            // `<<file:` is a file-scope attribute; any other `<<` starts an
            // attributed declaration.
            TokenKind::LessThanLessThan => match parser1.peek_token_kind() {
                TokenKind::File
                    if parser1.peek_token_kind_with_lookahead(1) == TokenKind::Colon =>
                {
                    self.parse_file_attribute_specification_opt()
                }
                _ => self.parse_enum_or_classish_or_function_declaration(),
            },
            // TODO figure out what global const differs from class const
            TokenKind::Const => {
                let missing1 = S!(make_missing, parser1, self.pos())
;
                self.continue_from(parser1);
                let token = S!(make_token, self, token);
                self.parse_const_declaration(missing1, token)
            }
            // TODO: What if it's not a legal statement? Do we still make progress here?
            _ => self.with_statement_parser(|p: &mut StatementParser<'a, S>| p.parse_statement()),
        };
        self.pop_scope(ExpectedTokens::Classish);
        result
    }
    // Parses an entire script: an optional leading markup section followed by
    // declarations up to and including the end-of-file token, which is itself
    // pushed as the final declaration.
    pub fn parse_script(&mut self) -> S::R {
        let header = self.parse_leading_markup_section();
        let mut declarations = vec![];
        if let Some(x) = header {
            declarations.push(x)
        };
        loop {
            let token_kind = self.peek_token_kind();
            match token_kind {
                TokenKind::EndOfFile => {
                    let token = self.next_token();
                    let token = S!(make_token, self, token);
                    let end_of_file = S!(make_end_of_file, self, token);
                    declarations.push(end_of_file);
                    break;
                }
                _ => declarations.push(self.parse_declaration()),
            }
        }
        let declarations = S!(make_list, self, declarations, self.pos());
        let result = S!(make_script, self, declarations);
        // The lexer keeps yielding EndOfFile once exhausted; sanity-check it.
        assert_eq!(self.peek_token_kind(), TokenKind::EndOfFile);
        result
    }
}
| 39.172319 | 111 | 0.561923 |
d7fee33ae1cd748ae5a5822e1b9c0390cf2cc8f1
| 6,814 |
// Wuille's secp
use coins_core::hashes::{Hash256Digest, MarkedDigestOutput};
use crate::{curve::model::*, Bip32Error};
/// Crate-internal alias for the backing library's error type.
pub(crate) type Error = secp256k1::Error;
#[cfg_attr(tarpaulin, skip)]
#[allow(clippy::all)]
lazy_static! {
    // Process-wide libsecp256k1 context with all capabilities.
    static ref CONTEXT: secp256k1::Secp256k1<secp256k1::All> = secp256k1::Secp256k1::new();
    // Default backend borrowing the static context; see `Secp256k1::static_ref`.
    pub static ref BACKEND: Secp256k1<'static> = Default::default();
}
/// A Secp256k1Backend struct using Sipa's C implementation of Secp256k1.
/// Wraps a borrowed `secp256k1` context with all capabilities.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Secp256k1<'a>(&'a secp256k1::Secp256k1<secp256k1::All>);
impl Default for Secp256k1<'static> {
fn default() -> Self {
Secp256k1(&CONTEXT)
}
}
impl Secp256k1<'static> {
    /// Init a backend, setting up any context necessary. This is implemented as a lazy_static
    /// context initialized on the first call. As such, the first call to init will be expensive,
    /// while successive calls will be cheap.
    pub fn static_ref() -> &'static Self {
        &BACKEND
    }
}
impl<'a> Secp256k1<'a> {
    /// Instantiate a backend from a context. Useful for managing your own backend lifespan
    /// instead of relying on the static context.
    pub fn from_context(context: &'a secp256k1::Secp256k1<secp256k1::All>) -> Self {
        Self(context)
    }
}
/// A private key wrapping a libsecp256k1 `SecretKey`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct Privkey(secp256k1::SecretKey);
impl ScalarSerialize for Privkey {
    /// Copies the 32 secret-key bytes into a fixed-size array.
    fn privkey_array(&self) -> [u8; 32] {
        let mut buf = [0u8; 32];
        buf.copy_from_slice(&self.0[..32]);
        buf
    }
}
impl ScalarDeserialize for Privkey {
    /// Parses a 32-byte array as a secret key; library validation errors are
    /// converted into `Bip32Error` via `?`.
    fn from_privkey_array(buf: [u8; 32]) -> Result<Self, Bip32Error> {
        let key = secp256k1::SecretKey::from_slice(&buf)?;
        Ok(key.into())
    }
}
impl From<secp256k1::SecretKey> for Privkey {
fn from(k: secp256k1::SecretKey) -> Self {
Self(k)
}
}
/// A public key wrapping a libsecp256k1 `PublicKey`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct Pubkey(secp256k1::PublicKey);
impl From<secp256k1::PublicKey> for Pubkey {
fn from(k: secp256k1::PublicKey) -> Self {
Self(k)
}
}
impl PointSerialize for Pubkey {
    /// Serializes as the 33-byte compressed encoding.
    fn pubkey_array(&self) -> [u8; 33] {
        self.0.serialize()
    }

    /// Serializes as the 65-byte uncompressed encoding.
    fn pubkey_array_uncompressed(&self) -> [u8; 65] {
        self.0.serialize_uncompressed()
    }
}
impl PointDeserialize for Pubkey {
    /// Parses a 33-byte (compressed) public-key encoding.
    fn from_pubkey_array(buf: [u8; 33]) -> Result<Self, Bip32Error> {
        let key = secp256k1::PublicKey::from_slice(&buf)?;
        Ok(key.into())
    }

    /// Parses a 65-byte (uncompressed) public-key encoding.
    fn from_pubkey_array_uncompressed(buf: [u8; 65]) -> Result<Self, Bip32Error> {
        let key = secp256k1::PublicKey::from_slice(&buf)?;
        Ok(key.into())
    }
}
/// Type alias for the underlying signature type.
pub type Signature = secp256k1::Signature;
impl SigSerialize for secp256k1::Signature {
    /// Serializes the signature to DER bytes.
    fn to_der(&self) -> Vec<u8> {
        secp256k1::Signature::serialize_der(self).to_vec()
    }

    /// Parses a DER-encoded signature.
    fn try_from_der(der: &[u8]) -> Result<Self, Bip32Error> {
        Ok(Self::from_der(der)?)
    }
}
impl SigSerialize for secp256k1::recovery::RecoverableSignature {
    /// Serializes the signature (without its recovery id) to DER bytes.
    fn to_der(&self) -> Vec<u8> {
        self.without_recovery().to_der()
    }

    /// Always fails: DER does not carry the recovery id, so a recoverable
    /// signature cannot be reconstructed from DER alone.
    fn try_from_der(_der: &[u8]) -> Result<Self, Bip32Error> {
        Err(Bip32Error::NoRecoveryID)
    }
}
/// Type alias for the underlying RecoverableSigSerialize signature type.
pub type RecoverableSignature = secp256k1::recovery::RecoverableSignature;
impl RecoverableSigSerialize for secp256k1::recovery::RecoverableSignature {
    type Signature = secp256k1::Signature;

    /// Splits the 64-byte compact encoding into (recovery id, r, s).
    fn serialize_vrs(&self) -> (u8, [u8; 32], [u8; 32]) {
        let (rec_id, sig) = self.serialize_compact();
        let mut r = [0u8; 32];
        let mut s = [0u8; 32];
        r.copy_from_slice(&sig[..32]);
        s.copy_from_slice(&sig[32..]);
        (rec_id.to_i32() as u8, r, s)
    }

    /// Rebuilds a recoverable signature from (recovery id, r, s); errors if
    /// the recovery id or compact bytes are rejected by the library.
    fn deserialize_vrs(vrs: (u8, [u8; 32], [u8; 32])) -> Result<Self, Bip32Error> {
        let mut data = [0u8; 64];
        data[..32].copy_from_slice(&vrs.1);
        data[32..].copy_from_slice(&vrs.2);
        let rec_id = secp256k1::recovery::RecoveryId::from_i32(vrs.0 as i32)?;
        Ok(Self::from_compact(&data, rec_id)?)
    }

    /// Drops the recovery id, yielding a standard signature.
    fn without_recovery(&self) -> Self::Signature {
        // full disambiguation
        secp256k1::recovery::RecoverableSignature::to_standard(self)
    }
}
impl<'a> Secp256k1Backend for Secp256k1<'a> {
    type Error = Bip32Error;
    type Context = secp256k1::Secp256k1<secp256k1::All>;
    type Privkey = Privkey;
    type Pubkey = Pubkey;
    type Signature = secp256k1::Signature;
    type RecoverableSignature = secp256k1::recovery::RecoverableSignature;

    /// Computes the public key corresponding to a secret key.
    fn derive_pubkey(&self, k: &Self::Privkey) -> Self::Pubkey {
        secp256k1::PublicKey::from_secret_key(&self.0, &k.0).into()
    }

    /// Tweaks a public key via the library's `add_exp_assign`; invalid tweaks
    /// surface as errors.
    fn tweak_pubkey(&self, k: &Self::Pubkey, tweak: [u8; 32]) -> Result<Self::Pubkey, Bip32Error> {
        let mut key = k.0;
        key.add_exp_assign(&self.0, &tweak)?;
        Ok(key.into())
    }

    /// Tweaks a secret key via the library's `add_assign`; invalid tweaks
    /// surface as errors.
    fn tweak_privkey(
        &self,
        k: &Self::Privkey,
        tweak: [u8; 32],
    ) -> Result<Self::Privkey, Bip32Error> {
        let mut key = k.0;
        key.add_assign(&tweak)?;
        Ok(key.into())
    }

    /// Signs a 32-byte digest. The `expect` is safe: `Hash256Digest` is
    /// always 32 bytes.
    fn sign_digest(&self, k: &Self::Privkey, digest: Hash256Digest) -> Self::Signature {
        let m = secp256k1::Message::from_slice(digest.as_slice()).expect("digest is 32 bytes");
        self.0.sign(&m, &k.0)
    }

    /// Signs a 32-byte digest, producing a recoverable signature.
    fn sign_digest_recoverable(
        &self,
        k: &Self::Privkey,
        digest: Hash256Digest,
    ) -> Self::RecoverableSignature {
        let m = secp256k1::Message::from_slice(digest.as_slice()).expect("digest is 32 bytes");
        self.0.sign_recoverable(&m, &k.0)
    }

    /// Verifies a signature over a 32-byte digest against a public key.
    fn verify_digest(
        &self,
        k: &Self::Pubkey,
        digest: Hash256Digest,
        sig: &Self::Signature,
    ) -> Result<(), Bip32Error> {
        let m = secp256k1::Message::from_slice(digest.as_slice()).expect("digest is 32 bytes");
        Ok(self.0.verify(&m, sig, &k.0)?)
    }

    /// Verifies a recoverable signature by converting it to standard form.
    fn verify_digest_recoverable(
        &self,
        k: &Self::Pubkey,
        digest: Hash256Digest,
        sig: &Self::RecoverableSignature,
    ) -> Result<(), Bip32Error> {
        let m = secp256k1::Message::from_slice(digest.as_slice()).expect("digest is 32 bytes");
        Ok(self.0.verify(&m, &sig.to_standard(), &k.0)?)
    }

    /// Recovers the signing public key from a recoverable signature and the
    /// digest it signed.
    fn recover_pubkey(
        &self,
        digest: Hash256Digest,
        sig: &Self::RecoverableSignature,
    ) -> Result<Self::Pubkey, Bip32Error> {
        let m = secp256k1::Message::from_slice(digest.as_slice()).expect("digest is 32 bytes");
        Ok(self.0.recover(&m, sig)?.into())
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Smoke test: a backend can be constructed from a caller-owned context.
    #[test]
    fn it_instantiates_a_backend_from_a_context() {
        let context: secp256k1::Secp256k1<secp256k1::All> = { secp256k1::Secp256k1::new() };
        Secp256k1::from_context(&context);
    }
}
| 29.885965 | 99 | 0.626064 |
29fd0d05e98b41745dd1de0072e76a6065b668cb
| 15,559 |
//! Request extractors
use std::{
convert::Infallible,
future::Future,
pin::Pin,
task::{Context, Poll},
};
use actix_http::http::{Method, Uri};
use actix_utils::future::{ok, Ready};
use futures_core::ready;
use crate::{dev::Payload, Error, HttpRequest};
/// A type that implements [`FromRequest`] is called an **extractor** and can extract data from
/// the request. Some types that implement this trait are: [`Json`], [`Header`], and [`Path`].
///
/// # Configuration
/// An extractor can be customized by injecting the corresponding configuration with one of:
///
/// - [`App::app_data()`][crate::App::app_data]
/// - [`Scope::app_data()`][crate::Scope::app_data]
/// - [`Resource::app_data()`][crate::Resource::app_data]
///
/// Here are some built-in extractors and their corresponding configuration.
/// Please refer to the respective documentation for details.
///
/// | Extractor | Configuration |
/// |-------------|-------------------|
/// | [`Header`] | _None_ |
/// | [`Path`] | [`PathConfig`] |
/// | [`Json`] | [`JsonConfig`] |
/// | [`Form`] | [`FormConfig`] |
/// | [`Query`] | [`QueryConfig`] |
/// | [`Bytes`] | [`PayloadConfig`] |
/// | [`String`] | [`PayloadConfig`] |
/// | [`Payload`] | [`PayloadConfig`] |
///
/// # Implementing An Extractor
/// To reduce duplicate code in handlers where extracting certain parts of a request has a common
/// structure, you can implement `FromRequest` for your own types.
///
/// Note that the request payload can only be consumed by one extractor.
///
/// [`Header`]: crate::web::Header
/// [`Json`]: crate::web::Json
/// [`JsonConfig`]: crate::web::JsonConfig
/// [`Form`]: crate::web::Form
/// [`FormConfig`]: crate::web::FormConfig
/// [`Path`]: crate::web::Path
/// [`PathConfig`]: crate::web::PathConfig
/// [`Query`]: crate::web::Query
/// [`QueryConfig`]: crate::web::QueryConfig
/// [`Payload`]: crate::web::Payload
/// [`PayloadConfig`]: crate::web::PayloadConfig
/// [`String`]: FromRequest#impl-FromRequest-for-String
/// [`Bytes`]: crate::web::Bytes#impl-FromRequest
/// [`Either`]: crate::web::Either
#[doc(alias = "extract", alias = "extractor")]
pub trait FromRequest: Sized {
    /// The associated error which can be returned when extraction fails.
    type Error: Into<Error>;
    /// Future that resolves to a `Self`.
    type Future: Future<Output = Result<Self, Self::Error>>;
    /// Create a `Self` from request parts asynchronously.
    ///
    /// Note: the request payload can only be consumed by one extractor.
    fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future;
    /// Create a `Self` from the request head asynchronously (no payload).
    ///
    /// This method is short for `T::from_request(req, &mut Payload::None)`.
    fn extract(req: &HttpRequest) -> Self::Future {
        Self::from_request(req, &mut Payload::None)
    }
}
/// Optionally extract a field from the request
///
/// If the FromRequest for T fails, return None rather than returning an error response
///
/// # Examples
/// ```
/// use actix_web::{web, dev, App, Error, HttpRequest, FromRequest};
/// use actix_web::error::ErrorBadRequest;
/// use futures_util::future::{ok, err, Ready};
/// use serde::Deserialize;
/// use rand;
///
/// #[derive(Debug, Deserialize)]
/// struct Thing {
/// name: String
/// }
///
/// impl FromRequest for Thing {
/// type Error = Error;
/// type Future = Ready<Result<Self, Self::Error>>;
///
/// fn from_request(req: &HttpRequest, payload: &mut dev::Payload) -> Self::Future {
/// if rand::random() {
/// ok(Thing { name: "thingy".into() })
/// } else {
/// err(ErrorBadRequest("no luck"))
/// }
///
/// }
/// }
///
/// /// extract `Thing` from request
/// async fn index(supplied_thing: Option<Thing>) -> String {
/// match supplied_thing {
/// // Puns not intended
/// Some(thing) => format!("Got something: {:?}", thing),
/// None => format!("No thing!")
/// }
/// }
///
/// fn main() {
/// let app = App::new().service(
/// web::resource("/users/:first").route(
/// web::post().to(index))
/// );
/// }
/// ```
// If the inner extractor fails, `Option<T>` swallows the error and resolves
// to `None` instead of failing the request (see `FromRequestOptFuture`).
impl<T: 'static> FromRequest for Option<T>
where
    T: FromRequest,
    T::Future: 'static,
{
    // Extraction of `Option<T>` never surfaces an error to the caller, but an
    // `Error` type must still be named to satisfy the trait.
    type Error = Error;
    type Future = FromRequestOptFuture<T::Future>;
    #[inline]
    fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
        FromRequestOptFuture {
            fut: T::from_request(req, payload),
        }
    }
}
/// Future returned by the `Option<T>` extractor; wraps the inner extractor's
/// future and maps its error case to `None` (see the `Future` impl below).
#[pin_project::pin_project]
pub struct FromRequestOptFuture<Fut> {
    /// The inner extractor's future.
    #[pin]
    fut: Fut,
}
impl<Fut, T, E> Future for FromRequestOptFuture<Fut>
where
Fut: Future<Output = Result<T, E>>,
E: Into<Error>,
{
type Output = Result<Option<T>, Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let res = ready!(this.fut.poll(cx));
match res {
Ok(t) => Poll::Ready(Ok(Some(t))),
Err(e) => {
log::debug!("Error for Option<T> extractor: {}", e.into());
Poll::Ready(Ok(None))
}
}
}
}
/// Optionally extract a field from the request or extract the Error if unsuccessful
///
/// If the `FromRequest` for T fails, inject Err into handler rather than returning an error response
///
/// # Examples
/// ```
/// use actix_web::{web, dev, App, Result, Error, HttpRequest, FromRequest};
/// use actix_web::error::ErrorBadRequest;
/// use futures_util::future::{ok, err, Ready};
/// use serde::Deserialize;
/// use rand;
///
/// #[derive(Debug, Deserialize)]
/// struct Thing {
/// name: String
/// }
///
/// impl FromRequest for Thing {
/// type Error = Error;
/// type Future = Ready<Result<Thing, Error>>;
///
/// fn from_request(req: &HttpRequest, payload: &mut dev::Payload) -> Self::Future {
/// if rand::random() {
/// ok(Thing { name: "thingy".into() })
/// } else {
/// err(ErrorBadRequest("no luck"))
/// }
/// }
/// }
///
/// /// extract `Thing` from request
/// async fn index(supplied_thing: Result<Thing>) -> String {
/// match supplied_thing {
/// Ok(thing) => format!("Got thing: {:?}", thing),
/// Err(e) => format!("Error extracting thing: {}", e)
/// }
/// }
///
/// fn main() {
/// let app = App::new().service(
/// web::resource("/users/:first").route(web::post().to(index))
/// );
/// }
/// ```
// Extracting `Result<T, T::Error>` never fails the request: the inner
// extractor's outcome (Ok or Err) is handed to the handler as-is
// (see `FromRequestResFuture`).
impl<T> FromRequest for Result<T, T::Error>
where
    T: FromRequest + 'static,
    T::Error: 'static,
    T::Future: 'static,
{
    type Error = Error;
    type Future = FromRequestResFuture<T::Future>;
    #[inline]
    fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
        FromRequestResFuture {
            fut: T::from_request(req, payload),
        }
    }
}
/// Future returned by the `Result<T, E>` extractor; wraps the inner
/// extractor's future and surfaces its outcome (Ok or Err) as the success
/// value (see the `Future` impl below).
#[pin_project::pin_project]
pub struct FromRequestResFuture<Fut> {
    /// The inner extractor's future.
    #[pin]
    fut: Fut,
}
impl<Fut, T, E> Future for FromRequestResFuture<Fut>
where
    Fut: Future<Output = Result<T, E>>,
{
    type Output = Result<Result<T, E>, Error>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Forward the inner poll and wrap whatever it yields (Ok or Err) in
        // Ok, so extraction itself never fails for `Result<T, E>`.
        self.project().fut.poll(cx).map(Ok)
    }
}
/// Extract the request's URI.
///
/// # Examples
/// ```
/// use actix_web::{http::Uri, web, App, Responder};
///
/// async fn handler(uri: Uri) -> impl Responder {
/// format!("Requested path: {}", uri.path())
/// }
///
/// let app = App::new().default_service(web::to(handler));
/// ```
impl FromRequest for Uri {
    type Error = Infallible;
    type Future = Ready<Result<Self, Self::Error>>;
    /// Resolves immediately with a clone of the request URI; the payload is
    /// never touched and extraction cannot fail.
    fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
        let uri = req.uri().clone();
        ok(uri)
    }
}
/// Extract the request's method.
///
/// # Examples
/// ```
/// use actix_web::{http::Method, web, App, Responder};
///
/// async fn handler(method: Method) -> impl Responder {
/// format!("Request method: {}", method)
/// }
///
/// let app = App::new().default_service(web::to(handler));
/// ```
impl FromRequest for Method {
    type Error = Infallible;
    type Future = Ready<Result<Self, Self::Error>>;
    /// Resolves immediately with a clone of the request method; the payload
    /// is never touched and extraction cannot fail.
    fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
        let method = req.method().clone();
        ok(method)
    }
}
#[doc(hidden)]
// Unit extractor: resolves immediately with `()` and never inspects the
// request or its payload. Presumably supports handlers that take no
// extractor arguments — confirm at call sites.
impl FromRequest for () {
    type Error = Infallible;
    type Future = Ready<Result<Self, Self::Error>>;
    fn from_request(_: &HttpRequest, _: &mut Payload) -> Self::Future {
        ok(())
    }
}
// Generates a `FromRequest` impl for one tuple arity: each element's
// extractor runs concurrently against the same request/payload, and the
// tuple resolves once every element has resolved (or fails on the first
// element error).
macro_rules! tuple_from_req ({$fut_type:ident, $(($n:tt, $T:ident)),+} => {
    // This module is a trick to get around the inability of
    // `macro_rules!` macros to make new idents. We want to make
    // a new `FutWrapper` struct for each distinct invocation of
    // this macro. Ideally, we would name it something like
    // `FutWrapper_$fut_type`, but this can't be done in a macro_rules
    // macro.
    //
    // Instead, we put everything in a module named `$fut_type`, thus allowing
    // us to use the name `FutWrapper` without worrying about conflicts.
    // This macro only exists to generate trait impls for tuples - these
    // are inherently global, so users don't have to care about this
    // weird trick.
    #[allow(non_snake_case)]
    mod $fut_type {
        // Bring everything into scope, so we don't need
        // redundant imports
        use super::*;
        /// A helper struct to allow us to pin-project through
        /// to individual fields
        #[pin_project::pin_project]
        struct FutWrapper<$($T: FromRequest),+>($(#[pin] $T::Future),+);
        /// FromRequest implementation for tuple
        #[doc(hidden)]
        #[allow(unused_parens)]
        impl<$($T: FromRequest + 'static),+> FromRequest for ($($T,)+)
        {
            type Error = Error;
            type Future = $fut_type<$($T),+>;
            fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
                $fut_type {
                    items: <($(Option<$T>,)+)>::default(),
                    futs: FutWrapper($($T::from_request(req, payload),)+),
                }
            }
        }
        #[doc(hidden)]
        #[pin_project::pin_project]
        pub struct $fut_type<$($T: FromRequest),+> {
            // Each slot holds the resolved value for that tuple position,
            // or `None` while its future is still pending.
            items: ($(Option<$T>,)+),
            #[pin]
            futs: FutWrapper<$($T,)+>,
        }
        impl<$($T: FromRequest),+> Future for $fut_type<$($T),+>
        {
            type Output = Result<($($T,)+), Error>;
            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                let mut this = self.project();
                let mut ready = true;
                // Poll every sub-extractor that hasn't completed yet; the
                // first error aborts the whole tuple extraction.
                $(
                    if this.items.$n.is_none() {
                        match this.futs.as_mut().project().$n.poll(cx) {
                            Poll::Ready(Ok(item)) => {
                                this.items.$n = Some(item);
                            }
                            Poll::Pending => ready = false,
                            Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())),
                        }
                    }
                )+
                // All parts resolved: move them out of their Option slots.
                if ready {
                    Poll::Ready(Ok(
                        ($(this.items.$n.take().unwrap(),)+)
                    ))
                } else {
                    Poll::Pending
                }
            }
        }
    }
});
// Instantiate the tuple extractor impls for arities 1 through 10.
#[rustfmt::skip]
mod m {
    use super::*;
    tuple_from_req!(TupleFromRequest1, (0, A));
    tuple_from_req!(TupleFromRequest2, (0, A), (1, B));
    tuple_from_req!(TupleFromRequest3, (0, A), (1, B), (2, C));
    tuple_from_req!(TupleFromRequest4, (0, A), (1, B), (2, C), (3, D));
    tuple_from_req!(TupleFromRequest5, (0, A), (1, B), (2, C), (3, D), (4, E));
    tuple_from_req!(TupleFromRequest6, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F));
    tuple_from_req!(TupleFromRequest7, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F), (6, G));
    tuple_from_req!(TupleFromRequest8, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F), (6, G), (7, H));
    tuple_from_req!(TupleFromRequest9, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F), (6, G), (7, H), (8, I));
    tuple_from_req!(TupleFromRequest10, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F), (6, G), (7, H), (8, I), (9, J));
}
#[cfg(test)]
mod tests {
    // Exercises the Option/Result wrappers (via the Form extractor) and the
    // simple Uri/Method extractors.
    use actix_http::http::header;
    use bytes::Bytes;
    use serde::Deserialize;
    use super::*;
    use crate::test::TestRequest;
    use crate::types::{Form, FormConfig};
    #[derive(Deserialize, Debug, PartialEq)]
    struct Info {
        hello: String,
    }
    #[actix_rt::test]
    async fn test_option() {
        // No body at all: Form extraction fails, Option resolves to None.
        let (req, mut pl) = TestRequest::default()
            .insert_header((header::CONTENT_TYPE, "application/x-www-form-urlencoded"))
            .data(FormConfig::default().limit(4096))
            .to_http_parts();
        let r = Option::<Form<Info>>::from_request(&req, &mut pl)
            .await
            .unwrap();
        assert_eq!(r, None);
        let (req, mut pl) = TestRequest::default()
            .insert_header((header::CONTENT_TYPE, "application/x-www-form-urlencoded"))
            .insert_header((header::CONTENT_LENGTH, "9"))
            .set_payload(Bytes::from_static(b"hello=world"))
            .to_http_parts();
        let r = Option::<Form<Info>>::from_request(&req, &mut pl)
            .await
            .unwrap();
        assert_eq!(
            r,
            Some(Form(Info {
                hello: "world".into()
            }))
        );
        // Wrong form key: deserialization into Info fails -> None.
        let (req, mut pl) = TestRequest::default()
            .insert_header((header::CONTENT_TYPE, "application/x-www-form-urlencoded"))
            .insert_header((header::CONTENT_LENGTH, "9"))
            .set_payload(Bytes::from_static(b"bye=world"))
            .to_http_parts();
        let r = Option::<Form<Info>>::from_request(&req, &mut pl)
            .await
            .unwrap();
        assert_eq!(r, None);
    }
    #[actix_rt::test]
    async fn test_result() {
        let (req, mut pl) = TestRequest::default()
            .insert_header((header::CONTENT_TYPE, "application/x-www-form-urlencoded"))
            .insert_header((header::CONTENT_LENGTH, "11"))
            .set_payload(Bytes::from_static(b"hello=world"))
            .to_http_parts();
        let r = Result::<Form<Info>, Error>::from_request(&req, &mut pl)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(
            r,
            Form(Info {
                hello: "world".into()
            })
        );
        // Wrong form key: the inner extractor's error is surfaced as Err.
        let (req, mut pl) = TestRequest::default()
            .insert_header((header::CONTENT_TYPE, "application/x-www-form-urlencoded"))
            .insert_header((header::CONTENT_LENGTH, 9))
            .set_payload(Bytes::from_static(b"bye=world"))
            .to_http_parts();
        let r = Result::<Form<Info>, Error>::from_request(&req, &mut pl)
            .await
            .unwrap();
        assert!(r.is_err());
    }
    #[actix_rt::test]
    async fn test_uri() {
        let req = TestRequest::default().uri("/foo/bar").to_http_request();
        let uri = Uri::extract(&req).await.unwrap();
        assert_eq!(uri.path(), "/foo/bar");
    }
    #[actix_rt::test]
    async fn test_method() {
        let req = TestRequest::default().method(Method::GET).to_http_request();
        let method = Method::extract(&req).await.unwrap();
        assert_eq!(method, Method::GET);
    }
}
| 31.242972 | 120 | 0.546565 |
fcb598eac1eb4f7990358938dadb0fcd6da06253
| 4,173 |
use byteorder::Error as ByteOrderError;
use image::ImageError;
use name::WadName;
use std::error::Error as StdError;
use std::fmt::Result as FmtResult;
use std::fmt::{Display, Formatter};
use std::io::Error as IoError;
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use toml::DecodeError as TomlDecodeError;
use toml::ParserError as TomlParserError;
/// Module-wide result alias using this module's [`Error`] type.
pub type Result<T> = StdResult<T, Error>;
/// An [`ErrorKind`] optionally annotated with the path of the file that was
/// being processed when the error occurred (attached via `InFile`).
#[derive(Debug)]
pub struct Error {
    // Path of the file being processed when the error arose, if known.
    file: Option<PathBuf>,
    // The underlying cause/category of the error.
    kind: ErrorKind,
}
impl StdError for Error {
    // Delegates to the kind's static description (older std error API);
    // the detailed, path-annotated message comes from `Display` below.
    fn description(&self) -> &str {
        self.kind.description()
    }
}
impl Display for Error {
    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
        // Prefix the message with the offending file's path when one is known.
        if let Some(ref path) = self.file {
            write!(fmt, "in '{}': {}", path.to_string_lossy(), self.kind)
        } else {
            write!(fmt, "{}", self.kind)
        }
    }
}
/// The failure categories for WAD loading and metadata parsing.
#[derive(Debug)]
pub enum ErrorKind {
    /// Underlying I/O failure.
    Io(IoError),
    /// Failure while decoding binary data.
    ByteOrder(ByteOrderError),
    /// The WAD file header was malformed.
    BadWadHeader,
    /// A name was not a valid WAD name (raw bytes kept for diagnostics).
    BadWadName(Vec<u8>),
    /// A lump that must be present was not found (lump name attached).
    MissingRequiredLump(String),
    // MissingRequiredPatch(WadName, WadName),
    /// The metadata TOML parsed but did not match the expected schema.
    BadMetadataSchema(TomlDecodeError),
    /// The metadata TOML failed to parse at all.
    BadMetadataSyntax(Vec<TomlParserError>),
    /// An image could not be decoded (owning lump name attached).
    BadImage(WadName, ImageError),
}
impl ErrorKind {
    /// Short, static description of the error category; detailed context
    /// (names, inner errors) is added by the `Display` impl below.
    fn description(&self) -> &str {
        match *self {
            ErrorKind::Io(ref inner) => inner.description(),
            ErrorKind::ByteOrder(ref inner) => inner.description(),
            ErrorKind::BadWadHeader => "invalid header",
            ErrorKind::BadWadName(..) => "invalid wad name",
            ErrorKind::MissingRequiredLump(..) => "missing required lump",
            // ErrorKind::MissingRequiredPatch(..) => "missing required patch",
            ErrorKind::BadMetadataSchema(..) => "invalid data in metadata",
            ErrorKind::BadMetadataSyntax(..) => "TOML syntax error in metadata",
            ErrorKind::BadImage(..) => "Bad image",
        }
    }
}
impl Display for ErrorKind {
    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
        // Start from the static description and append each variant's
        // payload (names, inner errors) where it aids diagnosis.
        let desc = self.description();
        match *self {
            ErrorKind::Io(ref inner) => write!(fmt, "{}", inner),
            ErrorKind::ByteOrder(ref inner) => write!(fmt, "{}", inner),
            ErrorKind::BadWadHeader => write!(fmt, "{}", desc),
            ErrorKind::BadWadName(ref name) => write!(fmt, "{} ({:?})", desc, name),
            ErrorKind::MissingRequiredLump(ref name) => write!(fmt, "{} ({})", desc, name),
            // ErrorKind::MissingRequiredPatch(ref patch, ref texture) => {
            //    write!(fmt, "{} ({}, required by {})", desc, patch, texture)
            // },
            ErrorKind::BadMetadataSchema(ref err) => write!(fmt, "{}: {}", desc, err),
            ErrorKind::BadMetadataSyntax(ref errors) => write!(fmt, "{}: {:?}", desc, errors),
            ErrorKind::BadImage(ref name, ref inner) => {
                write!(fmt, "{}: in {}: {}", desc, name, inner)
            }
        }
    }
}
impl From<IoError> for Error {
fn from(cause: IoError) -> Error {
ErrorKind::Io(cause).into()
}
}
impl From<ByteOrderError> for Error {
    fn from(cause: ByteOrderError) -> Error {
        // Wrap the decode failure, then attach it to an Error without a file.
        let kind = ErrorKind::ByteOrder(cause);
        kind.into()
    }
}
impl From<TomlDecodeError> for Error {
    // A TOML decode failure always maps to the metadata-schema category.
    fn from(cause: TomlDecodeError) -> Error {
        ErrorKind::BadMetadataSchema(cause).into()
    }
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error {
Error {
kind: kind,
file: None,
}
}
}
/// Extension trait for attaching a file path to an error (or to the error
/// inside a `Result`), producing a path-annotated [`Error`].
pub trait InFile {
    // The annotated type: `Error` for errors/kinds, `Result<S>` for results.
    type Output;
    fn in_file(self, file: &Path) -> Self::Output;
}
impl InFile for Error {
    type Output = Error;
    fn in_file(self, file: &Path) -> Error {
        // Keep the existing kind; set (or replace) the file annotation.
        Error {
            file: Some(file.to_path_buf()),
            ..self
        }
    }
}
impl InFile for ErrorKind {
    type Output = Error;
    fn in_file(self, file: &Path) -> Error {
        // Promote the bare kind to a full Error annotated with the path.
        Error {
            kind: self,
            file: Some(file.to_path_buf()),
        }
    }
}
impl<S, E: Into<Error>> InFile for StdResult<S, E> {
    type Output = Result<S>;
    fn in_file(self, file: &Path) -> Result<S> {
        // Success passes through; errors are converted and path-annotated.
        match self {
            Ok(value) => Ok(value),
            Err(error) => Err(error.into().in_file(file)),
        }
    }
}
| 28.979167 | 94 | 0.574407 |
6a1a7e6ca0223680de0ce01b5d381eb3b66637a6
| 16,960 |
//! Transaction types
use crate::{
types::{Address, Bloom, Bytes, Log, NameOrAddress, Signature, H256, U256, U64},
utils::keccak256,
};
use rlp::RlpStream;
use serde::{Deserialize, Serialize};
// Number of tx fields before signing: nonce, gas price, gas, to, value and
// data (appended in that order by `TransactionRequest::rlp_base`).
#[cfg(not(feature = "celo"))]
const UNSIGNED_TX_FIELDS: usize = 6;
// Celo has 3 additional fields: fee currency, gateway fee recipient and
// gateway fee (see `inject_celo_metadata`).
#[cfg(feature = "celo")]
const UNSIGNED_TX_FIELDS: usize = 9;
// Unsigned fields + signature [r s v]
const SIGNED_TX_FIELDS: usize = UNSIGNED_TX_FIELDS + 3;
/// Parameters for sending a transaction
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Debug)]
pub struct TransactionRequest {
    /// Sender address or ENS name
    #[serde(skip_serializing_if = "Option::is_none")]
    pub from: Option<Address>,
    /// Recipient address (None for contract creation)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<NameOrAddress>,
    /// Supplied gas (None for sensible default)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gas: Option<U256>,
    /// Gas price (None for sensible default)
    #[serde(rename = "gasPrice")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gas_price: Option<U256>,
    /// Transferred value (None for no transfer)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value: Option<U256>,
    /// The compiled code of a contract OR the first 4 bytes of the hash of the
    /// invoked method signature and encoded parameters. For details see Ethereum Contract ABI
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<Bytes>,
    /// Transaction nonce (None for next available nonce)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub nonce: Option<U256>,
    ///////////////// Celo-specific transaction fields /////////////////
    /// The currency fees are paid in (None for native currency)
    // NOTE(review): unlike `Transaction` below, these Celo fields carry no
    // serde rename to camelCase — confirm the intended wire format.
    #[cfg(feature = "celo")]
    #[cfg_attr(docsrs, doc(cfg(feature = "celo")))]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fee_currency: Option<Address>,
    /// Gateway fee recipient (None for no gateway fee paid)
    #[cfg(feature = "celo")]
    #[cfg_attr(docsrs, doc(cfg(feature = "celo")))]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gateway_fee_recipient: Option<Address>,
    /// Gateway fee amount (None for no gateway fee paid)
    #[cfg(feature = "celo")]
    #[cfg_attr(docsrs, doc(cfg(feature = "celo")))]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gateway_fee: Option<U256>,
}
impl TransactionRequest {
    /// Creates an empty transaction request with all fields left empty
    pub fn new() -> Self {
        Self::default()
    }
    /// Convenience function for sending a new payment transaction to the receiver.
    pub fn pay<T: Into<NameOrAddress>, V: Into<U256>>(to: T, value: V) -> Self {
        TransactionRequest {
            to: Some(to.into()),
            value: Some(value.into()),
            ..Default::default()
        }
    }
    // Builder pattern helpers
    /// Sets the `from` field in the transaction to the provided value
    pub fn from<T: Into<Address>>(mut self, from: T) -> Self {
        self.from = Some(from.into());
        self
    }
    /// Sets the `to` field in the transaction to the provided value
    pub fn to<T: Into<NameOrAddress>>(mut self, to: T) -> Self {
        self.to = Some(to.into());
        self
    }
    /// Sets the `gas` field in the transaction to the provided value
    pub fn gas<T: Into<U256>>(mut self, gas: T) -> Self {
        self.gas = Some(gas.into());
        self
    }
    /// Sets the `gas_price` field in the transaction to the provided value
    pub fn gas_price<T: Into<U256>>(mut self, gas_price: T) -> Self {
        self.gas_price = Some(gas_price.into());
        self
    }
    /// Sets the `value` field in the transaction to the provided value
    pub fn value<T: Into<U256>>(mut self, value: T) -> Self {
        self.value = Some(value.into());
        self
    }
    /// Sets the `data` field in the transaction to the provided value
    pub fn data<T: Into<Bytes>>(mut self, data: T) -> Self {
        self.data = Some(data.into());
        self
    }
    /// Sets the `nonce` field in the transaction to the provided value
    pub fn nonce<T: Into<U256>>(mut self, nonce: T) -> Self {
        self.nonce = Some(nonce.into());
        self
    }
    /// Hashes the transaction's data with the provided chain id
    ///
    /// This is the EIP-155 signing hash: keccak256 of the unsigned RLP.
    pub fn sighash<T: Into<U64>>(&self, chain_id: Option<T>) -> H256 {
        keccak256(self.rlp(chain_id).as_ref()).into()
    }
    /// Gets the unsigned transaction's RLP encoding
    pub fn rlp<T: Into<U64>>(&self, chain_id: Option<T>) -> Bytes {
        let mut rlp = RlpStream::new();
        // "If [..] CHAIN_ID is available, then when computing the hash of a
        // transaction for the purposes of signing, instead of hashing only
        // six rlp encoded elements (nonce, gasprice, startgas, to, value, data),
        // you SHOULD hash nine rlp encoded elements
        // (nonce, gasprice, startgas, to, value, data, chainid, 0, 0)"
        // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md#specification
        let num_els = if chain_id.is_some() {
            UNSIGNED_TX_FIELDS + 3
        } else {
            UNSIGNED_TX_FIELDS
        };
        rlp.begin_list(num_els);
        self.rlp_base(&mut rlp);
        // Only hash the 3 extra fields when preparing the
        // data to sign if chain_id is present
        if let Some(chain_id) = chain_id {
            rlp.append(&chain_id.into());
            rlp.append(&0u8);
            rlp.append(&0u8);
        }
        rlp.out().freeze().into()
    }
    /// Produces the RLP encoding of the transaction with the provided signature
    pub fn rlp_signed(&self, signature: &Signature) -> Bytes {
        let mut rlp = RlpStream::new();
        // construct the RLP body
        rlp.begin_list(SIGNED_TX_FIELDS);
        self.rlp_base(&mut rlp);
        // append the signature components in the canonical v, r, s order
        rlp.append(&signature.v);
        rlp.append(&signature.r);
        rlp.append(&signature.s);
        rlp.out().freeze().into()
    }
    /// Appends the unsigned payload fields (nonce, gas price, gas, to, value,
    /// data — plus the Celo fields when enabled); `None` fields are encoded
    /// as empty strings by `rlp_opt`.
    fn rlp_base(&self, rlp: &mut RlpStream) {
        rlp_opt(rlp, self.nonce);
        rlp_opt(rlp, self.gas_price);
        rlp_opt(rlp, self.gas);
        #[cfg(feature = "celo")]
        self.inject_celo_metadata(rlp);
        rlp_opt(rlp, self.to.as_ref());
        rlp_opt(rlp, self.value);
        rlp_opt(rlp, self.data.as_ref().map(|d| d.as_ref()));
    }
}
// Separate impl block for the celo-specific fields
#[cfg(feature = "celo")]
impl TransactionRequest {
    // modifies the RLP stream with the Celo-specific information
    // (fee currency, gateway fee recipient, gateway fee — in that order).
    fn inject_celo_metadata(&self, rlp: &mut RlpStream) {
        rlp_opt(rlp, self.fee_currency);
        rlp_opt(rlp, self.gateway_fee_recipient);
        rlp_opt(rlp, self.gateway_fee);
    }
    /// Sets the `fee_currency` field in the transaction to the provided value
    #[cfg_attr(docsrs, doc(cfg(feature = "celo")))]
    pub fn fee_currency<T: Into<Address>>(mut self, fee_currency: T) -> Self {
        self.fee_currency = Some(fee_currency.into());
        self
    }
    /// Sets the `gateway_fee` field in the transaction to the provided value
    #[cfg_attr(docsrs, doc(cfg(feature = "celo")))]
    pub fn gateway_fee<T: Into<U256>>(mut self, gateway_fee: T) -> Self {
        self.gateway_fee = Some(gateway_fee.into());
        self
    }
    /// Sets the `gateway_fee_recipient` field in the transaction to the provided value
    #[cfg_attr(docsrs, doc(cfg(feature = "celo")))]
    pub fn gateway_fee_recipient<T: Into<Address>>(mut self, gateway_fee_recipient: T) -> Self {
        self.gateway_fee_recipient = Some(gateway_fee_recipient.into());
        self
    }
}
// Appends `Some` payloads as-is; `None` is appended as an empty string so the
// field still occupies a slot in the RLP list.
fn rlp_opt<T: rlp::Encodable>(rlp: &mut RlpStream, opt: Option<T>) {
    match opt {
        Some(ref inner) => {
            rlp.append(inner);
        }
        None => {
            rlp.append(&"");
        }
    }
}
/// Details of a signed transaction
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
pub struct Transaction {
    /// The transaction's hash
    pub hash: H256,
    /// The transaction's nonce
    pub nonce: U256,
    /// Block hash. None when pending.
    #[serde(rename = "blockHash")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub block_hash: Option<H256>,
    /// Block number. None when pending.
    #[serde(rename = "blockNumber")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub block_number: Option<U64>,
    /// Transaction Index. None when pending.
    #[serde(rename = "transactionIndex")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub transaction_index: Option<U64>,
    /// Sender
    pub from: Address,
    /// Recipient (None when contract creation)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub to: Option<Address>,
    /// Transferred value
    pub value: U256,
    /// Gas Price
    #[serde(rename = "gasPrice")]
    pub gas_price: U256,
    /// Gas amount
    pub gas: U256,
    /// Input data
    pub input: Bytes,
    /// ECDSA recovery id
    pub v: U64,
    /// ECDSA signature r
    pub r: U256,
    /// ECDSA signature s
    pub s: U256,
    ///////////////// Celo-specific transaction fields /////////////////
    /// The currency fees are paid in (None for native currency)
    #[cfg(feature = "celo")]
    #[cfg_attr(docsrs, doc(cfg(feature = "celo")))]
    #[serde(skip_serializing_if = "Option::is_none", rename = "feeCurrency")]
    pub fee_currency: Option<Address>,
    /// Gateway fee recipient (None for no gateway fee paid)
    #[cfg(feature = "celo")]
    #[cfg_attr(docsrs, doc(cfg(feature = "celo")))]
    #[serde(
        skip_serializing_if = "Option::is_none",
        rename = "gatewayFeeRecipient"
    )]
    pub gateway_fee_recipient: Option<Address>,
    /// Gateway fee amount (None for no gateway fee paid)
    #[cfg(feature = "celo")]
    #[cfg_attr(docsrs, doc(cfg(feature = "celo")))]
    #[serde(skip_serializing_if = "Option::is_none", rename = "gatewayFee")]
    pub gateway_fee: Option<U256>,
}
impl Transaction {
    // modifies the RLP stream with the Celo-specific information
    // This is duplicated from TransactionRequest. Is there a good way to get rid
    // of this code duplication?
    #[cfg(feature = "celo")]
    fn inject_celo_metadata(&self, rlp: &mut RlpStream) {
        rlp_opt(rlp, self.fee_currency);
        rlp_opt(rlp, self.gateway_fee_recipient);
        rlp_opt(rlp, self.gateway_fee);
    }
    /// Hash of this transaction's signed RLP encoding (keccak256).
    pub fn hash(&self) -> H256 {
        keccak256(&self.rlp().as_ref()).into()
    }
    /// Produces the typed-envelope RLP encoding of the signed transaction:
    /// a `0x01`/`0x02` type byte prefix for EIP-2930/EIP-1559, or bare RLP
    /// for legacy transactions.
    ///
    /// NOTE(review): this method reads `self.transaction_type`, `self.chain_id`,
    /// `self.max_priority_fee_per_gas`, `self.max_fee_per_gas` and
    /// `self.access_list`, none of which are declared on the `Transaction`
    /// struct above — this looks like a version mismatch between the struct
    /// and the impl; confirm against the intended struct definition.
    pub fn rlp(&self) -> Bytes {
        let mut rlp = RlpStream::new();
        rlp.begin_unbounded_list();
        match self.transaction_type {
            // EIP-2930 (0x01)
            Some(x) if x == U64::from(1) => {
                rlp_opt(&mut rlp, &self.chain_id);
                rlp.append(&self.nonce);
                rlp_opt(&mut rlp, &self.gas_price);
                rlp.append(&self.gas);
                #[cfg(feature = "celo")]
                self.inject_celo_metadata(&mut rlp);
                rlp_opt(&mut rlp, &self.to);
                rlp.append(&self.value);
                rlp.append(&self.input.as_ref());
                rlp_opt(&mut rlp, &self.access_list);
                // y-parity bit; the `*&` here is a redundant deref-of-ref
                rlp.append(&(*&self.v.as_u64() != 0));
            }
            // EIP-1559 (0x02)
            Some(x) if x == U64::from(2) => {
                rlp_opt(&mut rlp, &self.chain_id);
                rlp.append(&self.nonce);
                rlp_opt(&mut rlp, &self.max_priority_fee_per_gas);
                rlp_opt(&mut rlp, &self.max_fee_per_gas);
                rlp.append(&self.gas);
                rlp_opt(&mut rlp, &self.to);
                rlp.append(&self.value);
                rlp.append(&self.input.as_ref());
                rlp_opt(&mut rlp, &self.access_list);
                rlp.append(&(*&self.v.as_u64() != 0));
            }
            // Legacy (0x00)
            _ => {
                rlp.append(&self.nonce);
                rlp_opt(&mut rlp, &self.gas_price);
                rlp.append(&self.gas);
                #[cfg(feature = "celo")]
                self.inject_celo_metadata(&mut rlp);
                rlp_opt(&mut rlp, &self.to);
                rlp.append(&self.value);
                rlp.append(&self.input.as_ref());
                rlp.append(&self.v);
            }
        }
        rlp.append(&self.r);
        rlp.append(&self.s);
        rlp.finalize_unbounded_list();
        let rlp_bytes: Bytes = rlp.out().freeze().into();
        let mut encoded = vec![];
        // Typed transactions are prefixed with their type byte (EIP-2718).
        match self.transaction_type {
            Some(x) if x == U64::from(1) => {
                encoded.extend_from_slice(&[0x1]);
                encoded.extend_from_slice(rlp_bytes.as_ref());
                encoded.into()
            }
            Some(x) if x == U64::from(2) => {
                encoded.extend_from_slice(&[0x2]);
                encoded.extend_from_slice(rlp_bytes.as_ref());
                encoded.into()
            }
            _ => rlp_bytes,
        }
    }
}
/// "Receipt" of an executed transaction: details of its execution.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct TransactionReceipt {
    /// Transaction hash.
    #[serde(rename = "transactionHash")]
    pub transaction_hash: H256,
    /// Index within the block.
    #[serde(rename = "transactionIndex")]
    pub transaction_index: U64,
    /// Hash of the block this transaction was included within.
    #[serde(rename = "blockHash")]
    pub block_hash: Option<H256>,
    /// Number of the block this transaction was included within.
    #[serde(rename = "blockNumber")]
    pub block_number: Option<U64>,
    /// Cumulative gas used within the block after this was executed.
    #[serde(rename = "cumulativeGasUsed")]
    pub cumulative_gas_used: U256,
    /// Gas used by this transaction alone.
    ///
    /// Gas used is `None` if the client is running in light client mode.
    #[serde(rename = "gasUsed")]
    pub gas_used: Option<U256>,
    /// Contract address created, or `None` if not a deployment.
    #[serde(rename = "contractAddress")]
    pub contract_address: Option<Address>,
    /// Logs generated within this transaction.
    pub logs: Vec<Log>,
    /// Status: either 1 (success) or 0 (failure). Only present after activation of [EIP-658](https://eips.ethereum.org/EIPS/eip-658)
    pub status: Option<U64>,
    /// State root. Only present before activation of [EIP-658](https://eips.ethereum.org/EIPS/eip-658)
    pub root: Option<H256>,
    /// Logs bloom
    #[serde(rename = "logsBloom")]
    pub logs_bloom: Bloom,
}
#[cfg(test)]
#[cfg(not(feature = "celo"))]
mod tests {
    // Smoke tests: JSON-RPC-shaped payloads deserialize into the request and
    // response transaction types (unknown keys are ignored by serde default).
    use super::*;
    #[test]
    fn decode_unsigned_transaction() {
        let _res: TransactionRequest = serde_json::from_str(
            r#"{
    "gas":"0xc350",
    "gasPrice":"0x4a817c800",
    "hash":"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b",
    "input":"0x68656c6c6f21",
    "nonce":"0x15",
    "to":"0xf02c1c8e6114b1dbe8937a39260b5b0a374432bb",
    "transactionIndex":"0x41",
    "value":"0xf3dbb76162000",
    "chain_id": "0x1"
}"#,
        )
        .unwrap();
    }
    #[test]
    fn decode_transaction_response() {
        let _res: Transaction = serde_json::from_str(
            r#"{
    "blockHash":"0x1d59ff54b1eb26b013ce3cb5fc9dab3705b415a67127a003c3e61eb445bb8df2",
    "blockNumber":"0x5daf3b",
    "from":"0xa7d9ddbe1f17865597fbd27ec712455208b6b76d",
    "gas":"0xc350",
    "gasPrice":"0x4a817c800",
    "hash":"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b",
    "input":"0x68656c6c6f21",
    "nonce":"0x15",
    "to":"0xf02c1c8e6114b1dbe8937a39260b5b0a374432bb",
    "transactionIndex":"0x41",
    "value":"0xf3dbb76162000",
    "v":"0x25",
    "r":"0x1b5e176d927f8e9ab405058b2d2457392da3e20f328b16ddabcebc33eaac5fea",
    "s":"0x4ba69724e8f69de52f0125ad8b3c5c2cef33019bac3249e2c0a2192766d1721c"
}"#,
        )
        .unwrap();
        let _res: Transaction = serde_json::from_str(
            r#"{
    "hash":"0xdd79ab0f996150aa3c9f135bbb9272cf0dedb830fafcbbf0c06020503565c44f",
    "nonce":"0xe",
    "blockHash":"0xef3fe1f532c3d8783a6257619bc123e9453aa8d6614e4cdb4cc8b9e1ed861404",
    "blockNumber":"0xf",
    "transactionIndex":"0x0",
    "from":"0x1b67b03cdccfae10a2d80e52d3d026dbe2960ad0",
    "to":"0x986ee0c8b91a58e490ee59718cca41056cf55f24",
    "value":"0x2710",
    "gas":"0x5208",
    "gasPrice":"0x186a0",
    "input":"0x",
    "v":"0x25",
    "r":"0x75188beb2f601bb8cf52ef89f92a6ba2bb7edcf8e3ccde90548cc99cbea30b1e",
    "s":"0xc0559a540f16d031f3404d5df2bb258084eee56ed1193d8b534bb6affdb3c2c"
}"#,
        )
        .unwrap();
    }
}
| 33.717694 | 133 | 0.604363 |
5d1ba9b066b904336b03ab605c2db79482748981
| 16,290 |
use chrono::prelude::*;
use chrono::Duration;
use chrono_tz::Tz;
use core::ops::{Add, Sub};
use std::time;
use crate::time::chrono::{Offset, Time};
impl Add<Duration> for Time {
    type Output = Self;
    /// Adds a signed `chrono::Duration` to this time.
    ///
    /// The stored timestamp is rehydrated into a timezone-aware `DateTime`
    /// matching the stored offset variant, shifted, and converted back into
    /// a `Time` via the corresponding `From<DateTime<_>>` impl.
    fn add(self, duration: Duration) -> Self::Output {
        let Self {
            timestamp,
            sub_second_nanos,
            offset,
        } = self;
        let naive = NaiveDateTime::from_timestamp(timestamp, sub_second_nanos);
        match offset {
            Offset::Utc => {
                let aware = DateTime::<Utc>::from_utc(naive, Utc);
                (aware + duration).into()
            }
            Offset::Local => {
                // Resolve the local offset for this instant before shifting.
                let offset = Local.offset_from_utc_datetime(&naive);
                let aware = DateTime::<Local>::from_utc(naive, offset);
                (aware + duration).into()
            }
            Offset::Tz(timezone) => {
                let offset = timezone.offset_from_utc_datetime(&naive);
                let aware = DateTime::<Tz>::from_utc(naive, offset);
                (aware + duration).into()
            }
            Offset::Fixed(offset) => {
                let aware = DateTime::<FixedOffset>::from_utc(naive, offset);
                (aware + duration).into()
            }
        }
    }
}
impl Add<time::Duration> for Time {
    type Output = Self;
    /// Adds an unsigned `std::time::Duration`, converting it to a signed
    /// `chrono::Duration` first; panics if it exceeds chrono's range.
    fn add(self, duration: time::Duration) -> Self::Output {
        let chrono_duration = Duration::from_std(duration).expect("Duration too large");
        self + chrono_duration
    }
}
// The narrow-integer additions below simply widen to the i64/u64
// implementations, which perform the actual duration conversion.
impl Add<i8> for Time {
    type Output = Self;
    fn add(self, seconds: i8) -> Self::Output {
        self + i64::from(seconds)
    }
}
impl Add<u8> for Time {
    type Output = Self;
    fn add(self, seconds: u8) -> Self::Output {
        self + u64::from(seconds)
    }
}
impl Add<i16> for Time {
    type Output = Self;
    fn add(self, seconds: i16) -> Self::Output {
        self + i64::from(seconds)
    }
}
impl Add<u16> for Time {
    type Output = Self;
    fn add(self, seconds: u16) -> Self::Output {
        self + u64::from(seconds)
    }
}
impl Add<i32> for Time {
    type Output = Self;
    fn add(self, seconds: i32) -> Self::Output {
        self + i64::from(seconds)
    }
}
impl Add<u32> for Time {
    type Output = Self;
    fn add(self, seconds: u32) -> Self::Output {
        self + u64::from(seconds)
    }
}
impl Add<i64> for Time {
    type Output = Self;
    fn add(self, seconds: i64) -> Self::Output {
        // Non-negative seconds convert directly; negative seconds are negated
        // into u64 range and the resulting duration is negated back, so the
        // final addition moves the time backwards.
        let duration = if let Ok(seconds) = u64::try_from(seconds) {
            let duration = time::Duration::from_secs(seconds);
            Duration::from_std(duration).expect("Duration too large")
        } else {
            // `checked_neg` yields None only for i64::MIN, which then panics
            // via the expect below.
            let seconds = seconds
                .checked_neg()
                .and_then(|secs| u64::try_from(secs).ok())
                .expect("Duration too large");
            let duration = time::Duration::from_secs(seconds);
            let duration = Duration::from_std(duration).expect("Duration too large");
            -duration
        };
        self + duration
    }
}
impl Add<u64> for Time {
    type Output = Self;
    /// Adds whole seconds; panics if the span exceeds chrono's range.
    fn add(self, seconds: u64) -> Self::Output {
        let std_duration = time::Duration::from_secs(seconds);
        self + Duration::from_std(std_duration).expect("Duration too large")
    }
}
impl Add<f32> for Time {
    type Output = Self;
    /// Adds fractional seconds to this time.
    ///
    /// Bug fix: negative `seconds` were previously converted to a positive
    /// duration and *added*, so `t + (-5.0)` advanced `t` by 5 seconds. The
    /// negative branch now negates the duration, mirroring `Add<i64>`.
    fn add(self, seconds: f32) -> Self::Output {
        let duration = if seconds > 0.0 {
            let duration = time::Duration::from_secs_f32(seconds);
            Duration::from_std(duration).expect("Duration too large")
        } else {
            // `from_secs_f32` rejects negative input, so flip the sign first
            // and negate the resulting chrono duration afterwards.
            let seconds = -seconds;
            let duration = time::Duration::from_secs_f32(seconds);
            let duration = Duration::from_std(duration).expect("Duration too large");
            -duration
        };
        self + duration
    }
}
impl Add<f64> for Time {
    type Output = Self;
    /// Adds fractional seconds to this time.
    ///
    /// Bug fix: negative `seconds` were previously converted to a positive
    /// duration and *added*, so `t + (-5.0)` advanced `t` by 5 seconds. The
    /// negative branch now negates the duration, mirroring `Add<i64>`.
    fn add(self, seconds: f64) -> Self::Output {
        let duration = if seconds > 0.0 {
            let duration = time::Duration::from_secs_f64(seconds);
            Duration::from_std(duration).expect("Duration too large")
        } else {
            // `from_secs_f64` rejects negative input, so flip the sign first
            // and negate the resulting chrono duration afterwards.
            let seconds = -seconds;
            let duration = time::Duration::from_secs_f64(seconds);
            let duration = Duration::from_std(duration).expect("Duration too large");
            -duration
        };
        self + duration
    }
}
impl Sub<Time> for Time {
    type Output = Self;

    /// Shifts `self` back by the span that `other` converts into.
    fn sub(self, other: Time) -> Self::Output {
        self - Duration::from(other)
    }
}
impl Sub<Duration> for Time {
    type Output = Self;

    /// Shifts the time backwards by `duration`.
    ///
    /// The stored fields are rebuilt into a timezone-aware chrono
    /// datetime, the subtraction is performed in aware time, and the
    /// result is converted back into `Time` via the matching `From` impl.
    fn sub(self, duration: Duration) -> Self::Output {
        let Self {
            timestamp,
            sub_second_nanos,
            offset,
        } = self;
        // Rebuild the timezone-naive instant from the stored fields.
        let naive = NaiveDateTime::from_timestamp(timestamp, sub_second_nanos);
        match offset {
            // UTC needs no offset lookup.
            Offset::Utc => {
                let aware = DateTime::<Utc>::from_utc(naive, Utc);
                (aware - duration).into()
            }
            // NOTE(review): for Local and Tz, the offset is resolved for
            // the *original* instant, not the shifted one — results may
            // differ across a DST boundary; confirm this is intended.
            Offset::Local => {
                let offset = Local.offset_from_utc_datetime(&naive);
                let aware = DateTime::<Local>::from_utc(naive, offset);
                (aware - duration).into()
            }
            Offset::Tz(timezone) => {
                let offset = timezone.offset_from_utc_datetime(&naive);
                let aware = DateTime::<Tz>::from_utc(naive, offset);
                (aware - duration).into()
            }
            // A fixed offset is applied verbatim.
            Offset::Fixed(offset) => {
                let aware = DateTime::<FixedOffset>::from_utc(naive, offset);
                (aware - duration).into()
            }
        }
    }
}
impl Sub<time::Duration> for Time {
    type Output = Self;

    /// Subtracts a `std::time::Duration`, converting it to a chrono
    /// `Duration` first.
    fn sub(self, duration: time::Duration) -> Self::Output {
        self - Duration::from_std(duration).expect("Duration too large")
    }
}
impl Sub<i8> for Time {
    type Output = Self;

    /// Subtracts `seconds`, widening to `i64` so negative counts are
    /// handled by the signed implementation.
    fn sub(self, seconds: i8) -> Self::Output {
        let widened = i64::from(seconds);
        self - widened
    }
}
impl Sub<u8> for Time {
    type Output = Self;

    /// Subtracts `seconds`, widening to `u64` and delegating.
    fn sub(self, seconds: u8) -> Self::Output {
        let widened = u64::from(seconds);
        self - widened
    }
}
impl Sub<i16> for Time {
    type Output = Self;

    /// Subtracts `seconds`, widening to `i64` so negative counts are
    /// handled by the signed implementation.
    fn sub(self, seconds: i16) -> Self::Output {
        let widened = i64::from(seconds);
        self - widened
    }
}
impl Sub<u16> for Time {
    type Output = Self;

    /// Subtracts `seconds`, widening to `u64` and delegating.
    fn sub(self, seconds: u16) -> Self::Output {
        let widened = u64::from(seconds);
        self - widened
    }
}
impl Sub<i32> for Time {
    type Output = Self;

    /// Subtracts `seconds`, widening to `i64` so negative counts are
    /// handled by the signed implementation.
    fn sub(self, seconds: i32) -> Self::Output {
        let widened = i64::from(seconds);
        self - widened
    }
}
impl Sub<u32> for Time {
    type Output = Self;

    /// Subtracts `seconds`, widening to `u64` and delegating.
    fn sub(self, seconds: u32) -> Self::Output {
        let widened = u64::from(seconds);
        self - widened
    }
}
impl Sub<i64> for Time {
    type Output = Self;

    /// Subtracts a signed number of seconds; negative counts shift the
    /// time forwards.
    ///
    /// # Panics
    ///
    /// Panics ("Duration too large") for `i64::MIN`, whose magnitude
    /// cannot be negated, or if the span cannot be represented.
    fn sub(self, seconds: i64) -> Self::Output {
        let duration = match u64::try_from(seconds) {
            Ok(secs) => Duration::from_std(time::Duration::from_secs(secs))
                .expect("Duration too large"),
            Err(_) => {
                // Negative: convert the magnitude, then negate the span.
                let magnitude = seconds
                    .checked_neg()
                    .and_then(|secs| u64::try_from(secs).ok())
                    .expect("Duration too large");
                -Duration::from_std(time::Duration::from_secs(magnitude))
                    .expect("Duration too large")
            }
        };
        self - duration
    }
}
impl Sub<u64> for Time {
    type Output = Self;

    /// Subtracts an unsigned number of whole seconds.
    fn sub(self, seconds: u64) -> Self::Output {
        let std_duration = time::Duration::from_secs(seconds);
        self - Duration::from_std(std_duration).expect("Duration too large")
    }
}
impl Sub<f32> for Time {
    type Output = Self;

    /// Subtracts fractional `seconds` from the time.
    ///
    /// Negative values shift the time forwards, mirroring `Sub<i64>`.
    /// `std::time::Duration` cannot represent a negative span, so the
    /// magnitude is converted first and the chrono `Duration` negated.
    ///
    /// # Panics
    ///
    /// Panics if `seconds` is NaN or its magnitude is too large to
    /// represent as a `Duration`.
    fn sub(self, seconds: f32) -> Self::Output {
        let duration = if seconds > 0.0 {
            let duration = time::Duration::from_secs_f32(seconds);
            Duration::from_std(duration).expect("Duration too large")
        } else {
            // BUG FIX: the sign was previously dropped, so `t - (-x)`
            // behaved like `t - x`. Negate the converted duration so
            // negative seconds move the time forwards, as `Sub<i64>` does.
            let seconds = -seconds;
            let duration = time::Duration::from_secs_f32(seconds);
            -Duration::from_std(duration).expect("Duration too large")
        };
        self - duration
    }
}
impl Sub<f64> for Time {
    type Output = Self;

    /// Subtracts fractional `seconds` from the time.
    ///
    /// Negative values shift the time forwards, mirroring `Sub<i64>`.
    /// `std::time::Duration` cannot represent a negative span, so the
    /// magnitude is converted first and the chrono `Duration` negated.
    ///
    /// # Panics
    ///
    /// Panics if `seconds` is NaN or its magnitude is too large to
    /// represent as a `Duration`.
    fn sub(self, seconds: f64) -> Self::Output {
        let duration = if seconds > 0.0 {
            let duration = time::Duration::from_secs_f64(seconds);
            Duration::from_std(duration).expect("Duration too large")
        } else {
            // BUG FIX: the sign was previously dropped, so `t - (-x)`
            // behaved like `t - x`. Negate the converted duration so
            // negative seconds move the time forwards, as `Sub<i64>` does.
            let seconds = -seconds;
            let duration = time::Duration::from_secs_f64(seconds);
            -Duration::from_std(duration).expect("Duration too large")
        };
        self - duration
    }
}
#[cfg(test)]
mod tests {
    use super::Time;
    use chrono::prelude::*;

    /// Fixed reference instant: 2019-04-07 23:59:59.5 UTC — half a
    /// second before a day rollover, so small additions exercise carry
    /// through seconds, minutes, hours and days.
    fn datetime() -> DateTime<Utc> {
        // halfway through a second
        let time = NaiveTime::from_hms_nano(23, 59, 59, 500_000_000);
        let date = NaiveDate::from_ymd(2019, 4, 7);
        let datetime = NaiveDateTime::new(date, time);
        DateTime::<Utc>::from_utc(datetime, Utc)
    }

    // Adding one whole second rolls over day/hour/minute and preserves
    // the fractional half second.
    #[test]
    fn add_int_to_time() {
        let dt = Time::from(datetime());
        let succ: Time = dt + 1;
        assert_eq!(dt.timestamp + 1, succ.timestamp);
        assert_eq!(dt.to_int() + 1, succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_ne!(dt.day(), succ.day());
        assert_ne!(dt.hour(), succ.hour());
        assert_ne!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 0);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 500_000_000 {
            assert!(succ.nanosecond() - 500_000_000 < 50);
        } else {
            assert!(500_000_000 - succ.nanosecond() < 50);
        }
    }

    // Sub-second additions: +0.2 stays within the same second (.5 → .7);
    // +0.7 crosses the second boundary (.5 → .2 of the next second).
    #[test]
    fn add_subsec_float_to_time() {
        let dt = Time::from(datetime());
        let succ: Time = dt + 0.2;
        assert_eq!(dt.timestamp, succ.timestamp);
        assert_eq!(dt.to_int(), succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_eq!(dt.day(), succ.day());
        assert_eq!(dt.hour(), succ.hour());
        assert_eq!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 59);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 700_000_000 {
            assert!(succ.nanosecond() - 700_000_000 < 50);
        } else {
            assert!(700_000_000 - succ.nanosecond() < 50);
        }
        let dt = Time::from(datetime());
        let succ: Time = dt + 0.7;
        assert_eq!(dt.timestamp + 1, succ.timestamp);
        assert_eq!(dt.to_int() + 1, succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_ne!(dt.day(), succ.day());
        assert_ne!(dt.hour(), succ.hour());
        assert_ne!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 0);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 200_000_000 {
            assert!(succ.nanosecond() - 200_000_000 < 50);
        } else {
            assert!(200_000_000 - succ.nanosecond() < 50);
        }
    }

    // Mixed whole + fractional additions: +1.2 and +1.7 carry into the
    // following day with the expected fractional remainder.
    #[test]
    fn add_float_to_time() {
        let dt = Time::from(datetime());
        let succ: Time = dt + 1.2;
        assert_eq!(dt.timestamp + 1, succ.timestamp);
        assert_eq!(dt.to_int() + 1, succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_ne!(dt.day(), succ.day());
        assert_ne!(dt.hour(), succ.hour());
        assert_ne!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 0);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 700_000_000 {
            assert!(succ.nanosecond() - 700_000_000 < 50);
        } else {
            assert!(700_000_000 - succ.nanosecond() < 50);
        }
        let dt = Time::from(datetime());
        let succ: Time = dt + 1.7;
        assert_eq!(dt.timestamp + 2, succ.timestamp);
        assert_eq!(dt.to_int() + 2, succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_ne!(dt.day(), succ.day());
        assert_ne!(dt.hour(), succ.hour());
        assert_ne!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 1);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 200_000_000 {
            assert!(succ.nanosecond() - 200_000_000 < 50);
        } else {
            assert!(200_000_000 - succ.nanosecond() < 50);
        }
    }

    // Subtracting one whole second stays within the same day and keeps
    // the fractional half second.
    #[test]
    fn sub_int_to_time() {
        let dt = Time::from(datetime());
        let succ: Time = dt - 1;
        assert_eq!(dt.timestamp - 1, succ.timestamp);
        assert_eq!(dt.to_int() - 1, succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_eq!(dt.day(), succ.day());
        assert_eq!(dt.hour(), succ.hour());
        assert_eq!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 58);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 500_000_000 {
            assert!(succ.nanosecond() - 500_000_000 < 50);
        } else {
            assert!(500_000_000 - succ.nanosecond() < 50);
        }
    }

    // Sub-second subtractions: -0.2 stays in the same second (.5 → .3);
    // -0.7 borrows from the previous second (.5 → .8 of second 58).
    #[test]
    fn sub_subsec_float_to_time() {
        let dt = Time::from(datetime());
        let succ: Time = dt - 0.2;
        assert_eq!(dt.timestamp, succ.timestamp);
        assert_eq!(dt.to_int(), succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_eq!(dt.day(), succ.day());
        assert_eq!(dt.hour(), succ.hour());
        assert_eq!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 59);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 300_000_000 {
            assert!(succ.nanosecond() - 300_000_000 < 50);
        } else {
            assert!(300_000_000 - succ.nanosecond() < 50);
        }
        let dt = Time::from(datetime());
        let succ: Time = dt - 0.7;
        assert_eq!(dt.timestamp - 1, succ.timestamp);
        assert_eq!(dt.to_int() - 1, succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_eq!(dt.day(), succ.day());
        assert_eq!(dt.hour(), succ.hour());
        assert_eq!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 58);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 800_000_000 {
            assert!(succ.nanosecond() - 800_000_000 < 50);
        } else {
            assert!(800_000_000 - succ.nanosecond() < 50);
        }
    }

    // Mixed whole + fractional subtractions: -1.2 and -1.7 borrow across
    // one and two whole seconds respectively.
    #[test]
    fn sub_float_to_time() {
        let dt = Time::from(datetime());
        let succ: Time = dt - 1.2;
        assert_eq!(dt.timestamp - 1, succ.timestamp);
        assert_eq!(dt.to_int() - 1, succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_eq!(dt.day(), succ.day());
        assert_eq!(dt.hour(), succ.hour());
        assert_eq!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 58);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 300_000_000 {
            assert!(succ.nanosecond() - 300_000_000 < 50);
        } else {
            assert!(300_000_000 - succ.nanosecond() < 50);
        }
        let dt = Time::from(datetime());
        let succ: Time = dt - 1.7;
        assert_eq!(dt.timestamp - 2, succ.timestamp);
        assert_eq!(dt.to_int() - 2, succ.to_int());
        assert_eq!(dt.year(), succ.year());
        assert_eq!(dt.month(), succ.month());
        assert_eq!(dt.day(), succ.day());
        assert_eq!(dt.hour(), succ.hour());
        assert_eq!(dt.minute(), succ.minute());
        assert_eq!(succ.second(), 57);
        // handle in-exactitude of float arithmetic
        if succ.nanosecond() > 800_000_000 {
            assert!(succ.nanosecond() - 800_000_000 < 50);
        } else {
            assert!(800_000_000 - succ.nanosecond() < 50);
        }
    }
}
| 31.147228 | 85 | 0.544813 |
1150ad688b618aefe5f95744f5f20fa980ce099b
| 267 |
use juniper::ID;
use crate::models::ArtistUrl;
// GraphQL resolvers for `ArtistUrl`. Plain `//` comments are used on
// purpose: doc comments inside `#[juniper::object]` would become field
// descriptions in the exposed schema.
#[juniper::object]
impl ArtistUrl {
    fn id(&self) -> ID {
        // `to_string` uses the same `Display` impl `format!` would.
        ID::from(self.id.to_string())
    }

    fn url(&self) -> &str {
        &self.url
    }

    fn name(&self) -> &str {
        &self.name
    }
}
| 14.052632 | 40 | 0.483146 |
f477a05eac134e02c2e30923076e39c8c1258309
| 3,709 |
// SPDX-License-Identifier: MIT
// Copyright (C) 2018-present iced project and contributors
use crate::block_enc::instr::*;
use crate::block_enc::*;
use crate::iced_error::IcedError;
use core::cell::RefCell;
use core::cmp;
/// Block-encoder state for re-encoding a near `call` instruction.
pub(super) struct CallInstr {
    /// Target bitness of the encoder (taken from `block_encoder.bitness()`).
    bitness: u8,
    /// Copy of the original `call` instruction.
    instruction: Instruction,
    /// Resolved branch target, filled in by the block encoder.
    target_instr: TargetInstr,
    /// Encoded size of the unmodified instruction, in bytes.
    orig_instruction_size: u8,
    /// Pointer-data slot allocated when the target may be out of rel32
    /// range and the call must go through memory; `None` until needed.
    pointer_data: Option<Rc<RefCell<BlockData>>>,
    /// `true` once it is decided the original encoding can be kept.
    use_orig_instruction: bool,
}
impl CallInstr {
    /// Builds a `CallInstr`, pre-computing the size of the original
    /// encoding and recording whether branch fixing is enabled.
    pub(super) fn new(block_encoder: &mut BlockEncInt, base: &mut InstrBase, instruction: &Instruction) -> Self {
        // Measure size with the branch target zeroed so the target value
        // itself cannot influence the encoded length.
        let mut instr_copy = *instruction;
        instr_copy.set_near_branch64(0);
        let orig_instruction_size = block_encoder.get_instruction_size(&instr_copy, 0) as u8;
        let mut use_orig_instruction = false;
        base.size = if !block_encoder.fix_branches() {
            // Branch fixing disabled: always emit the original encoding.
            use_orig_instruction = true;
            orig_instruction_size
        } else if block_encoder.bitness() == 64 {
            // Make sure it's not shorter than the real instruction. It can happen if there are extra prefixes.
            cmp::max(orig_instruction_size, InstrUtils::CALL_OR_JMP_POINTER_DATA_INSTRUCTION_SIZE64 as u8)
        } else {
            orig_instruction_size
        } as u32;
        Self {
            bitness: block_encoder.bitness() as u8,
            instruction: *instruction,
            target_instr: TargetInstr::default(),
            orig_instruction_size,
            pointer_data: None,
            use_orig_instruction,
        }
    }

    /// Attempts to shrink back to the original near-call encoding once
    /// the target is known to be reachable. Returns `true` if the size
    /// estimate changed.
    fn try_optimize<'a>(&mut self, base: &mut InstrBase, ctx: &mut InstrContext<'a>, gained: u64) -> bool {
        if base.done || self.use_orig_instruction {
            base.done = true;
            return false;
        }

        // If it's in the same block, we assume the target is at most 2GB away.
        let mut use_short = self.bitness != 64 || self.target_instr.is_in_block(ctx.block);
        if !use_short {
            // Out-of-block 64-bit target: check whether a rel32
            // displacement still reaches, crediting bytes gained by
            // earlier optimizations.
            let target_address = self.target_instr.address(ctx);
            let next_rip = ctx.ip.wrapping_add(self.orig_instruction_size as u64);
            let diff = target_address.wrapping_sub(next_rip) as i64;
            let diff = correct_diff(self.target_instr.is_in_block(ctx.block), diff, gained);
            use_short = i32::MIN as i64 <= diff && diff <= i32::MAX as i64;
        }

        if use_short {
            // Any previously allocated pointer slot is no longer needed.
            if let Some(ref pointer_data) = self.pointer_data {
                pointer_data.borrow_mut().is_valid = false;
            }
            base.size = self.orig_instruction_size as u32;
            self.use_orig_instruction = true;
            base.done = true;
            return true;
        }

        // Still unreachable with rel32: reserve the pointer-data slot
        // for an indirect call (allocated at most once).
        if self.pointer_data.is_none() {
            self.pointer_data = Some(ctx.block.alloc_pointer_location());
        }
        false
    }
}
impl Instr for CallInstr {
    /// Returns the target-instruction slot and the original branch target
    /// address so the block encoder can resolve it.
    fn get_target_instr(&mut self) -> (&mut TargetInstr, u64) {
        (&mut self.target_instr, self.instruction.near_branch_target())
    }

    fn optimize<'a>(&mut self, base: &mut InstrBase, ctx: &mut InstrContext<'a>, gained: u64) -> bool {
        self.try_optimize(base, ctx, gained)
    }

    /// Emits either the original near call (with the resolved target
    /// patched in) or a call through the pointer-data slot. The returned
    /// bool is `true` when the constant offsets come from encoding the
    /// real instruction.
    fn encode<'a>(&mut self, base: &mut InstrBase, ctx: &mut InstrContext<'a>) -> Result<(ConstantOffsets, bool), IcedError> {
        if self.use_orig_instruction {
            self.instruction.set_near_branch64(self.target_instr.address(ctx));
            ctx.block.encoder.encode(&self.instruction, ctx.ip).map_or_else(
                |err| Err(IcedError::with_string(InstrUtils::create_error_message(&err, &self.instruction))),
                |_| Ok((ctx.block.encoder.get_constant_offsets(), true)),
            )
        } else {
            debug_assert!(self.pointer_data.is_some());
            let pointer_data = self.pointer_data.clone().ok_or_else(|| IcedError::new("Internal error"))?;
            // The slot stores the absolute target address the indirect
            // call reads at run time.
            pointer_data.borrow_mut().data = self.target_instr.address(ctx);
            InstrUtils::encode_branch_to_pointer_data(ctx.block, true, ctx.ip, pointer_data, base.size).map_or_else(
                |err| Err(IcedError::with_string(InstrUtils::create_error_message(&err, &self.instruction))),
                |_| Ok((ConstantOffsets::default(), false)),
            )
        }
    }
}
| 35.663462 | 123 | 0.721758 |
29b4dd76498912482eea77959373f13e70b8e9ad
| 23,808 |
// C scalar type aliases for this BSD-like target ABI.
pub type time_t = i64;
pub type mode_t = u32;
pub type nlink_t = ::uint32_t;
pub type ino_t = ::uint64_t;
pub type pthread_key_t = ::c_int;
pub type rlim_t = u64;
pub type speed_t = ::c_uint;
pub type tcflag_t = ::c_uint;
pub type nl_item = c_long;
pub type clockid_t = ::c_int;
pub type id_t = ::uint32_t;
// `sem_t` is an opaque pointer; see the `sem` stub type below.
pub type sem_t = *mut sem;
// Opaque C types: uninhabited enums stand in for types whose layout is
// never inspected from Rust; only pointers to them cross the FFI
// boundary. `Copy`/`Clone` are implemented manually so the impls exist
// without requiring derives on an empty enum.
#[cfg_attr(feature = "extra_traits", derive(Debug))]
pub enum timezone {}
impl ::Copy for timezone {}
impl ::Clone for timezone {
    // Unreachable in practice: the type has no values.
    fn clone(&self) -> timezone { *self }
}
#[cfg_attr(feature = "extra_traits", derive(Debug))]
pub enum sem {}
impl ::Copy for sem {}
impl ::Clone for sem {
    // Unreachable in practice: the type has no values.
    fn clone(&self) -> sem { *self }
}
// FFI struct layouts. NOTE(review): `s!` is presumably the crate-local
// macro that attaches the common repr/derive attributes — confirm
// against its definition elsewhere in the crate.
s! {
    // Signal handler registration (sigaction(2)).
    pub struct sigaction {
        pub sa_sigaction: ::sighandler_t,
        pub sa_mask: ::sigset_t,
        pub sa_flags: ::c_int,
    }

    // Alternate signal stack descriptor (sigaltstack(2)).
    pub struct stack_t {
        pub ss_sp: *mut ::c_void,
        pub ss_size: ::size_t,
        pub ss_flags: ::c_int,
    }

    // IPv6 packet info ancillary data (IPV6_PKTINFO).
    pub struct in6_pktinfo {
        pub ipi6_addr: ::in6_addr,
        pub ipi6_ifindex: ::c_uint,
    }

    // Terminal settings (termios(4)).
    pub struct termios {
        pub c_iflag: ::tcflag_t,
        pub c_oflag: ::tcflag_t,
        pub c_cflag: ::tcflag_t,
        pub c_lflag: ::tcflag_t,
        pub c_cc: [::cc_t; ::NCCS],
        pub c_ispeed: ::c_int,
        pub c_ospeed: ::c_int,
    }

    // Advisory file lock description (fcntl F_GETLK/F_SETLK).
    pub struct flock {
        pub l_start: ::off_t,
        pub l_len: ::off_t,
        pub l_pid: ::pid_t,
        pub l_type: ::c_short,
        pub l_whence: ::c_short,
    }
}
pub const D_T_FMT: ::nl_item = 0;
pub const D_FMT: ::nl_item = 1;
pub const T_FMT: ::nl_item = 2;
pub const T_FMT_AMPM: ::nl_item = 3;
pub const AM_STR: ::nl_item = 4;
pub const PM_STR: ::nl_item = 5;
pub const DAY_1: ::nl_item = 6;
pub const DAY_2: ::nl_item = 7;
pub const DAY_3: ::nl_item = 8;
pub const DAY_4: ::nl_item = 9;
pub const DAY_5: ::nl_item = 10;
pub const DAY_6: ::nl_item = 11;
pub const DAY_7: ::nl_item = 12;
pub const ABDAY_1: ::nl_item = 13;
pub const ABDAY_2: ::nl_item = 14;
pub const ABDAY_3: ::nl_item = 15;
pub const ABDAY_4: ::nl_item = 16;
pub const ABDAY_5: ::nl_item = 17;
pub const ABDAY_6: ::nl_item = 18;
pub const ABDAY_7: ::nl_item = 19;
pub const MON_1: ::nl_item = 20;
pub const MON_2: ::nl_item = 21;
pub const MON_3: ::nl_item = 22;
pub const MON_4: ::nl_item = 23;
pub const MON_5: ::nl_item = 24;
pub const MON_6: ::nl_item = 25;
pub const MON_7: ::nl_item = 26;
pub const MON_8: ::nl_item = 27;
pub const MON_9: ::nl_item = 28;
pub const MON_10: ::nl_item = 29;
pub const MON_11: ::nl_item = 30;
pub const MON_12: ::nl_item = 31;
pub const ABMON_1: ::nl_item = 32;
pub const ABMON_2: ::nl_item = 33;
pub const ABMON_3: ::nl_item = 34;
pub const ABMON_4: ::nl_item = 35;
pub const ABMON_5: ::nl_item = 36;
pub const ABMON_6: ::nl_item = 37;
pub const ABMON_7: ::nl_item = 38;
pub const ABMON_8: ::nl_item = 39;
pub const ABMON_9: ::nl_item = 40;
pub const ABMON_10: ::nl_item = 41;
pub const ABMON_11: ::nl_item = 42;
pub const ABMON_12: ::nl_item = 43;
pub const RADIXCHAR: ::nl_item = 44;
pub const THOUSEP: ::nl_item = 45;
pub const YESSTR: ::nl_item = 46;
pub const YESEXPR: ::nl_item = 47;
pub const NOSTR: ::nl_item = 48;
pub const NOEXPR: ::nl_item = 49;
pub const CRNCYSTR: ::nl_item = 50;
pub const CODESET: ::nl_item = 51;
pub const EXIT_FAILURE : ::c_int = 1;
pub const EXIT_SUCCESS : ::c_int = 0;
pub const RAND_MAX : ::c_int = 2147483647;
pub const EOF : ::c_int = -1;
pub const SEEK_SET : ::c_int = 0;
pub const SEEK_CUR : ::c_int = 1;
pub const SEEK_END : ::c_int = 2;
pub const _IOFBF : ::c_int = 0;
pub const _IONBF : ::c_int = 2;
pub const _IOLBF : ::c_int = 1;
pub const BUFSIZ : ::c_uint = 1024;
pub const FOPEN_MAX : ::c_uint = 20;
pub const FILENAME_MAX : ::c_uint = 1024;
pub const L_tmpnam : ::c_uint = 1024;
pub const O_NOCTTY : ::c_int = 32768;
pub const S_IFIFO : mode_t = 4096;
pub const S_IFCHR : mode_t = 8192;
pub const S_IFBLK : mode_t = 24576;
pub const S_IFDIR : mode_t = 16384;
pub const S_IFREG : mode_t = 32768;
pub const S_IFLNK : mode_t = 40960;
pub const S_IFSOCK : mode_t = 49152;
pub const S_IFMT : mode_t = 61440;
pub const S_IEXEC : mode_t = 64;
pub const S_IWRITE : mode_t = 128;
pub const S_IREAD : mode_t = 256;
pub const S_IRWXU : mode_t = 448;
pub const S_IXUSR : mode_t = 64;
pub const S_IWUSR : mode_t = 128;
pub const S_IRUSR : mode_t = 256;
pub const S_IRWXG : mode_t = 56;
pub const S_IXGRP : mode_t = 8;
pub const S_IWGRP : mode_t = 16;
pub const S_IRGRP : mode_t = 32;
pub const S_IRWXO : mode_t = 7;
pub const S_IXOTH : mode_t = 1;
pub const S_IWOTH : mode_t = 2;
pub const S_IROTH : mode_t = 4;
pub const F_OK : ::c_int = 0;
pub const R_OK : ::c_int = 4;
pub const W_OK : ::c_int = 2;
pub const X_OK : ::c_int = 1;
pub const STDIN_FILENO : ::c_int = 0;
pub const STDOUT_FILENO : ::c_int = 1;
pub const STDERR_FILENO : ::c_int = 2;
pub const F_LOCK : ::c_int = 1;
pub const F_TEST : ::c_int = 3;
pub const F_TLOCK : ::c_int = 2;
pub const F_ULOCK : ::c_int = 0;
pub const F_GETLK: ::c_int = 7;
pub const F_SETLK: ::c_int = 8;
pub const F_SETLKW: ::c_int = 9;
pub const SIGHUP : ::c_int = 1;
pub const SIGINT : ::c_int = 2;
pub const SIGQUIT : ::c_int = 3;
pub const SIGILL : ::c_int = 4;
pub const SIGABRT : ::c_int = 6;
pub const SIGEMT: ::c_int = 7;
pub const SIGFPE : ::c_int = 8;
pub const SIGKILL : ::c_int = 9;
pub const SIGSEGV : ::c_int = 11;
pub const SIGPIPE : ::c_int = 13;
pub const SIGALRM : ::c_int = 14;
pub const SIGTERM : ::c_int = 15;
pub const PROT_NONE : ::c_int = 0;
pub const PROT_READ : ::c_int = 1;
pub const PROT_WRITE : ::c_int = 2;
pub const PROT_EXEC : ::c_int = 4;
pub const MAP_FILE : ::c_int = 0x0000;
pub const MAP_SHARED : ::c_int = 0x0001;
pub const MAP_PRIVATE : ::c_int = 0x0002;
pub const MAP_FIXED : ::c_int = 0x0010;
pub const MAP_ANON : ::c_int = 0x1000;
pub const MAP_FAILED : *mut ::c_void = !0 as *mut ::c_void;
pub const MCL_CURRENT : ::c_int = 0x0001;
pub const MCL_FUTURE : ::c_int = 0x0002;
pub const MS_ASYNC : ::c_int = 0x0001;
pub const EPERM : ::c_int = 1;
pub const ENOENT : ::c_int = 2;
pub const ESRCH : ::c_int = 3;
pub const EINTR : ::c_int = 4;
pub const EIO : ::c_int = 5;
pub const ENXIO : ::c_int = 6;
pub const E2BIG : ::c_int = 7;
pub const ENOEXEC : ::c_int = 8;
pub const EBADF : ::c_int = 9;
pub const ECHILD : ::c_int = 10;
pub const EDEADLK : ::c_int = 11;
pub const ENOMEM : ::c_int = 12;
pub const EACCES : ::c_int = 13;
pub const EFAULT : ::c_int = 14;
pub const ENOTBLK : ::c_int = 15;
pub const EBUSY : ::c_int = 16;
pub const EEXIST : ::c_int = 17;
pub const EXDEV : ::c_int = 18;
pub const ENODEV : ::c_int = 19;
pub const ENOTDIR : ::c_int = 20;
pub const EISDIR : ::c_int = 21;
pub const EINVAL : ::c_int = 22;
pub const ENFILE : ::c_int = 23;
pub const EMFILE : ::c_int = 24;
pub const ENOTTY : ::c_int = 25;
pub const ETXTBSY : ::c_int = 26;
pub const EFBIG : ::c_int = 27;
pub const ENOSPC : ::c_int = 28;
pub const ESPIPE : ::c_int = 29;
pub const EROFS : ::c_int = 30;
pub const EMLINK : ::c_int = 31;
pub const EPIPE : ::c_int = 32;
pub const EDOM : ::c_int = 33;
pub const ERANGE : ::c_int = 34;
pub const EAGAIN : ::c_int = 35;
pub const EWOULDBLOCK : ::c_int = 35;
pub const EINPROGRESS : ::c_int = 36;
pub const EALREADY : ::c_int = 37;
pub const ENOTSOCK : ::c_int = 38;
pub const EDESTADDRREQ : ::c_int = 39;
pub const EMSGSIZE : ::c_int = 40;
pub const EPROTOTYPE : ::c_int = 41;
pub const ENOPROTOOPT : ::c_int = 42;
pub const EPROTONOSUPPORT : ::c_int = 43;
pub const ESOCKTNOSUPPORT : ::c_int = 44;
pub const EOPNOTSUPP : ::c_int = 45;
pub const EPFNOSUPPORT : ::c_int = 46;
pub const EAFNOSUPPORT : ::c_int = 47;
pub const EADDRINUSE : ::c_int = 48;
pub const EADDRNOTAVAIL : ::c_int = 49;
pub const ENETDOWN : ::c_int = 50;
pub const ENETUNREACH : ::c_int = 51;
pub const ENETRESET : ::c_int = 52;
pub const ECONNABORTED : ::c_int = 53;
pub const ECONNRESET : ::c_int = 54;
pub const ENOBUFS : ::c_int = 55;
pub const EISCONN : ::c_int = 56;
pub const ENOTCONN : ::c_int = 57;
pub const ESHUTDOWN : ::c_int = 58;
pub const ETOOMANYREFS : ::c_int = 59;
pub const ETIMEDOUT : ::c_int = 60;
pub const ECONNREFUSED : ::c_int = 61;
pub const ELOOP : ::c_int = 62;
pub const ENAMETOOLONG : ::c_int = 63;
pub const EHOSTDOWN : ::c_int = 64;
pub const EHOSTUNREACH : ::c_int = 65;
pub const ENOTEMPTY : ::c_int = 66;
pub const EPROCLIM : ::c_int = 67;
pub const EUSERS : ::c_int = 68;
pub const EDQUOT : ::c_int = 69;
pub const ESTALE : ::c_int = 70;
pub const EREMOTE : ::c_int = 71;
pub const EBADRPC : ::c_int = 72;
pub const ERPCMISMATCH : ::c_int = 73;
pub const EPROGUNAVAIL : ::c_int = 74;
pub const EPROGMISMATCH : ::c_int = 75;
pub const EPROCUNAVAIL : ::c_int = 76;
pub const ENOLCK : ::c_int = 77;
pub const ENOSYS : ::c_int = 78;
pub const EFTYPE : ::c_int = 79;
pub const EAUTH : ::c_int = 80;
pub const ENEEDAUTH : ::c_int = 81;
pub const F_DUPFD : ::c_int = 0;
pub const F_GETFD : ::c_int = 1;
pub const F_SETFD : ::c_int = 2;
pub const F_GETFL : ::c_int = 3;
pub const F_SETFL : ::c_int = 4;
pub const SIGTRAP : ::c_int = 5;
pub const GLOB_APPEND : ::c_int = 0x0001;
pub const GLOB_DOOFFS : ::c_int = 0x0002;
pub const GLOB_ERR : ::c_int = 0x0004;
pub const GLOB_MARK : ::c_int = 0x0008;
pub const GLOB_NOCHECK : ::c_int = 0x0010;
pub const GLOB_NOSORT : ::c_int = 0x0020;
pub const GLOB_NOESCAPE : ::c_int = 0x1000;
pub const GLOB_NOSPACE : ::c_int = -1;
pub const GLOB_ABORTED : ::c_int = -2;
pub const GLOB_NOMATCH : ::c_int = -3;
pub const GLOB_NOSYS : ::c_int = -4;
pub const POSIX_MADV_NORMAL : ::c_int = 0;
pub const POSIX_MADV_RANDOM : ::c_int = 1;
pub const POSIX_MADV_SEQUENTIAL : ::c_int = 2;
pub const POSIX_MADV_WILLNEED : ::c_int = 3;
pub const POSIX_MADV_DONTNEED : ::c_int = 4;
pub const PTHREAD_CREATE_JOINABLE : ::c_int = 0;
pub const PTHREAD_CREATE_DETACHED : ::c_int = 1;
pub const PT_TRACE_ME: ::c_int = 0;
pub const PT_READ_I: ::c_int = 1;
pub const PT_READ_D: ::c_int = 2;
pub const PT_WRITE_I: ::c_int = 4;
pub const PT_WRITE_D: ::c_int = 5;
pub const PT_CONTINUE: ::c_int = 7;
pub const PT_KILL: ::c_int = 8;
pub const PT_ATTACH: ::c_int = 9;
pub const PT_DETACH: ::c_int = 10;
pub const PT_IO: ::c_int = 11;
// http://man.openbsd.org/OpenBSD-current/man2/clock_getres.2
// The man page says clock_gettime(3) can accept various values as clockid_t but
// http://fxr.watson.org/fxr/source/kern/kern_time.c?v=OPENBSD;im=excerpts#L161
// the implementation rejects anything other than the below two
//
// http://netbsd.gw.com/cgi-bin/man-cgi?clock_gettime
// https://github.com/jsonn/src/blob/HEAD/sys/kern/subr_time.c#L222
// Basically the same goes for NetBSD
pub const CLOCK_REALTIME: ::clockid_t = 0;
pub const CLOCK_MONOTONIC: ::clockid_t = 3;
pub const RLIMIT_CPU: ::c_int = 0;
pub const RLIMIT_FSIZE: ::c_int = 1;
pub const RLIMIT_DATA: ::c_int = 2;
pub const RLIMIT_STACK: ::c_int = 3;
pub const RLIMIT_CORE: ::c_int = 4;
pub const RLIMIT_RSS: ::c_int = 5;
pub const RLIMIT_MEMLOCK: ::c_int = 6;
pub const RLIMIT_NPROC: ::c_int = 7;
pub const RLIMIT_NOFILE: ::c_int = 8;
pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff;
pub const RLIM_SAVED_MAX: rlim_t = RLIM_INFINITY;
pub const RLIM_SAVED_CUR: rlim_t = RLIM_INFINITY;
pub const RUSAGE_SELF: ::c_int = 0;
pub const RUSAGE_CHILDREN: ::c_int = -1;
pub const MADV_NORMAL : ::c_int = 0;
pub const MADV_RANDOM : ::c_int = 1;
pub const MADV_SEQUENTIAL : ::c_int = 2;
pub const MADV_WILLNEED : ::c_int = 3;
pub const MADV_DONTNEED : ::c_int = 4;
pub const MADV_FREE : ::c_int = 6;
pub const AF_UNSPEC: ::c_int = 0;
pub const AF_LOCAL: ::c_int = 1;
pub const AF_UNIX: ::c_int = AF_LOCAL;
pub const AF_INET: ::c_int = 2;
pub const AF_IMPLINK: ::c_int = 3;
pub const AF_PUP: ::c_int = 4;
pub const AF_CHAOS: ::c_int = 5;
pub const AF_NS: ::c_int = 6;
pub const AF_ISO: ::c_int = 7;
pub const AF_OSI: ::c_int = AF_ISO;
pub const AF_DATAKIT: ::c_int = 9;
pub const AF_CCITT: ::c_int = 10;
pub const AF_SNA: ::c_int = 11;
pub const AF_DECnet: ::c_int = 12;
pub const AF_DLI: ::c_int = 13;
pub const AF_LAT: ::c_int = 14;
pub const AF_HYLINK: ::c_int = 15;
pub const AF_APPLETALK: ::c_int = 16;
pub const AF_LINK: ::c_int = 18;
pub const pseudo_AF_XTP: ::c_int = 19;
pub const AF_COIP: ::c_int = 20;
pub const AF_CNT: ::c_int = 21;
pub const pseudo_AF_RTIP: ::c_int = 22;
pub const AF_IPX: ::c_int = 23;
pub const AF_INET6: ::c_int = 24;
pub const pseudo_AF_PIP: ::c_int = 25;
pub const AF_ISDN: ::c_int = 26;
pub const AF_E164: ::c_int = AF_ISDN;
pub const AF_NATM: ::c_int = 27;
pub const PF_UNSPEC: ::c_int = AF_UNSPEC;
pub const PF_LOCAL: ::c_int = AF_LOCAL;
pub const PF_UNIX: ::c_int = PF_LOCAL;
pub const PF_INET: ::c_int = AF_INET;
pub const PF_IMPLINK: ::c_int = AF_IMPLINK;
pub const PF_PUP: ::c_int = AF_PUP;
pub const PF_CHAOS: ::c_int = AF_CHAOS;
pub const PF_NS: ::c_int = AF_NS;
pub const PF_ISO: ::c_int = AF_ISO;
pub const PF_OSI: ::c_int = AF_ISO;
pub const PF_DATAKIT: ::c_int = AF_DATAKIT;
pub const PF_CCITT: ::c_int = AF_CCITT;
pub const PF_SNA: ::c_int = AF_SNA;
pub const PF_DECnet: ::c_int = AF_DECnet;
pub const PF_DLI: ::c_int = AF_DLI;
pub const PF_LAT: ::c_int = AF_LAT;
pub const PF_HYLINK: ::c_int = AF_HYLINK;
pub const PF_APPLETALK: ::c_int = AF_APPLETALK;
pub const PF_LINK: ::c_int = AF_LINK;
pub const PF_XTP: ::c_int = pseudo_AF_XTP;
pub const PF_COIP: ::c_int = AF_COIP;
pub const PF_CNT: ::c_int = AF_CNT;
pub const PF_IPX: ::c_int = AF_IPX;
pub const PF_INET6: ::c_int = AF_INET6;
pub const PF_RTIP: ::c_int = pseudo_AF_RTIP;
pub const PF_PIP: ::c_int = pseudo_AF_PIP;
pub const PF_ISDN: ::c_int = AF_ISDN;
pub const PF_NATM: ::c_int = AF_NATM;
pub const SOCK_STREAM: ::c_int = 1;
pub const SOCK_DGRAM: ::c_int = 2;
pub const SOCK_RAW: ::c_int = 3;
pub const SOCK_RDM: ::c_int = 4;
pub const SOCK_SEQPACKET: ::c_int = 5;
pub const IP_TTL: ::c_int = 4;
pub const IP_HDRINCL: ::c_int = 2;
pub const IP_ADD_MEMBERSHIP: ::c_int = 12;
pub const IP_DROP_MEMBERSHIP: ::c_int = 13;
pub const IPV6_RECVPKTINFO: ::c_int = 36;
pub const IPV6_PKTINFO: ::c_int = 46;
pub const IPV6_RECVTCLASS: ::c_int = 57;
pub const IPV6_TCLASS: ::c_int = 61;
pub const SOL_SOCKET: ::c_int = 0xffff;
pub const SO_DEBUG: ::c_int = 0x01;
pub const SO_ACCEPTCONN: ::c_int = 0x0002;
pub const SO_REUSEADDR: ::c_int = 0x0004;
pub const SO_KEEPALIVE: ::c_int = 0x0008;
pub const SO_DONTROUTE: ::c_int = 0x0010;
pub const SO_BROADCAST: ::c_int = 0x0020;
pub const SO_USELOOPBACK: ::c_int = 0x0040;
pub const SO_LINGER: ::c_int = 0x0080;
pub const SO_OOBINLINE: ::c_int = 0x0100;
pub const SO_REUSEPORT: ::c_int = 0x0200;
pub const SO_SNDBUF: ::c_int = 0x1001;
pub const SO_RCVBUF: ::c_int = 0x1002;
pub const SO_SNDLOWAT: ::c_int = 0x1003;
pub const SO_RCVLOWAT: ::c_int = 0x1004;
pub const SO_ERROR: ::c_int = 0x1007;
pub const SO_TYPE: ::c_int = 0x1008;
pub const SOMAXCONN: ::c_int = 128;
pub const MSG_OOB: ::c_int = 0x1;
pub const MSG_PEEK: ::c_int = 0x2;
pub const MSG_DONTROUTE: ::c_int = 0x4;
pub const MSG_EOR: ::c_int = 0x8;
pub const MSG_TRUNC: ::c_int = 0x10;
pub const MSG_CTRUNC: ::c_int = 0x20;
pub const MSG_WAITALL: ::c_int = 0x40;
pub const MSG_DONTWAIT: ::c_int = 0x80;
pub const MSG_BCAST: ::c_int = 0x100;
pub const MSG_MCAST: ::c_int = 0x200;
pub const MSG_NOSIGNAL: ::c_int = 0x400;
pub const MSG_CMSG_CLOEXEC: ::c_int = 0x800;
pub const SHUT_RD: ::c_int = 0;
pub const SHUT_WR: ::c_int = 1;
pub const SHUT_RDWR: ::c_int = 2;
pub const LOCK_SH: ::c_int = 1;
pub const LOCK_EX: ::c_int = 2;
pub const LOCK_NB: ::c_int = 4;
pub const LOCK_UN: ::c_int = 8;
pub const IPPROTO_RAW : ::c_int = 255;
pub const _SC_ARG_MAX : ::c_int = 1;
pub const _SC_CHILD_MAX : ::c_int = 2;
pub const _SC_NGROUPS_MAX : ::c_int = 4;
pub const _SC_OPEN_MAX : ::c_int = 5;
pub const _SC_JOB_CONTROL : ::c_int = 6;
pub const _SC_SAVED_IDS : ::c_int = 7;
pub const _SC_VERSION : ::c_int = 8;
pub const _SC_BC_BASE_MAX : ::c_int = 9;
pub const _SC_BC_DIM_MAX : ::c_int = 10;
pub const _SC_BC_SCALE_MAX : ::c_int = 11;
pub const _SC_BC_STRING_MAX : ::c_int = 12;
pub const _SC_COLL_WEIGHTS_MAX : ::c_int = 13;
pub const _SC_EXPR_NEST_MAX : ::c_int = 14;
pub const _SC_LINE_MAX : ::c_int = 15;
pub const _SC_RE_DUP_MAX : ::c_int = 16;
pub const _SC_2_VERSION : ::c_int = 17;
pub const _SC_2_C_BIND : ::c_int = 18;
pub const _SC_2_C_DEV : ::c_int = 19;
pub const _SC_2_CHAR_TERM : ::c_int = 20;
pub const _SC_2_FORT_DEV : ::c_int = 21;
pub const _SC_2_FORT_RUN : ::c_int = 22;
pub const _SC_2_LOCALEDEF : ::c_int = 23;
pub const _SC_2_SW_DEV : ::c_int = 24;
pub const _SC_2_UPE : ::c_int = 25;
pub const _SC_STREAM_MAX : ::c_int = 26;
pub const _SC_TZNAME_MAX : ::c_int = 27;
pub const _SC_PAGESIZE : ::c_int = 28;
pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE;
pub const _SC_FSYNC : ::c_int = 29;
pub const _SC_XOPEN_SHM : ::c_int = 30;
pub const Q_GETQUOTA: ::c_int = 0x300;
pub const Q_SETQUOTA: ::c_int = 0x400;
pub const RTLD_GLOBAL: ::c_int = 0x100;
pub const LOG_NFACILITIES: ::c_int = 24;
pub const HW_NCPU: ::c_int = 3;
pub const B0: speed_t = 0;
pub const B50: speed_t = 50;
pub const B75: speed_t = 75;
pub const B110: speed_t = 110;
pub const B134: speed_t = 134;
pub const B150: speed_t = 150;
pub const B200: speed_t = 200;
pub const B300: speed_t = 300;
// Baud-rate constants for `speed_t`; on this platform the constant's numeric
// value equals the baud rate itself.
pub const B600: speed_t = 600;
pub const B1200: speed_t = 1200;
pub const B1800: speed_t = 1800;
pub const B2400: speed_t = 2400;
pub const B4800: speed_t = 4800;
pub const B9600: speed_t = 9600;
pub const B19200: speed_t = 19200;
pub const B38400: speed_t = 38400;
pub const B7200: speed_t = 7200;
pub const B14400: speed_t = 14400;
pub const B28800: speed_t = 28800;
pub const B57600: speed_t = 57600;
pub const B76800: speed_t = 76800;
pub const B115200: speed_t = 115200;
pub const B230400: speed_t = 230400;
// Historical aliases for the "external" clocked rates A/B.
pub const EXTA: speed_t = 19200;
pub const EXTB: speed_t = 38400;
// Sentinel returned by sem_open(3) on failure: a null semaphore pointer.
pub const SEM_FAILED: *mut sem_t = 0 as *mut sem_t;
// Hardware (RTS/CTS) flow-control flags; CRTS_IFLOW/CCTS_OFLOW are aliases of
// CRTSCTS.
pub const CRTSCTS: ::tcflag_t = 0x00010000;
pub const CRTS_IFLOW: ::tcflag_t = CRTSCTS;
pub const CCTS_OFLOW: ::tcflag_t = CRTSCTS;
pub const OCRNL: ::tcflag_t = 0x10;
// Terminal ioctl(2) request codes (TIOC*).
pub const TIOCEXCL: ::c_ulong = 0x2000740d;
pub const TIOCNXCL: ::c_ulong = 0x2000740e;
pub const TIOCFLUSH: ::c_ulong = 0x80047410;
pub const TIOCGETA: ::c_ulong = 0x402c7413;
pub const TIOCSETA: ::c_ulong = 0x802c7414;
pub const TIOCSETAW: ::c_ulong = 0x802c7415;
pub const TIOCSETAF: ::c_ulong = 0x802c7416;
pub const TIOCGETD: ::c_ulong = 0x4004741a;
pub const TIOCSETD: ::c_ulong = 0x8004741b;
pub const TIOCMGET: ::c_ulong = 0x4004746a;
pub const TIOCMBIC: ::c_ulong = 0x8004746b;
pub const TIOCMBIS: ::c_ulong = 0x8004746c;
pub const TIOCMSET: ::c_ulong = 0x8004746d;
pub const TIOCSTART: ::c_ulong = 0x2000746e;
pub const TIOCSTOP: ::c_ulong = 0x2000746f;
pub const TIOCSCTTY: ::c_ulong = 0x20007461;
pub const TIOCGWINSZ: ::c_ulong = 0x40087468;
pub const TIOCSWINSZ: ::c_ulong = 0x80087467;
// Modem-control line bits used with TIOCMGET/TIOCMBIS/TIOCMBIC/TIOCMSET.
pub const TIOCM_LE: ::c_int = 0o0001;
pub const TIOCM_DTR: ::c_int = 0o0002;
pub const TIOCM_RTS: ::c_int = 0o0004;
pub const TIOCM_ST: ::c_int = 0o0010;
pub const TIOCM_SR: ::c_int = 0o0020;
pub const TIOCM_CTS: ::c_int = 0o0040;
pub const TIOCM_CAR: ::c_int = 0o0100;
pub const TIOCM_RNG: ::c_int = 0o0200;
pub const TIOCM_DSR: ::c_int = 0o0400;
// Carrier-detect and ring-indicator are aliases of CAR/RNG.
pub const TIOCM_CD: ::c_int = TIOCM_CAR;
pub const TIOCM_RI: ::c_int = TIOCM_RNG;
// Flags for chflags(2)
// UF_* flags (low 16 bits) may be set by the file owner; SF_* flags (high 16
// bits) only by the superuser.
pub const UF_SETTABLE: ::c_ulong = 0x0000ffff;
pub const UF_NODUMP: ::c_ulong = 0x00000001;
pub const UF_IMMUTABLE: ::c_ulong = 0x00000002;
pub const UF_APPEND: ::c_ulong = 0x00000004;
pub const UF_OPAQUE: ::c_ulong = 0x00000008;
pub const SF_SETTABLE: ::c_ulong = 0xffff0000;
pub const SF_ARCHIVED: ::c_ulong = 0x00010000;
pub const SF_IMMUTABLE: ::c_ulong = 0x00020000;
pub const SF_APPEND: ::c_ulong = 0x00040000;
// POSIX flag: interpret the given timespec as an absolute time rather than an
// interval (clock_nanosleep/timer_settime).
pub const TIMER_ABSTIME: ::c_int = 1;
// FFI declarations resolved from libc/libutil on the BSD-like targets this
// module serves (see the cfg_if dispatch below).
#[link(name = "util")]
extern {
    pub fn sem_destroy(sem: *mut sem_t) -> ::c_int;
    pub fn sem_init(sem: *mut sem_t,
                    pshared: ::c_int,
                    value: ::c_uint)
                    -> ::c_int;
    pub fn daemon(nochdir: ::c_int, noclose: ::c_int) -> ::c_int;
    pub fn mincore(addr: *mut ::c_void, len: ::size_t,
                   vec: *mut ::c_char) -> ::c_int;
    // NetBSD exposes versioned symbol names for the clock_* family
    // (presumably from the time_t ABI change — the "50" suffix).
    #[cfg_attr(target_os = "netbsd", link_name = "__clock_getres50")]
    pub fn clock_getres(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int;
    #[cfg_attr(target_os = "netbsd", link_name = "__clock_gettime50")]
    pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int;
    #[cfg_attr(target_os = "netbsd", link_name = "__clock_settime50")]
    pub fn clock_settime(clk_id: ::clockid_t, tp: *const ::timespec) -> ::c_int;
    // Accessor for the per-thread errno location.
    pub fn __errno() -> *mut ::c_int;
    pub fn shm_open(name: *const ::c_char, oflag: ::c_int, mode: ::mode_t)
                    -> ::c_int;
    pub fn memrchr(cx: *const ::c_void,
                   c: ::c_int,
                   n: ::size_t) -> *mut ::c_void;
    pub fn mkostemp(template: *mut ::c_char, flags: ::c_int) -> ::c_int;
    pub fn mkostemps(template: *mut ::c_char,
                     suffixlen: ::c_int,
                     flags: ::c_int) -> ::c_int;
    // Positioned vectored I/O.
    pub fn pwritev(fd: ::c_int,
                   iov: *const ::iovec,
                   iovcnt: ::c_int,
                   offset: ::off_t) -> ::ssize_t;
    pub fn preadv(fd: ::c_int,
                  iov: *const ::iovec,
                  iovcnt: ::c_int,
                  offset: ::off_t) -> ::ssize_t;
    pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int;
    pub fn utimensat(dirfd: ::c_int, path: *const ::c_char,
                     times: *const ::timespec, flag: ::c_int) -> ::c_int;
    pub fn fdatasync(fd: ::c_int) -> ::c_int;
    // Pseudo-terminal helpers (traditionally provided by libutil).
    pub fn openpty(amaster: *mut ::c_int,
                   aslave: *mut ::c_int,
                   name: *mut ::c_char,
                   termp: *mut termios,
                   winp: *mut ::winsize) -> ::c_int;
    pub fn forkpty(amaster: *mut ::c_int,
                   name: *mut ::c_char,
                   termp: *mut termios,
                   winp: *mut ::winsize) -> ::pid_t;
    pub fn login_tty(fd: ::c_int) -> ::c_int;
    pub fn getpriority(which: ::c_int, who: ::id_t) -> ::c_int;
    pub fn setpriority(which: ::c_int, who: ::id_t, prio: ::c_int) -> ::c_int;
    pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char,
                   mode: ::mode_t, dev: dev_t) -> ::c_int;
    pub fn mkfifoat(dirfd: ::c_int, pathname: *const ::c_char,
                    mode: ::mode_t) -> ::c_int;
    pub fn sem_timedwait(sem: *mut sem_t,
                         abstime: *const ::timespec) -> ::c_int;
    pub fn sem_getvalue(sem: *mut sem_t,
                        sval: *mut ::c_int) -> ::c_int;
    pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t,
                                     clock_id: ::clockid_t) -> ::c_int;
    pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int;
    pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t,
                                   abstime: *const ::timespec) -> ::c_int;
    pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int;
    pub fn getgrouplist(name: *const ::c_char,
                        basegid: ::gid_t,
                        groups: *mut ::gid_t,
                        ngroups: *mut ::c_int) -> ::c_int;
    pub fn initgroups(name: *const ::c_char, basegid: ::gid_t) -> ::c_int;
    pub fn getdomainname(name: *mut ::c_char, len: ::size_t) -> ::c_int;
    pub fn setdomainname(name: *const ::c_char, len: ::size_t) -> ::c_int;
    pub fn uname(buf: *mut ::utsname) -> ::c_int;
}
// Pull in the OS-specific submodule for the concrete BSD flavor being built.
cfg_if! {
    if #[cfg(target_os = "netbsd")] {
        mod netbsd;
        pub use self::netbsd::*;
    } else if #[cfg(target_os = "openbsd")] {
        mod openbsdlike;
        pub use self::openbsdlike::*;
    } else {
        // Unknown target_os
    }
}
| 34.705539 | 80 | 0.654948 |
fb7260ba9c9d9fcfb3e1371fd09cf86c6f3548ca
| 5,983 |
use std::fs::copy;
use std::path::PathBuf;
use std::process::Command;
use tempfile::{tempdir, TempDir};
use crate::*;
/// Build the "hello" test kernel module in a fresh temporary directory and
/// return (path to hello.ko, module name, tempdir guard keeping it alive).
fn compile_kernel_module() -> (PathBuf, String, TempDir) {
    // Serialize with other fork-sensitive tests while `make` runs.
    let _guard = crate::FORK_MTX
        .lock()
        .expect("Mutex got poisoned by another test");

    let build_dir = tempdir().expect("unable to create temporary build directory");
    let dir_path = build_dir.path();

    copy("test/test_kmod/hello_mod/hello.c", dir_path.join("hello.c"))
        .expect("unable to copy hello.c to temporary build directory");
    copy("test/test_kmod/hello_mod/Makefile", dir_path.join("Makefile"))
        .expect("unable to copy Makefile to temporary build directory");

    let make_status = Command::new("make")
        .current_dir(dir_path)
        .status()
        .expect("failed to run make");
    assert!(make_status.success());

    let module_path = dir_path.join("hello.ko");
    (module_path, "hello".to_owned(), build_dir)
}
use nix::errno::Errno;
use nix::kmod::{delete_module, DeleteModuleFlags};
use nix::kmod::{finit_module, init_module, ModuleInitFlags};
use nix::Error;
use std::ffi::CString;
use std::fs::File;
use std::io::Read;
#[test]
fn test_finit_and_delete_module() {
    require_capability!(CAP_SYS_MODULE);
    let _m0 = crate::KMOD_MTX.lock().expect("Mutex got poisoned by another test");
    let _m1 = crate::CWD_LOCK.read().expect("Mutex got poisoned by another test");

    let (kmod_path, kmod_name, _kmod_dir) = compile_kernel_module();

    // Load the module from its file descriptor, then unload it by name.
    let module_file = File::open(kmod_path).expect("unable to open kernel module");
    let no_params = CString::new("").unwrap();
    finit_module(&module_file, &no_params, ModuleInitFlags::empty())
        .expect("unable to load kernel module");

    let module_name = CString::new(kmod_name).unwrap();
    delete_module(&module_name, DeleteModuleFlags::empty())
        .expect("unable to unload kernel module");
}
#[test]
// NOTE: the "modul" typo in the name is kept — renaming a test changes its
// harness-visible identifier.
fn test_finit_and_delete_modul_with_params() {
    require_capability!(CAP_SYS_MODULE);
    let _m0 = crate::KMOD_MTX.lock().expect("Mutex got poisoned by another test");
    let _m1 = crate::CWD_LOCK.read().expect("Mutex got poisoned by another test");

    let (kmod_path, kmod_name, _kmod_dir) = compile_kernel_module();

    // Load the module, this time passing module parameters.
    let module_file = File::open(kmod_path).expect("unable to open kernel module");
    let params = CString::new("who=Rust number=2018").unwrap();
    finit_module(&module_file, &params, ModuleInitFlags::empty())
        .expect("unable to load kernel module");

    let module_name = CString::new(kmod_name).unwrap();
    delete_module(&module_name, DeleteModuleFlags::empty())
        .expect("unable to unload kernel module");
}
#[test]
fn test_init_and_delete_module() {
    require_capability!(CAP_SYS_MODULE);
    let _m0 = crate::KMOD_MTX.lock().expect("Mutex got poisoned by another test");
    let _m1 = crate::CWD_LOCK.read().expect("Mutex got poisoned by another test");

    let (kmod_path, kmod_name, _kmod_dir) = compile_kernel_module();

    // init_module takes the whole module image in memory (unlike finit_module,
    // which takes a file descriptor).
    let mut image: Vec<u8> = Vec::new();
    File::open(kmod_path)
        .expect("unable to open kernel module")
        .read_to_end(&mut image)
        .expect("unable to read kernel module content to buffer");

    init_module(&mut image, &CString::new("").unwrap()).expect("unable to load kernel module");

    let module_name = CString::new(kmod_name).unwrap();
    delete_module(&module_name, DeleteModuleFlags::empty())
        .expect("unable to unload kernel module");
}
#[test]
fn test_init_and_delete_module_with_params() {
    require_capability!(CAP_SYS_MODULE);
    let _m0 = crate::KMOD_MTX.lock().expect("Mutex got poisoned by another test");
    let _m1 = crate::CWD_LOCK.read().expect("Mutex got poisoned by another test");

    let (kmod_path, kmod_name, _kmod_dir) = compile_kernel_module();

    // Load from an in-memory image, passing module parameters.
    let mut image: Vec<u8> = Vec::new();
    File::open(kmod_path)
        .expect("unable to open kernel module")
        .read_to_end(&mut image)
        .expect("unable to read kernel module content to buffer");

    let params = CString::new("who=Nix number=2015").unwrap();
    init_module(&mut image, &params).expect("unable to load kernel module");

    let module_name = CString::new(kmod_name).unwrap();
    delete_module(&module_name, DeleteModuleFlags::empty())
        .expect("unable to unload kernel module");
}
#[test]
fn test_finit_module_invalid() {
    require_capability!(CAP_SYS_MODULE);
    let _m0 = crate::KMOD_MTX.lock().expect("Mutex got poisoned by another test");
    let _m1 = crate::CWD_LOCK.read().expect("Mutex got poisoned by another test");

    // /dev/zero is not a valid module image, so the kernel must reject it.
    let not_a_module = File::open("/dev/zero").expect("unable to open kernel module");
    let no_params = CString::new("").unwrap();

    let result = finit_module(&not_a_module, &no_params, ModuleInitFlags::empty());
    assert_eq!(result.unwrap_err(), Error::Sys(Errno::EINVAL));
}
#[test]
fn test_finit_module_twice_and_delete_module() {
    require_capability!(CAP_SYS_MODULE);
    let _m0 = crate::KMOD_MTX.lock().expect("Mutex got poisoned by another test");
    let _m1 = crate::CWD_LOCK.read().expect("Mutex got poisoned by another test");

    let (kmod_path, kmod_name, _kmod_dir) = compile_kernel_module();

    let module_file = File::open(kmod_path).expect("unable to open kernel module");
    let no_params = CString::new("").unwrap();

    // First load succeeds ...
    finit_module(&module_file, &no_params, ModuleInitFlags::empty())
        .expect("unable to load kernel module");
    // ... but loading the same module a second time must fail with EEXIST.
    let second_load = finit_module(&module_file, &no_params, ModuleInitFlags::empty());
    assert_eq!(second_load.unwrap_err(), Error::Sys(Errno::EEXIST));

    let module_name = CString::new(kmod_name).unwrap();
    delete_module(&module_name, DeleteModuleFlags::empty())
        .expect("unable to unload kernel module");
}
#[test]
fn test_delete_module_not_loaded() {
    require_capability!(CAP_SYS_MODULE);
    let _m0 = crate::KMOD_MTX.lock().expect("Mutex got poisoned by another test");
    let _m1 = crate::CWD_LOCK.read().expect("Mutex got poisoned by another test");

    // Nothing loaded "hello", so deleting it must fail with ENOENT.
    let module_name = CString::new("hello").unwrap();
    let result = delete_module(&module_name, DeleteModuleFlags::empty());
    assert_eq!(result.unwrap_err(), Error::Sys(Errno::ENOENT));
}
| 35.613095 | 98 | 0.675247 |
e6c1ca82bbc25f1049dce292f25e20ecf60123e6
| 214 |
//! Project page.
use crate::error::TelescopeError;
use actix_web::HttpResponse;
#[get("/projects")]
/// Serve the project page.
///
/// Currently a stub: always responds with [`TelescopeError::NotImplemented`].
pub async fn get() -> Result<HttpResponse, TelescopeError> {
    // Tail expression instead of a needless `return …;`.
    Err(TelescopeError::NotImplemented)
}
| 21.4 | 60 | 0.724299 |
28b1c59cae92d7ca1cb6ec0be5df57016f9e1796
| 7,822 |
#![warn(clippy::all)]
use futures::StreamExt;
use log::{debug, error, info};
use serde::Deserialize;
use std::collections::HashMap as Map;
use std::error::Error;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use structopt::StructOpt;
use tokio::sync::Mutex;
use warp::{ws::WebSocket, Filter};
// Gender identity options accepted in a matchmaking `Query` (deserialized
// from the client's JSON).
#[derive(Debug, Deserialize)]
enum Gender {
    Male,
    Female,
    TransMale,
    TransFemale,
}
// Closed set of accepted ages (23..=50), one variant per year. Using an enum
// (rather than a raw u8) lets serde reject out-of-range values during
// deserialization. Variant order matters: `From<Age> for u8` relies on it.
#[derive(Debug, Deserialize)]
enum Age {
    Age23,
    Age24,
    Age25,
    Age26,
    Age27,
    Age28,
    Age29,
    Age30,
    Age31,
    Age32,
    Age33,
    Age34,
    Age35,
    Age36,
    Age37,
    Age38,
    Age39,
    Age40,
    Age41,
    Age42,
    Age43,
    Age44,
    Age45,
    Age46,
    Age47,
    Age48,
    Age49,
    Age50,
}
impl From<Age> for u8 {
fn from(a: Age) -> Self {
match a {
Age::Age23 => 23,
Age::Age24 => 24,
Age::Age25 => 25,
Age::Age26 => 26,
Age::Age27 => 27,
Age::Age28 => 28,
Age::Age29 => 29,
Age::Age30 => 30,
Age::Age31 => 31,
Age::Age32 => 32,
Age::Age33 => 33,
Age::Age34 => 34,
Age::Age35 => 35,
Age::Age36 => 36,
Age::Age37 => 37,
Age::Age38 => 38,
Age::Age39 => 39,
Age::Age40 => 40,
Age::Age41 => 41,
Age::Age42 => 42,
Age::Age43 => 43,
Age::Age44 => 44,
Age::Age45 => 45,
Age::Age46 => 46,
Age::Age47 => 47,
Age::Age48 => 48,
Age::Age49 => 49,
Age::Age50 => 50,
}
}
}
// Matchmaking request sent by the client as the first websocket message
// (JSON, deserialized in `user_conn`).
#[derive(Debug, Deserialize)]
struct Query {
    age: Age,
    gender: Gender,
    search_genders: Vec<Gender>,
    search_age: Age,
    search_sexual: bool,
}
// Per-user bookkeeping held in the shared user map.
struct Info {
    query: Query,
    reports: u8,            // number of reports received against this user
    reported: Vec<IpAddr>,  // peers this user has reported
}
// Matchmaking state of a connected user: waiting, or paired with a peer
// identified by IP.
enum Status {
    NotMatched { info: Info },
    Matched { info: Info, connected_to: IpAddr },
}
// Shared, async-locked map of connected users keyed by client IP.
type Users = Arc<Mutex<Map<IpAddr, Status>>>;
// Command-line options; `-v` may be repeated to raise log verbosity.
#[derive(Debug, StructOpt)]
struct TvDotCom {
    #[structopt(short, long, parse(from_occurrences))]
    verbose: u8,
}
#[tokio::main]
pub async fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
    let opt = TvDotCom::from_args();
    // Map -v occurrence count to a log level (default: warnings only).
    let log_level = match opt.verbose {
        0 => log::LevelFilter::Warn,
        1 => log::LevelFilter::Info,
        2 => log::LevelFilter::Debug,
        _ => log::LevelFilter::Trace,
    };
    env_logger::Builder::new()
        .filter_level(log_level)
        .try_init()?;
    // Frontend assets are embedded at compile time and spliced into the HTML
    // template by replacing the $$$style$$$ / $$$script$$$ placeholders.
    let index_page = {
        let css_normalize = include_str!("../frontend/normalize.min.css");
        let css_skeleton = include_str!("../frontend/skeleton.min.css");
        let js = include_str!("../frontend/codez.min.js");
        Arc::new(
            include_str!("../frontend/index.min.html")
                .replace(
                    "$$$style$$$",
                    &format!("<style>{} {}</style>", css_normalize, css_skeleton),
                )
                .replace(
                    "$$$script$$$",
                    &format!(r#"<script type="text/javascript">{}</script>"#, js),
                ),
        )
    };
    // Shared user map handed to every websocket handler.
    let users = Arc::new(Mutex::new(Map::new()));
    // GET /      -> the embedded index page.
    let index = warp::path::end()
        .and(warp::any().map(move || index_page.clone()))
        .map(|page: Arc<String>| Box::new(warp::reply::html(page.to_string())));
    // GET /ws    -> websocket upgrade handled by `handle_ws`.
    let ws = warp::path("ws")
        .and(warp::ws())
        .and(warp::filters::addr::remote())
        .and(warp::any().map(move || users.clone()))
        .and_then(handle_ws);
    warp::serve(index.or(ws)).run(([0, 0, 0, 0], 8000)).await;
    Ok(())
}
/// Accept a websocket upgrade, rejecting clients whose remote address is
/// unknown (we key all per-user state by client IP).
async fn handle_ws(
    ws: warp::ws::Ws,
    remote: Option<SocketAddr>,
    users: Users,
) -> Result<Box<dyn warp::Reply>, warp::Rejection> {
    match remote {
        Some(remote) => {
            info!("Got a websocket connection from {}", remote);
            let reply = ws.on_upgrade(move |socket: WebSocket| user_conn(socket, remote, users));
            Ok(Box::new(reply))
        }
        None => {
            error!("Got a websocket connection without a remote");
            Ok(Box::new(warp::http::StatusCode::FORBIDDEN))
        }
    }
}
/// Handle one websocket client: read and validate its matchmaking query
/// (first text frame, within 10 seconds), register it in the shared user map,
/// then wait for it to disconnect. No other inbound data is accepted at this
/// stage of the protocol.
///
/// Removed: a large block of commented-out dead code duplicating the query
/// parsing above it.
async fn user_conn(ws: WebSocket, remote: SocketAddr, users: Users) {
    info!("Got a connection from {}", remote);
    let (_user_ws_tx, mut user_ws_rx) = ws.split();
    // The first frame must arrive within 10 seconds and carry the query.
    let query = match tokio::time::timeout(Duration::from_secs(10), user_ws_rx.next()).await {
        Ok(Some(query)) => query,
        // Stream ended before any message arrived.
        Ok(None) => return,
        Err(e) => {
            error!("Waiting for query from {} timed out: {}", remote, e);
            return;
        }
    };
    let query = match query {
        Ok(query) => query,
        Err(e) => {
            error!("Error reading query: {}", e);
            return;
        }
    };
    if !query.is_text() {
        error!("Query message is not text");
        return;
    }
    let query = match query.to_str() {
        Ok(query) => query,
        Err(_) => {
            error!("Cannot convert query message to string ({:?})", query);
            return;
        }
    };
    let query: Query = match serde_json::from_str(&query) {
        Ok(query) => query,
        Err(e) => {
            error!("Invalid query ({:?}): {}", query, e);
            return;
        }
    };
    let ip = remote.ip();
    info!("Query from {} = {:?}", ip, query);
    {
        let mut users = users.lock().await;
        // Only one live connection per client IP.
        if users.contains_key(&ip) {
            error!("User {} already in DB", remote);
            return;
        }
        users.insert(
            ip,
            Status::NotMatched {
                info: Info {
                    query,
                    reports: 0,
                    reported: Vec::new(),
                },
            },
        );
    }
    // From here on the only acceptable inbound event is a close frame; any
    // other frame, or a transport error, deregisters the user and hangs up.
    while let Some(msg) = user_ws_rx.next().await {
        match msg {
            Ok(msg) => {
                if msg.is_close() {
                    info!("User {} closed connection", ip);
                    disconnect_user(ip, users.clone()).await;
                    return;
                } else {
                    error!("Not accepting data from user {} at this point", ip);
                    disconnect_user(ip, users.clone()).await;
                    return;
                }
            }
            Err(e) => {
                error!("User {} error: {}", ip, e);
                disconnect_user(ip, users).await;
                return;
            }
        }
    }
}
/// Remove `ip` from the shared user map, logging loudly if it was not present
/// (that would indicate a bookkeeping bug elsewhere).
async fn disconnect_user(ip: IpAddr, users: Users) {
    let mut users = users.lock().await;
    // `.is_none()` instead of the `if let None = …` anti-pattern.
    if users.remove(&ip).is_none() {
        error!("Bad state, user {} was not in the DB", ip);
        return;
    }
    debug!("DB now has {} users", users.len());
}
| 25.478827 | 94 | 0.461391 |
28381bb4679d09debb77bcaaa6c9acc5d82614d2
| 1,282 |
use anyhow::Result;
use serde::Deserialize;
use std::collections::HashMap;
use super::github_fetch::github_fetch;
// Minimal projection of the GitHub "get commit" JSON response: only the
// author's login is extracted.
#[derive(Deserialize, Debug)]
struct ResponseBody {
    author: ResponseBodyAuthor,
}
#[derive(Deserialize, Debug)]
struct ResponseBodyAuthor {
    login: String,
}
// Memoizing fetcher mapping a cache key to an optional GitHub login
// (`None` is cached too, so failed lookups are not retried).
#[derive(Debug, Default)]
pub struct GitHubUsersFetcher {
    cache: HashMap<String, Option<String>>,
}
impl GitHubUsersFetcher {
    /// Return the GitHub login of the author of `commit`, memoized under
    /// `key`. Failures are logged and cached as `None`.
    pub fn fetch_user_by_commit_author(
        &mut self,
        key: impl Into<String>,
        commit: impl AsRef<str>,
        token: Option<String>,
    ) -> Option<&str> {
        let cached = self.cache.entry(key.into()).or_insert_with(|| {
            Self::inner_fetch(commit, token).unwrap_or_else(|err| {
                eprintln!("fetch_user_by_commit_author Error: {}", err);
                None
            })
        });
        cached.as_deref()
    }

    /// Query the GitHub API for a single commit and pull out the author login.
    fn inner_fetch(commit: impl AsRef<str>, token: Option<String>) -> Result<Option<String>> {
        let url = format!(
            "https://api.github.com/repos/yewstack/yew/commits/{}",
            commit.as_ref(),
        );
        let response: ResponseBody = github_fetch(&url, token)?;
        Ok(Some(response.author.login))
    }
}
| 25.64 | 94 | 0.574883 |
1a41a028ef10c914888fc00275580389a1d79c9f
| 1,414 |
// svd2rust-generated reader/writer proxies for this 32-bit register.
#[doc = "Reader of register DMAC_CH0_EXTADDR"]
pub type R = crate::R<u32, super::DMAC_CH0_EXTADDR>;
#[doc = "Writer for register DMAC_CH0_EXTADDR"]
pub type W = crate::W<u32, super::DMAC_CH0_EXTADDR>;
#[doc = "Register DMAC_CH0_EXTADDR `reset()`'s with value 0"]
impl crate::ResetValue for super::DMAC_CH0_EXTADDR {
    type Type = u32;
    // Hardware reset value: all bits cleared.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// Field-level reader/writer proxies for the 32-bit ADDR field.
#[doc = "Reader of field `ADDR`"]
pub type ADDR_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `ADDR`"]
pub struct ADDR_W<'a> {
    w: &'a mut W,
}
impl<'a> ADDR_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        // ADDR occupies the full 32-bit register, so the generated
        // mask-and-merge collapses to a plain store of the value.
        self.w.bits = value & 0xffff_ffff;
        self.w
    }
}
impl R {
    #[doc = "Bits 0:31 - 31:0\\] Channel external address value When read during operation, it holds the last updated external address after being sent to the master interface."]
    #[inline(always)]
    pub fn addr(&self) -> ADDR_R {
        // The field spans all 32 bits, so masking/casting is a no-op.
        ADDR_R::new(self.bits)
    }
}
impl W {
    #[doc = "Bits 0:31 - 31:0\\] Channel external address value When read during operation, it holds the last updated external address after being sent to the master interface."]
    // Returns the write proxy for the ADDR field.
    #[inline(always)]
    pub fn addr(&mut self) -> ADDR_W {
        ADDR_W { w: self }
    }
}
| 34.487805 | 178 | 0.631542 |
cc0e85d85f54d501c4c41e62d88e3444eb399a0c
| 2,865 |
use rand::{Rand, SeedableRng, XorShiftRng};
use std::ops::{AddAssign, MulAssign, SubAssign};
use algebra::fields::{bls12_377::fq12::Fq12, Field};
#[bench]
fn bench_fq12_add_assign(b: &mut ::test::Bencher) {
    const SAMPLES: usize = 1000;

    // Fixed seed keeps the benchmark deterministic across runs.
    let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
    let pairs: Vec<(Fq12, Fq12)> = (0..SAMPLES)
        .map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng)))
        .collect();

    let mut i = 0;
    b.iter(|| {
        let mut acc = pairs[i].0;
        acc.add_assign(&pairs[i].1);
        i = (i + 1) % SAMPLES;
        acc
    });
}
#[bench]
fn bench_fq12_sub_assign(b: &mut ::test::Bencher) {
    const SAMPLES: usize = 1000;

    let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
    let pairs: Vec<(Fq12, Fq12)> = (0..SAMPLES)
        .map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng)))
        .collect();

    let mut i = 0;
    b.iter(|| {
        let mut acc = pairs[i].0;
        acc.sub_assign(&pairs[i].1);
        i = (i + 1) % SAMPLES;
        acc
    });
}
#[bench]
fn bench_fq12_mul_assign(b: &mut ::test::Bencher) {
    const SAMPLES: usize = 1000;

    let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
    let pairs: Vec<(Fq12, Fq12)> = (0..SAMPLES)
        .map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng)))
        .collect();

    let mut i = 0;
    b.iter(|| {
        let mut acc = pairs[i].0;
        acc.mul_assign(&pairs[i].1);
        i = (i + 1) % SAMPLES;
        acc
    });
}
#[bench]
fn bench_fq12_double(b: &mut ::test::Bencher) {
    const SAMPLES: usize = 1000;

    let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
    let samples: Vec<Fq12> = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect();

    let mut i = 0;
    b.iter(|| {
        let mut x = samples[i];
        x.double_in_place();
        i = (i + 1) % SAMPLES;
        x
    });
}
#[bench]
fn bench_fq12_square(b: &mut ::test::Bencher) {
    const SAMPLES: usize = 1000;

    let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
    let samples: Vec<Fq12> = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect();

    let mut i = 0;
    b.iter(|| {
        let mut x = samples[i];
        x.square_in_place();
        i = (i + 1) % SAMPLES;
        x
    });
}
#[bench]
fn bench_fq12_inverse(b: &mut ::test::Bencher) {
const SAMPLES: usize = 1000;
let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let v: Vec<Fq12> = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect();
let mut count = 0;
b.iter(|| {
let tmp = v[count].inverse();
count = (count + 1) % SAMPLES;
tmp
});
}
| 25.580357 | 91 | 0.569983 |
21e783aff5dbd0353257103bf4002556925c9aa8
| 10,992 |
use amethyst::{
core::ecs::{DispatcherBuilder, World},
error::Error,
prelude::*,
renderer::{
bundle::{RenderOrder, RenderPlan, RenderPlugin, Target},
pipeline::{PipelineDescBuilder, PipelinesBuilder},
rendy::{
command::{QueueId, RenderPassEncoder},
factory::Factory,
graph::{
render::{PrepareResult, RenderGroup, RenderGroupDesc},
GraphContext, NodeBuffer, NodeImage,
},
hal::{self, device::Device, format::Format, pso, pso::ShaderStageFlags},
mesh::{AsVertex, VertexFormat},
shader::{Shader, SpirvShader},
},
submodules::{DynamicUniform, DynamicVertexBuffer},
system::GraphAuxData,
types::Backend,
util, ChangeDetection,
},
};
use derivative::Derivative;
use glsl_layout::*;
lazy_static::lazy_static! {
    // These use the precompiled SPIR-V shaders.
    // They can be obtained using glslc.exe in the vulkan sdk.
    // "main" is the shader entry point name.
    static ref VERTEX: SpirvShader = SpirvShader::from_bytes(
        include_bytes!("./assets/shaders/compiled/vertex/custom.vert.spv"),
        ShaderStageFlags::VERTEX,
        "main",
    ).unwrap();

    static ref FRAGMENT: SpirvShader = SpirvShader::from_bytes(
        include_bytes!("./assets/shaders/compiled/fragment/custom.frag.spv"),
        ShaderStageFlags::FRAGMENT,
        "main",
    ).unwrap();
}
/// Example code of using a custom shader
///
/// Requires "shader-compiler" flag
///
/// ''' rust
/// use std::path::PathBuf;
/// use amethyst::renderer::rendy::shader::{PathBufShaderInfo, ShaderKind, SourceLanguage};
///
/// lazy_static::lazy_static! {
/// static ref VERTEX: SpirvShader = PathBufShaderInfo::new(
/// PathBuf::from(concat!(env!("CARGO_MANIFEST_DIR"), "/examples/assets/shaders/src/vertex/custom.vert")),
/// ShaderKind::Vertex,
/// SourceLanguage::GLSL,
/// "main",
/// ).precompile().unwrap();
///
/// static ref FRAGMENT: SpirvShader = PathBufShaderInfo::new(
/// PathBuf::from(concat!(env!("CARGO_MANIFEST_DIR"), "/examples/assets/shaders/src/fragment/custom.frag")),
/// ShaderKind::Fragment,
/// SourceLanguage::GLSL,
/// "main",
/// ).precompile().unwrap();
/// }
/// '''
/// Draw triangles.
///
/// Zero-sized descriptor; the actual GPU resources are created in
/// `RenderGroupDesc::build` below.
#[derive(Clone, Debug, PartialEq, Derivative)]
#[derivative(Default(bound = ""))]
pub struct DrawCustomDesc;
impl DrawCustomDesc {
    /// Create instance of `DrawCustomDesc` render group
    pub fn new() -> Self {
        Self::default()
    }
}
impl<B: Backend> RenderGroupDesc<B, GraphAuxData> for DrawCustomDesc {
    // Allocates the per-frame uniform and vertex buffers, compiles the
    // graphics pipeline for the given subpass, and packages everything into
    // a `DrawCustom` render group.
    fn build(
        self,
        _ctx: &GraphContext<B>,
        factory: &mut Factory<B>,
        _queue: QueueId,
        _world: &GraphAuxData,
        framebuffer_width: u32,
        framebuffer_height: u32,
        subpass: hal::pass::Subpass<'_, B>,
        _buffers: Vec<NodeBuffer>,
        _images: Vec<NodeImage>,
    ) -> Result<Box<dyn RenderGroup<B, GraphAuxData>>, pso::CreationError> {
        // Uniform buffer visible to the vertex shader (CustomUniformArgs).
        let env = DynamicUniform::new(factory, pso::ShaderStageFlags::VERTEX)?;
        let vertex = DynamicVertexBuffer::new();
        let (pipeline, pipeline_layout) = build_custom_pipeline(
            factory,
            subpass,
            framebuffer_width,
            framebuffer_height,
            vec![env.raw_layout()],
        )?;
        Ok(Box::new(DrawCustom::<B> {
            pipeline,
            pipeline_layout,
            env,
            vertex,
            // No geometry uploaded yet; `prepare` fills this in each frame.
            vertex_count: 0,
            change: Default::default(),
        }))
    }
}
/// Draws triangles to the screen.
#[derive(Debug)]
pub struct DrawCustom<B: Backend> {
    // Compiled graphics pipeline and its layout (destroyed in `dispose`).
    pipeline: B::GraphicsPipeline,
    pipeline_layout: B::PipelineLayout,
    // Per-frame uniform holding the scale (CustomUniformArgs).
    env: DynamicUniform<B, CustomUniformArgs>,
    // Per-frame vertex buffer of CustomArgs vertices.
    vertex: DynamicVertexBuffer<B, CustomArgs>,
    // Number of vertices written during the last `prepare`.
    vertex_count: usize,
    // Tracks whether draw calls can be reused between frames.
    change: ChangeDetection,
}
impl<B: Backend> RenderGroup<B, GraphAuxData> for DrawCustom<B> {
    // Per-frame CPU-side work: upload the scale uniform and the vertices of
    // every `Triangle` component to the GPU buffers for frame `index`.
    fn prepare(
        &mut self,
        factory: &Factory<B>,
        _queue: QueueId,
        index: usize,
        _subpass: hal::pass::Subpass<'_, B>,
        aux: &GraphAuxData,
    ) -> PrepareResult {
        let mut triangles = <&Triangle>::query();
        // Get our scale value
        let scale = aux.resources.get::<CustomUniformArgs>().unwrap();
        // Write to our DynamicUniform
        self.env.write(factory, index, scale.std140());
        //Update vertex count and see if it has changed
        let old_vertex_count = self.vertex_count;
        self.vertex_count = triangles.iter(aux.world).count() * 3;
        let changed = old_vertex_count != self.vertex_count;
        // Create an iterator over the Triangle vertices
        let vertex_data_iter = triangles
            .iter(aux.world)
            .flat_map(|triangle| triangle.get_args());
        // Write the vector to a Vertex buffer
        self.vertex.write(
            factory,
            index,
            self.vertex_count as u64,
            Some(vertex_data_iter.collect::<Box<[CustomArgs]>>()),
        );
        // Return with we can reuse the draw buffers using the utility struct ChangeDetection
        self.change.prepare_result(index, changed)
    }
    // Records the actual draw: bind pipeline, uniform, vertex buffer, draw.
    fn draw_inline(
        &mut self,
        mut encoder: RenderPassEncoder<'_, B>,
        index: usize,
        _subpass: hal::pass::Subpass<'_, B>,
        _aux: &GraphAuxData,
    ) {
        // Don't worry about drawing if there are no vertices. Like before the state adds them to the screen.
        if self.vertex_count == 0 {
            return;
        }
        // Bind the pipeline to the the encoder
        encoder.bind_graphics_pipeline(&self.pipeline);
        // Bind the Dynamic buffer with the scale to the encoder
        self.env.bind(index, &self.pipeline_layout, 0, &mut encoder);
        // Bind the vertex buffer to the encoder
        self.vertex.bind(index, 0, 0, &mut encoder);
        // Draw the vertices
        unsafe {
            encoder.draw(0..self.vertex_count as u32, 0..1);
        }
    }
    // Releases the GPU objects this group owns.
    fn dispose(self: Box<Self>, factory: &mut Factory<B>, _aux: &GraphAuxData) {
        unsafe {
            factory.device().destroy_graphics_pipeline(self.pipeline);
            factory
                .device()
                .destroy_pipeline_layout(self.pipeline_layout);
        }
    }
}
// Compiles the graphics pipeline (and its layout) used by `DrawCustom`:
// custom vertex format, triangle-list topology, alpha blending, and the
// embedded SPIR-V vertex/fragment shaders. On failure the already-created
// layout is destroyed before the error is returned.
fn build_custom_pipeline<B: Backend>(
    factory: &Factory<B>,
    subpass: hal::pass::Subpass<'_, B>,
    framebuffer_width: u32,
    framebuffer_height: u32,
    layouts: Vec<&B::DescriptorSetLayout>,
) -> Result<(B::GraphicsPipeline, B::PipelineLayout), pso::CreationError> {
    let pipeline_layout = unsafe {
        factory
            .device()
            .create_pipeline_layout(layouts, None as Option<(_, _)>)
    }?;
    // Load the shaders
    let shader_vertex = unsafe { VERTEX.module(factory).unwrap() };
    let shader_fragment = unsafe { FRAGMENT.module(factory).unwrap() };
    // Build the pipeline
    let pipes = PipelinesBuilder::new()
        .with_pipeline(
            PipelineDescBuilder::new()
                // This Pipeline uses our custom vertex description and does not use instancing
                .with_vertex_desc(&[(CustomArgs::vertex(), pso::VertexInputRate::Vertex)])
                .with_input_assembler(pso::InputAssemblerDesc::new(pso::Primitive::TriangleList))
                // Add the shaders
                .with_shaders(util::simple_shader_set(
                    &shader_vertex,
                    Some(&shader_fragment),
                ))
                .with_layout(&pipeline_layout)
                .with_subpass(subpass)
                .with_framebuffer_size(framebuffer_width, framebuffer_height)
                // We are using alpha blending
                .with_blend_targets(vec![pso::ColorBlendDesc {
                    mask: pso::ColorMask::ALL,
                    blend: Some(pso::BlendState::ALPHA),
                }]),
        )
        .build(factory, None);
    // Destroy the shader modules once the pipeline has been built from them.
    unsafe {
        factory.destroy_shader_module(shader_vertex);
        factory.destroy_shader_module(shader_fragment);
    }
    // Handle the Errors
    match pipes {
        Err(e) => {
            unsafe {
                factory.device().destroy_pipeline_layout(pipeline_layout);
            }
            Err(e)
        }
        Ok(mut pipes) => Ok((pipes.remove(0), pipeline_layout)),
    }
}
/// A [RenderPlugin] for our custom plugin
///
/// Stateless: it only registers the uniform resource and schedules the
/// render group (see the impl below).
#[derive(Default, Debug)]
pub struct RenderCustom {}
impl<B: Backend> RenderPlugin<B> for RenderCustom {
    // Called once at dispatcher build time: seed the ECS resources this
    // plugin's render group reads.
    fn on_build<'a, 'b>(
        &mut self,
        _world: &mut World,
        resources: &mut Resources,
        _builder: &mut DispatcherBuilder,
    ) -> Result<(), Error> {
        // Add the required components to the world ECS
        resources.insert(CustomUniformArgs { scale: 1.0 });
        Ok(())
    }
    // Schedule our render group into the main target, in the transparent
    // (alpha-blended) draw order.
    fn on_plan(
        &mut self,
        plan: &mut RenderPlan<B>,
        _factory: &mut Factory<B>,
        _world: &World,
        _resources: &Resources,
    ) -> Result<(), Error> {
        plan.extend_target(Target::Main, |ctx| {
            ctx.add(RenderOrder::Transparent, DrawCustomDesc::new().builder())?;
            Ok(())
        });
        Ok(())
    }
}
/// Vertex Arguments to pass into shader.
/// VertexData in shader:
/// layout(location = 0) out VertexData {
///    vec2 pos;
///    vec4 color;
/// } vertex;
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, AsStd140)]
#[repr(C, align(4))]
pub struct CustomArgs {
    /// vec2 pos;
    pub pos: vec2,
    /// vec4 color;
    pub color: vec4,
}
/// Required to send data into the shader.
/// These names must match the shader.
impl AsVertex for CustomArgs {
    // Describes the per-vertex layout: a 2-float position followed by a
    // 4-float color, matching the struct field order above.
    fn vertex() -> VertexFormat {
        VertexFormat::new((
            // vec2 pos;
            (Format::Rg32Sfloat, "pos"),
            // vec4 color;
            (Format::Rgba32Sfloat, "color"),
        ))
    }
}
/// CustomUniformArgs
/// A Uniform we pass into the shader containing the current scale.
/// Uniform in shader:
/// layout(std140, set = 0, binding = 0) uniform CustomUniformArgs {
///    uniform float scale;
/// };
#[derive(Clone, Copy, Debug, AsStd140)]
#[repr(C, align(4))]
pub struct CustomUniformArgs {
    /// The value each vertex is scaled by.
    pub scale: float,
}
/// Component for the triangles we wish to draw to the screen
#[derive(Debug, Default)]
pub struct Triangle {
    // The points of the triangle
    pub points: [[f32; 2]; 3],
    // The colors for each point of the triangle
    pub colors: [[f32; 4]; 3],
}
impl Triangle {
/// Helper function to convert triangle into 3 vertices
pub fn get_args(&self) -> Vec<CustomArgs> {
let mut vec = Vec::new();
vec.extend((0..3).map(|i| {
CustomArgs {
pos: self.points[i].into(),
color: self.colors[i].into(),
}
}));
vec
}
}
| 30.96338 | 116 | 0.588792 |
1e38efc30840c72641d12fb9544a930d944d499e
| 5,592 |
//! Functionality related to the .pcap file format
//!
//! Asynchronously parse uncompressed pcap bytes as a `futures::stream::Stream<Item=(u64, Bytes)>` of `(timestamp, packet)` tuples.
//!
use anyhow::Result;
use bytes::buf::BufMut;
use bytes::{Bytes, BytesMut};
use futures::io::{AsyncRead, AsyncReadExt};
use futures::stream::Stream;
use futures::task::Poll;
use nom::{self, IResult};
use pcap_parser::pcap::{
parse_pcap_frame, parse_pcap_frame_be, parse_pcap_header, LegacyPcapBlock,
};
use pcap_parser::PcapError;
use std::pin::Pin;
use std::task::Context;
#[pin_project::pin_project(project = PacketsProj)]
/// [AsyncRead] combinator type for parsing pcap files into a [Stream] of timestamped [Bytes] for each packet present in the file.
///
/// Wrapped types implementing [AsyncRead] are expected to yield uncompressed data in .pcap form with packet timestamps which never decrease (i.e. the file is already time-ordered). If the file is detected to be unordered or corrupt,
/// an error will be returned and TODO: define and test error return behavior for corrupt or unordered files.
pub struct Packets<R> {
    ts_usec_multiplier: u16, // we will always emit nanosecond-precision values. ts_usec_multiplier will
    // be 1000 for all microsecond-precision pcap files
    #[pin]
    reader: R,
    // Bytes read from `reader` that have not yet formed a complete packet.
    buffer: BytesMut,
    // NOTE(review): set to false in `new` and never flipped to true in this
    // file — confirm whether the EOF bookkeeping is finished.
    reader_exhausted: bool,
    // Endianness-appropriate frame parser, chosen from the file header.
    parse: LegacyParseFn,
}
// Signature shared by pcap_parser's LE/BE legacy-frame parsers.
type LegacyParseFn = fn(&[u8]) -> IResult<&[u8], LegacyPcapBlock, PcapError>;
impl<R> Packets<R>
where
R: AsyncRead + std::marker::Unpin,
{
/// Given an internal buffer `capacity` and an [AsyncRead] reader which yields bytes in uncompressed .pcap format, validate
/// the pcap file header and, on success, construct a [`Packets<R>`].
pub async fn new(capacity: usize, mut reader: R) -> Result<Packets<R>, PcapError> {
let mut header_bytes = [0; 24];
let mut n_header_bytes_read = 0;
let is_bigendian: bool;
let ts_usec_multiplier;
loop {
n_header_bytes_read += reader
.read(&mut header_bytes[n_header_bytes_read..])
.await
.or(Err(PcapError::ReadError))?;
// TODO: handle getting less data than a pcap header??
let (_, header) = match parse_pcap_header(&header_bytes) {
Ok((r, h)) => Ok((r, h)),
Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => Err(e),
Err(_) => continue, //incomplete. TODO: bail if we continue to fail our read
}?;
ts_usec_multiplier = if header.is_nanosecond_precision() {
1
} else {
1000
};
is_bigendian = header.is_bigendian();
break;
}
let parse = if is_bigendian {
parse_pcap_frame_be
} else {
parse_pcap_frame
};
Ok(Packets {
ts_usec_multiplier,
reader,
buffer: BytesMut::with_capacity(capacity),
reader_exhausted: false,
parse,
})
}
}
impl<R: AsyncRead> Stream for Packets<R>
where
    R: AsyncRead,
{
    // Each item is (nanosecond timestamp, raw packet record bytes).
    type Item = Result<(u64, Bytes), nom::Err<PcapError>>;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // NOTE(review): `reader_exhausted` is never set to true anywhere in
        // this file, so this early-EOF branch appears unreachable — confirm.
        if self.buffer.is_empty() && self.reader_exhausted {
            return Poll::Ready(None); // EOF
        }
        loop {
            match (self.as_mut().parse)(&self.buffer) {
                Ok((rem, packet)) => {
                    // TODO: write the nanosecond timestamp into the data??
                    let nanosecond_ts = packet.ts_sec as u64 * 1000000000
                        + packet.ts_usec as u64 * self.ts_usec_multiplier as u64;
                    // Bytes consumed by this record = buffer shrinkage.
                    let packet_n_bytes = self.buffer.len() - rem.len();
                    return Poll::Ready(Some(Ok((
                        nanosecond_ts,
                        self.as_mut()
                            .project()
                            .buffer
                            .split_to(packet_n_bytes)
                            .freeze(),
                    ))));
                }
                // NOTE(review): nom Failure is downgraded to Error here, so
                // callers cannot distinguish the two — confirm intentional.
                Err(nom::Err::Error(e)) | Err(nom::Err::Failure(e)) => {
                    return Poll::Ready(Some(Err(nom::Err::Error(e))))
                }
                Err(_) => {
                    // incomplete. get some more data from our underlying reader
                    let PacketsProj {
                        ts_usec_multiplier: _,
                        reader,
                        buffer,
                        reader_exhausted: _,
                        parse: _,
                    } = self.as_mut().project();
                    // SAFETY(review): casts the uninitialized spare capacity
                    // to &mut [u8]; sound only if `reader.poll_read` never
                    // reads from the slice before writing — TODO confirm for
                    // all wrapped readers.
                    let to_read = unsafe {
                        &mut *(buffer.bytes_mut() as *mut [std::mem::MaybeUninit<u8>]
                            as *mut [u8])
                    };
                    if let Poll::Ready(Ok(n_bytes_read)) = reader.poll_read(cx, to_read) {
                        // got more data! loop around to see whether we now have a complete packet
                        if n_bytes_read == 0 {
                            // EOF; any partial record left in the buffer is
                            // discarded with the stream.
                            return Poll::Ready(None);
                        }
                        unsafe {
                            self.as_mut().project().buffer.advance_mut(n_bytes_read);
                        }
                    } else {
                        return Poll::Pending; // our poll_read call will have scheduled our next wakeup for us
                    }
                }
            }
        }
    }
}
| 39.659574 | 233 | 0.532546 |
ab996b76b75d0c3b83c6cf1c500b0164ec73464c
| 307 |
//! All common type definitions through the bakerVM ecosystem
//!
//! # Example
//! ```
//! use core::typedef::*;
//! ```
/// A single raw byte.
pub type Byte = u8;
/// Floating-point value type (64-bit).
pub type Float = f64;
/// Signed integer value type (64-bit).
pub type Integer = i64;
/// Index into VM memory / program storage.
pub type Address = usize;
/// Raw image payload as a flat byte buffer.
pub type ImageData = Vec<Byte>;
/// Color triple, one `u8` per channel (presumably RGB — confirm with renderer).
pub type Color = (u8, u8, u8);
/// A display frame as a flat list of pixel colors.
pub type Frame = Vec<Color>;
| 20.466667 | 61 | 0.638436 |
69504c5d5a0b8e3ad7c5bf83db17d6c03eb0572c
| 23,113 |
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use crate::Accessible;
use crate::AccessibleRole;
use crate::Align;
use crate::AppChooser;
use crate::Application;
use crate::Buildable;
use crate::ConstraintTarget;
use crate::Dialog;
use crate::DialogFlags;
use crate::LayoutManager;
use crate::Native;
use crate::Overflow;
use crate::Root;
use crate::ShortcutManager;
use crate::Widget;
use crate::Window;
use glib::object::Cast;
use glib::object::IsA;
use glib::object::ObjectType as ObjectType_;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::wrapper! {
    // GObject wrapper for GtkAppChooserDialog; the @extends/@implements lists
    // mirror the GTK type hierarchy so trait-based upcasts/downcasts work.
    #[doc(alias = "GtkAppChooserDialog")]
    pub struct AppChooserDialog(Object<ffi::GtkAppChooserDialog>) @extends Dialog, Window, Widget, @implements Accessible, Buildable, ConstraintTarget, Native, Root, ShortcutManager, AppChooser;
    match fn {
        type_ => || ffi::gtk_app_chooser_dialog_get_type(),
    }
}
// Generated FFI bindings: each method wraps one gtk_app_chooser_dialog_* C call.
impl AppChooserDialog {
    #[doc(alias = "gtk_app_chooser_dialog_new")]
    pub fn new(
        parent: Option<&impl IsA<Window>>,
        flags: DialogFlags,
        file: &impl IsA<gio::File>,
    ) -> AppChooserDialog {
        assert_initialized_main_thread!();
        unsafe {
            Widget::from_glib_none(ffi::gtk_app_chooser_dialog_new(
                parent.map(|p| p.as_ref()).to_glib_none().0,
                flags.into_glib(),
                file.as_ref().to_glib_none().0,
            ))
            .unsafe_cast()
        }
    }
    #[doc(alias = "gtk_app_chooser_dialog_new_for_content_type")]
    #[doc(alias = "new_for_content_type")]
    pub fn for_content_type(
        parent: Option<&impl IsA<Window>>,
        flags: DialogFlags,
        content_type: &str,
    ) -> AppChooserDialog {
        assert_initialized_main_thread!();
        unsafe {
            Widget::from_glib_none(ffi::gtk_app_chooser_dialog_new_for_content_type(
                parent.map(|p| p.as_ref()).to_glib_none().0,
                flags.into_glib(),
                content_type.to_glib_none().0,
            ))
            .unsafe_cast()
        }
    }
    // rustdoc-stripper-ignore-next
    /// Creates a new builder-pattern struct instance to construct [`AppChooserDialog`] objects.
    ///
    /// This method returns an instance of [`AppChooserDialogBuilder`] which can be used to create [`AppChooserDialog`] objects.
    pub fn builder() -> AppChooserDialogBuilder {
        AppChooserDialogBuilder::default()
    }
    #[doc(alias = "gtk_app_chooser_dialog_get_heading")]
    #[doc(alias = "get_heading")]
    pub fn heading(&self) -> Option<glib::GString> {
        unsafe {
            from_glib_none(ffi::gtk_app_chooser_dialog_get_heading(
                self.to_glib_none().0,
            ))
        }
    }
    #[doc(alias = "gtk_app_chooser_dialog_get_widget")]
    #[doc(alias = "get_widget")]
    pub fn widget(&self) -> Widget {
        unsafe {
            from_glib_none(ffi::gtk_app_chooser_dialog_get_widget(
                self.to_glib_none().0,
            ))
        }
    }
    #[doc(alias = "gtk_app_chooser_dialog_set_heading")]
    pub fn set_heading(&self, heading: &str) {
        unsafe {
            ffi::gtk_app_chooser_dialog_set_heading(
                self.to_glib_none().0,
                heading.to_glib_none().0,
            );
        }
    }
    // Manual getter for the "gfile" GObject property (no C convenience API
    // exists for it, so it is read through g_object_get_property).
    pub fn gfile(&self) -> Option<gio::File> {
        unsafe {
            let mut value = glib::Value::from_type(<gio::File as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(
                self.as_ptr() as *mut glib::gobject_ffi::GObject,
                b"gfile\0".as_ptr() as *const _,
                value.to_glib_none_mut().0,
            );
            value
                .get()
                .expect("Return Value for property `gfile` getter")
        }
    }
    #[doc(alias = "heading")]
    pub fn connect_heading_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        // C-ABI trampoline that forwards the GObject notify signal back into the
        // boxed Rust closure passed as user data.
        unsafe extern "C" fn notify_heading_trampoline<F: Fn(&AppChooserDialog) + 'static>(
            this: *mut ffi::GtkAppChooserDialog,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::heading\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    notify_heading_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
}
impl Default for AppChooserDialog {
    // Constructs the dialog with every property left at its GObject default.
    fn default() -> Self {
        glib::object::Object::new::<Self>(&[])
            .expect("Can't construct AppChooserDialog object with default parameters")
    }
}
#[derive(Clone, Default)]
// rustdoc-stripper-ignore-next
/// A [builder-pattern] type to construct [`AppChooserDialog`] objects.
///
/// [builder-pattern]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html
pub struct AppChooserDialogBuilder {
    // Each field shadows one GObject property (own properties first, then the
    // inherited Dialog/Window/Widget/Accessible/AppChooser ones); `None` means
    // "leave at the property's default" when `build` is called.
    gfile: Option<gio::File>,
    heading: Option<String>,
    use_header_bar: Option<i32>,
    application: Option<Application>,
    child: Option<Widget>,
    decorated: Option<bool>,
    default_height: Option<i32>,
    default_widget: Option<Widget>,
    default_width: Option<i32>,
    deletable: Option<bool>,
    destroy_with_parent: Option<bool>,
    display: Option<gdk::Display>,
    focus_visible: Option<bool>,
    focus_widget: Option<Widget>,
    fullscreened: Option<bool>,
    #[cfg(any(feature = "v4_2", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
    handle_menubar_accel: Option<bool>,
    hide_on_close: Option<bool>,
    icon_name: Option<String>,
    maximized: Option<bool>,
    mnemonics_visible: Option<bool>,
    modal: Option<bool>,
    resizable: Option<bool>,
    startup_id: Option<String>,
    title: Option<String>,
    #[cfg(any(feature = "v4_6", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v4_6")))]
    titlebar: Option<Widget>,
    transient_for: Option<Window>,
    can_focus: Option<bool>,
    can_target: Option<bool>,
    css_classes: Option<Vec<String>>,
    css_name: Option<String>,
    cursor: Option<gdk::Cursor>,
    focus_on_click: Option<bool>,
    focusable: Option<bool>,
    halign: Option<Align>,
    has_tooltip: Option<bool>,
    height_request: Option<i32>,
    hexpand: Option<bool>,
    hexpand_set: Option<bool>,
    layout_manager: Option<LayoutManager>,
    margin_bottom: Option<i32>,
    margin_end: Option<i32>,
    margin_start: Option<i32>,
    margin_top: Option<i32>,
    name: Option<String>,
    opacity: Option<f64>,
    overflow: Option<Overflow>,
    receives_default: Option<bool>,
    sensitive: Option<bool>,
    tooltip_markup: Option<String>,
    tooltip_text: Option<String>,
    valign: Option<Align>,
    vexpand: Option<bool>,
    vexpand_set: Option<bool>,
    visible: Option<bool>,
    width_request: Option<i32>,
    accessible_role: Option<AccessibleRole>,
    content_type: Option<String>,
}
impl AppChooserDialogBuilder {
    // rustdoc-stripper-ignore-next
    /// Create a new [`AppChooserDialogBuilder`].
    pub fn new() -> Self {
        Self::default()
    }
    // rustdoc-stripper-ignore-next
    /// Build the [`AppChooserDialog`].
    pub fn build(self) -> AppChooserDialog {
        // Collect only the explicitly-set properties (kebab-case GObject names)
        // and pass them to the GObject constructor in a single call.
        let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
        if let Some(ref gfile) = self.gfile {
            properties.push(("gfile", gfile));
        }
        if let Some(ref heading) = self.heading {
            properties.push(("heading", heading));
        }
        if let Some(ref use_header_bar) = self.use_header_bar {
            properties.push(("use-header-bar", use_header_bar));
        }
        if let Some(ref application) = self.application {
            properties.push(("application", application));
        }
        if let Some(ref child) = self.child {
            properties.push(("child", child));
        }
        if let Some(ref decorated) = self.decorated {
            properties.push(("decorated", decorated));
        }
        if let Some(ref default_height) = self.default_height {
            properties.push(("default-height", default_height));
        }
        if let Some(ref default_widget) = self.default_widget {
            properties.push(("default-widget", default_widget));
        }
        if let Some(ref default_width) = self.default_width {
            properties.push(("default-width", default_width));
        }
        if let Some(ref deletable) = self.deletable {
            properties.push(("deletable", deletable));
        }
        if let Some(ref destroy_with_parent) = self.destroy_with_parent {
            properties.push(("destroy-with-parent", destroy_with_parent));
        }
        if let Some(ref display) = self.display {
            properties.push(("display", display));
        }
        if let Some(ref focus_visible) = self.focus_visible {
            properties.push(("focus-visible", focus_visible));
        }
        if let Some(ref focus_widget) = self.focus_widget {
            properties.push(("focus-widget", focus_widget));
        }
        if let Some(ref fullscreened) = self.fullscreened {
            properties.push(("fullscreened", fullscreened));
        }
        #[cfg(any(feature = "v4_2", feature = "dox"))]
        if let Some(ref handle_menubar_accel) = self.handle_menubar_accel {
            properties.push(("handle-menubar-accel", handle_menubar_accel));
        }
        if let Some(ref hide_on_close) = self.hide_on_close {
            properties.push(("hide-on-close", hide_on_close));
        }
        if let Some(ref icon_name) = self.icon_name {
            properties.push(("icon-name", icon_name));
        }
        if let Some(ref maximized) = self.maximized {
            properties.push(("maximized", maximized));
        }
        if let Some(ref mnemonics_visible) = self.mnemonics_visible {
            properties.push(("mnemonics-visible", mnemonics_visible));
        }
        if let Some(ref modal) = self.modal {
            properties.push(("modal", modal));
        }
        if let Some(ref resizable) = self.resizable {
            properties.push(("resizable", resizable));
        }
        if let Some(ref startup_id) = self.startup_id {
            properties.push(("startup-id", startup_id));
        }
        if let Some(ref title) = self.title {
            properties.push(("title", title));
        }
        #[cfg(any(feature = "v4_6", feature = "dox"))]
        if let Some(ref titlebar) = self.titlebar {
            properties.push(("titlebar", titlebar));
        }
        if let Some(ref transient_for) = self.transient_for {
            properties.push(("transient-for", transient_for));
        }
        if let Some(ref can_focus) = self.can_focus {
            properties.push(("can-focus", can_focus));
        }
        if let Some(ref can_target) = self.can_target {
            properties.push(("can-target", can_target));
        }
        if let Some(ref css_classes) = self.css_classes {
            properties.push(("css-classes", css_classes));
        }
        if let Some(ref css_name) = self.css_name {
            properties.push(("css-name", css_name));
        }
        if let Some(ref cursor) = self.cursor {
            properties.push(("cursor", cursor));
        }
        if let Some(ref focus_on_click) = self.focus_on_click {
            properties.push(("focus-on-click", focus_on_click));
        }
        if let Some(ref focusable) = self.focusable {
            properties.push(("focusable", focusable));
        }
        if let Some(ref halign) = self.halign {
            properties.push(("halign", halign));
        }
        if let Some(ref has_tooltip) = self.has_tooltip {
            properties.push(("has-tooltip", has_tooltip));
        }
        if let Some(ref height_request) = self.height_request {
            properties.push(("height-request", height_request));
        }
        if let Some(ref hexpand) = self.hexpand {
            properties.push(("hexpand", hexpand));
        }
        if let Some(ref hexpand_set) = self.hexpand_set {
            properties.push(("hexpand-set", hexpand_set));
        }
        if let Some(ref layout_manager) = self.layout_manager {
            properties.push(("layout-manager", layout_manager));
        }
        if let Some(ref margin_bottom) = self.margin_bottom {
            properties.push(("margin-bottom", margin_bottom));
        }
        if let Some(ref margin_end) = self.margin_end {
            properties.push(("margin-end", margin_end));
        }
        if let Some(ref margin_start) = self.margin_start {
            properties.push(("margin-start", margin_start));
        }
        if let Some(ref margin_top) = self.margin_top {
            properties.push(("margin-top", margin_top));
        }
        if let Some(ref name) = self.name {
            properties.push(("name", name));
        }
        if let Some(ref opacity) = self.opacity {
            properties.push(("opacity", opacity));
        }
        if let Some(ref overflow) = self.overflow {
            properties.push(("overflow", overflow));
        }
        if let Some(ref receives_default) = self.receives_default {
            properties.push(("receives-default", receives_default));
        }
        if let Some(ref sensitive) = self.sensitive {
            properties.push(("sensitive", sensitive));
        }
        if let Some(ref tooltip_markup) = self.tooltip_markup {
            properties.push(("tooltip-markup", tooltip_markup));
        }
        if let Some(ref tooltip_text) = self.tooltip_text {
            properties.push(("tooltip-text", tooltip_text));
        }
        if let Some(ref valign) = self.valign {
            properties.push(("valign", valign));
        }
        if let Some(ref vexpand) = self.vexpand {
            properties.push(("vexpand", vexpand));
        }
        if let Some(ref vexpand_set) = self.vexpand_set {
            properties.push(("vexpand-set", vexpand_set));
        }
        if let Some(ref visible) = self.visible {
            properties.push(("visible", visible));
        }
        if let Some(ref width_request) = self.width_request {
            properties.push(("width-request", width_request));
        }
        if let Some(ref accessible_role) = self.accessible_role {
            properties.push(("accessible-role", accessible_role));
        }
        if let Some(ref content_type) = self.content_type {
            properties.push(("content-type", content_type));
        }
        glib::Object::new::<AppChooserDialog>(&properties)
            .expect("Failed to create an instance of AppChooserDialog")
    }
    // Generated property setters: each records a value to be applied at `build`
    // time; borrow-like arguments are cloned and upcast to the property's type.
    pub fn gfile(mut self, gfile: &impl IsA<gio::File>) -> Self {
        self.gfile = Some(gfile.clone().upcast());
        self
    }
    pub fn heading(mut self, heading: &str) -> Self {
        self.heading = Some(heading.to_string());
        self
    }
    pub fn use_header_bar(mut self, use_header_bar: i32) -> Self {
        self.use_header_bar = Some(use_header_bar);
        self
    }
    pub fn application(mut self, application: &impl IsA<Application>) -> Self {
        self.application = Some(application.clone().upcast());
        self
    }
    pub fn child(mut self, child: &impl IsA<Widget>) -> Self {
        self.child = Some(child.clone().upcast());
        self
    }
    pub fn decorated(mut self, decorated: bool) -> Self {
        self.decorated = Some(decorated);
        self
    }
    pub fn default_height(mut self, default_height: i32) -> Self {
        self.default_height = Some(default_height);
        self
    }
    pub fn default_widget(mut self, default_widget: &impl IsA<Widget>) -> Self {
        self.default_widget = Some(default_widget.clone().upcast());
        self
    }
    pub fn default_width(mut self, default_width: i32) -> Self {
        self.default_width = Some(default_width);
        self
    }
    pub fn deletable(mut self, deletable: bool) -> Self {
        self.deletable = Some(deletable);
        self
    }
    pub fn destroy_with_parent(mut self, destroy_with_parent: bool) -> Self {
        self.destroy_with_parent = Some(destroy_with_parent);
        self
    }
    pub fn display(mut self, display: &impl IsA<gdk::Display>) -> Self {
        self.display = Some(display.clone().upcast());
        self
    }
    pub fn focus_visible(mut self, focus_visible: bool) -> Self {
        self.focus_visible = Some(focus_visible);
        self
    }
    pub fn focus_widget(mut self, focus_widget: &impl IsA<Widget>) -> Self {
        self.focus_widget = Some(focus_widget.clone().upcast());
        self
    }
    pub fn fullscreened(mut self, fullscreened: bool) -> Self {
        self.fullscreened = Some(fullscreened);
        self
    }
    #[cfg(any(feature = "v4_2", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v4_2")))]
    pub fn handle_menubar_accel(mut self, handle_menubar_accel: bool) -> Self {
        self.handle_menubar_accel = Some(handle_menubar_accel);
        self
    }
    pub fn hide_on_close(mut self, hide_on_close: bool) -> Self {
        self.hide_on_close = Some(hide_on_close);
        self
    }
    pub fn icon_name(mut self, icon_name: &str) -> Self {
        self.icon_name = Some(icon_name.to_string());
        self
    }
    pub fn maximized(mut self, maximized: bool) -> Self {
        self.maximized = Some(maximized);
        self
    }
    pub fn mnemonics_visible(mut self, mnemonics_visible: bool) -> Self {
        self.mnemonics_visible = Some(mnemonics_visible);
        self
    }
    pub fn modal(mut self, modal: bool) -> Self {
        self.modal = Some(modal);
        self
    }
    pub fn resizable(mut self, resizable: bool) -> Self {
        self.resizable = Some(resizable);
        self
    }
    pub fn startup_id(mut self, startup_id: &str) -> Self {
        self.startup_id = Some(startup_id.to_string());
        self
    }
    pub fn title(mut self, title: &str) -> Self {
        self.title = Some(title.to_string());
        self
    }
    #[cfg(any(feature = "v4_6", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v4_6")))]
    pub fn titlebar(mut self, titlebar: &impl IsA<Widget>) -> Self {
        self.titlebar = Some(titlebar.clone().upcast());
        self
    }
    pub fn transient_for(mut self, transient_for: &impl IsA<Window>) -> Self {
        self.transient_for = Some(transient_for.clone().upcast());
        self
    }
    pub fn can_focus(mut self, can_focus: bool) -> Self {
        self.can_focus = Some(can_focus);
        self
    }
    pub fn can_target(mut self, can_target: bool) -> Self {
        self.can_target = Some(can_target);
        self
    }
    pub fn css_classes(mut self, css_classes: Vec<String>) -> Self {
        self.css_classes = Some(css_classes);
        self
    }
    pub fn css_name(mut self, css_name: &str) -> Self {
        self.css_name = Some(css_name.to_string());
        self
    }
    pub fn cursor(mut self, cursor: &gdk::Cursor) -> Self {
        self.cursor = Some(cursor.clone());
        self
    }
    pub fn focus_on_click(mut self, focus_on_click: bool) -> Self {
        self.focus_on_click = Some(focus_on_click);
        self
    }
    pub fn focusable(mut self, focusable: bool) -> Self {
        self.focusable = Some(focusable);
        self
    }
    pub fn halign(mut self, halign: Align) -> Self {
        self.halign = Some(halign);
        self
    }
    pub fn has_tooltip(mut self, has_tooltip: bool) -> Self {
        self.has_tooltip = Some(has_tooltip);
        self
    }
    pub fn height_request(mut self, height_request: i32) -> Self {
        self.height_request = Some(height_request);
        self
    }
    pub fn hexpand(mut self, hexpand: bool) -> Self {
        self.hexpand = Some(hexpand);
        self
    }
    pub fn hexpand_set(mut self, hexpand_set: bool) -> Self {
        self.hexpand_set = Some(hexpand_set);
        self
    }
    pub fn layout_manager(mut self, layout_manager: &impl IsA<LayoutManager>) -> Self {
        self.layout_manager = Some(layout_manager.clone().upcast());
        self
    }
    pub fn margin_bottom(mut self, margin_bottom: i32) -> Self {
        self.margin_bottom = Some(margin_bottom);
        self
    }
    pub fn margin_end(mut self, margin_end: i32) -> Self {
        self.margin_end = Some(margin_end);
        self
    }
    pub fn margin_start(mut self, margin_start: i32) -> Self {
        self.margin_start = Some(margin_start);
        self
    }
    pub fn margin_top(mut self, margin_top: i32) -> Self {
        self.margin_top = Some(margin_top);
        self
    }
    pub fn name(mut self, name: &str) -> Self {
        self.name = Some(name.to_string());
        self
    }
    pub fn opacity(mut self, opacity: f64) -> Self {
        self.opacity = Some(opacity);
        self
    }
    pub fn overflow(mut self, overflow: Overflow) -> Self {
        self.overflow = Some(overflow);
        self
    }
    pub fn receives_default(mut self, receives_default: bool) -> Self {
        self.receives_default = Some(receives_default);
        self
    }
    pub fn sensitive(mut self, sensitive: bool) -> Self {
        self.sensitive = Some(sensitive);
        self
    }
    pub fn tooltip_markup(mut self, tooltip_markup: &str) -> Self {
        self.tooltip_markup = Some(tooltip_markup.to_string());
        self
    }
    pub fn tooltip_text(mut self, tooltip_text: &str) -> Self {
        self.tooltip_text = Some(tooltip_text.to_string());
        self
    }
    pub fn valign(mut self, valign: Align) -> Self {
        self.valign = Some(valign);
        self
    }
    pub fn vexpand(mut self, vexpand: bool) -> Self {
        self.vexpand = Some(vexpand);
        self
    }
    pub fn vexpand_set(mut self, vexpand_set: bool) -> Self {
        self.vexpand_set = Some(vexpand_set);
        self
    }
    pub fn visible(mut self, visible: bool) -> Self {
        self.visible = Some(visible);
        self
    }
    pub fn width_request(mut self, width_request: i32) -> Self {
        self.width_request = Some(width_request);
        self
    }
    pub fn accessible_role(mut self, accessible_role: AccessibleRole) -> Self {
        self.accessible_role = Some(accessible_role);
        self
    }
    pub fn content_type(mut self, content_type: &str) -> Self {
        self.content_type = Some(content_type.to_string());
        self
    }
}
impl fmt::Display for AppChooserDialog {
    /// Formats as the bare type name, matching the generated bindings' convention.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "AppChooserDialog")
    }
}
| 32.41655 | 194 | 0.592956 |
ff97117b6532d076e7988ba937ca6ac10d03596e
| 481 |
pub mod admin;
pub mod datetime_operators;
pub mod discovery;
pub mod game;
pub mod message;
pub mod skip;
pub mod standings;
pub mod tmou22;
use std::vec::Vec;
use crate::db as db;
use crate::api as api;
// helper function for controllers
/// Returns the highest `level` among a team's database items, or `-1` when the
/// team has no items yet.
///
/// Takes a slice instead of `&Vec` (more idiomatic and more general); existing
/// callers passing `&Vec<db::Item>` keep working via deref coercion.
pub fn get_team_level(items: &[db::Item]) -> i16 {
    items.iter().map(|item| item.level).max().unwrap_or(-1)
}
/// Returns the highest `level` among a team's API items, or `-1` when there are
/// none — mirror of [`get_team_level`] for the API representation.
///
/// Takes a slice instead of `&Vec` (more idiomatic and more general); existing
/// callers passing `&Vec<api::Item>` keep working via deref coercion.
pub fn get_team_level_api(items: &[api::Item]) -> i16 {
    items.iter().map(|item| item.level).max().unwrap_or(-1)
}
| 20.913043 | 59 | 0.692308 |
f79cbf666dfc901bc9122c380d917a378f795861
| 17,970 |
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A HashMap wrapper that holds key-value pairs in insertion order.
//!
//! # Examples
//!
//! ```rust
//! use collect::LinkedHashMap;
//!
//! let mut map = LinkedHashMap::new();
//! map.insert(2, 20);
//! map.insert(1, 10);
//! map.insert(3, 30);
//! assert_eq!(*map.get(&1).unwrap(), 10);
//! assert_eq!(*map.get(&2).unwrap(), 20);
//! assert_eq!(*map.get(&3).unwrap(), 30);
//!
//! let items: Vec<(i32, i32)> = map.iter().map(|t| (*t.0, *t.1)).collect();
//! assert_eq!(vec![(2, 20), (1, 10), (3, 30)], items);
//! ```
use std::cmp::{PartialEq, Eq};
use std::collections::HashMap;
use std::fmt;
use std::hash::{Hash, Hasher, Writer};
use std::collections::hash_map::Hasher as HmHasher;
use std::iter::{Iterator, Extend};
use std::iter;
use std::marker;
use std::mem;
use std::ptr;
// FIXME(conventions): implement indexing?
// Raw-pointer key wrapper: lets the HashMap key borrow the K stored inside the
// heap-allocated list entry, avoiding a second owned copy of every key.
struct KeyRef<K> { k: *const K }
// Intrusive doubly-linked-list node holding one key-value pair; `next`/`prev`
// thread entries in insertion order through the map's sentinel head node.
struct LinkedHashMapEntry<K, V> {
    next: *mut LinkedHashMapEntry<K, V>,
    prev: *mut LinkedHashMapEntry<K, V>,
    key: K,
    value: V,
}
/// A linked hash map.
///
/// Pairs a `HashMap` (O(1) lookup, values boxed so node addresses are stable)
/// with an intrusive circular doubly-linked list through `head`, a sentinel
/// node whose key/value are deliberately left uninitialized (see `new`/`drop`).
pub struct LinkedHashMap<K, V> {
    map: HashMap<KeyRef<K>, Box<LinkedHashMapEntry<K, V>>>,
    head: *mut LinkedHashMapEntry<K, V>,
}
// Hash/Eq delegate to the pointed-to key so KeyRef hashes identically to K.
impl<S: Hasher+Writer, K: Hash<S>> Hash<S> for KeyRef<K> {
    fn hash(&self, state: &mut S) {
        // SAFETY(review): relies on `k` always pointing at a live key owned by a
        // boxed entry in the map — insert/remove must keep map and list in sync.
        unsafe { (*self.k).hash(state) }
    }
}
impl<K: PartialEq> PartialEq for KeyRef<K> {
    fn eq(&self, other: &KeyRef<K>) -> bool {
        // Compares the pointed-to keys, not the pointer values.
        unsafe{ (*self.k).eq(&*other.k) }
    }
}
impl<K: Eq> Eq for KeyRef<K> {}
impl<K, V> LinkedHashMapEntry<K, V> {
    // Creates a detached node (null links); the caller is responsible for
    // linking it into the list via `attach`.
    fn new(k: K, v: V) -> LinkedHashMapEntry<K, V> {
        LinkedHashMapEntry {
            key: k,
            value: v,
            next: ptr::null_mut(),
            prev: ptr::null_mut(),
        }
    }
}
impl<K: Hash<HmHasher> + Eq, V> LinkedHashMap<K, V> {
    /// Creates a linked hash map.
    pub fn new() -> LinkedHashMap<K, V> {
        // The sentinel's key/value are never initialized; `drop` must forget
        // them rather than drop them (see the Drop impl below).
        let map = LinkedHashMap {
            map: HashMap::new(),
            head: unsafe{ mem::transmute(box mem::uninitialized::<LinkedHashMapEntry<K, V>>()) },
        };
        // Link the sentinel to itself: an empty circular list.
        unsafe {
            (*map.head).next = map.head;
            (*map.head).prev = map.head;
        }
        return map;
    }
    /// Inserts a key-value pair into the map. If the key already existed, the old value is
    /// returned.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use collect::LinkedHashMap;
    /// let mut map = LinkedHashMap::new();
    ///
    /// map.insert(1, "a");
    /// map.insert(2, "b");
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// assert_eq!(map.get(&2), Some(&"b"));
    /// ```
    pub fn insert(&mut self, k: K, v: V) -> Option<V> {
        // First phase: update the value in place if the key exists, otherwise
        // allocate a new node. The raw pointer is captured so the list can be
        // re-linked after the map borrow ends.
        let (node_ptr, node_opt, old_val) = match self.map.get_mut(&KeyRef{k: &k}) {
            Some(node) => {
                let old_val = mem::replace(&mut node.value, v);
                let node_ptr: *mut LinkedHashMapEntry<K, V> = &mut **node;
                (node_ptr, None, Some(old_val))
            }
            None => {
                let mut node = box LinkedHashMapEntry::new(k, v);
                let node_ptr: *mut LinkedHashMapEntry<K, V> = &mut *node;
                (node_ptr, Some(node), None)
            }
        };
        match node_opt {
            None => {
                // Existing node, just update LRU position
                self.detach(node_ptr);
                self.attach(node_ptr);
            }
            Some(node) => {
                // New node: key the map by a pointer into the boxed entry.
                let keyref = unsafe { &(*node_ptr).key };
                self.map.insert(KeyRef{k: keyref}, node);
                self.attach(node_ptr);
            }
        }
        old_val
    }
    /// Returns the value corresponding to the key in the map.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use collect::LinkedHashMap;
    /// let mut map = LinkedHashMap::new();
    ///
    /// map.insert(1, "a");
    /// map.insert(2, "b");
    /// map.insert(2, "c");
    /// map.insert(3, "d");
    ///
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// assert_eq!(map.get(&2), Some(&"c"));
    /// ```
    pub fn get(&self, k: &K) -> Option<&V> {
        self.map.get(&KeyRef{k: k}).map(|e| &e.value)
    }
    /// Returns the value corresponding to the key in the map.
    ///
    /// If value is found, it is moved to the end of the list.
    /// This operation can be used in implemenation of LRU cache.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use collect::LinkedHashMap;
    /// let mut map = LinkedHashMap::new();
    ///
    /// map.insert(1, "a");
    /// map.insert(2, "b");
    /// map.insert(3, "d");
    ///
    /// assert_eq!(map.get_refresh(&2), Some(&"b"));
    ///
    /// assert_eq!((&2, &"b"), map.iter().rev().next().unwrap());
    /// ```
    pub fn get_refresh(&mut self, k: &K) -> Option<&V> {
        // Capture the node pointer inside the map borrow, then re-link the list
        // afterwards — same two-phase trick as `insert`.
        let (value, node_ptr_opt) = match self.map.get_mut(&KeyRef{k: k}) {
            None => (None, None),
            Some(node) => {
                let node_ptr: *mut LinkedHashMapEntry<K, V> = &mut **node;
                (Some(unsafe { &(*node_ptr).value }), Some(node_ptr))
            }
        };
        match node_ptr_opt {
            None => (),
            Some(node_ptr) => {
                self.detach(node_ptr);
                self.attach(node_ptr);
            }
        }
        return value;
    }
    /// Removes and returns the value corresponding to the key from the map.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use collect::LinkedHashMap;
    /// let mut map = LinkedHashMap::new();
    ///
    /// map.insert(2, "a");
    ///
    /// assert_eq!(map.remove(&1), None);
    /// assert_eq!(map.remove(&2), Some("a"));
    /// assert_eq!(map.remove(&2), None);
    /// assert_eq!(map.len(), 0);
    /// ```
    pub fn remove(&mut self, k: &K) -> Option<V> {
        let removed = self.map.remove(&KeyRef{k: k});
        removed.map(|mut node| {
            // Unlink from the insertion-order list before the box is dropped.
            let node_ptr: *mut LinkedHashMapEntry<K,V> = &mut *node;
            self.detach(node_ptr);
            node.value
        })
    }
    /// Returns the maximum number of key-value pairs the map can hold without reallocating.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use collect::LinkedHashMap;
    /// let mut map: LinkedHashMap<i32, &str> = LinkedHashMap::new();
    /// let capacity = map.capacity();
    /// ```
    pub fn capacity(&self) -> usize {
        self.map.capacity()
    }
    /// Removes the first entry.
    ///
    /// Can be used in implementation of LRU cache.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use collect::LinkedHashMap;
    /// let mut map = LinkedHashMap::new();
    /// map.insert(1, 10);
    /// map.insert(2, 20);
    /// map.pop_front();
    /// assert_eq!(map.get(&1), None);
    /// assert_eq!(map.get(&2), Some(&20));
    /// ```
    #[inline]
    pub fn pop_front(&mut self) {
        if self.len() > 0 {
            // head.prev is the oldest (first-inserted) entry.
            let lru = unsafe { (*self.head).prev };
            self.detach(lru);
            self.map.remove(&KeyRef{k: unsafe { &(*lru).key }});
        }
    }
    /// Returns the number of key-value pairs in the map.
    pub fn len(&self) -> usize { self.map.len() }
    /// Returns whether the map is currently empty.
    pub fn is_empty(&self) -> bool { self.len() == 0 }
    /// Clear the map of all key-value pairs.
    pub fn clear(&mut self) {
        self.map.clear();
        // Reset the sentinel to an empty self-linked circle; the boxed nodes are
        // freed when the inner HashMap drops them.
        unsafe {
            (*self.head).prev = self.head;
            (*self.head).next = self.head;
        }
    }
    /// A double-ended iterator visiting all key-value pairs in order of insertion.
    /// Iterator element type is `(&'a K, &'a V)`
    ///
    /// # Examples
    /// ```rust
    /// use collect::LinkedHashMap;
    ///
    /// let mut map = LinkedHashMap::new();
    /// map.insert("a", 10);
    /// map.insert("c", 30);
    /// map.insert("b", 20);
    ///
    /// let mut iter = map.iter();
    /// assert_eq!((&"a", &10), iter.next().unwrap());
    /// assert_eq!((&"c", &30), iter.next().unwrap());
    /// assert_eq!((&"b", &20), iter.next().unwrap());
    /// assert_eq!(None, iter.next());
    /// ```
    pub fn iter(&self) -> Iter<K, V> {
        // Start at the oldest entry (head.prev) and walk `prev` links toward the
        // sentinel — that order is insertion order (see `attach`).
        Iter {
            head: unsafe { (*self.head).prev },
            tail: self.head,
            remaining: self.len(),
            marker: marker::ContravariantLifetime,
        }
    }
    /// A double-ended iterator visiting all key in order of insertion.
    ///
    /// # Examples
    /// ```rust
    /// use collect::LinkedHashMap;
    ///
    /// let mut map = LinkedHashMap::new();
    /// map.insert('a', 10);
    /// map.insert('c', 30);
    /// map.insert('b', 20);
    ///
    /// let mut keys = map.keys();
    /// assert_eq!(&'a', keys.next().unwrap());
    /// assert_eq!(&'c', keys.next().unwrap());
    /// assert_eq!(&'b', keys.next().unwrap());
    /// assert_eq!(None, keys.next());
    /// ```
    pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
        // Named fn instead of a closure so the adaptor type stays nameable.
        fn first<A, B>((a, _): (A, B)) -> A { a }
        let first: fn((&'a K, &'a V)) -> &'a K = first; // coerce to fn ptr
        Keys { inner: self.iter().map(first) }
    }
    /// A double-ended iterator visiting all values in order of insertion.
    ///
    /// # Examples
    /// ```rust
    /// use collect::LinkedHashMap;
    ///
    /// let mut map = LinkedHashMap::new();
    /// map.insert('a', 10);
    /// map.insert('c', 30);
    /// map.insert('b', 20);
    ///
    /// let mut values = map.values();
    /// assert_eq!(&10, values.next().unwrap());
    /// assert_eq!(&30, values.next().unwrap());
    /// assert_eq!(&20, values.next().unwrap());
    /// assert_eq!(None, values.next());
    /// ```
    pub fn values<'a>(&'a self) -> Values<'a, K, V> {
        fn second<A, B>((_, b): (A, B)) -> B { b }
        let second: fn((&'a K, &'a V)) -> &'a V = second; // coerce to fn ptr
        Values { inner: self.iter().map(second) }
    }
}
// Private list-surgery helpers shared by insert/remove/get_refresh/pop_front.
impl<K: Hash<HmHasher> + Eq, V> LinkedHashMap<K, V> {
    // Unlinks `node` from the circular list without freeing it.
    #[inline]
    fn detach(&mut self, node: *mut LinkedHashMapEntry<K, V>) {
        unsafe {
            (*(*node).prev).next = (*node).next;
            (*(*node).next).prev = (*node).prev;
        }
    }
    // Links `node` in right after the sentinel, i.e. at the "newest" end.
    #[inline]
    fn attach(&mut self, node: *mut LinkedHashMapEntry<K, V>) {
        unsafe {
            (*node).next = (*self.head).next;
            (*node).prev = self.head;
            (*self.head).next = node;
            (*(*node).next).prev = node;
        }
    }
}
impl<K: Hash<HmHasher> + Eq, V> Extend<(K, V)> for LinkedHashMap<K, V> {
    // Inserts each pair in iteration order; duplicate keys overwrite and are
    // moved to the newest position, per `insert`'s semantics.
    fn extend<T: Iterator<Item=(K, V)>>(&mut self, mut iter: T) {
        for (k, v) in iter{
            self.insert(k, v);
        }
    }
}
impl<A: fmt::Show + Hash<HmHasher> + Eq, B: fmt::Show> fmt::Show for LinkedHashMap<A, B> {
    /// Returns a string that lists the key-value pairs in insertion order.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        try!(write!(f, "{{"));
        for (i, (k, v)) in self.iter().enumerate() {
            // Comma-separate every pair after the first.
            if i != 0 { try!(write!(f, ", ")); }
            try!(write!(f, "{:?}: {:?}", *k, *v));
        }
        write!(f, "}}")
    }
}
// SAFETY(review): the raw pointers only reference heap nodes owned by this map
// itself, so transferring/sharing the map across threads is presumed sound when
// K and V are themselves Send/Sync — confirm no aliasing escapes via iterators.
unsafe impl<K: Send, V: Send> Send for LinkedHashMap<K, V> {}
unsafe impl<K: Sync, V: Sync> Sync for LinkedHashMap<K, V> {}
#[unsafe_destructor]
impl<K, V> Drop for LinkedHashMap<K, V> {
    fn drop(&mut self) {
        // Reclaim the sentinel box allocated in `new`. Its key/value were never
        // initialized, so they must be forgotten rather than dropped.
        unsafe {
            let node: Box<LinkedHashMapEntry<K, V>> = mem::transmute(self.head);
            // Prevent compiler from trying to drop the un-initialized field in the sigil node.
            let box internal_node = node;
            let LinkedHashMapEntry { next: _, prev: _, key: k, value: v } = internal_node;
            mem::forget(k);
            mem::forget(v);
        }
    }
}
// Borrowing iterator over the insertion-order list. `head` walks from the
// oldest entry toward the sentinel (`tail`); `remaining` drives size_hint.
pub struct Iter<'a, K: 'a, V: 'a> {
    head: *const LinkedHashMapEntry<K, V>,
    tail: *const LinkedHashMapEntry<K, V>,
    remaining: usize,
    marker: marker::ContravariantLifetime<'a>,
}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a V);
    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        // Front-to-back follows `prev` links: oldest entry first (see `attach`).
        if self.head == self.tail {
            None
        } else {
            self.remaining -= 1;
            unsafe {
                let r = Some((&(*self.head).key, &(*self.head).value));
                self.head = (*self.head).prev;
                r
            }
        }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining, Some(self.remaining))
    }
}
impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> {
    fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
        // Back-to-front follows `next` links: newest entry first.
        if self.head == self.tail {
            None
        } else {
            self.remaining -= 1;
            unsafe {
                self.tail = (*self.tail).next;
                let r = Some((&(*self.tail).key, &(*self.tail).value));
                r
            }
        }
    }
}
impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {}
/// Iterator over a `LinkedHashMap`'s keys, in insertion order.
pub struct Keys<'a, K: 'a, V: 'a> {
    // Wraps the entry iterator, projecting out the key of each pair.
    inner: iter::Map<(&'a K, &'a V), &'a K, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>
}
impl<'a, K, V> Iterator for Keys<'a, K, V> {
    type Item = &'a K;
    // All methods simply delegate to the wrapped map iterator.
    #[inline] fn next(&mut self) -> Option<(&'a K)> { self.inner.next() }
    #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> {
    #[inline] fn next_back(&mut self) -> Option<(&'a K)> { self.inner.next_back() }
}
impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {}
/// Iterator over a `LinkedHashMap`'s values, in insertion order.
pub struct Values<'a, K: 'a, V: 'a> {
    // Wraps the entry iterator, projecting out the value of each pair.
    inner: iter::Map<(&'a K, &'a V), &'a V, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
    type Item = &'a V;
    // All methods simply delegate to the wrapped map iterator.
    #[inline] fn next(&mut self) -> Option<(&'a V)> { self.inner.next() }
    #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> {
    #[inline] fn next_back(&mut self) -> Option<(&'a V)> { self.inner.next_back() }
}
impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {}
// Unit tests covering insertion order, overwrite-and-reorder semantics,
// removal, clearing, and forward/backward iteration.
#[cfg(test)]
mod tests {
    use super::LinkedHashMap;
    // Asserts that `opt` is `Some` and that its payload equals `v`.
    fn assert_opt_eq<V: PartialEq>(opt: Option<&V>, v: V) {
        assert!(opt.is_some());
        assert!(opt.unwrap() == &v);
    }
    #[test]
    fn test_insert_and_get() {
        let mut map = LinkedHashMap::new();
        map.insert(1, 10);
        map.insert(2, 20);
        assert_opt_eq(map.get(&1), 10);
        assert_opt_eq(map.get(&2), 20);
        assert_eq!(map.len(), 2);
    }
    #[test]
    fn test_insert_update() {
        // Re-inserting an existing key replaces the value without growing the map.
        let mut map = LinkedHashMap::new();
        map.insert("1".to_string(), vec![10, 10]);
        map.insert("1".to_string(), vec![10, 19]);
        assert_opt_eq(map.get(&"1".to_string()), vec![10, 19]);
        assert_eq!(map.len(), 1);
    }
    #[test]
    fn test_show() {
        let mut map = LinkedHashMap::new();
        assert_eq!(format!("{:?}", map), "{}");
        map.insert(1, 10);
        map.insert(2, 20);
        map.insert(3, 30);
        assert_eq!(format!("{:?}", map), "{1i32: 10i32, 2i32: 20i32, 3i32: 30i32}");
        // Overwriting key 2 also moves it to the most-recent end of the order.
        map.insert(2, 22);
        assert_eq!(format!("{:?}", map), "{1i32: 10i32, 3i32: 30i32, 2i32: 22i32}");
        // A plain `get` does not reorder entries...
        map.get(&3);
        assert_eq!(format!("{:?}", map), "{1i32: 10i32, 3i32: 30i32, 2i32: 22i32}");
        // ...but `get_refresh` promotes the entry to the most-recent end.
        map.get_refresh(&3);
        assert_eq!(format!("{:?}", map), "{1i32: 10i32, 2i32: 22i32, 3i32: 30i32}");
        map.clear();
        assert_eq!(format!("{:?}", map), "{}");
    }
    #[test]
    fn test_remove() {
        let mut map = LinkedHashMap::new();
        map.insert(1, 10);
        map.insert(2, 20);
        map.insert(3, 30);
        map.insert(4, 40);
        map.insert(5, 50);
        map.remove(&3);
        map.remove(&4);
        assert!(map.get(&3).is_none());
        assert!(map.get(&4).is_none());
        // The map stays usable for new insertions after removals.
        map.insert(6, 60);
        map.insert(7, 70);
        map.insert(8, 80);
        assert_opt_eq(map.get(&6), 60);
        assert_opt_eq(map.get(&7), 70);
        assert_opt_eq(map.get(&8), 80);
    }
    #[test]
    fn test_clear() {
        let mut map = LinkedHashMap::new();
        map.insert(1, 10);
        map.insert(2, 20);
        map.clear();
        assert!(map.get(&1).is_none());
        assert!(map.get(&2).is_none());
        assert_eq!(format!("{:?}", map), "{}");
    }
    #[test]
    fn test_iter() {
        let mut map = LinkedHashMap::new();
        // empty iter
        assert_eq!(None, map.iter().next());
        map.insert("a", 10);
        map.insert("b", 20);
        map.insert("c", 30);
        // regular iter
        let mut iter = map.iter();
        assert_eq!((&"a", &10), iter.next().unwrap());
        assert_eq!((&"b", &20), iter.next().unwrap());
        assert_eq!((&"c", &30), iter.next().unwrap());
        assert_eq!(None, iter.next());
        assert_eq!(None, iter.next());
        // reversed iter
        let mut rev_iter = map.iter().rev();
        assert_eq!((&"c", &30), rev_iter.next().unwrap());
        assert_eq!((&"b", &20), rev_iter.next().unwrap());
        assert_eq!((&"a", &10), rev_iter.next().unwrap());
        assert_eq!(None, rev_iter.next());
        assert_eq!(None, rev_iter.next());
        // mixed
        let mut mixed_iter = map.iter();
        assert_eq!((&"a", &10), mixed_iter.next().unwrap());
        assert_eq!((&"c", &30), mixed_iter.next_back().unwrap());
        assert_eq!((&"b", &20), mixed_iter.next().unwrap());
        assert_eq!(None, mixed_iter.next());
        assert_eq!(None, mixed_iter.next_back());
    }
}
| 29.702479 | 97 | 0.505008 |
08cb2710ca037066a7ac0ac20ffd30a2690bb6b8
| 3,667 |
use serde::Serialize;
use super::*;
use crate::documents::BuildXML;
use crate::types::*;
use crate::xml_builder::*;
/// The document's style sheet: document-wide defaults plus user-defined styles.
#[derive(Debug, Clone, PartialEq, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Styles {
    // Document-wide default properties (serialized as w:docDefaults).
    doc_defaults: DocDefaults,
    // User-registered styles; a base "Normal" style is also emitted on build.
    styles: Vec<Style>,
}
impl Styles {
    /// Creates an empty style sheet with default document settings.
    pub fn new() -> Styles {
        Self::default()
    }

    /// Registers an additional style definition; returns `self` for chaining.
    pub fn add_style(mut self, style: Style) -> Self {
        self.styles.push(style);
        self
    }

    /// Sets the document-wide default font size (builder style).
    pub fn default_size(mut self, size: usize) -> Self {
        self.doc_defaults = self.doc_defaults.size(size);
        self
    }

    /// Sets the document-wide default spacing (builder style).
    pub fn default_spacing(mut self, spacing: i32) -> Self {
        self.doc_defaults = self.doc_defaults.spacing(spacing);
        self
    }

    /// Sets the document-wide default run fonts (builder style).
    pub fn default_fonts(mut self, font: RunFonts) -> Self {
        self.doc_defaults = self.doc_defaults.fonts(font);
        self
    }

    /// Replaces the whole `DocDefaults` block at once (crate-internal).
    pub(crate) fn doc_defaults(mut self, doc_defaults: DocDefaults) -> Self {
        self.doc_defaults = doc_defaults;
        self
    }

    /// Looks up a registered style by its style id, if one exists.
    pub fn find_style_by_id(&self, id: &str) -> Option<&Style> {
        for style in &self.styles {
            if style.style_id == id {
                return Some(style);
            }
        }
        None
    }

    /// Builds a map from each heading style's id to its heading level
    /// (e.g. a style named "heading 3" maps to 3). Non-heading styles and
    /// headings whose level cannot be determined are skipped.
    pub fn create_heading_style_map(&self) -> std::collections::HashMap<String, usize> {
        let mut levels = std::collections::HashMap::new();
        for style in self.styles.iter().filter(|s| s.name.is_heading()) {
            if let Some(level) = style.name.get_heading_number() {
                levels.insert(style.style_id.clone(), level);
            }
        }
        levels
    }
}
impl Default for Styles {
fn default() -> Self {
Self {
doc_defaults: DocDefaults::new(),
styles: vec![],
}
}
}
impl BuildXML for Styles {
    /// Serializes the style sheet as `w:styles` XML bytes. A base "Normal"
    /// paragraph style is always emitted before the user-defined styles.
    fn build(&self) -> Vec<u8> {
        let normal_style = Style::new("Normal", StyleType::Paragraph).name("Normal");
        let document = XMLBuilder::new()
            .open_styles()
            .add_child(&self.doc_defaults)
            .add_child(&normal_style)
            .add_children(&self.styles)
            .close();
        document.build()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::StyleType;
    #[cfg(test)]
    use pretty_assertions::assert_eq;
    use std::str;
    // Verifies the serialized XML: docDefaults first, then the implicit
    // "Normal" style, then user-defined styles.
    #[test]
    fn test_style() {
        let c =
            Styles::new().add_style(Style::new("Title", StyleType::Paragraph).name("TitleName"));
        let b = c.build();
        assert_eq!(
            str::from_utf8(&b).unwrap(),
            r#"<w:styles xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main" xmlns:w14="http://schemas.microsoft.com/office/word/2010/wordml" xmlns:w15="http://schemas.microsoft.com/office/word/2012/wordml" mc:Ignorable="w14 w15"><w:docDefaults><w:rPrDefault><w:rPr /></w:rPrDefault></w:docDefaults><w:style w:type="paragraph" w:styleId="Normal"><w:name w:val="Normal" /><w:rPr /><w:pPr><w:rPr /></w:pPr><w:basedOn w:val="Normal" /><w:next w:val="Normal" /><w:qFormat /></w:style><w:style w:type="paragraph" w:styleId="Title"><w:name w:val="TitleName" /><w:rPr /><w:pPr><w:rPr /></w:pPr><w:basedOn w:val="Normal" /><w:next w:val="Normal" /><w:qFormat /></w:style></w:styles>"#
        );
    }
    // A style named "heading 3" should map its id to heading level 3.
    #[test]
    fn test_heading_style() {
        let c = Styles::new().add_style(Style::new("ToC", StyleType::Paragraph).name("heading 3"));
        let mut m = std::collections::HashMap::new();
        m.insert("ToC".to_string(), 3);
        let b = c.create_heading_style_map();
        assert_eq!(b, m);
    }
}
| 31.886957 | 844 | 0.576493 |
083623c41c2bbb362b8d342f4b9a4170fcaff637
| 470,613 |
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
// Common ARM envelope types (generated): a tracked top-level resource, the
// lighter-weight sub-resource envelope, and a name-only debug variant.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    // NOTE: top-level resources use the "eTag" wire name; sub-resources use "etag".
    #[serde(rename = "eTag", default, skip_serializing_if = "Option::is_none")]
    pub e_tag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubResourceDebugResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}
// Expression and secret primitives: single-variant `Type` enums pin the
// discriminator string that the service expects on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Expression {
    #[serde(rename = "type")]
    pub type_: expression::Type,
    pub value: String,
}
pub mod expression {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        Expression,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SecureString {
    #[serde(flatten)]
    pub secret_base: SecretBase,
    pub value: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureKeyVaultSecretReference {
    #[serde(flatten)]
    pub secret_base: SecretBase,
    pub store: LinkedServiceReference,
    #[serde(rename = "secretName")]
    pub secret_name: serde_json::Value,
    #[serde(rename = "secretVersion", default, skip_serializing_if = "Option::is_none")]
    pub secret_version: Option<serde_json::Value>,
}
// Base carrying the polymorphic "type" discriminator of secret payloads.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SecretBase {
    #[serde(rename = "type")]
    pub type_: String,
}
// Paged list responses and integration-runtime request/response shapes
// (generated). List responses follow the ARM convention of `value` plus an
// optional `nextLink`/`continuationToken` for the following page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FactoryListResponse {
    pub value: Vec<Factory>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeListResponse {
    pub value: Vec<IntegrationRuntimeResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeReference {
    #[serde(rename = "type")]
    pub type_: integration_runtime_reference::Type,
    #[serde(rename = "referenceName")]
    pub reference_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<ParameterValueSpecification>,
}
pub mod integration_runtime_reference {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        IntegrationRuntimeReference,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    pub properties: IntegrationRuntime,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeDebugResource {
    #[serde(flatten)]
    pub sub_resource_debug_resource: SubResourceDebugResource,
    pub properties: IntegrationRuntime,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeStatusResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    pub properties: IntegrationRuntimeStatus,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeStatusListResponse {
    pub value: Vec<IntegrationRuntimeStatusResponse>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateIntegrationRuntimeRequest {
    #[serde(rename = "autoUpdate", default, skip_serializing_if = "Option::is_none")]
    pub auto_update: Option<IntegrationRuntimeAutoUpdate>,
    #[serde(rename = "updateDelayOffset", default, skip_serializing_if = "Option::is_none")]
    pub update_delay_offset: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateIntegrationRuntimeNodeRequest {
    #[serde(rename = "concurrentJobsLimit", default, skip_serializing_if = "Option::is_none")]
    pub concurrent_jobs_limit: Option<i64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedIntegrationRuntimeRequest {
    #[serde(rename = "factoryName")]
    pub factory_name: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateLinkedIntegrationRuntimeRequest {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub subscription_id: Option<String>,
    #[serde(rename = "dataFactoryName", default, skip_serializing_if = "Option::is_none")]
    pub data_factory_name: Option<String>,
    #[serde(rename = "dataFactoryLocation", default, skip_serializing_if = "Option::is_none")]
    pub data_factory_location: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedServiceListResponse {
    pub value: Vec<LinkedServiceResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetListResponse {
    pub value: Vec<DatasetResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelineListResponse {
    pub value: Vec<PipelineResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerQueryResponse {
    pub value: Vec<TriggerResource>,
    #[serde(rename = "continuationToken", default, skip_serializing_if = "Option::is_none")]
    pub continuation_token: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerListResponse {
    pub value: Vec<TriggerResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateRunResponse {
    #[serde(rename = "runId")]
    pub run_id: String,
}
// Error envelope. Note the recursion: a body's `details` holds nested
// `CloudError`s; `Vec` provides the necessary indirection.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudError {
    pub error: CloudErrorBody,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudErrorBody {
    pub code: String,
    pub message: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<CloudError>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerSubscriptionOperationStatus {
    #[serde(rename = "triggerName", default, skip_serializing_if = "Option::is_none")]
    pub trigger_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<trigger_subscription_operation_status::Status>,
}
pub mod trigger_subscription_operation_status {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Enabled,
        Provisioning,
        Deprovisioning,
        Disabled,
        Unknown,
    }
}
// Open-ended maps: the generator models "additionalProperties" objects as
// empty marker structs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VariableDefinitionSpecification {}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VariableSpecification {
    #[serde(rename = "type")]
    pub type_: variable_specification::Type,
    #[serde(rename = "defaultValue", default, skip_serializing_if = "Option::is_none")]
    pub default_value: Option<serde_json::Value>,
}
pub mod variable_specification {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        String,
        Bool,
        Array,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParameterDefinitionSpecification {}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParameterSpecification {
    #[serde(rename = "type")]
    pub type_: parameter_specification::Type,
    #[serde(rename = "defaultValue", default, skip_serializing_if = "Option::is_none")]
    pub default_value: Option<serde_json::Value>,
}
pub mod parameter_specification {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        Object,
        String,
        Int,
        Float,
        Bool,
        Array,
        SecureString,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParameterValueSpecification {}
// Source-control (VSTS / GitHub) repo configuration and factory-level
// settings (generated). `#[serde(flatten)]` inlines the base configuration's
// fields into the specialized variants.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FactoryRepoConfiguration {
    #[serde(rename = "type")]
    pub type_: String,
    #[serde(rename = "accountName")]
    pub account_name: String,
    #[serde(rename = "repositoryName")]
    pub repository_name: String,
    #[serde(rename = "collaborationBranch")]
    pub collaboration_branch: String,
    #[serde(rename = "rootFolder")]
    pub root_folder: String,
    #[serde(rename = "lastCommitId", default, skip_serializing_if = "Option::is_none")]
    pub last_commit_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FactoryVstsConfiguration {
    #[serde(flatten)]
    pub factory_repo_configuration: FactoryRepoConfiguration,
    #[serde(rename = "projectName")]
    pub project_name: String,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FactoryGitHubConfiguration {
    #[serde(flatten)]
    pub factory_repo_configuration: FactoryRepoConfiguration,
    #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")]
    pub host_name: Option<String>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<GitHubClientSecret>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GitHubClientSecret {
    #[serde(rename = "byoaSecretAkvUrl", default, skip_serializing_if = "Option::is_none")]
    pub byoa_secret_akv_url: Option<String>,
    #[serde(rename = "byoaSecretName", default, skip_serializing_if = "Option::is_none")]
    pub byoa_secret_name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FactoryRepoUpdate {
    #[serde(rename = "factoryResourceId", default, skip_serializing_if = "Option::is_none")]
    pub factory_resource_id: Option<String>,
    #[serde(rename = "repoConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub repo_configuration: Option<FactoryRepoConfiguration>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GitHubAccessTokenRequest {
    #[serde(rename = "gitHubAccessCode")]
    pub git_hub_access_code: String,
    #[serde(rename = "gitHubClientId", default, skip_serializing_if = "Option::is_none")]
    pub git_hub_client_id: Option<String>,
    #[serde(rename = "gitHubClientSecret", default, skip_serializing_if = "Option::is_none")]
    pub git_hub_client_secret: Option<GitHubClientSecret>,
    #[serde(rename = "gitHubAccessTokenBaseUrl")]
    pub git_hub_access_token_base_url: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GitHubAccessTokenResponse {
    #[serde(rename = "gitHubAccessToken", default, skip_serializing_if = "Option::is_none")]
    pub git_hub_access_token: Option<String>,
}
// Data-plane access policy request/response pair.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAccessPolicy {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub permissions: Option<String>,
    #[serde(rename = "accessResourcePath", default, skip_serializing_if = "Option::is_none")]
    pub access_resource_path: Option<String>,
    #[serde(rename = "profileName", default, skip_serializing_if = "Option::is_none")]
    pub profile_name: Option<String>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "expireTime", default, skip_serializing_if = "Option::is_none")]
    pub expire_time: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessPolicyResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub policy: Option<UserAccessPolicy>,
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<String>,
    #[serde(rename = "dataPlaneUrl", default, skip_serializing_if = "Option::is_none")]
    pub data_plane_url: Option<String>,
}
// Factory properties, customer-managed-key encryption, and global parameters.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FactoryProperties {
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(rename = "createTime", default, skip_serializing_if = "Option::is_none")]
    pub create_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(rename = "repoConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub repo_configuration: Option<FactoryRepoConfiguration>,
    #[serde(rename = "globalParameters", default, skip_serializing_if = "Option::is_none")]
    pub global_parameters: Option<GlobalParameterDefinitionSpecification>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub encryption: Option<EncryptionConfiguration>,
    #[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
    pub public_network_access: Option<factory_properties::PublicNetworkAccess>,
}
pub mod factory_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PublicNetworkAccess {
        Enabled,
        Disabled,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionConfiguration {
    #[serde(rename = "keyName")]
    pub key_name: String,
    #[serde(rename = "vaultBaseUrl")]
    pub vault_base_url: String,
    #[serde(rename = "keyVersion", default, skip_serializing_if = "Option::is_none")]
    pub key_version: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<CmkIdentityDefinition>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CmkIdentityDefinition {
    #[serde(rename = "userAssignedIdentity", default, skip_serializing_if = "Option::is_none")]
    pub user_assigned_identity: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GlobalParameterDefinitionSpecification {}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GlobalParameterSpecification {
    #[serde(rename = "type")]
    pub type_: global_parameter_specification::Type,
    pub value: serde_json::Value,
}
pub mod global_parameter_specification {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        Object,
        String,
        Int,
        Float,
        Bool,
        Array,
    }
}
// Pipeline / trigger / dataset / linked-service resources and references
// (generated). "Reference" types carry a fixed single-variant discriminator;
// "Resource" types wrap properties in the common SubResource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelineResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    pub properties: Pipeline,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelineReference {
    #[serde(rename = "type")]
    pub type_: pipeline_reference::Type,
    #[serde(rename = "referenceName")]
    pub reference_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}
pub mod pipeline_reference {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        PipelineReference,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerPipelineReference {
    #[serde(rename = "pipelineReference", default, skip_serializing_if = "Option::is_none")]
    pub pipeline_reference: Option<PipelineReference>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<ParameterValueSpecification>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    pub properties: Trigger,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Factory {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<FactoryIdentity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<FactoryProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FactoryUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<FactoryIdentity>,
    #[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
    pub public_network_access: Option<factory_update_parameters::PublicNetworkAccess>,
}
pub mod factory_update_parameters {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PublicNetworkAccess {
        Enabled,
        Disabled,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FactoryIdentity {
    #[serde(rename = "type")]
    pub type_: factory_identity::Type,
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(rename = "userAssignedIdentities", default, skip_serializing_if = "Option::is_none")]
    pub user_assigned_identities: Option<UserAssignedIdentitiesDefinitionSpecification>,
}
pub mod factory_identity {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        SystemAssigned,
        UserAssigned,
        // The combined mode is a single comma-joined wire value.
        #[serde(rename = "SystemAssigned,UserAssigned")]
        SystemAssignedUserAssigned,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentitiesDefinitionSpecification {}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentitySpecification {}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetReference {
    #[serde(rename = "type")]
    pub type_: dataset_reference::Type,
    #[serde(rename = "referenceName")]
    pub reference_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<ParameterValueSpecification>,
}
pub mod dataset_reference {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        DatasetReference,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    pub properties: Dataset,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetDebugResource {
    #[serde(flatten)]
    pub sub_resource_debug_resource: SubResourceDebugResource,
    pub properties: Dataset,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedServiceReference {
    #[serde(rename = "type")]
    pub type_: linked_service_reference::Type,
    #[serde(rename = "referenceName")]
    pub reference_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<ParameterValueSpecification>,
}
pub mod linked_service_reference {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        LinkedServiceReference,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedServiceResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    pub properties: LinkedService,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedServiceDebugResource {
    #[serde(flatten)]
    pub sub_resource_debug_resource: SubResourceDebugResource,
    pub properties: LinkedService,
}
// Query/filter parameter shapes for listing triggers and runs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerFilterParameters {
    #[serde(rename = "continuationToken", default, skip_serializing_if = "Option::is_none")]
    pub continuation_token: Option<String>,
    #[serde(rename = "parentTriggerName", default, skip_serializing_if = "Option::is_none")]
    pub parent_trigger_name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunFilterParameters {
    #[serde(rename = "continuationToken", default, skip_serializing_if = "Option::is_none")]
    pub continuation_token: Option<String>,
    #[serde(rename = "lastUpdatedAfter")]
    pub last_updated_after: String,
    #[serde(rename = "lastUpdatedBefore")]
    pub last_updated_before: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub filters: Vec<RunQueryFilter>,
    #[serde(rename = "orderBy", default, skip_serializing_if = "Vec::is_empty")]
    pub order_by: Vec<RunQueryOrderBy>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunQueryFilter {
    pub operand: run_query_filter::Operand,
    pub operator: run_query_filter::Operator,
    pub values: Vec<String>,
}
pub mod run_query_filter {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Operand {
        PipelineName,
        Status,
        RunStart,
        RunEnd,
        ActivityName,
        ActivityRunStart,
        ActivityRunEnd,
        ActivityType,
        TriggerName,
        TriggerRunTimestamp,
        RunGroupId,
        LatestOnly,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Operator {
        Equals,
        NotEquals,
        In,
        NotIn,
    }
}
// Run-query ordering plus the pipeline/activity/trigger run record types
// returned by the query endpoints (generated).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunQueryOrderBy {
    #[serde(rename = "orderBy")]
    pub order_by: run_query_order_by::OrderBy,
    pub order: run_query_order_by::Order,
}
pub mod run_query_order_by {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OrderBy {
        RunStart,
        RunEnd,
        PipelineName,
        Status,
        ActivityName,
        ActivityRunStart,
        ActivityRunEnd,
        TriggerName,
        TriggerRunTimestamp,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Order {
        // Wire values are upper-case SQL-style direction keywords.
        #[serde(rename = "ASC")]
        Asc,
        #[serde(rename = "DESC")]
        Desc,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelineRunsQueryResponse {
    pub value: Vec<PipelineRun>,
    #[serde(rename = "continuationToken", default, skip_serializing_if = "Option::is_none")]
    pub continuation_token: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelineRun {
    #[serde(rename = "runId", default, skip_serializing_if = "Option::is_none")]
    pub run_id: Option<String>,
    #[serde(rename = "runGroupId", default, skip_serializing_if = "Option::is_none")]
    pub run_group_id: Option<String>,
    #[serde(rename = "isLatest", default, skip_serializing_if = "Option::is_none")]
    pub is_latest: Option<bool>,
    #[serde(rename = "pipelineName", default, skip_serializing_if = "Option::is_none")]
    pub pipeline_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<serde_json::Value>,
    #[serde(rename = "runDimensions", default, skip_serializing_if = "Option::is_none")]
    pub run_dimensions: Option<serde_json::Value>,
    #[serde(rename = "invokedBy", default, skip_serializing_if = "Option::is_none")]
    pub invoked_by: Option<PipelineRunInvokedBy>,
    #[serde(rename = "lastUpdated", default, skip_serializing_if = "Option::is_none")]
    pub last_updated: Option<String>,
    #[serde(rename = "runStart", default, skip_serializing_if = "Option::is_none")]
    pub run_start: Option<String>,
    #[serde(rename = "runEnd", default, skip_serializing_if = "Option::is_none")]
    pub run_end: Option<String>,
    #[serde(rename = "durationInMs", default, skip_serializing_if = "Option::is_none")]
    pub duration_in_ms: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelineRunInvokedBy {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "invokedByType", default, skip_serializing_if = "Option::is_none")]
    pub invoked_by_type: Option<String>,
    #[serde(rename = "pipelineName", default, skip_serializing_if = "Option::is_none")]
    pub pipeline_name: Option<String>,
    #[serde(rename = "pipelineRunId", default, skip_serializing_if = "Option::is_none")]
    pub pipeline_run_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActivityRunsQueryResponse {
    pub value: Vec<ActivityRun>,
    #[serde(rename = "continuationToken", default, skip_serializing_if = "Option::is_none")]
    pub continuation_token: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActivityRun {
    #[serde(rename = "pipelineName", default, skip_serializing_if = "Option::is_none")]
    pub pipeline_name: Option<String>,
    #[serde(rename = "pipelineRunId", default, skip_serializing_if = "Option::is_none")]
    pub pipeline_run_id: Option<String>,
    #[serde(rename = "activityName", default, skip_serializing_if = "Option::is_none")]
    pub activity_name: Option<String>,
    #[serde(rename = "activityType", default, skip_serializing_if = "Option::is_none")]
    pub activity_type: Option<String>,
    #[serde(rename = "activityRunId", default, skip_serializing_if = "Option::is_none")]
    pub activity_run_id: Option<String>,
    #[serde(rename = "linkedServiceName", default, skip_serializing_if = "Option::is_none")]
    pub linked_service_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "activityRunStart", default, skip_serializing_if = "Option::is_none")]
    pub activity_run_start: Option<String>,
    #[serde(rename = "activityRunEnd", default, skip_serializing_if = "Option::is_none")]
    pub activity_run_end: Option<String>,
    #[serde(rename = "durationInMs", default, skip_serializing_if = "Option::is_none")]
    pub duration_in_ms: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub input: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub output: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerRunsQueryResponse {
    pub value: Vec<TriggerRun>,
    #[serde(rename = "continuationToken", default, skip_serializing_if = "Option::is_none")]
    pub continuation_token: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerRun {
    #[serde(rename = "triggerRunId", default, skip_serializing_if = "Option::is_none")]
    pub trigger_run_id: Option<String>,
    #[serde(rename = "triggerName", default, skip_serializing_if = "Option::is_none")]
    pub trigger_name: Option<String>,
    #[serde(rename = "triggerType", default, skip_serializing_if = "Option::is_none")]
    pub trigger_type: Option<String>,
    #[serde(rename = "triggerRunTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub trigger_run_timestamp: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<trigger_run::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<serde_json::Value>,
    #[serde(rename = "triggeredPipelines", default, skip_serializing_if = "Option::is_none")]
    pub triggered_pipelines: Option<serde_json::Value>,
    #[serde(rename = "runDimension", default, skip_serializing_if = "Option::is_none")]
    pub run_dimension: Option<serde_json::Value>,
    #[serde(rename = "dependencyStatus", default, skip_serializing_if = "Option::is_none")]
    pub dependency_status: Option<serde_json::Value>,
}
pub mod trigger_run {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Succeeded,
        Failed,
        Inprogress,
    }
}
/// Paged list of service operations; `next_link` points to the next page
/// when present.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Metadata describing one operation exposed by the service
/// (name, origin, display strings and service specification).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<operation::Display>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OperationProperties>,
}
/// Companion types for [`Operation`].
pub mod operation {
    use super::*;
    /// Human-readable description of an operation (provider, resource,
    /// operation name, free-text description).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Display {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub description: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub provider: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub resource: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub operation: Option<String>,
    }
}
/// Additional details attached to an [`Operation`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationProperties {
    #[serde(rename = "serviceSpecification", default, skip_serializing_if = "Option::is_none")]
    pub service_specification: Option<OperationServiceSpecification>,
}
/// Log and metric specifications advertised for an operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationServiceSpecification {
    #[serde(rename = "logSpecifications", default, skip_serializing_if = "Vec::is_empty")]
    pub log_specifications: Vec<OperationLogSpecification>,
    #[serde(rename = "metricSpecifications", default, skip_serializing_if = "Vec::is_empty")]
    pub metric_specifications: Vec<OperationMetricSpecification>,
}
/// Description of one log category an operation can emit.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationLogSpecification {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "blobDuration", default, skip_serializing_if = "Option::is_none")]
    pub blob_duration: Option<String>,
}
/// Description of one metric an operation can emit, including MDM routing
/// fields and per-metric availabilities/dimensions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetricSpecification {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "displayDescription", default, skip_serializing_if = "Option::is_none")]
    pub display_description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<String>,
    #[serde(rename = "aggregationType", default, skip_serializing_if = "Option::is_none")]
    pub aggregation_type: Option<String>,
    // Carried as a string, not a bool, to match the wire format.
    #[serde(rename = "enableRegionalMdmAccount", default, skip_serializing_if = "Option::is_none")]
    pub enable_regional_mdm_account: Option<String>,
    #[serde(rename = "sourceMdmAccount", default, skip_serializing_if = "Option::is_none")]
    pub source_mdm_account: Option<String>,
    #[serde(rename = "sourceMdmNamespace", default, skip_serializing_if = "Option::is_none")]
    pub source_mdm_namespace: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub availabilities: Vec<OperationMetricAvailability>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub dimensions: Vec<OperationMetricDimension>,
}
/// Time grain / retention pair for a metric.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetricAvailability {
    #[serde(rename = "timeGrain", default, skip_serializing_if = "Option::is_none")]
    pub time_grain: Option<String>,
    #[serde(rename = "blobDuration", default, skip_serializing_if = "Option::is_none")]
    pub blob_duration: Option<String>,
}
/// One dimension of a metric specification.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationMetricDimension {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "toBeExportedForShoebox", default, skip_serializing_if = "Option::is_none")]
    pub to_be_exported_for_shoebox: Option<bool>,
}
/// Request body for fetching SSIS object metadata under a given path.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetSsisObjectMetadataRequest {
    #[serde(rename = "metadataPath", default, skip_serializing_if = "Option::is_none")]
    pub metadata_path: Option<String>,
}
/// Status of an SSIS object metadata refresh; `properties` and `error`
/// arrive as opaque strings from the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisObjectMetadataStatusResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
/// Batch of exposure-control (feature-flag) queries; the list itself is
/// required and always serialized.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExposureControlBatchRequest {
    #[serde(rename = "exposureControlRequests")]
    pub exposure_control_requests: Vec<ExposureControlRequest>,
}
/// A single exposure-control query identified by feature name and type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExposureControlRequest {
    #[serde(rename = "featureName", default, skip_serializing_if = "Option::is_none")]
    pub feature_name: Option<String>,
    #[serde(rename = "featureType", default, skip_serializing_if = "Option::is_none")]
    pub feature_type: Option<String>,
}
/// Batch of exposure-control answers, parallel to the request batch.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExposureControlBatchResponse {
    #[serde(rename = "exposureControlResponses")]
    pub exposure_control_responses: Vec<ExposureControlResponse>,
}
/// Answer for one exposure-control query (`value` is the feature's setting,
/// carried as a string).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExposureControlResponse {
    #[serde(rename = "featureName", default, skip_serializing_if = "Option::is_none")]
    pub feature_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
/// Paged list of data flow resources; `next_link` points to the next page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowListResponse {
    pub value: Vec<DataFlowResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Request to start a data flow debug session, optionally pinning compute
/// size, TTL and the integration runtime to run on.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateDataFlowDebugSessionRequest {
    #[serde(rename = "computeType", default, skip_serializing_if = "Option::is_none")]
    pub compute_type: Option<String>,
    #[serde(rename = "coreCount", default, skip_serializing_if = "Option::is_none")]
    pub core_count: Option<i64>,
    #[serde(rename = "timeToLive", default, skip_serializing_if = "Option::is_none")]
    pub time_to_live: Option<i64>,
    #[serde(rename = "integrationRuntime", default, skip_serializing_if = "Option::is_none")]
    pub integration_runtime: Option<IntegrationRuntimeDebugResource>,
}
/// Result of creating a debug session; `session_id` identifies the session
/// in subsequent debug calls.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateDataFlowDebugSessionResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "sessionId", default, skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
}
/// Per-source debug setting: which source and how many rows to sample.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowSourceSetting {
    #[serde(rename = "sourceName", default, skip_serializing_if = "Option::is_none")]
    pub source_name: Option<String>,
    #[serde(rename = "rowLimit", default, skip_serializing_if = "Option::is_none")]
    pub row_limit: Option<i64>,
}
/// Staging location for a data flow: a linked service plus a folder path
/// (the path is an untyped expression/value on the wire).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowStagingInfo {
    #[serde(rename = "linkedService", default, skip_serializing_if = "Option::is_none")]
    pub linked_service: Option<LinkedServiceReference>,
    #[serde(rename = "folderPath", default, skip_serializing_if = "Option::is_none")]
    pub folder_path: Option<serde_json::Value>,
}
/// Everything needed to attach a data flow to an existing debug session:
/// the flow(s), their datasets and linked services, staging info and
/// debug settings. Both a singular `data_flow` and a plural `data_flows`
/// field exist on the wire; which one is used depends on the API call —
/// NOTE(review): confirm against the service documentation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowDebugPackage {
    #[serde(rename = "sessionId", default, skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    #[serde(rename = "dataFlow", default, skip_serializing_if = "Option::is_none")]
    pub data_flow: Option<DataFlowDebugResource>,
    #[serde(rename = "dataFlows", default, skip_serializing_if = "Vec::is_empty")]
    pub data_flows: Vec<DataFlowDebugResource>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub datasets: Vec<DatasetDebugResource>,
    #[serde(rename = "linkedServices", default, skip_serializing_if = "Vec::is_empty")]
    pub linked_services: Vec<LinkedServiceDebugResource>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub staging: Option<DataFlowStagingInfo>,
    #[serde(rename = "debugSettings", default, skip_serializing_if = "Option::is_none")]
    pub debug_settings: Option<data_flow_debug_package::DebugSettings>,
}
/// Companion types for [`DataFlowDebugPackage`].
pub mod data_flow_debug_package {
    use super::*;
    /// Debug-time settings: per-source sampling, parameter values and an
    /// untyped dataset-parameters payload.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct DebugSettings {
        #[serde(rename = "sourceSettings", default, skip_serializing_if = "Vec::is_empty")]
        pub source_settings: Vec<DataFlowSourceSetting>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub parameters: Option<ParameterValueSpecification>,
        #[serde(rename = "datasetParameters", default, skip_serializing_if = "Option::is_none")]
        pub dataset_parameters: Option<serde_json::Value>,
    }
}
/// Result of adding a data flow to a debug session.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AddDataFlowToDebugSessionResponse {
    #[serde(rename = "jobVersion", default, skip_serializing_if = "Option::is_none")]
    pub job_version: Option<String>,
}
/// Request to tear down a debug session identified by `session_id`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeleteDataFlowDebugSessionRequest {
    #[serde(rename = "sessionId", default, skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
}
/// Payload of a debug command: the target stream (required), optional row
/// limit, column projection and expression.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowDebugCommandPayload {
    #[serde(rename = "streamName")]
    pub stream_name: String,
    #[serde(rename = "rowLimits", default, skip_serializing_if = "Option::is_none")]
    pub row_limits: Option<i64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub columns: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub expression: Option<String>,
}
/// A debug command to execute within an existing session.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowDebugCommandRequest {
    #[serde(rename = "sessionId", default, skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub command: Option<data_flow_debug_command_request::Command>,
    #[serde(rename = "commandPayload", default, skip_serializing_if = "Option::is_none")]
    pub command_payload: Option<DataFlowDebugCommandPayload>,
}
/// Companion types for [`DataFlowDebugCommandRequest`].
pub mod data_flow_debug_command_request {
    use super::*;
    /// Kind of debug command; each variant is renamed to its camelCase
    /// wire value.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Command {
        #[serde(rename = "executePreviewQuery")]
        ExecutePreviewQuery,
        #[serde(rename = "executeStatisticsQuery")]
        ExecuteStatisticsQuery,
        #[serde(rename = "executeExpressionQuery")]
        ExecuteExpressionQuery,
    }
}
/// Result of a debug command; `data` is the command output serialized as a
/// string by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowDebugCommandResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub data: Option<String>,
}
/// Paged list of active debug sessions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryDataFlowDebugSessionsResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DataFlowDebugSessionInfo>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Snapshot of one debug session: compute shape, runtime, identifiers and
/// timing information (timestamps are carried as strings).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowDebugSessionInfo {
    #[serde(rename = "dataFlowName", default, skip_serializing_if = "Option::is_none")]
    pub data_flow_name: Option<String>,
    #[serde(rename = "computeType", default, skip_serializing_if = "Option::is_none")]
    pub compute_type: Option<String>,
    #[serde(rename = "coreCount", default, skip_serializing_if = "Option::is_none")]
    pub core_count: Option<i64>,
    #[serde(rename = "nodeCount", default, skip_serializing_if = "Option::is_none")]
    pub node_count: Option<i64>,
    #[serde(rename = "integrationRuntimeName", default, skip_serializing_if = "Option::is_none")]
    pub integration_runtime_name: Option<String>,
    #[serde(rename = "sessionId", default, skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "timeToLiveInMinutes", default, skip_serializing_if = "Option::is_none")]
    pub time_to_live_in_minutes: Option<i64>,
    #[serde(rename = "lastActivityTime", default, skip_serializing_if = "Option::is_none")]
    pub last_activity_time: Option<String>,
}
/// Status of a long-running data factory operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetDataFactoryOperationStatusResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}
/// A data flow as a managed resource: common sub-resource envelope
/// (flattened into this struct) plus the required data flow definition.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    pub properties: DataFlow,
}
/// A data flow wrapped in the debug-resource envelope used by debug APIs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowDebugResource {
    #[serde(flatten)]
    pub sub_resource_debug_resource: SubResourceDebugResource,
    pub properties: DataFlow,
}
/// By-name reference to a data flow; `type_` is the fixed discriminator
/// required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowReference {
    #[serde(rename = "type")]
    pub type_: data_flow_reference::Type,
    #[serde(rename = "referenceName")]
    pub reference_name: String,
    #[serde(rename = "datasetParameters", default, skip_serializing_if = "Option::is_none")]
    pub dataset_parameters: Option<serde_json::Value>,
}
/// Companion types for [`DataFlowReference`].
pub mod data_flow_reference {
    use super::*;
    /// Single-valued discriminator: always `DataFlowReference`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        DataFlowReference,
    }
}
/// Paged list of managed private endpoints.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedPrivateEndpointListResponse {
    pub value: Vec<ManagedPrivateEndpointResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A managed private endpoint: sub-resource envelope (flattened) plus
/// required endpoint properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedPrivateEndpointResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    pub properties: ManagedPrivateEndpoint,
}
/// Paged list of managed virtual networks.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedVirtualNetworkListResponse {
    pub value: Vec<ManagedVirtualNetworkResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// By-name reference to a managed virtual network, with a fixed wire
/// discriminator in `type_`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedVirtualNetworkReference {
    #[serde(rename = "type")]
    pub type_: managed_virtual_network_reference::Type,
    #[serde(rename = "referenceName")]
    pub reference_name: String,
}
/// Companion types for [`ManagedVirtualNetworkReference`].
pub mod managed_virtual_network_reference {
    use super::*;
    /// Single-valued discriminator: always `ManagedVirtualNetworkReference`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        ManagedVirtualNetworkReference,
    }
}
/// A managed virtual network: sub-resource envelope (flattened) plus
/// required network properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedVirtualNetworkResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    pub properties: ManagedVirtualNetwork,
}
/// Paged list of private endpoint connections.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionListResponse {
    pub value: Vec<PrivateEndpointConnectionResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A private endpoint connection: sub-resource envelope (flattened) plus
/// optional connection details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RemotePrivateEndpointConnection>,
}
/// State of a connection from a remote private endpoint: provisioning
/// state, the endpoint's ARM id, and the approval state.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RemotePrivateEndpointConnection {
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub private_endpoint: Option<ArmIdWrapper>,
    #[serde(rename = "privateLinkServiceConnectionState", default, skip_serializing_if = "Option::is_none")]
    pub private_link_service_connection_state: Option<PrivateLinkConnectionState>,
}
/// Wrapper carrying just a resource `id` string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ArmIdWrapper {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
/// Approval state of a private link connection (status, description and
/// any actions required from the consumer).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkConnectionState {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")]
    pub actions_required: Option<String>,
}
/// Approval request for a private link connection, as a sub-resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkConnectionApprovalRequestResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PrivateLinkConnectionApprovalRequest>,
}
/// Body of an approval request: the desired connection state.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkConnectionApprovalRequest {
    #[serde(rename = "privateLinkServiceConnectionState", default, skip_serializing_if = "Option::is_none")]
    pub private_link_service_connection_state: Option<PrivateLinkConnectionState>,
}
/// Wrapper holding the list of private link resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourcesWrapper {
    pub value: Vec<PrivateLinkResource>,
}
/// A private link resource: sub-resource envelope plus optional details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PrivateLinkResourceProperties>,
}
/// Group id plus the members and DNS zone names required to establish the
/// private link.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceProperties {
    #[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
    pub group_id: Option<String>,
    #[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")]
    pub required_members: Vec<String>,
    #[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")]
    pub required_zone_names: Vec<String>,
}
/// By-name reference to a credential, with a fixed wire discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CredentialReference {
    #[serde(rename = "type")]
    pub type_: credential_reference::Type,
    #[serde(rename = "referenceName")]
    pub reference_name: String,
}
/// Companion types for [`CredentialReference`].
pub mod credential_reference {
    use super::*;
    /// Single-valued discriminator: always `CredentialReference`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        CredentialReference,
    }
}
/// A credential as a managed resource: sub-resource envelope (flattened)
/// plus the required credential definition.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CredentialResource {
    #[serde(flatten)]
    pub sub_resource: SubResource,
    pub properties: Credential,
}
/// Common base of every integration runtime: the runtime kind plus an
/// optional description. Concrete runtimes flatten this struct in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntime {
    #[serde(rename = "type")]
    pub type_: IntegrationRuntimeType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// Kind of integration runtime; variants serialize under their Rust names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntegrationRuntimeType {
    Managed,
    SelfHosted,
}
/// A service-managed integration runtime: flattened base fields, current
/// state, required type properties and an optional managed-vnet reference.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntime {
    #[serde(flatten)]
    pub integration_runtime: IntegrationRuntime,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<IntegrationRuntimeState>,
    #[serde(rename = "typeProperties")]
    pub type_properties: ManagedIntegrationRuntimeTypeProperties,
    #[serde(rename = "managedVirtualNetwork", default, skip_serializing_if = "Option::is_none")]
    pub managed_virtual_network: Option<ManagedVirtualNetworkReference>,
}
/// Configuration of a managed integration runtime: compute sizing, SSIS
/// settings and an optional customer virtual network.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeTypeProperties {
    #[serde(rename = "computeProperties", default, skip_serializing_if = "Option::is_none")]
    pub compute_properties: Option<IntegrationRuntimeComputeProperties>,
    #[serde(rename = "ssisProperties", default, skip_serializing_if = "Option::is_none")]
    pub ssis_properties: Option<IntegrationRuntimeSsisProperties>,
    #[serde(rename = "customerVirtualNetwork", default, skip_serializing_if = "Option::is_none")]
    pub customer_virtual_network: Option<IntegrationRuntimeCustomerVirtualNetwork>,
}
/// Compute sizing for a managed integration runtime: location, node size
/// and counts, plus nested data-flow and vnet settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeComputeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(rename = "nodeSize", default, skip_serializing_if = "Option::is_none")]
    pub node_size: Option<String>,
    #[serde(rename = "numberOfNodes", default, skip_serializing_if = "Option::is_none")]
    pub number_of_nodes: Option<i64>,
    #[serde(rename = "maxParallelExecutionsPerNode", default, skip_serializing_if = "Option::is_none")]
    pub max_parallel_executions_per_node: Option<i64>,
    #[serde(rename = "dataFlowProperties", default, skip_serializing_if = "Option::is_none")]
    pub data_flow_properties: Option<IntegrationRuntimeDataFlowProperties>,
    #[serde(rename = "vNetProperties", default, skip_serializing_if = "Option::is_none")]
    pub v_net_properties: Option<IntegrationRuntimeVNetProperties>,
}
/// Data-flow compute settings on an integration runtime: compute class,
/// core count, cluster TTL and whether to clean up after use.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeDataFlowProperties {
    #[serde(rename = "computeType", default, skip_serializing_if = "Option::is_none")]
    pub compute_type: Option<integration_runtime_data_flow_properties::ComputeType>,
    #[serde(rename = "coreCount", default, skip_serializing_if = "Option::is_none")]
    pub core_count: Option<i64>,
    #[serde(rename = "timeToLive", default, skip_serializing_if = "Option::is_none")]
    pub time_to_live: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cleanup: Option<bool>,
}
/// Companion types for [`IntegrationRuntimeDataFlowProperties`].
pub mod integration_runtime_data_flow_properties {
    use super::*;
    /// Compute class for data flows; variants serialize under their
    /// Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ComputeType {
        General,
        MemoryOptimized,
        ComputeOptimized,
    }
}
/// Virtual-network placement of an integration runtime. Both the legacy
/// `vNetId`/`subnet` pair and the newer `subnetId` field are modeled.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeVNetProperties {
    #[serde(rename = "vNetId", default, skip_serializing_if = "Option::is_none")]
    pub v_net_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<String>,
    #[serde(rename = "publicIPs", default, skip_serializing_if = "Vec::is_empty")]
    pub public_i_ps: Vec<String>,
    #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
    pub subnet_id: Option<String>,
}
/// SSIS settings of a managed integration runtime: catalog, licensing,
/// edition, custom-setup options, package stores and credential.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeSsisProperties {
    #[serde(rename = "catalogInfo", default, skip_serializing_if = "Option::is_none")]
    pub catalog_info: Option<IntegrationRuntimeSsisCatalogInfo>,
    #[serde(rename = "licenseType", default, skip_serializing_if = "Option::is_none")]
    pub license_type: Option<integration_runtime_ssis_properties::LicenseType>,
    #[serde(rename = "customSetupScriptProperties", default, skip_serializing_if = "Option::is_none")]
    pub custom_setup_script_properties: Option<IntegrationRuntimeCustomSetupScriptProperties>,
    #[serde(rename = "dataProxyProperties", default, skip_serializing_if = "Option::is_none")]
    pub data_proxy_properties: Option<IntegrationRuntimeDataProxyProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub edition: Option<integration_runtime_ssis_properties::Edition>,
    // Express custom setups are polymorphic on the wire; only the common
    // base (the `type` tag) is modeled here.
    #[serde(rename = "expressCustomSetupProperties", default, skip_serializing_if = "Vec::is_empty")]
    pub express_custom_setup_properties: Vec<CustomSetupBase>,
    #[serde(rename = "packageStores", default, skip_serializing_if = "Vec::is_empty")]
    pub package_stores: Vec<PackageStore>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// Companion types for [`IntegrationRuntimeSsisProperties`].
pub mod integration_runtime_ssis_properties {
    use super::*;
    /// SSIS license model; variants serialize under their Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LicenseType {
        BasePrice,
        LicenseIncluded,
    }
    /// SSIS edition; variants serialize under their Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Edition {
        Standard,
        Enterprise,
    }
}
/// Customer-provided virtual network, identified by subnet ARM id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeCustomerVirtualNetwork {
    #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
    pub subnet_id: Option<String>,
}
/// SSIS catalog database settings: endpoint, admin credentials (password
/// wrapped as a [`SecureString`]), pricing tier and standby pair.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeSsisCatalogInfo {
    #[serde(rename = "catalogServerEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub catalog_server_endpoint: Option<String>,
    #[serde(rename = "catalogAdminUserName", default, skip_serializing_if = "Option::is_none")]
    pub catalog_admin_user_name: Option<String>,
    #[serde(rename = "catalogAdminPassword", default, skip_serializing_if = "Option::is_none")]
    pub catalog_admin_password: Option<SecureString>,
    #[serde(rename = "catalogPricingTier", default, skip_serializing_if = "Option::is_none")]
    pub catalog_pricing_tier: Option<integration_runtime_ssis_catalog_info::CatalogPricingTier>,
    #[serde(rename = "dualStandbyPairName", default, skip_serializing_if = "Option::is_none")]
    pub dual_standby_pair_name: Option<String>,
}
/// Companion types for [`IntegrationRuntimeSsisCatalogInfo`].
pub mod integration_runtime_ssis_catalog_info {
    use super::*;
    /// Catalog pricing tier; `PremiumRs` is renamed to the wire value
    /// `PremiumRS`, the rest serialize under their Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CatalogPricingTier {
        Basic,
        Standard,
        Premium,
        #[serde(rename = "PremiumRS")]
        PremiumRs,
    }
}
/// Location of a custom setup script: a blob container URI plus its SAS
/// token (kept as a [`SecureString`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeCustomSetupScriptProperties {
    #[serde(rename = "blobContainerUri", default, skip_serializing_if = "Option::is_none")]
    pub blob_container_uri: Option<String>,
    #[serde(rename = "sasToken", default, skip_serializing_if = "Option::is_none")]
    pub sas_token: Option<SecureString>,
}
/// Data-proxy configuration: the runtime to connect through, a staging
/// linked service and an optional path.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeDataProxyProperties {
    #[serde(rename = "connectVia", default, skip_serializing_if = "Option::is_none")]
    pub connect_via: Option<EntityReference>,
    #[serde(rename = "stagingLinkedService", default, skip_serializing_if = "Option::is_none")]
    pub staging_linked_service: Option<EntityReference>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
}
/// A named SSIS package store backed by a linked service; both fields are
/// required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PackageStore {
    pub name: String,
    #[serde(rename = "packageStoreLinkedService")]
    pub package_store_linked_service: EntityReference,
}
/// Base of all express custom setup variants; carries only the `type`
/// discriminator (concrete setups flatten this struct in).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomSetupBase {
    #[serde(rename = "type")]
    pub type_: String,
}
/// Custom setup that registers a Windows credential via `cmdkey`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CmdkeySetup {
    #[serde(flatten)]
    pub custom_setup_base: CustomSetupBase,
    #[serde(rename = "typeProperties")]
    pub type_properties: CmdkeySetupTypeProperties,
}
/// Arguments of a cmdkey setup. Target and user are untyped JSON values
/// on the wire; the password is a [`SecretBase`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CmdkeySetupTypeProperties {
    #[serde(rename = "targetName")]
    pub target_name: serde_json::Value,
    #[serde(rename = "userName")]
    pub user_name: serde_json::Value,
    pub password: SecretBase,
}
/// Custom setup that defines an environment variable on the runtime nodes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EnvironmentVariableSetup {
    #[serde(flatten)]
    pub custom_setup_base: CustomSetupBase,
    #[serde(rename = "typeProperties")]
    pub type_properties: EnvironmentVariableSetupTypeProperties,
}
/// Name/value pair for an environment-variable setup; both required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EnvironmentVariableSetupTypeProperties {
    #[serde(rename = "variableName")]
    pub variable_name: String,
    #[serde(rename = "variableValue")]
    pub variable_value: String,
}
/// Custom setup that installs a (possibly licensed) component.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComponentSetup {
    #[serde(flatten)]
    pub custom_setup_base: CustomSetupBase,
    #[serde(rename = "typeProperties")]
    pub type_properties: LicensedComponentSetupTypeProperties,
}
/// Component name plus an optional license key secret.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LicensedComponentSetupTypeProperties {
    #[serde(rename = "componentName")]
    pub component_name: String,
    #[serde(rename = "licenseKey", default, skip_serializing_if = "Option::is_none")]
    pub license_key: Option<SecretBase>,
}
/// Custom setup that installs a specific Azure PowerShell version.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzPowerShellSetup {
    #[serde(flatten)]
    pub custom_setup_base: CustomSetupBase,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzPowerShellSetupTypeProperties,
}
/// Required Azure PowerShell version string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzPowerShellSetupTypeProperties {
    pub version: String,
}
/// Loose reference to another entity (integration runtime or linked
/// service), discriminated by `type_`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EntityReference {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<entity_reference::Type>,
    #[serde(rename = "referenceName", default, skip_serializing_if = "Option::is_none")]
    pub reference_name: Option<String>,
}
/// Companion types for [`EntityReference`].
pub mod entity_reference {
    use super::*;
    /// Kind of entity being referenced; variants serialize under their
    /// Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        IntegrationRuntimeReference,
        LinkedServiceReference,
    }
}
/// A self-hosted integration runtime: flattened base fields plus optional
/// type properties (only linked-runtime info is modeled).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntime {
    #[serde(flatten)]
    pub integration_runtime: IntegrationRuntime,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SelfHostedIntegrationRuntimeTypeProperties>,
}
/// Type properties of a self-hosted runtime: optional link to a shared
/// runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntimeTypeProperties {
    #[serde(rename = "linkedInfo", default, skip_serializing_if = "Option::is_none")]
    pub linked_info: Option<LinkedIntegrationRuntimeType>,
}
/// Base of linked-runtime authorization variants; carries only the
/// authorization-type discriminator (variants flatten this struct in).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedIntegrationRuntimeType {
    #[serde(rename = "authorizationType")]
    pub authorization_type: String,
}
/// Key-based authorization for linking to a shared runtime; the key is a
/// [`SecureString`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedIntegrationRuntimeKeyAuthorization {
    #[serde(flatten)]
    pub linked_integration_runtime_type: LinkedIntegrationRuntimeType,
    pub key: SecureString,
}
/// RBAC-based authorization for linking to a shared runtime, identified
/// by the shared runtime's ARM resource id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedIntegrationRuntimeRbacAuthorization {
    #[serde(flatten)]
    pub linked_integration_runtime_type: LinkedIntegrationRuntimeType,
    #[serde(rename = "resourceId")]
    pub resource_id: String,
}
/// Outbound network dependencies of a runtime, grouped by category.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeOutboundNetworkDependenciesEndpointsResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<IntegrationRuntimeOutboundNetworkDependenciesCategoryEndpoint>,
}
/// One category of outbound dependencies and its endpoints.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeOutboundNetworkDependenciesCategoryEndpoint {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub category: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub endpoints: Vec<IntegrationRuntimeOutboundNetworkDependenciesEndpoint>,
}
/// A single outbound endpoint: domain name plus per-port details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeOutboundNetworkDependenciesEndpoint {
    #[serde(rename = "domainName", default, skip_serializing_if = "Option::is_none")]
    pub domain_name: Option<String>,
    #[serde(rename = "endpointDetails", default, skip_serializing_if = "Vec::is_empty")]
    pub endpoint_details: Vec<IntegrationRuntimeOutboundNetworkDependenciesEndpointDetails>,
}
/// Port detail of an outbound endpoint (an `i32` here, unlike the `i64`
/// counts elsewhere — this mirrors the wire schema).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeOutboundNetworkDependenciesEndpointDetails {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<i32>,
}
/// Common base of runtime status reports: runtime kind, owning factory
/// and current state. Concrete statuses flatten this struct in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeStatus {
    #[serde(rename = "type")]
    pub type_: IntegrationRuntimeType,
    #[serde(rename = "dataFactoryName", default, skip_serializing_if = "Option::is_none")]
    pub data_factory_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<IntegrationRuntimeState>,
}
/// Lifecycle state of an integration runtime; variants serialize under
/// their Rust names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntegrationRuntimeState {
    Initial,
    Stopped,
    Started,
    Starting,
    Stopping,
    NeedRegistration,
    Online,
    Limited,
    Offline,
    AccessDenied,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeStatus {
#[serde(flatten)]
pub integration_runtime_status: IntegrationRuntimeStatus,
#[serde(rename = "typeProperties")]
pub type_properties: ManagedIntegrationRuntimeStatusTypeProperties,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeStatusTypeProperties {
#[serde(rename = "createTime", default, skip_serializing_if = "Option::is_none")]
pub create_time: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub nodes: Vec<ManagedIntegrationRuntimeNode>,
#[serde(rename = "otherErrors", default, skip_serializing_if = "Vec::is_empty")]
pub other_errors: Vec<ManagedIntegrationRuntimeError>,
#[serde(rename = "lastOperation", default, skip_serializing_if = "Option::is_none")]
pub last_operation: Option<ManagedIntegrationRuntimeOperationResult>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeOperationResult {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub result: Option<String>,
#[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")]
pub error_code: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub parameters: Vec<String>,
#[serde(rename = "activityId", default, skip_serializing_if = "Option::is_none")]
pub activity_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeNode {
#[serde(rename = "nodeId", default, skip_serializing_if = "Option::is_none")]
pub node_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<managed_integration_runtime_node::Status>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub errors: Vec<ManagedIntegrationRuntimeError>,
}
/// Nested enums for [`ManagedIntegrationRuntimeNode`].
pub mod managed_integration_runtime_node {
    use super::*;
    /// Status of a managed integration runtime node. Variants serialize
    /// under their Rust names (no `rename` attributes present).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Starting,
        Available,
        Recycling,
        Unavailable,
    }
}
/// An error reported by a managed integration runtime.
/// All fields are optional; absent/empty values are skipped on serialization.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedIntegrationRuntimeError {
    /// Time of the error as a raw string.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub time: Option<String>,
    /// Error code.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// Message parameters accompanying the error.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub parameters: Vec<String>,
    /// Human-readable error message.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
/// Status of a self-hosted integration runtime. The shared
/// [`IntegrationRuntimeStatus`] fields are inlined into this object on the
/// wire via `#[serde(flatten)]`; the self-hosted-specific payload sits under
/// the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntimeStatus {
    #[serde(flatten)]
    pub integration_runtime_status: IntegrationRuntimeStatus,
    #[serde(rename = "typeProperties")]
    pub type_properties: SelfHostedIntegrationRuntimeStatusTypeProperties,
}
/// Payload carried under the `typeProperties` key of a self-hosted
/// integration runtime status. All fields are optional/emptyable on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntimeStatusTypeProperties {
    /// Creation time, wire name `createTime`, as a raw string.
    #[serde(rename = "createTime", default, skip_serializing_if = "Option::is_none")]
    pub create_time: Option<String>,
    /// Task queue identifier, wire name `taskQueueId`.
    #[serde(rename = "taskQueueId", default, skip_serializing_if = "Option::is_none")]
    pub task_queue_id: Option<String>,
    /// Encryption mode of the internal channel, wire name `internalChannelEncryption`.
    #[serde(rename = "internalChannelEncryption", default, skip_serializing_if = "Option::is_none")]
    pub internal_channel_encryption: Option<self_hosted_integration_runtime_status_type_properties::InternalChannelEncryption>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub nodes: Vec<SelfHostedIntegrationRuntimeNode>,
    /// Wire name `scheduledUpdateDate`, kept as a raw string.
    #[serde(rename = "scheduledUpdateDate", default, skip_serializing_if = "Option::is_none")]
    pub scheduled_update_date: Option<String>,
    /// Wire name `updateDelayOffset`.
    #[serde(rename = "updateDelayOffset", default, skip_serializing_if = "Option::is_none")]
    pub update_delay_offset: Option<String>,
    /// Wire name `localTimeZoneOffset`.
    #[serde(rename = "localTimeZoneOffset", default, skip_serializing_if = "Option::is_none")]
    pub local_time_zone_offset: Option<String>,
    /// Open-ended capability bag; schema unconstrained, hence `serde_json::Value`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capabilities: Option<serde_json::Value>,
    /// Wire name `serviceUrls`.
    #[serde(rename = "serviceUrls", default, skip_serializing_if = "Vec::is_empty")]
    pub service_urls: Vec<String>,
    /// Auto-update state, wire name `autoUpdate`; see [`IntegrationRuntimeAutoUpdate`].
    #[serde(rename = "autoUpdate", default, skip_serializing_if = "Option::is_none")]
    pub auto_update: Option<IntegrationRuntimeAutoUpdate>,
    /// Wire name `versionStatus`.
    #[serde(rename = "versionStatus", default, skip_serializing_if = "Option::is_none")]
    pub version_status: Option<String>,
    /// Linked integration runtimes sharing this one.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub links: Vec<LinkedIntegrationRuntime>,
    /// Wire name `pushedVersion`.
    #[serde(rename = "pushedVersion", default, skip_serializing_if = "Option::is_none")]
    pub pushed_version: Option<String>,
    /// Wire name `latestVersion`.
    #[serde(rename = "latestVersion", default, skip_serializing_if = "Option::is_none")]
    pub latest_version: Option<String>,
    /// Wire name `autoUpdateETA` (note the all-caps acronym on the wire).
    #[serde(rename = "autoUpdateETA", default, skip_serializing_if = "Option::is_none")]
    pub auto_update_eta: Option<String>,
}
/// Nested enums for [`SelfHostedIntegrationRuntimeStatusTypeProperties`].
pub mod self_hosted_integration_runtime_status_type_properties {
    use super::*;
    /// Internal channel encryption mode; variants serialize under their Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum InternalChannelEncryption {
        NotSet,
        SslEncrypted,
        NotEncrypted,
    }
}
/// Auto-update setting of an integration runtime. Serializes as the bare
/// variant names `"On"` / `"Off"` (no `rename` attributes present).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntegrationRuntimeAutoUpdate {
    On,
    Off,
}
/// Reference to an integration runtime linked from another data factory.
/// All fields are optional on the wire and skipped when absent.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedIntegrationRuntime {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// Wire name `subscriptionId`.
    #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub subscription_id: Option<String>,
    /// Wire name `dataFactoryName`.
    #[serde(rename = "dataFactoryName", default, skip_serializing_if = "Option::is_none")]
    pub data_factory_name: Option<String>,
    /// Wire name `dataFactoryLocation`.
    #[serde(rename = "dataFactoryLocation", default, skip_serializing_if = "Option::is_none")]
    pub data_factory_location: Option<String>,
    /// Wire name `createTime`, kept as a raw string.
    #[serde(rename = "createTime", default, skip_serializing_if = "Option::is_none")]
    pub create_time: Option<String>,
}
/// A single node of a self-hosted integration runtime. Every field is
/// optional on the wire; time-like fields are kept as raw strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfHostedIntegrationRuntimeNode {
    /// Wire name `nodeName`.
    #[serde(rename = "nodeName", default, skip_serializing_if = "Option::is_none")]
    pub node_name: Option<String>,
    /// Wire name `machineName`.
    #[serde(rename = "machineName", default, skip_serializing_if = "Option::is_none")]
    pub machine_name: Option<String>,
    /// Wire name `hostServiceUri`.
    #[serde(rename = "hostServiceUri", default, skip_serializing_if = "Option::is_none")]
    pub host_service_uri: Option<String>,
    /// Node status; see [`self_hosted_integration_runtime_node::Status`].
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<self_hosted_integration_runtime_node::Status>,
    /// Open-ended capability bag; schema unconstrained, hence `serde_json::Value`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capabilities: Option<serde_json::Value>,
    /// Wire name `versionStatus`.
    #[serde(rename = "versionStatus", default, skip_serializing_if = "Option::is_none")]
    pub version_status: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    /// Wire name `registerTime`.
    #[serde(rename = "registerTime", default, skip_serializing_if = "Option::is_none")]
    pub register_time: Option<String>,
    /// Wire name `lastConnectTime`.
    #[serde(rename = "lastConnectTime", default, skip_serializing_if = "Option::is_none")]
    pub last_connect_time: Option<String>,
    /// Wire name `expiryTime`.
    #[serde(rename = "expiryTime", default, skip_serializing_if = "Option::is_none")]
    pub expiry_time: Option<String>,
    /// Wire name `lastStartTime`.
    #[serde(rename = "lastStartTime", default, skip_serializing_if = "Option::is_none")]
    pub last_start_time: Option<String>,
    /// Wire name `lastStopTime`.
    #[serde(rename = "lastStopTime", default, skip_serializing_if = "Option::is_none")]
    pub last_stop_time: Option<String>,
    /// Wire name `lastUpdateResult`; see [`self_hosted_integration_runtime_node::LastUpdateResult`].
    #[serde(rename = "lastUpdateResult", default, skip_serializing_if = "Option::is_none")]
    pub last_update_result: Option<self_hosted_integration_runtime_node::LastUpdateResult>,
    /// Wire name `lastStartUpdateTime`.
    #[serde(rename = "lastStartUpdateTime", default, skip_serializing_if = "Option::is_none")]
    pub last_start_update_time: Option<String>,
    /// Wire name `lastEndUpdateTime`.
    #[serde(rename = "lastEndUpdateTime", default, skip_serializing_if = "Option::is_none")]
    pub last_end_update_time: Option<String>,
    /// Wire name `isActiveDispatcher`.
    #[serde(rename = "isActiveDispatcher", default, skip_serializing_if = "Option::is_none")]
    pub is_active_dispatcher: Option<bool>,
    /// Wire name `concurrentJobsLimit`.
    #[serde(rename = "concurrentJobsLimit", default, skip_serializing_if = "Option::is_none")]
    pub concurrent_jobs_limit: Option<i64>,
    /// Wire name `maxConcurrentJobs`.
    #[serde(rename = "maxConcurrentJobs", default, skip_serializing_if = "Option::is_none")]
    pub max_concurrent_jobs: Option<i64>,
}
/// Nested enums for [`SelfHostedIntegrationRuntimeNode`].
pub mod self_hosted_integration_runtime_node {
    use super::*;
    /// Status of a self-hosted integration runtime node; variants serialize
    /// under their Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        NeedRegistration,
        Online,
        Limited,
        Offline,
        Upgrading,
        Initializing,
        InitializeFailed,
    }
    /// Result of the node's last self-update attempt.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LastUpdateResult {
        None,
        Succeed,
        Fail,
    }
}
/// Connection information used to connect to an integration runtime.
/// All fields are optional on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeConnectionInfo {
    /// Wire name `serviceToken`.
    #[serde(rename = "serviceToken", default, skip_serializing_if = "Option::is_none")]
    pub service_token: Option<String>,
    /// Wire name `identityCertThumbprint`.
    #[serde(rename = "identityCertThumbprint", default, skip_serializing_if = "Option::is_none")]
    pub identity_cert_thumbprint: Option<String>,
    /// Wire name `hostServiceUri`.
    #[serde(rename = "hostServiceUri", default, skip_serializing_if = "Option::is_none")]
    pub host_service_uri: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    /// Wire name `publicKey`.
    #[serde(rename = "publicKey", default, skip_serializing_if = "Option::is_none")]
    pub public_key: Option<String>,
    /// NOTE: "Exprired" is misspelled on the wire (`isIdentityCertExprired`);
    /// the rename mirrors the service contract, so do NOT "fix" the spelling
    /// here without confirming against the REST API specification.
    #[serde(rename = "isIdentityCertExprired", default, skip_serializing_if = "Option::is_none")]
    pub is_identity_cert_exprired: Option<bool>,
}
/// Request body for regenerating an integration runtime auth key; selects
/// which key (`authKey1`/`authKey2`) to regenerate.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeRegenerateKeyParameters {
    /// Key to regenerate, wire name `keyName`; see
    /// [`integration_runtime_regenerate_key_parameters::KeyName`].
    #[serde(rename = "keyName", default, skip_serializing_if = "Option::is_none")]
    pub key_name: Option<integration_runtime_regenerate_key_parameters::KeyName>,
}
/// Nested enums for [`IntegrationRuntimeRegenerateKeyParameters`].
pub mod integration_runtime_regenerate_key_parameters {
    use super::*;
    /// Name of the auth key to regenerate; serializes as the camelCase wire
    /// values `"authKey1"` / `"authKey2"`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum KeyName {
        #[serde(rename = "authKey1")]
        AuthKey1,
        #[serde(rename = "authKey2")]
        AuthKey2,
    }
}
/// The pair of authentication keys for an integration runtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeAuthKeys {
    /// Primary key, wire name `authKey1`.
    #[serde(rename = "authKey1", default, skip_serializing_if = "Option::is_none")]
    pub auth_key1: Option<String>,
    /// Secondary key, wire name `authKey2`.
    #[serde(rename = "authKey2", default, skip_serializing_if = "Option::is_none")]
    pub auth_key2: Option<String>,
}
/// Monitoring data for an integration runtime: its name plus per-node metrics.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeMonitoringData {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// Per-node monitoring entries; empty list is omitted from output.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub nodes: Vec<IntegrationRuntimeNodeMonitoringData>,
}
/// Monitoring metrics for one integration runtime node. Counters are `i64`,
/// byte throughput is `f64`; all fields are optional on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeNodeMonitoringData {
    /// Wire name `nodeName`.
    #[serde(rename = "nodeName", default, skip_serializing_if = "Option::is_none")]
    pub node_name: Option<String>,
    /// Wire name `availableMemoryInMB`.
    #[serde(rename = "availableMemoryInMB", default, skip_serializing_if = "Option::is_none")]
    pub available_memory_in_mb: Option<i64>,
    /// Wire name `cpuUtilization`.
    #[serde(rename = "cpuUtilization", default, skip_serializing_if = "Option::is_none")]
    pub cpu_utilization: Option<i64>,
    /// Wire name `concurrentJobsLimit`.
    #[serde(rename = "concurrentJobsLimit", default, skip_serializing_if = "Option::is_none")]
    pub concurrent_jobs_limit: Option<i64>,
    /// Wire name `concurrentJobsRunning`.
    #[serde(rename = "concurrentJobsRunning", default, skip_serializing_if = "Option::is_none")]
    pub concurrent_jobs_running: Option<i64>,
    /// Wire name `maxConcurrentJobs`.
    #[serde(rename = "maxConcurrentJobs", default, skip_serializing_if = "Option::is_none")]
    pub max_concurrent_jobs: Option<i64>,
    /// Wire name `sentBytes`.
    #[serde(rename = "sentBytes", default, skip_serializing_if = "Option::is_none")]
    pub sent_bytes: Option<f64>,
    /// Wire name `receivedBytes`.
    #[serde(rename = "receivedBytes", default, skip_serializing_if = "Option::is_none")]
    pub received_bytes: Option<f64>,
}
/// Wrapper for an integration runtime node's IP address (wire name `ipAddress`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IntegrationRuntimeNodeIpAddress {
    #[serde(rename = "ipAddress", default, skip_serializing_if = "Option::is_none")]
    pub ip_address: Option<String>,
}
/// Paged list of SSIS object metadata; `next_link` (wire name `nextLink`)
/// points at the next page when present.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisObjectMetadataListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SsisObjectMetadata>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Common base of all SSIS object metadata records. `type_` (wire name
/// `type`) is the only required field and discriminates the concrete kind;
/// other types embed this struct via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisObjectMetadata {
    #[serde(rename = "type")]
    pub type_: SsisObjectMetadataType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// Discriminator for [`SsisObjectMetadata`]; variants serialize under their
/// Rust names (`"Folder"`, `"Project"`, `"Package"`, `"Environment"`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SsisObjectMetadataType {
    Folder,
    Project,
    Package,
    Environment,
}
/// SSIS folder metadata: just the flattened [`SsisObjectMetadata`] base,
/// with no folder-specific fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisFolder {
    #[serde(flatten)]
    pub ssis_object_metadata: SsisObjectMetadata,
}
/// SSIS project metadata: the flattened [`SsisObjectMetadata`] base plus
/// folder/version info, environment references, and parameters.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisProject {
    #[serde(flatten)]
    pub ssis_object_metadata: SsisObjectMetadata,
    /// Id of the containing folder, wire name `folderId`.
    #[serde(rename = "folderId", default, skip_serializing_if = "Option::is_none")]
    pub folder_id: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<i64>,
    /// Wire name `environmentRefs`.
    #[serde(rename = "environmentRefs", default, skip_serializing_if = "Vec::is_empty")]
    pub environment_refs: Vec<SsisEnvironmentReference>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub parameters: Vec<SsisParameter>,
}
/// SSIS package metadata: the flattened [`SsisObjectMetadata`] base plus
/// folder/project linkage and parameters.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisPackage {
    #[serde(flatten)]
    pub ssis_object_metadata: SsisObjectMetadata,
    /// Id of the containing folder, wire name `folderId`.
    #[serde(rename = "folderId", default, skip_serializing_if = "Option::is_none")]
    pub folder_id: Option<i64>,
    /// Wire name `projectVersion`.
    #[serde(rename = "projectVersion", default, skip_serializing_if = "Option::is_none")]
    pub project_version: Option<i64>,
    /// Wire name `projectId`.
    #[serde(rename = "projectId", default, skip_serializing_if = "Option::is_none")]
    pub project_id: Option<i64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub parameters: Vec<SsisParameter>,
}
/// SSIS environment metadata: the flattened [`SsisObjectMetadata`] base plus
/// its containing folder and variables.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisEnvironment {
    #[serde(flatten)]
    pub ssis_object_metadata: SsisObjectMetadata,
    /// Id of the containing folder, wire name `folderId`.
    #[serde(rename = "folderId", default, skip_serializing_if = "Option::is_none")]
    pub folder_id: Option<i64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub variables: Vec<SsisVariable>,
}
/// An SSIS parameter definition. All fields are optional on the wire; the
/// various default-value fields are kept as raw strings with their original
/// camelCase wire names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisParameter {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Wire name `dataType`.
    #[serde(rename = "dataType", default, skip_serializing_if = "Option::is_none")]
    pub data_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub required: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sensitive: Option<bool>,
    /// Wire name `designDefaultValue`.
    #[serde(rename = "designDefaultValue", default, skip_serializing_if = "Option::is_none")]
    pub design_default_value: Option<String>,
    /// Wire name `defaultValue`.
    #[serde(rename = "defaultValue", default, skip_serializing_if = "Option::is_none")]
    pub default_value: Option<String>,
    /// Wire name `sensitiveDefaultValue`.
    #[serde(rename = "sensitiveDefaultValue", default, skip_serializing_if = "Option::is_none")]
    pub sensitive_default_value: Option<String>,
    /// Wire name `valueType`.
    #[serde(rename = "valueType", default, skip_serializing_if = "Option::is_none")]
    pub value_type: Option<String>,
    /// Wire name `valueSet`.
    #[serde(rename = "valueSet", default, skip_serializing_if = "Option::is_none")]
    pub value_set: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub variable: Option<String>,
}
/// An SSIS environment variable. All fields are optional on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisVariable {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Wire name `dataType`.
    #[serde(rename = "dataType", default, skip_serializing_if = "Option::is_none")]
    pub data_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sensitive: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    /// Wire name `sensitiveValue`.
    #[serde(rename = "sensitiveValue", default, skip_serializing_if = "Option::is_none")]
    pub sensitive_value: Option<String>,
}
/// Reference from an SSIS project to an environment.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisEnvironmentReference {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<i64>,
    /// Wire name `environmentFolderName`.
    #[serde(rename = "environmentFolderName", default, skip_serializing_if = "Option::is_none")]
    pub environment_folder_name: Option<String>,
    /// Wire name `environmentName`.
    #[serde(rename = "environmentName", default, skip_serializing_if = "Option::is_none")]
    pub environment_name: Option<String>,
    /// Wire name `referenceType`.
    #[serde(rename = "referenceType", default, skip_serializing_if = "Option::is_none")]
    pub reference_type: Option<String>,
}
/// A data factory pipeline definition: its activities plus parameter/variable
/// declarations, concurrency limit, annotations, folder placement, and policy.
/// All fields are optional/emptyable on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Pipeline {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Activities making up the pipeline.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub activities: Vec<Activity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<ParameterDefinitionSpecification>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub variables: Option<VariableDefinitionSpecification>,
    /// Maximum number of concurrent runs.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub concurrency: Option<i64>,
    /// Free-form annotations; elements are untyped JSON values.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub annotations: Vec<serde_json::Value>,
    /// Open-ended dimension map, wire name `runDimensions`.
    #[serde(rename = "runDimensions", default, skip_serializing_if = "Option::is_none")]
    pub run_dimensions: Option<serde_json::Value>,
    /// Folder the pipeline is displayed under; see [`pipeline::Folder`].
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub folder: Option<pipeline::Folder>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub policy: Option<PipelinePolicy>,
}
/// Nested types for [`Pipeline`].
pub mod pipeline {
    use super::*;
    /// Folder the pipeline appears under; only a name is carried.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Folder {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub name: Option<String>,
    }
}
/// Pipeline-level policy container; currently only the elapsed-time metric
/// policy (wire name `elapsedTimeMetric`) is modeled.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelinePolicy {
    #[serde(rename = "elapsedTimeMetric", default, skip_serializing_if = "Option::is_none")]
    pub elapsed_time_metric: Option<PipelineElapsedTimeMetricPolicy>,
}
/// Elapsed-time metric policy; `duration` is an untyped JSON value
/// (it may be an expression rather than a plain literal, hence `Value`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelineElapsedTimeMetricPolicy {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub duration: Option<serde_json::Value>,
}
/// Common base of every pipeline activity. `name` and `type_` (wire name
/// `type`, the concrete activity kind) are required; concrete activity types
/// embed this struct via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Activity {
    pub name: String,
    #[serde(rename = "type")]
    pub type_: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Upstream dependencies, wire name `dependsOn`.
    #[serde(rename = "dependsOn", default, skip_serializing_if = "Vec::is_empty")]
    pub depends_on: Vec<ActivityDependency>,
    /// User-defined properties, wire name `userProperties`.
    #[serde(rename = "userProperties", default, skip_serializing_if = "Vec::is_empty")]
    pub user_properties: Vec<UserProperty>,
}
/// A user-defined name/value pair attached to an activity; both fields are
/// required, and `value` is an untyped JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserProperty {
    pub name: String,
    pub value: serde_json::Value,
}
/// A dependency edge between activities: the upstream activity's name and
/// the conditions (wire name `dependencyConditions`) under which this
/// activity may run. Both fields are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActivityDependency {
    pub activity: String,
    #[serde(rename = "dependencyConditions")]
    pub dependency_conditions: Vec<String>,
}
/// Base for control-flow activities: just the flattened [`Activity`] base,
/// with no extra fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ControlActivity {
    #[serde(flatten)]
    pub activity: Activity,
}
/// Base for execution activities: the flattened [`Activity`] base plus an
/// optional linked-service reference (wire name `linkedServiceName`) and an
/// optional execution policy.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExecutionActivity {
    #[serde(flatten)]
    pub activity: Activity,
    #[serde(rename = "linkedServiceName", default, skip_serializing_if = "Option::is_none")]
    pub linked_service_name: Option<LinkedServiceReference>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub policy: Option<ActivityPolicy>,
}
/// Execution policy for an activity. `timeout` and `retry` are untyped JSON
/// values (they may be expressions rather than plain literals).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActivityPolicy {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timeout: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub retry: Option<serde_json::Value>,
    /// Wire name `retryIntervalInSeconds`.
    #[serde(rename = "retryIntervalInSeconds", default, skip_serializing_if = "Option::is_none")]
    pub retry_interval_in_seconds: Option<i64>,
    /// Wire name `secureInput`.
    #[serde(rename = "secureInput", default, skip_serializing_if = "Option::is_none")]
    pub secure_input: Option<bool>,
    /// Wire name `secureOutput`.
    #[serde(rename = "secureOutput", default, skip_serializing_if = "Option::is_none")]
    pub secure_output: Option<bool>,
}
/// Common base of all connector read settings. `type_` (wire name `type`)
/// names the concrete settings kind and is required; concrete settings types
/// embed this struct via `#[serde(flatten)]`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StoreReadSettings {
    #[serde(rename = "type")]
    pub type_: String,
    /// Wire name `maxConcurrentConnections`; untyped JSON (may be an expression).
    #[serde(rename = "maxConcurrentConnections", default, skip_serializing_if = "Option::is_none")]
    pub max_concurrent_connections: Option<serde_json::Value>,
    /// Wire name `disableMetricsCollection`; untyped JSON (may be an expression).
    #[serde(rename = "disableMetricsCollection", default, skip_serializing_if = "Option::is_none")]
    pub disable_metrics_collection: Option<serde_json::Value>,
}
/// Read settings for Azure Blob Storage: the flattened [`StoreReadSettings`]
/// base plus path/wildcard/prefix selection, partition discovery, and
/// modified-time filters. Most fields are untyped JSON values (they may be
/// expressions); `enable_partition_discovery` alone is a plain bool.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobStorageReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
}
/// Read settings for Azure Blob FS (ADLS Gen2): same shape as the other
/// connector read settings, but without a `prefix` field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobFsReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
}
/// Read settings for Azure Data Lake Store (Gen1). In addition to the common
/// connector-settings fields it carries `listAfter`/`listBefore` name-range
/// filters.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeStoreReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "listAfter", default, skip_serializing_if = "Option::is_none")]
    pub list_after: Option<serde_json::Value>,
    #[serde(rename = "listBefore", default, skip_serializing_if = "Option::is_none")]
    pub list_before: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
}
/// Read settings for Amazon S3: common connector-settings fields plus a
/// `prefix` filter. Expression-capable fields are untyped JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3ReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
}
/// Read settings for a file server: common connector-settings fields plus a
/// trailing `fileFilter`. No `prefix` field for this connector.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileServerReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
    /// Additional file-name filter, wire name `fileFilter`.
    #[serde(rename = "fileFilter", default, skip_serializing_if = "Option::is_none")]
    pub file_filter: Option<serde_json::Value>,
}
/// Read settings for Azure File Storage: common connector-settings fields
/// plus a `prefix` filter.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFileStorageReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
}
/// Write settings for SFTP: the flattened [`StoreWriteSettings`] base plus
/// an operation timeout and temp-file-rename toggle (both expression-capable,
/// hence untyped JSON values).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SftpWriteSettings {
    #[serde(flatten)]
    pub store_write_settings: StoreWriteSettings,
    /// Wire name `operationTimeout`.
    #[serde(rename = "operationTimeout", default, skip_serializing_if = "Option::is_none")]
    pub operation_timeout: Option<serde_json::Value>,
    /// Wire name `useTempFileRename`.
    #[serde(rename = "useTempFileRename", default, skip_serializing_if = "Option::is_none")]
    pub use_temp_file_rename: Option<serde_json::Value>,
}
/// Read settings for S3-compatible stores: common connector-settings fields
/// plus a `prefix` filter (same shape as [`AmazonS3ReadSettings`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3CompatibleReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
}
/// Read settings for Oracle Cloud Storage: common connector-settings fields
/// plus a `prefix` filter (same shape as [`AmazonS3ReadSettings`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleCloudStorageReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
}
/// Read settings for Google Cloud Storage: common connector-settings fields
/// plus a `prefix` filter (same shape as [`AmazonS3ReadSettings`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleCloudStorageReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
}
/// Connector read settings for FTP; extends `StoreReadSettings` via serde flatten.
/// Unlike the modified-datetime-filtering connectors, FTP exposes a `useBinaryTransfer` toggle instead.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FtpReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "useBinaryTransfer", default, skip_serializing_if = "Option::is_none")]
    pub use_binary_transfer: Option<bool>,
}
/// Connector read settings for SFTP; extends `StoreReadSettings` via serde flatten.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SftpReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
}
/// Connector read settings for HTTP sources; extends `StoreReadSettings` via serde flatten.
/// Carries request shape (method/body/headers/timeout) rather than file-globbing options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(rename = "requestMethod", default, skip_serializing_if = "Option::is_none")]
    pub request_method: Option<serde_json::Value>,
    #[serde(rename = "requestBody", default, skip_serializing_if = "Option::is_none")]
    pub request_body: Option<serde_json::Value>,
    #[serde(rename = "additionalHeaders", default, skip_serializing_if = "Option::is_none")]
    pub additional_headers: Option<serde_json::Value>,
    #[serde(rename = "requestTimeout", default, skip_serializing_if = "Option::is_none")]
    pub request_timeout: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
}
/// Connector read settings for HDFS; extends `StoreReadSettings` via serde flatten.
/// Adds optional DistCp copy configuration on top of the common file-globbing options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdfsReadSettings {
    #[serde(flatten)]
    pub store_read_settings: StoreReadSettings,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "wildcardFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_folder_path: Option<serde_json::Value>,
    #[serde(rename = "wildcardFileName", default, skip_serializing_if = "Option::is_none")]
    pub wildcard_file_name: Option<serde_json::Value>,
    #[serde(rename = "fileListPath", default, skip_serializing_if = "Option::is_none")]
    pub file_list_path: Option<serde_json::Value>,
    #[serde(rename = "enablePartitionDiscovery", default, skip_serializing_if = "Option::is_none")]
    pub enable_partition_discovery: Option<bool>,
    #[serde(rename = "partitionRootPath", default, skip_serializing_if = "Option::is_none")]
    pub partition_root_path: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
    #[serde(rename = "distcpSettings", default, skip_serializing_if = "Option::is_none")]
    pub distcp_settings: Option<DistcpSettings>,
    #[serde(rename = "deleteFilesAfterCompletion", default, skip_serializing_if = "Option::is_none")]
    pub delete_files_after_completion: Option<serde_json::Value>,
}
/// Base (polymorphic) write settings shared by all store sinks.
/// `type_` carries the service-side discriminator; concrete variants flatten this struct in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StoreWriteSettings {
    #[serde(rename = "type")]
    pub type_: String,
    #[serde(rename = "maxConcurrentConnections", default, skip_serializing_if = "Option::is_none")]
    pub max_concurrent_connections: Option<serde_json::Value>,
    #[serde(rename = "disableMetricsCollection", default, skip_serializing_if = "Option::is_none")]
    pub disable_metrics_collection: Option<serde_json::Value>,
    #[serde(rename = "copyBehavior", default, skip_serializing_if = "Option::is_none")]
    pub copy_behavior: Option<serde_json::Value>,
}
/// Write settings for Azure Blob Storage; extends `StoreWriteSettings` with an upload block size (MB).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobStorageWriteSettings {
    #[serde(flatten)]
    pub store_write_settings: StoreWriteSettings,
    #[serde(rename = "blockSizeInMB", default, skip_serializing_if = "Option::is_none")]
    pub block_size_in_mb: Option<serde_json::Value>,
}
/// Write settings for Azure Data Lake Storage Gen2 (Blob FS); extends `StoreWriteSettings` with a block size (MB).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobFsWriteSettings {
    #[serde(flatten)]
    pub store_write_settings: StoreWriteSettings,
    #[serde(rename = "blockSizeInMB", default, skip_serializing_if = "Option::is_none")]
    pub block_size_in_mb: Option<serde_json::Value>,
}
/// Write settings for Azure Data Lake Store (Gen1); extends `StoreWriteSettings` with an expiry datetime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeStoreWriteSettings {
    #[serde(flatten)]
    pub store_write_settings: StoreWriteSettings,
    #[serde(rename = "expiryDateTime", default, skip_serializing_if = "Option::is_none")]
    pub expiry_date_time: Option<serde_json::Value>,
}
/// Write settings for a file server sink; adds no fields beyond the flattened `StoreWriteSettings`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileServerWriteSettings {
    #[serde(flatten)]
    pub store_write_settings: StoreWriteSettings,
}
/// Write settings for Azure File Storage; adds no fields beyond the flattened `StoreWriteSettings`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFileStorageWriteSettings {
    #[serde(flatten)]
    pub store_write_settings: StoreWriteSettings,
}
/// Base (polymorphic) format read settings; `type_` is the service-side discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FormatReadSettings {
    #[serde(rename = "type")]
    pub type_: String,
}
/// Base (polymorphic) compression read settings; `type_` is the service-side discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CompressionReadSettings {
    #[serde(rename = "type")]
    pub type_: String,
}
/// Zip-deflate decompression settings; extends `CompressionReadSettings` via serde flatten.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ZipDeflateReadSettings {
    #[serde(flatten)]
    pub compression_read_settings: CompressionReadSettings,
    #[serde(rename = "preserveZipFileNameAsFolder", default, skip_serializing_if = "Option::is_none")]
    pub preserve_zip_file_name_as_folder: Option<serde_json::Value>,
}
/// Tar decompression settings; extends `CompressionReadSettings` via serde flatten.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TarReadSettings {
    #[serde(flatten)]
    pub compression_read_settings: CompressionReadSettings,
    #[serde(rename = "preserveCompressionFileNameAsFolder", default, skip_serializing_if = "Option::is_none")]
    pub preserve_compression_file_name_as_folder: Option<serde_json::Value>,
}
/// Tar+GZip decompression settings; extends `CompressionReadSettings` via serde flatten.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TarGZipReadSettings {
    #[serde(flatten)]
    pub compression_read_settings: CompressionReadSettings,
    #[serde(rename = "preserveCompressionFileNameAsFolder", default, skip_serializing_if = "Option::is_none")]
    pub preserve_compression_file_name_as_folder: Option<serde_json::Value>,
}
/// Read settings for delimited text; extends `FormatReadSettings` and can carry nested compression settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DelimitedTextReadSettings {
    #[serde(flatten)]
    pub format_read_settings: FormatReadSettings,
    #[serde(rename = "skipLineCount", default, skip_serializing_if = "Option::is_none")]
    pub skip_line_count: Option<serde_json::Value>,
    #[serde(rename = "compressionProperties", default, skip_serializing_if = "Option::is_none")]
    pub compression_properties: Option<CompressionReadSettings>,
}
/// Read settings for JSON; extends `FormatReadSettings` and can carry nested compression settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonReadSettings {
    #[serde(flatten)]
    pub format_read_settings: FormatReadSettings,
    #[serde(rename = "compressionProperties", default, skip_serializing_if = "Option::is_none")]
    pub compression_properties: Option<CompressionReadSettings>,
}
/// Read settings for XML; extends `FormatReadSettings` with validation, type-detection and namespace options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XmlReadSettings {
    #[serde(flatten)]
    pub format_read_settings: FormatReadSettings,
    #[serde(rename = "compressionProperties", default, skip_serializing_if = "Option::is_none")]
    pub compression_properties: Option<CompressionReadSettings>,
    #[serde(rename = "validationMode", default, skip_serializing_if = "Option::is_none")]
    pub validation_mode: Option<serde_json::Value>,
    #[serde(rename = "detectDataType", default, skip_serializing_if = "Option::is_none")]
    pub detect_data_type: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub namespaces: Option<serde_json::Value>,
    #[serde(rename = "namespacePrefixes", default, skip_serializing_if = "Option::is_none")]
    pub namespace_prefixes: Option<serde_json::Value>,
}
/// Read settings for binary data; extends `FormatReadSettings` and can carry nested compression settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BinaryReadSettings {
    #[serde(flatten)]
    pub format_read_settings: FormatReadSettings,
    #[serde(rename = "compressionProperties", default, skip_serializing_if = "Option::is_none")]
    pub compression_properties: Option<CompressionReadSettings>,
}
/// Base (polymorphic) format write settings; `type_` is the service-side discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FormatWriteSettings {
    #[serde(rename = "type")]
    pub type_: String,
}
/// Write settings for Avro output; extends `FormatWriteSettings`.
/// `record_name`/`record_namespace` are plain strings (not expressions) in the service schema.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvroWriteSettings {
    #[serde(flatten)]
    pub format_write_settings: FormatWriteSettings,
    #[serde(rename = "recordName", default, skip_serializing_if = "Option::is_none")]
    pub record_name: Option<String>,
    #[serde(rename = "recordNamespace", default, skip_serializing_if = "Option::is_none")]
    pub record_namespace: Option<String>,
    #[serde(rename = "maxRowsPerFile", default, skip_serializing_if = "Option::is_none")]
    pub max_rows_per_file: Option<serde_json::Value>,
    #[serde(rename = "fileNamePrefix", default, skip_serializing_if = "Option::is_none")]
    pub file_name_prefix: Option<serde_json::Value>,
}
/// Write settings for ORC output; extends `FormatWriteSettings` with file-splitting options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OrcWriteSettings {
    #[serde(flatten)]
    pub format_write_settings: FormatWriteSettings,
    #[serde(rename = "maxRowsPerFile", default, skip_serializing_if = "Option::is_none")]
    pub max_rows_per_file: Option<serde_json::Value>,
    #[serde(rename = "fileNamePrefix", default, skip_serializing_if = "Option::is_none")]
    pub file_name_prefix: Option<serde_json::Value>,
}
/// Write settings for Parquet output; extends `FormatWriteSettings` with file-splitting options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParquetWriteSettings {
    #[serde(flatten)]
    pub format_write_settings: FormatWriteSettings,
    #[serde(rename = "maxRowsPerFile", default, skip_serializing_if = "Option::is_none")]
    pub max_rows_per_file: Option<serde_json::Value>,
    #[serde(rename = "fileNamePrefix", default, skip_serializing_if = "Option::is_none")]
    pub file_name_prefix: Option<serde_json::Value>,
}
/// Write settings for delimited text output; extends `FormatWriteSettings`.
/// Note: `file_extension` is required by the schema (no `Option`, always serialized).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DelimitedTextWriteSettings {
    #[serde(flatten)]
    pub format_write_settings: FormatWriteSettings,
    #[serde(rename = "quoteAllText", default, skip_serializing_if = "Option::is_none")]
    pub quote_all_text: Option<serde_json::Value>,
    #[serde(rename = "fileExtension")]
    pub file_extension: serde_json::Value,
    #[serde(rename = "maxRowsPerFile", default, skip_serializing_if = "Option::is_none")]
    pub max_rows_per_file: Option<serde_json::Value>,
    #[serde(rename = "fileNamePrefix", default, skip_serializing_if = "Option::is_none")]
    pub file_name_prefix: Option<serde_json::Value>,
}
/// Write settings for JSON output; extends `FormatWriteSettings`.
/// `file_pattern` is untyped JSON here; `JsonWriteFilePattern` enumerates the known values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonWriteSettings {
    #[serde(flatten)]
    pub format_write_settings: FormatWriteSettings,
    #[serde(rename = "filePattern", default, skip_serializing_if = "Option::is_none")]
    pub file_pattern: Option<serde_json::Value>,
}
/// Known values for the JSON write `filePattern` setting (wire names via serde rename).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum JsonWriteFilePattern {
    #[serde(rename = "setOfObjects")]
    SetOfObjects,
    #[serde(rename = "arrayOfObjects")]
    ArrayOfObjects,
}
/// Copy-activity source for Avro data; extends `CopySource` via serde flatten.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvroSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Excel data; extends `CopySource` via serde flatten.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExcelSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Parquet data; extends `CopySource` via serde flatten.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParquetSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for delimited text; extends `CopySource` and pairs store settings with format settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DelimitedTextSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<DelimitedTextReadSettings>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for JSON; extends `CopySource` and pairs store settings with format settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<JsonReadSettings>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for XML; extends `CopySource` and pairs store settings with format settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XmlSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<XmlReadSettings>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for ORC data; extends `CopySource` via serde flatten.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OrcSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity sink for delimited text; extends `CopySink` with store and format write settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DelimitedTextSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreWriteSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<DelimitedTextWriteSettings>,
}
/// Copy-activity sink for JSON; extends `CopySink` with store and format write settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreWriteSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<JsonWriteSettings>,
}
/// Copy-activity sink for ORC; extends `CopySink` with store and format write settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OrcSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreWriteSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<OrcWriteSettings>,
}
/// The Copy activity itself: an `ExecutionActivity` plus copy-specific `typeProperties`
/// and its input/output dataset references (empty vecs are omitted on serialization).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CopyActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: CopyActivityTypeProperties,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub inputs: Vec<DatasetReference>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub outputs: Vec<DatasetReference>,
}
/// Type properties of a Copy activity: the mandatory source/sink pair plus optional
/// staging, parallelism, fault-tolerance (skip/redirect incompatible rows), logging,
/// and metadata-preservation knobs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CopyActivityTypeProperties {
    pub source: CopySource,
    pub sink: CopySink,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub translator: Option<serde_json::Value>,
    #[serde(rename = "enableStaging", default, skip_serializing_if = "Option::is_none")]
    pub enable_staging: Option<serde_json::Value>,
    #[serde(rename = "stagingSettings", default, skip_serializing_if = "Option::is_none")]
    pub staging_settings: Option<StagingSettings>,
    #[serde(rename = "parallelCopies", default, skip_serializing_if = "Option::is_none")]
    pub parallel_copies: Option<serde_json::Value>,
    #[serde(rename = "dataIntegrationUnits", default, skip_serializing_if = "Option::is_none")]
    pub data_integration_units: Option<serde_json::Value>,
    #[serde(rename = "enableSkipIncompatibleRow", default, skip_serializing_if = "Option::is_none")]
    pub enable_skip_incompatible_row: Option<serde_json::Value>,
    #[serde(rename = "redirectIncompatibleRowSettings", default, skip_serializing_if = "Option::is_none")]
    pub redirect_incompatible_row_settings: Option<RedirectIncompatibleRowSettings>,
    #[serde(rename = "logStorageSettings", default, skip_serializing_if = "Option::is_none")]
    pub log_storage_settings: Option<LogStorageSettings>,
    #[serde(rename = "logSettings", default, skip_serializing_if = "Option::is_none")]
    pub log_settings: Option<LogSettings>,
    #[serde(rename = "preserveRules", default, skip_serializing_if = "Vec::is_empty")]
    pub preserve_rules: Vec<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub preserve: Vec<serde_json::Value>,
    #[serde(rename = "validateDataConsistency", default, skip_serializing_if = "Option::is_none")]
    pub validate_data_consistency: Option<serde_json::Value>,
    #[serde(rename = "skipErrorFile", default, skip_serializing_if = "Option::is_none")]
    pub skip_error_file: Option<SkipErrorFile>,
}
/// Base (polymorphic) Copy-activity source; `type_` carries the discriminator and
/// concrete sources flatten this struct in alongside their own fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CopySource {
    #[serde(rename = "type")]
    pub type_: String,
    #[serde(rename = "sourceRetryCount", default, skip_serializing_if = "Option::is_none")]
    pub source_retry_count: Option<serde_json::Value>,
    #[serde(rename = "sourceRetryWait", default, skip_serializing_if = "Option::is_none")]
    pub source_retry_wait: Option<serde_json::Value>,
    #[serde(rename = "maxConcurrentConnections", default, skip_serializing_if = "Option::is_none")]
    pub max_concurrent_connections: Option<serde_json::Value>,
    #[serde(rename = "disableMetricsCollection", default, skip_serializing_if = "Option::is_none")]
    pub disable_metrics_collection: Option<serde_json::Value>,
}
/// Copy-activity source for binary data; extends `CopySource` with store/format read settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BinarySource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<BinaryReadSettings>,
}
/// Intermediate base for tabular (query-style) sources: `CopySource` plus a query timeout
/// and additional-columns mapping. Database-specific sources flatten this struct in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TabularSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "queryTimeout", default, skip_serializing_if = "Option::is_none")]
    pub query_timeout: Option<serde_json::Value>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Azure Table storage; extends `TabularSource` with a table query
/// and a flag controlling behavior when the table is not found.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureTableSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(rename = "azureTableSourceQuery", default, skip_serializing_if = "Option::is_none")]
    pub azure_table_source_query: Option<serde_json::Value>,
    #[serde(rename = "azureTableSourceIgnoreTableNotFound", default, skip_serializing_if = "Option::is_none")]
    pub azure_table_source_ignore_table_not_found: Option<serde_json::Value>,
}
/// Copy-activity source for Azure Blob storage (legacy, non-format-settings variant);
/// extends `CopySource` with empty-as-null, header-skip and recursion options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(rename = "treatEmptyAsNull", default, skip_serializing_if = "Option::is_none")]
    pub treat_empty_as_null: Option<serde_json::Value>,
    #[serde(rename = "skipHeaderLineCount", default, skip_serializing_if = "Option::is_none")]
    pub skip_header_line_count: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
}
/// Copy-activity source for a DocumentDB (Cosmos DB SQL API, legacy name) collection;
/// extends `CopySource` directly and so repeats the query-timeout/additional-columns fields itself.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DocumentDbCollectionSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "nestingSeparator", default, skip_serializing_if = "Option::is_none")]
    pub nesting_separator: Option<serde_json::Value>,
    #[serde(rename = "queryTimeout", default, skip_serializing_if = "Option::is_none")]
    pub query_timeout: Option<serde_json::Value>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Cosmos DB (SQL API); extends `CopySource` with query,
/// paging, preferred-regions and datetime-detection options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbSqlApiSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "pageSize", default, skip_serializing_if = "Option::is_none")]
    pub page_size: Option<serde_json::Value>,
    #[serde(rename = "preferredRegions", default, skip_serializing_if = "Option::is_none")]
    pub preferred_regions: Option<serde_json::Value>,
    #[serde(rename = "detectDatetime", default, skip_serializing_if = "Option::is_none")]
    pub detect_datetime: Option<serde_json::Value>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Dynamics; extends `CopySource` with a query and additional columns.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Dynamics CRM; same shape as `DynamicsSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsCrmSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Common Data Service for Apps; same shape as `DynamicsSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommonDataServiceForAppsSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Generic relational-database source; extends `CopySource` with a query and additional columns.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RelationalSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Informix; extends `TabularSource` with an optional query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InformixSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Microsoft Access; extends `CopySource` (not `TabularSource`)
/// and so carries its own additional-columns field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftAccessSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for DB2; extends `TabularSource` with an optional query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Db2Source {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for ODBC; extends `TabularSource` with an optional query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OdbcSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for MySQL; extends `TabularSource` with an optional query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MySqlSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for PostgreSQL; extends `TabularSource` with an optional query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PostgreSqlSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Sybase; extends `TabularSource` with an optional query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SybaseSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for SAP BW; extends `TabularSource` with an optional (MDX) query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapBwSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for OData; extends `CopySource` with query, HTTP timeout
/// and additional-columns options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ODataSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "httpRequestTimeout", default, skip_serializing_if = "Option::is_none")]
    pub http_request_timeout: Option<serde_json::Value>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Salesforce; extends `TabularSource`.
/// `read_behavior` selects SOQL `Query` vs `QueryAll` (includes deleted records);
/// its enum lives in the companion `salesforce_source` module.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "readBehavior", default, skip_serializing_if = "Option::is_none")]
    pub read_behavior: Option<salesforce_source::ReadBehavior>,
}
/// Companion module holding enums scoped to `SalesforceSource`.
pub mod salesforce_source {
    use super::*;
    /// Read behavior for the Salesforce source (serialized with variant names as-is).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ReadBehavior {
        Query,
        QueryAll,
    }
}
/// Copy-activity source for Salesforce Service Cloud; extends `CopySource` directly
/// (unlike `SalesforceSource`, which extends `TabularSource`). `read_behavior` selects
/// `Query` vs `QueryAll` via the companion `salesforce_service_cloud_source` module.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceServiceCloudSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "readBehavior", default, skip_serializing_if = "Option::is_none")]
    pub read_behavior: Option<salesforce_service_cloud_source::ReadBehavior>,
    #[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
    pub additional_columns: Option<serde_json::Value>,
}
/// Companion module holding enums scoped to `SalesforceServiceCloudSource`.
pub mod salesforce_service_cloud_source {
    use super::*;
    /// Read behavior for the Salesforce Service Cloud source.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ReadBehavior {
        Query,
        QueryAll,
    }
}
/// Copy-activity source for SAP Cloud for Customer: `TabularSource` fields
/// plus an optional query and a per-request HTTP timeout.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapCloudForCustomerSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
#[serde(rename = "httpRequestTimeout", default, skip_serializing_if = "Option::is_none")]
pub http_request_timeout: Option<serde_json::Value>,
}
/// Copy-activity source for SAP ECC: `TabularSource` fields plus an optional
/// query and a per-request HTTP timeout. Mirrors `SapCloudForCustomerSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapEccSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
#[serde(rename = "httpRequestTimeout", default, skip_serializing_if = "Option::is_none")]
pub http_request_timeout: Option<serde_json::Value>,
}
/// Copy-activity source for SAP HANA, with optional packet sizing and
/// partitioned-read configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapHanaSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
/// Network packet size used when reading from HANA.
#[serde(rename = "packetSize", default, skip_serializing_if = "Option::is_none")]
pub packet_size: Option<serde_json::Value>,
/// Partitioning mechanism selector; settings below apply when partitioning is enabled.
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<SapHanaPartitionSettings>,
}
/// Partition settings for `SapHanaSource`; HANA only needs the column name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapHanaPartitionSettings {
#[serde(rename = "partitionColumnName", default, skip_serializing_if = "Option::is_none")]
pub partition_column_name: Option<serde_json::Value>,
}
/// Copy-activity source for SAP Open Hub destinations (BW open hub table reads).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapOpenHubSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
/// Whether to skip the data of the last open-hub request.
#[serde(rename = "excludeLastRequest", default, skip_serializing_if = "Option::is_none")]
pub exclude_last_request: Option<serde_json::Value>,
/// Only requests with an ID greater than this value are read.
#[serde(rename = "baseRequestId", default, skip_serializing_if = "Option::is_none")]
pub base_request_id: Option<serde_json::Value>,
/// Custom RFC function module used instead of the default table-read RFC.
#[serde(rename = "customRfcReadTableFunctionModule", default, skip_serializing_if = "Option::is_none")]
pub custom_rfc_read_table_function_module: Option<serde_json::Value>,
/// Column delimiter the RFC uses when returning row data.
#[serde(rename = "sapDataColumnDelimiter", default, skip_serializing_if = "Option::is_none")]
pub sap_data_column_delimiter: Option<serde_json::Value>,
}
/// Copy-activity source for generic SAP tables, read via RFC, with optional
/// row limiting, RFC tuning, and range-partitioned reads.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapTableSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
/// Maximum number of rows to read.
#[serde(rename = "rowCount", default, skip_serializing_if = "Option::is_none")]
pub row_count: Option<serde_json::Value>,
/// Number of leading rows to skip.
#[serde(rename = "rowSkips", default, skip_serializing_if = "Option::is_none")]
pub row_skips: Option<serde_json::Value>,
/// Fields (columns) of the SAP table to retrieve.
#[serde(rename = "rfcTableFields", default, skip_serializing_if = "Option::is_none")]
pub rfc_table_fields: Option<serde_json::Value>,
/// Options (filter clauses) passed to the RFC table read.
#[serde(rename = "rfcTableOptions", default, skip_serializing_if = "Option::is_none")]
pub rfc_table_options: Option<serde_json::Value>,
#[serde(rename = "batchSize", default, skip_serializing_if = "Option::is_none")]
pub batch_size: Option<serde_json::Value>,
/// Custom RFC function module used instead of the default table-read RFC.
#[serde(rename = "customRfcReadTableFunctionModule", default, skip_serializing_if = "Option::is_none")]
pub custom_rfc_read_table_function_module: Option<serde_json::Value>,
/// Column delimiter the RFC uses when returning row data.
#[serde(rename = "sapDataColumnDelimiter", default, skip_serializing_if = "Option::is_none")]
pub sap_data_column_delimiter: Option<serde_json::Value>,
/// Partitioning mechanism selector; settings below apply when partitioning is enabled.
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<SapTablePartitionSettings>,
}
/// Range-partition settings for `SapTableSource`: column, bounds, and a cap on
/// the number of partitions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapTablePartitionSettings {
#[serde(rename = "partitionColumnName", default, skip_serializing_if = "Option::is_none")]
pub partition_column_name: Option<serde_json::Value>,
#[serde(rename = "partitionUpperBound", default, skip_serializing_if = "Option::is_none")]
pub partition_upper_bound: Option<serde_json::Value>,
#[serde(rename = "partitionLowerBound", default, skip_serializing_if = "Option::is_none")]
pub partition_lower_bound: Option<serde_json::Value>,
#[serde(rename = "maxPartitionsNumber", default, skip_serializing_if = "Option::is_none")]
pub max_partitions_number: Option<serde_json::Value>,
}
/// Copy-activity sink that writes to a REST endpoint. Flattens the shared
/// `CopySink` fields and adds HTTP request configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestSink {
#[serde(flatten)]
pub copy_sink: CopySink,
/// HTTP method used for the write requests.
#[serde(rename = "requestMethod", default, skip_serializing_if = "Option::is_none")]
pub request_method: Option<serde_json::Value>,
#[serde(rename = "additionalHeaders", default, skip_serializing_if = "Option::is_none")]
pub additional_headers: Option<serde_json::Value>,
#[serde(rename = "httpRequestTimeout", default, skip_serializing_if = "Option::is_none")]
pub http_request_timeout: Option<serde_json::Value>,
/// Delay between consecutive requests.
#[serde(rename = "requestInterval", default, skip_serializing_if = "Option::is_none")]
pub request_interval: Option<serde_json::Value>,
/// Compression applied to request bodies (e.g. gzip) — presumably; TODO confirm.
#[serde(rename = "httpCompressionType", default, skip_serializing_if = "Option::is_none")]
pub http_compression_type: Option<serde_json::Value>,
}
/// Copy-activity source that reads from a REST endpoint, with request shaping
/// and pagination configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestSource {
#[serde(flatten)]
pub copy_source: CopySource,
/// HTTP method used for the read requests.
#[serde(rename = "requestMethod", default, skip_serializing_if = "Option::is_none")]
pub request_method: Option<serde_json::Value>,
/// Body sent with the request (relevant for POST-style reads).
#[serde(rename = "requestBody", default, skip_serializing_if = "Option::is_none")]
pub request_body: Option<serde_json::Value>,
#[serde(rename = "additionalHeaders", default, skip_serializing_if = "Option::is_none")]
pub additional_headers: Option<serde_json::Value>,
/// Rules describing how to compose requests for subsequent pages.
#[serde(rename = "paginationRules", default, skip_serializing_if = "Option::is_none")]
pub pagination_rules: Option<serde_json::Value>,
#[serde(rename = "httpRequestTimeout", default, skip_serializing_if = "Option::is_none")]
pub http_request_timeout: Option<serde_json::Value>,
/// Delay between consecutive requests.
#[serde(rename = "requestInterval", default, skip_serializing_if = "Option::is_none")]
pub request_interval: Option<serde_json::Value>,
/// Extra columns to append to the copied data.
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Generic SQL copy-activity source: query or stored procedure, isolation
/// level, and optional partitioned reads.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
/// Inline SQL text to execute; mutually exclusive with the stored procedure — presumably; TODO confirm.
#[serde(rename = "sqlReaderQuery", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_query: Option<serde_json::Value>,
#[serde(rename = "sqlReaderStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_stored_procedure_name: Option<serde_json::Value>,
/// Parameters passed when the stored procedure form is used.
#[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
pub stored_procedure_parameters: Option<serde_json::Value>,
#[serde(rename = "isolationLevel", default, skip_serializing_if = "Option::is_none")]
pub isolation_level: Option<serde_json::Value>,
/// Partitioning mechanism selector; settings below apply when partitioning is enabled.
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<SqlPartitionSettings>,
}
/// SQL Server copy-activity source. Structurally identical to
/// `AmazonRdsForSqlServerSource`, `AzureSqlSource` and `SqlMiSource`; all share
/// `SqlPartitionSettings` for partitioned reads.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlServerSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(rename = "sqlReaderQuery", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_query: Option<serde_json::Value>,
#[serde(rename = "sqlReaderStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_stored_procedure_name: Option<serde_json::Value>,
#[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
pub stored_procedure_parameters: Option<serde_json::Value>,
/// Whether extended (e.g. spatial/UDT) types are produced — presumably; TODO confirm.
#[serde(rename = "produceAdditionalTypes", default, skip_serializing_if = "Option::is_none")]
pub produce_additional_types: Option<serde_json::Value>,
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<SqlPartitionSettings>,
}
/// Amazon RDS for SQL Server copy-activity source; same shape as `SqlServerSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForSqlServerSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(rename = "sqlReaderQuery", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_query: Option<serde_json::Value>,
#[serde(rename = "sqlReaderStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_stored_procedure_name: Option<serde_json::Value>,
#[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
pub stored_procedure_parameters: Option<serde_json::Value>,
#[serde(rename = "produceAdditionalTypes", default, skip_serializing_if = "Option::is_none")]
pub produce_additional_types: Option<serde_json::Value>,
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<SqlPartitionSettings>,
}
/// Azure SQL Database copy-activity source; same shape as `SqlServerSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(rename = "sqlReaderQuery", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_query: Option<serde_json::Value>,
#[serde(rename = "sqlReaderStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_stored_procedure_name: Option<serde_json::Value>,
#[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
pub stored_procedure_parameters: Option<serde_json::Value>,
#[serde(rename = "produceAdditionalTypes", default, skip_serializing_if = "Option::is_none")]
pub produce_additional_types: Option<serde_json::Value>,
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<SqlPartitionSettings>,
}
/// Azure SQL Managed Instance copy-activity source; same shape as `SqlServerSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlMiSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(rename = "sqlReaderQuery", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_query: Option<serde_json::Value>,
#[serde(rename = "sqlReaderStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_stored_procedure_name: Option<serde_json::Value>,
#[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
pub stored_procedure_parameters: Option<serde_json::Value>,
#[serde(rename = "produceAdditionalTypes", default, skip_serializing_if = "Option::is_none")]
pub produce_additional_types: Option<serde_json::Value>,
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<SqlPartitionSettings>,
}
/// Azure Synapse (SQL DW) copy-activity source; like `SqlServerSource` but
/// without `produceAdditionalTypes`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDwSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(rename = "sqlReaderQuery", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_query: Option<serde_json::Value>,
#[serde(rename = "sqlReaderStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
pub sql_reader_stored_procedure_name: Option<serde_json::Value>,
#[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
pub stored_procedure_parameters: Option<serde_json::Value>,
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<SqlPartitionSettings>,
}
/// Range-partition settings shared by the SQL-family sources: the partition
/// column plus optional upper/lower bounds of the value range.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlPartitionSettings {
#[serde(rename = "partitionColumnName", default, skip_serializing_if = "Option::is_none")]
pub partition_column_name: Option<serde_json::Value>,
#[serde(rename = "partitionUpperBound", default, skip_serializing_if = "Option::is_none")]
pub partition_upper_bound: Option<serde_json::Value>,
#[serde(rename = "partitionLowerBound", default, skip_serializing_if = "Option::is_none")]
pub partition_lower_bound: Option<serde_json::Value>,
}
/// Copy-activity source for file-system reads; `recursive` controls whether
/// sub-folders are traversed.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileSystemSource {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub recursive: Option<serde_json::Value>,
/// Extra columns to append to the copied data.
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for HDFS; can offload the copy to DistCp when
/// `distcp_settings` is provided.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdfsSource {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub recursive: Option<serde_json::Value>,
#[serde(rename = "distcpSettings", default, skip_serializing_if = "Option::is_none")]
pub distcp_settings: Option<DistcpSettings>,
}
/// DistCp configuration for `HdfsSource`. Note: endpoint and script path are
/// required (no `Option`), only the extra options are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DistcpSettings {
#[serde(rename = "resourceManagerEndpoint")]
pub resource_manager_endpoint: serde_json::Value,
#[serde(rename = "tempScriptPath")]
pub temp_script_path: serde_json::Value,
#[serde(rename = "distcpOptions", default, skip_serializing_if = "Option::is_none")]
pub distcp_options: Option<serde_json::Value>,
}
/// Copy-activity source for Azure Database for MySQL: `TabularSource` fields
/// plus an optional query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMySqlSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Azure Data Explorer (Kusto). Unlike most sources
/// here, `query` is REQUIRED (not wrapped in `Option`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataExplorerSource {
#[serde(flatten)]
pub copy_source: CopySource,
pub query: serde_json::Value,
/// Disables the result-set truncation limit when set.
#[serde(rename = "noTruncation", default, skip_serializing_if = "Option::is_none")]
pub no_truncation: Option<serde_json::Value>,
#[serde(rename = "queryTimeout", default, skip_serializing_if = "Option::is_none")]
pub query_timeout: Option<serde_json::Value>,
/// Extra columns to append to the copied data.
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Oracle, with optional reader query, timeout, and
/// physical/range partitioned reads.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleSource {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(rename = "oracleReaderQuery", default, skip_serializing_if = "Option::is_none")]
pub oracle_reader_query: Option<serde_json::Value>,
#[serde(rename = "queryTimeout", default, skip_serializing_if = "Option::is_none")]
pub query_timeout: Option<serde_json::Value>,
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<OraclePartitionSettings>,
/// Extra columns to append to the copied data.
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Partition settings for `OracleSource`: either explicit partition names or a
/// column with value-range bounds.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OraclePartitionSettings {
#[serde(rename = "partitionNames", default, skip_serializing_if = "Option::is_none")]
pub partition_names: Option<serde_json::Value>,
#[serde(rename = "partitionColumnName", default, skip_serializing_if = "Option::is_none")]
pub partition_column_name: Option<serde_json::Value>,
#[serde(rename = "partitionUpperBound", default, skip_serializing_if = "Option::is_none")]
pub partition_upper_bound: Option<serde_json::Value>,
#[serde(rename = "partitionLowerBound", default, skip_serializing_if = "Option::is_none")]
pub partition_lower_bound: Option<serde_json::Value>,
}
/// Copy-activity source for Amazon RDS for Oracle; mirrors `OracleSource` but
/// with its own partition-settings type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForOracleSource {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(rename = "oracleReaderQuery", default, skip_serializing_if = "Option::is_none")]
pub oracle_reader_query: Option<serde_json::Value>,
#[serde(rename = "queryTimeout", default, skip_serializing_if = "Option::is_none")]
pub query_timeout: Option<serde_json::Value>,
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<AmazonRdsForOraclePartitionSettings>,
/// Extra columns to append to the copied data.
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Partitioning mechanisms for `AmazonRdsForOracleSource`. Note that
/// `partition_option` on the source is a free-form `serde_json::Value`
/// (expression-capable), so this enum documents the known literal values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AmazonRdsForOraclePartitionOption {
None,
PhysicalPartitionsOfTable,
DynamicRange,
}
/// Partition settings for `AmazonRdsForOracleSource`: explicit partition names
/// or a column with value-range bounds.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForOraclePartitionSettings {
#[serde(rename = "partitionNames", default, skip_serializing_if = "Option::is_none")]
pub partition_names: Option<serde_json::Value>,
#[serde(rename = "partitionColumnName", default, skip_serializing_if = "Option::is_none")]
pub partition_column_name: Option<serde_json::Value>,
#[serde(rename = "partitionUpperBound", default, skip_serializing_if = "Option::is_none")]
pub partition_upper_bound: Option<serde_json::Value>,
#[serde(rename = "partitionLowerBound", default, skip_serializing_if = "Option::is_none")]
pub partition_lower_bound: Option<serde_json::Value>,
}
/// Copy-activity source for Teradata with optional partitioned reads.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TeradataSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
#[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
pub partition_option: Option<serde_json::Value>,
#[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
pub partition_settings: Option<TeradataPartitionSettings>,
}
/// Range-partition settings for `TeradataSource`: column plus optional bounds.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TeradataPartitionSettings {
#[serde(rename = "partitionColumnName", default, skip_serializing_if = "Option::is_none")]
pub partition_column_name: Option<serde_json::Value>,
#[serde(rename = "partitionUpperBound", default, skip_serializing_if = "Option::is_none")]
pub partition_upper_bound: Option<serde_json::Value>,
#[serde(rename = "partitionLowerBound", default, skip_serializing_if = "Option::is_none")]
pub partition_lower_bound: Option<serde_json::Value>,
}
/// Copy-activity source for web tables: just the shared `CopySource` fields
/// plus optional additional columns.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebSource {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Cassandra, with an optional query and read
/// consistency level.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CassandraSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
#[serde(rename = "consistencyLevel", default, skip_serializing_if = "Option::is_none")]
pub consistency_level: Option<cassandra_source::ConsistencyLevel>,
}
/// Companion namespace holding enums scoped to `CassandraSource`.
pub mod cassandra_source {
use super::*;
/// Cassandra read consistency level; each variant serializes to the
/// upper-snake-case wire value given in its `rename` attribute.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ConsistencyLevel {
#[serde(rename = "ALL")]
All,
#[serde(rename = "EACH_QUORUM")]
EachQuorum,
#[serde(rename = "QUORUM")]
Quorum,
#[serde(rename = "LOCAL_QUORUM")]
LocalQuorum,
#[serde(rename = "ONE")]
One,
#[serde(rename = "TWO")]
Two,
#[serde(rename = "THREE")]
Three,
#[serde(rename = "LOCAL_ONE")]
LocalOne,
#[serde(rename = "SERIAL")]
Serial,
#[serde(rename = "LOCAL_SERIAL")]
LocalSerial,
}
}
/// Copy-activity source for the legacy MongoDB connector: a single query
/// expression rather than the filter/cursor model used by the v2 connectors below.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbSource {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for MongoDB Atlas: filter document + cursor methods +
/// batch size. Same shape as `MongoDbV2Source` and `CosmosDbMongoDbApiSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbAtlasSource {
#[serde(flatten)]
pub copy_source: CopySource,
/// MongoDB query filter document; empty/absent matches all documents.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub filter: Option<serde_json::Value>,
#[serde(rename = "cursorMethods", default, skip_serializing_if = "Option::is_none")]
pub cursor_methods: Option<MongoDbCursorMethodsProperties>,
#[serde(rename = "batchSize", default, skip_serializing_if = "Option::is_none")]
pub batch_size: Option<serde_json::Value>,
#[serde(rename = "queryTimeout", default, skip_serializing_if = "Option::is_none")]
pub query_timeout: Option<serde_json::Value>,
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for the MongoDB v2 connector; see `MongoDbAtlasSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbV2Source {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub filter: Option<serde_json::Value>,
#[serde(rename = "cursorMethods", default, skip_serializing_if = "Option::is_none")]
pub cursor_methods: Option<MongoDbCursorMethodsProperties>,
#[serde(rename = "batchSize", default, skip_serializing_if = "Option::is_none")]
pub batch_size: Option<serde_json::Value>,
#[serde(rename = "queryTimeout", default, skip_serializing_if = "Option::is_none")]
pub query_timeout: Option<serde_json::Value>,
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Cosmos DB's MongoDB API; see `MongoDbAtlasSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbMongoDbApiSource {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub filter: Option<serde_json::Value>,
#[serde(rename = "cursorMethods", default, skip_serializing_if = "Option::is_none")]
pub cursor_methods: Option<MongoDbCursorMethodsProperties>,
#[serde(rename = "batchSize", default, skip_serializing_if = "Option::is_none")]
pub batch_size: Option<serde_json::Value>,
#[serde(rename = "queryTimeout", default, skip_serializing_if = "Option::is_none")]
pub query_timeout: Option<serde_json::Value>,
#[serde(rename = "additionalColumns", default, skip_serializing_if = "Option::is_none")]
pub additional_columns: Option<serde_json::Value>,
}
/// Cursor shaping shared by the MongoDB-family sources, mirroring the driver's
/// `project`/`sort`/`skip`/`limit` cursor methods.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbCursorMethodsProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub project: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sort: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub skip: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub limit: Option<serde_json::Value>,
}
/// Copy-activity source for Office 365 data, with group/user scoping and a
/// date-column time-window filter.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Office365Source {
#[serde(flatten)]
pub copy_source: CopySource,
/// Restricts extraction to members of these groups.
#[serde(rename = "allowedGroups", default, skip_serializing_if = "Option::is_none")]
pub allowed_groups: Option<serde_json::Value>,
#[serde(rename = "userScopeFilterUri", default, skip_serializing_if = "Option::is_none")]
pub user_scope_filter_uri: Option<serde_json::Value>,
/// Column used with `start_time`/`end_time` to bound the extraction window.
#[serde(rename = "dateFilterColumn", default, skip_serializing_if = "Option::is_none")]
pub date_filter_column: Option<serde_json::Value>,
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<serde_json::Value>,
#[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
pub end_time: Option<serde_json::Value>,
/// Columns to project into the output.
#[serde(rename = "outputColumns", default, skip_serializing_if = "Option::is_none")]
pub output_columns: Option<serde_json::Value>,
}
/// Copy-activity source for Azure Data Lake Store Gen1; `recursive` controls
/// folder traversal.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeStoreSource {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub recursive: Option<serde_json::Value>,
}
/// Copy-activity source for Azure Blob FS (ADLS Gen2), with empty-value and
/// header-skip handling.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobFsSource {
#[serde(flatten)]
pub copy_source: CopySource,
/// Treat empty field values as null when set.
#[serde(rename = "treatEmptyAsNull", default, skip_serializing_if = "Option::is_none")]
pub treat_empty_as_null: Option<serde_json::Value>,
/// Number of header lines to skip at the start of each file.
#[serde(rename = "skipHeaderLineCount", default, skip_serializing_if = "Option::is_none")]
pub skip_header_line_count: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub recursive: Option<serde_json::Value>,
}
/// Copy-activity source for plain HTTP endpoints; only a request timeout on
/// top of the shared `CopySource` fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpSource {
#[serde(flatten)]
pub copy_source: CopySource,
#[serde(rename = "httpRequestTimeout", default, skip_serializing_if = "Option::is_none")]
pub http_request_timeout: Option<serde_json::Value>,
}
/// Copy-activity source for Amazon Marketplace Web Service: `TabularSource`
/// fields plus an optional query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonMwsSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Azure Database for PostgreSQL: `TabularSource`
/// fields plus an optional query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzurePostgreSqlSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Copy-activity sink for Azure Database for PostgreSQL; `pre_copy_script`
/// runs before data is written.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzurePostgreSqlSink {
#[serde(flatten)]
pub copy_sink: CopySink,
#[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
pub pre_copy_script: Option<serde_json::Value>,
}
/// Copy-activity sink for Azure Database for MySQL; same shape as
/// `AzurePostgreSqlSink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMySqlSink {
#[serde(flatten)]
pub copy_sink: CopySink,
#[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
pub pre_copy_script: Option<serde_json::Value>,
}
// The following run of connector sources all share the same minimal shape:
// the flattened `TabularSource` fields plus an optional free-form `query`.
// Each struct exists only to carry its connector's distinct wire type name.

/// Concur copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConcurSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Couchbase copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CouchbaseSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Drill copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DrillSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Eloqua copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EloquaSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Google BigQuery copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleBigQuerySource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Greenplum copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GreenplumSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// HBase copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HBaseSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Hive copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HiveSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// HubSpot copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HubspotSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Impala copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImpalaSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Jira copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JiraSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Magento copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MagentoSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// MariaDB copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MariaDbSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Azure Database for MariaDB copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMariaDbSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Marketo copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MarketoSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// PayPal copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PaypalSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Phoenix copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PhoenixSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Presto copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrestoSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// QuickBooks copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuickBooksSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// ServiceNow copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceNowSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Shopify copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShopifySource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Spark copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SparkSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Square copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SquareSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Xero copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XeroSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Zoho copy-activity source (`TabularSource` + optional query).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ZohoSource {
#[serde(flatten)]
pub tabular_source: TabularSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Netezza, with optional parallel-partition reading.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetezzaSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    /// Optional query that overrides the dataset's table.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    /// Partition mechanism selector; interpreted together with `partition_settings`.
    #[serde(rename = "partitionOption", default, skip_serializing_if = "Option::is_none")]
    pub partition_option: Option<serde_json::Value>,
    #[serde(rename = "partitionSettings", default, skip_serializing_if = "Option::is_none")]
    pub partition_settings: Option<NetezzaPartitionSettings>,
}
/// Column name and value bounds used to split a Netezza read into partitions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetezzaPartitionSettings {
    #[serde(rename = "partitionColumnName", default, skip_serializing_if = "Option::is_none")]
    pub partition_column_name: Option<serde_json::Value>,
    #[serde(rename = "partitionUpperBound", default, skip_serializing_if = "Option::is_none")]
    pub partition_upper_bound: Option<serde_json::Value>,
    #[serde(rename = "partitionLowerBound", default, skip_serializing_if = "Option::is_none")]
    pub partition_lower_bound: Option<serde_json::Value>,
}
/// Copy-activity source for Vertica; `query` optionally overrides the dataset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VerticaSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Salesforce Marketing Cloud.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceMarketingCloudSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Oracle Responsys.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResponsysSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Dynamics AX, with a per-request HTTP timeout.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsAxSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    /// Timeout for the HTTP request issued to the service; format not constrained here.
    #[serde(rename = "httpRequestTimeout", default, skip_serializing_if = "Option::is_none")]
    pub http_request_timeout: Option<serde_json::Value>,
}
/// Copy-activity source for Oracle Service Cloud.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleServiceCloudSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Google AdWords.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleAdWordsSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
}
/// Copy-activity source for Amazon Redshift; UNLOAD staging is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRedshiftSource {
    #[serde(flatten)]
    pub tabular_source: TabularSource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    /// When present, data is staged through S3 via Redshift UNLOAD.
    #[serde(rename = "redshiftUnloadSettings", default, skip_serializing_if = "Option::is_none")]
    pub redshift_unload_settings: Option<RedshiftUnloadSettings>,
}
/// S3 staging location for Redshift UNLOAD; both fields are required by the schema.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RedshiftUnloadSettings {
    /// Reference to the S3 linked service used for staging.
    #[serde(rename = "s3LinkedServiceName")]
    pub s3_linked_service_name: LinkedServiceReference,
    #[serde(rename = "bucketName")]
    pub bucket_name: serde_json::Value,
}
/// Copy-activity source for Snowflake; note it extends `CopySource`, not `TabularSource`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnowflakeSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "exportSettings", default, skip_serializing_if = "Option::is_none")]
    pub export_settings: Option<SnowflakeExportCopyCommand>,
}
/// Polymorphic base for export settings; `type_` is the discriminator string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportSettings {
    #[serde(rename = "type")]
    pub type_: String,
}
/// Snowflake COPY-command export options, flattened onto `ExportSettings`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnowflakeExportCopyCommand {
    #[serde(flatten)]
    pub export_settings: ExportSettings,
    #[serde(rename = "additionalCopyOptions", default, skip_serializing_if = "Option::is_none")]
    pub additional_copy_options: Option<serde_json::Value>,
    #[serde(rename = "additionalFormatOptions", default, skip_serializing_if = "Option::is_none")]
    pub additional_format_options: Option<serde_json::Value>,
}
/// Copy-activity source for Azure Databricks Delta Lake.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksDeltaLakeSource {
    #[serde(flatten)]
    pub copy_source: CopySource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<serde_json::Value>,
    #[serde(rename = "exportSettings", default, skip_serializing_if = "Option::is_none")]
    pub export_settings: Option<AzureDatabricksDeltaLakeExportCommand>,
}
/// Delta Lake export command options (date/timestamp output formats).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksDeltaLakeExportCommand {
    #[serde(flatten)]
    pub export_settings: ExportSettings,
    #[serde(rename = "dateFormat", default, skip_serializing_if = "Option::is_none")]
    pub date_format: Option<serde_json::Value>,
    #[serde(rename = "timestampFormat", default, skip_serializing_if = "Option::is_none")]
    pub timestamp_format: Option<serde_json::Value>,
}
/// Copy-activity sink for Azure Databricks Delta Lake.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksDeltaLakeSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    /// SQL executed before the copy starts.
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
    #[serde(rename = "importSettings", default, skip_serializing_if = "Option::is_none")]
    pub import_settings: Option<AzureDatabricksDeltaLakeImportCommand>,
}
/// Delta Lake import command options (date/timestamp input formats).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksDeltaLakeImportCommand {
    #[serde(flatten)]
    pub import_settings: ImportSettings,
    #[serde(rename = "dateFormat", default, skip_serializing_if = "Option::is_none")]
    pub date_format: Option<serde_json::Value>,
    #[serde(rename = "timestampFormat", default, skip_serializing_if = "Option::is_none")]
    pub timestamp_format: Option<serde_json::Value>,
}
/// A single stored-procedure parameter: an untyped value plus an optional declared type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StoredProcedureParameter {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<serde_json::Value>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<StoredProcedureParameterType>,
}
/// Declared SQL type of a stored-procedure parameter.
///
/// Variant names serialize verbatim (no `#[serde(rename)]`), matching the
/// wire values `"String"`, `"Int"`, etc.
/// `Copy`, `Eq` and `Hash` are added to the generated derive set — always
/// sound for a unit-variant enum, and they let callers compare/key on the
/// type without cloning (clippy: `derive_partial_eq_without_eq`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum StoredProcedureParameterType {
    String,
    Int,
    Int64,
    Decimal,
    Guid,
    Boolean,
    Date,
}
/// Base type for every copy-activity sink, flattened into each concrete sink.
/// `type_` is the polymorphic discriminator; the remaining fields are common
/// batching/retry/connection knobs, all optional expressions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CopySink {
    #[serde(rename = "type")]
    pub type_: String,
    #[serde(rename = "writeBatchSize", default, skip_serializing_if = "Option::is_none")]
    pub write_batch_size: Option<serde_json::Value>,
    #[serde(rename = "writeBatchTimeout", default, skip_serializing_if = "Option::is_none")]
    pub write_batch_timeout: Option<serde_json::Value>,
    #[serde(rename = "sinkRetryCount", default, skip_serializing_if = "Option::is_none")]
    pub sink_retry_count: Option<serde_json::Value>,
    #[serde(rename = "sinkRetryWait", default, skip_serializing_if = "Option::is_none")]
    pub sink_retry_wait: Option<serde_json::Value>,
    #[serde(rename = "maxConcurrentConnections", default, skip_serializing_if = "Option::is_none")]
    pub max_concurrent_connections: Option<serde_json::Value>,
    #[serde(rename = "disableMetricsCollection", default, skip_serializing_if = "Option::is_none")]
    pub disable_metrics_collection: Option<serde_json::Value>,
}
/// Copy-activity sink for SAP Cloud for Customer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapCloudForCustomerSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<sap_cloud_for_customer_sink::WriteBehavior>,
    #[serde(rename = "httpRequestTimeout", default, skip_serializing_if = "Option::is_none")]
    pub http_request_timeout: Option<serde_json::Value>,
}
/// Namespacing module so `WriteBehavior` variants don't collide with the
/// identically-named enums of other sinks in this file.
pub mod sap_cloud_for_customer_sink {
    use super::*;
    /// Write mode; variant names serialize verbatim.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum WriteBehavior {
        Insert,
        Update,
    }
}
/// Copy-activity sink for Azure Queue storage; no sink-specific fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureQueueSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
}
/// How file hierarchy is handled when copying between file-based stores;
/// variant names serialize verbatim.
/// `Copy`, `Eq` and `Hash` are added to the generated derive set — always
/// sound for a unit-variant enum (clippy: `derive_partial_eq_without_eq`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum CopyBehaviorType {
    PreserveHierarchy,
    FlattenHierarchy,
    MergeFiles,
}
/// Copy-activity sink for Azure Table storage (partition/row-key mapping knobs).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureTableSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "azureTableDefaultPartitionKeyValue", default, skip_serializing_if = "Option::is_none")]
    pub azure_table_default_partition_key_value: Option<serde_json::Value>,
    #[serde(rename = "azureTablePartitionKeyName", default, skip_serializing_if = "Option::is_none")]
    pub azure_table_partition_key_name: Option<serde_json::Value>,
    #[serde(rename = "azureTableRowKeyName", default, skip_serializing_if = "Option::is_none")]
    pub azure_table_row_key_name: Option<serde_json::Value>,
    #[serde(rename = "azureTableInsertType", default, skip_serializing_if = "Option::is_none")]
    pub azure_table_insert_type: Option<serde_json::Value>,
}
/// Sink writing Avro files; store and format settings are independent options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvroSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreWriteSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<AvroWriteSettings>,
}
/// Sink writing Parquet files.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParquetSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreWriteSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<ParquetWriteSettings>,
}
/// Sink writing opaque binary files; no format settings by design.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BinarySink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreWriteSettings>,
}
/// Copy-activity sink for Azure Blob storage.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "blobWriterOverwriteFiles", default, skip_serializing_if = "Option::is_none")]
    pub blob_writer_overwrite_files: Option<serde_json::Value>,
    #[serde(rename = "blobWriterDateTimeFormat", default, skip_serializing_if = "Option::is_none")]
    pub blob_writer_date_time_format: Option<serde_json::Value>,
    #[serde(rename = "blobWriterAddHeader", default, skip_serializing_if = "Option::is_none")]
    pub blob_writer_add_header: Option<serde_json::Value>,
    #[serde(rename = "copyBehavior", default, skip_serializing_if = "Option::is_none")]
    pub copy_behavior: Option<serde_json::Value>,
    /// Custom metadata applied to written blobs; empty list is omitted from JSON.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub metadata: Vec<MetadataItem>,
}
/// Copy-activity sink for a file-system destination.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileSystemSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "copyBehavior", default, skip_serializing_if = "Option::is_none")]
    pub copy_behavior: Option<serde_json::Value>,
}
/// Copy-activity sink for a Cosmos DB (SQL API) collection via the DocumentDB surface.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DocumentDbCollectionSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    /// Separator used when flattening nested properties into column names.
    #[serde(rename = "nestingSeparator", default, skip_serializing_if = "Option::is_none")]
    pub nesting_separator: Option<serde_json::Value>,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
}
/// Copy-activity sink for Cosmos DB SQL API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbSqlApiSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
}
/// Generic SQL sink. The same field set recurs verbatim on SqlServerSink,
/// AzureSqlSink and SqlMiSink below — keep the four in sync when editing.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    /// Stored procedure invoked per batch instead of a direct table write.
    #[serde(rename = "sqlWriterStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_stored_procedure_name: Option<serde_json::Value>,
    #[serde(rename = "sqlWriterTableType", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_table_type: Option<serde_json::Value>,
    /// SQL executed before the copy starts.
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
    #[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
    pub stored_procedure_parameters: Option<serde_json::Value>,
    #[serde(rename = "storedProcedureTableTypeParameterName", default, skip_serializing_if = "Option::is_none")]
    pub stored_procedure_table_type_parameter_name: Option<serde_json::Value>,
    #[serde(rename = "tableOption", default, skip_serializing_if = "Option::is_none")]
    pub table_option: Option<serde_json::Value>,
    #[serde(rename = "sqlWriterUseTableLock", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_use_table_lock: Option<serde_json::Value>,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
    #[serde(rename = "upsertSettings", default, skip_serializing_if = "Option::is_none")]
    pub upsert_settings: Option<SqlUpsertSettings>,
}
/// SQL Server sink; field-for-field identical to `SqlSink` (generated per target).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlServerSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "sqlWriterStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_stored_procedure_name: Option<serde_json::Value>,
    #[serde(rename = "sqlWriterTableType", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_table_type: Option<serde_json::Value>,
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
    #[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
    pub stored_procedure_parameters: Option<serde_json::Value>,
    #[serde(rename = "storedProcedureTableTypeParameterName", default, skip_serializing_if = "Option::is_none")]
    pub stored_procedure_table_type_parameter_name: Option<serde_json::Value>,
    #[serde(rename = "tableOption", default, skip_serializing_if = "Option::is_none")]
    pub table_option: Option<serde_json::Value>,
    #[serde(rename = "sqlWriterUseTableLock", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_use_table_lock: Option<serde_json::Value>,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
    #[serde(rename = "upsertSettings", default, skip_serializing_if = "Option::is_none")]
    pub upsert_settings: Option<SqlUpsertSettings>,
}
/// Azure SQL Database sink; field-for-field identical to `SqlSink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "sqlWriterStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_stored_procedure_name: Option<serde_json::Value>,
    #[serde(rename = "sqlWriterTableType", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_table_type: Option<serde_json::Value>,
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
    #[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
    pub stored_procedure_parameters: Option<serde_json::Value>,
    #[serde(rename = "storedProcedureTableTypeParameterName", default, skip_serializing_if = "Option::is_none")]
    pub stored_procedure_table_type_parameter_name: Option<serde_json::Value>,
    #[serde(rename = "tableOption", default, skip_serializing_if = "Option::is_none")]
    pub table_option: Option<serde_json::Value>,
    #[serde(rename = "sqlWriterUseTableLock", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_use_table_lock: Option<serde_json::Value>,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
    #[serde(rename = "upsertSettings", default, skip_serializing_if = "Option::is_none")]
    pub upsert_settings: Option<SqlUpsertSettings>,
}
/// Azure SQL Managed Instance sink; field-for-field identical to `SqlSink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlMiSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "sqlWriterStoredProcedureName", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_stored_procedure_name: Option<serde_json::Value>,
    #[serde(rename = "sqlWriterTableType", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_table_type: Option<serde_json::Value>,
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
    #[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
    pub stored_procedure_parameters: Option<serde_json::Value>,
    #[serde(rename = "storedProcedureTableTypeParameterName", default, skip_serializing_if = "Option::is_none")]
    pub stored_procedure_table_type_parameter_name: Option<serde_json::Value>,
    #[serde(rename = "tableOption", default, skip_serializing_if = "Option::is_none")]
    pub table_option: Option<serde_json::Value>,
    #[serde(rename = "sqlWriterUseTableLock", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_use_table_lock: Option<serde_json::Value>,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
    #[serde(rename = "upsertSettings", default, skip_serializing_if = "Option::is_none")]
    pub upsert_settings: Option<SqlUpsertSettings>,
}
/// Azure Synapse (SQL DW) sink; supports PolyBase or COPY-command bulk loading
/// as alternatives to the default write path.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDwSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
    /// Enables PolyBase loading; details come from `poly_base_settings`.
    #[serde(rename = "allowPolyBase", default, skip_serializing_if = "Option::is_none")]
    pub allow_poly_base: Option<serde_json::Value>,
    #[serde(rename = "polyBaseSettings", default, skip_serializing_if = "Option::is_none")]
    pub poly_base_settings: Option<PolybaseSettings>,
    /// Enables COPY-command loading; details come from `copy_command_settings`.
    #[serde(rename = "allowCopyCommand", default, skip_serializing_if = "Option::is_none")]
    pub allow_copy_command: Option<serde_json::Value>,
    #[serde(rename = "copyCommandSettings", default, skip_serializing_if = "Option::is_none")]
    pub copy_command_settings: Option<DwCopyCommandSettings>,
    #[serde(rename = "tableOption", default, skip_serializing_if = "Option::is_none")]
    pub table_option: Option<serde_json::Value>,
    #[serde(rename = "sqlWriterUseTableLock", default, skip_serializing_if = "Option::is_none")]
    pub sql_writer_use_table_lock: Option<serde_json::Value>,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
    /// Note: uses `SqlDwUpsertSettings`, not the `SqlUpsertSettings` of the other SQL sinks.
    #[serde(rename = "upsertSettings", default, skip_serializing_if = "Option::is_none")]
    pub upsert_settings: Option<SqlDwUpsertSettings>,
}
/// PolyBase reject thresholds and type-default handling.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolybaseSettings {
    #[serde(rename = "rejectType", default, skip_serializing_if = "Option::is_none")]
    pub reject_type: Option<PolybaseSettingsRejectType>,
    #[serde(rename = "rejectValue", default, skip_serializing_if = "Option::is_none")]
    pub reject_value: Option<serde_json::Value>,
    #[serde(rename = "rejectSampleValue", default, skip_serializing_if = "Option::is_none")]
    pub reject_sample_value: Option<serde_json::Value>,
    #[serde(rename = "useTypeDefault", default, skip_serializing_if = "Option::is_none")]
    pub use_type_default: Option<serde_json::Value>,
}
/// Reject-threshold interpretation; NOTE: unlike most enums here, the wire
/// values are lowercase via explicit renames.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PolybaseSettingsRejectType {
    #[serde(rename = "value")]
    Value,
    #[serde(rename = "percentage")]
    Percentage,
}
/// Synapse COPY-command options: per-column defaults plus free-form extras.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DwCopyCommandSettings {
    #[serde(rename = "defaultValues", default, skip_serializing_if = "Vec::is_empty")]
    pub default_values: Vec<DwCopyCommandDefaultValue>,
    #[serde(rename = "additionalOptions", default, skip_serializing_if = "Option::is_none")]
    pub additional_options: Option<serde_json::Value>,
}
/// One column-name/default-value pair for the COPY command.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DwCopyCommandDefaultValue {
    #[serde(rename = "columnName", default, skip_serializing_if = "Option::is_none")]
    pub column_name: Option<serde_json::Value>,
    #[serde(rename = "defaultValue", default, skip_serializing_if = "Option::is_none")]
    pub default_value: Option<serde_json::Value>,
}
/// Upsert options for the generic/SQL Server/Azure SQL/MI sinks.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlUpsertSettings {
    /// Note the wire casing is `useTempDB` (capital DB).
    #[serde(rename = "useTempDB", default, skip_serializing_if = "Option::is_none")]
    pub use_temp_db: Option<serde_json::Value>,
    #[serde(rename = "interimSchemaName", default, skip_serializing_if = "Option::is_none")]
    pub interim_schema_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub keys: Option<serde_json::Value>,
}
/// Upsert options for `SqlDwSink`; same as `SqlUpsertSettings` minus `useTempDB`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlDwUpsertSettings {
    #[serde(rename = "interimSchemaName", default, skip_serializing_if = "Option::is_none")]
    pub interim_schema_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub keys: Option<serde_json::Value>,
}
/// Write behavior for SQL-family sinks; variant names serialize verbatim.
/// `Copy`, `Eq` and `Hash` are added to the generated derive set — always
/// sound for a unit-variant enum (clippy: `derive_partial_eq_without_eq`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum SqlWriteBehaviorEnum {
    Insert,
    Upsert,
    StoredProcedure,
}
/// Write behavior for the Synapse (SQL DW) sink; no stored-procedure mode.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum SqlDwWriteBehaviorEnum {
    Insert,
    Upsert,
}
/// Copy-activity sink for Snowflake, loading via the COPY command.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnowflakeSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
    #[serde(rename = "importSettings", default, skip_serializing_if = "Option::is_none")]
    pub import_settings: Option<SnowflakeImportCopyCommand>,
}
/// Polymorphic base for import settings; `type_` is the discriminator string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImportSettings {
    #[serde(rename = "type")]
    pub type_: String,
}
/// Snowflake COPY-command import options, flattened onto `ImportSettings`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnowflakeImportCopyCommand {
    #[serde(flatten)]
    pub import_settings: ImportSettings,
    #[serde(rename = "additionalCopyOptions", default, skip_serializing_if = "Option::is_none")]
    pub additional_copy_options: Option<serde_json::Value>,
    #[serde(rename = "additionalFormatOptions", default, skip_serializing_if = "Option::is_none")]
    pub additional_format_options: Option<serde_json::Value>,
}
/// Where and how verbosely activity logs are stored; linked service is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LogStorageSettings {
    #[serde(rename = "linkedServiceName")]
    pub linked_service_name: LinkedServiceReference,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<serde_json::Value>,
    #[serde(rename = "logLevel", default, skip_serializing_if = "Option::is_none")]
    pub log_level: Option<serde_json::Value>,
    #[serde(rename = "enableReliableLogging", default, skip_serializing_if = "Option::is_none")]
    pub enable_reliable_logging: Option<serde_json::Value>,
}
/// Logging configuration for a copy activity; the location is mandatory.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LogSettings {
    #[serde(rename = "enableCopyActivityLog", default, skip_serializing_if = "Option::is_none")]
    pub enable_copy_activity_log: Option<serde_json::Value>,
    #[serde(rename = "copyActivityLogSettings", default, skip_serializing_if = "Option::is_none")]
    pub copy_activity_log_settings: Option<CopyActivityLogSettings>,
    #[serde(rename = "logLocationSettings")]
    pub log_location_settings: LogLocationSettings,
}
/// Destination (linked service + optional folder path) for activity logs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LogLocationSettings {
    #[serde(rename = "linkedServiceName")]
    pub linked_service_name: LinkedServiceReference,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<serde_json::Value>,
}
/// Verbosity/reliability options for the copy-activity log.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CopyActivityLogSettings {
    #[serde(rename = "logLevel", default, skip_serializing_if = "Option::is_none")]
    pub log_level: Option<serde_json::Value>,
    #[serde(rename = "enableReliableLogging", default, skip_serializing_if = "Option::is_none")]
    pub enable_reliable_logging: Option<serde_json::Value>,
}
/// Interim staging store used for staged copies; linked service is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StagingSettings {
    #[serde(rename = "linkedServiceName")]
    pub linked_service_name: LinkedServiceReference,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<serde_json::Value>,
    #[serde(rename = "enableCompression", default, skip_serializing_if = "Option::is_none")]
    pub enable_compression: Option<serde_json::Value>,
}
/// Where incompatible rows are redirected instead of failing the copy.
/// NOTE(review): unlike the other settings types, `linked_service_name` here
/// is an untyped `serde_json::Value`, not a `LinkedServiceReference` — this
/// mirrors the service schema; confirm before changing.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RedirectIncompatibleRowSettings {
    #[serde(rename = "linkedServiceName")]
    pub linked_service_name: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<serde_json::Value>,
}
/// Which per-file error conditions are skipped rather than treated as fatal.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkipErrorFile {
    #[serde(rename = "fileMissing", default, skip_serializing_if = "Option::is_none")]
    pub file_missing: Option<serde_json::Value>,
    #[serde(rename = "dataInconsistency", default, skip_serializing_if = "Option::is_none")]
    pub data_inconsistency: Option<serde_json::Value>,
}
/// An extra column (name/value pair) appended to the copied data.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdditionalColumns {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<serde_json::Value>,
}
/// Copy-activity sink for Oracle.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    /// SQL executed before the copy starts.
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
}
/// Copy-activity sink for Azure Data Lake Storage Gen1.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeStoreSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "copyBehavior", default, skip_serializing_if = "Option::is_none")]
    pub copy_behavior: Option<serde_json::Value>,
    #[serde(rename = "enableAdlsSingleFileParallel", default, skip_serializing_if = "Option::is_none")]
    pub enable_adls_single_file_parallel: Option<serde_json::Value>,
}
/// Copy-activity sink for Azure Blob FS (ADLS Gen2).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobFsSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "copyBehavior", default, skip_serializing_if = "Option::is_none")]
    pub copy_behavior: Option<serde_json::Value>,
    /// Custom metadata applied to written files; empty list is omitted from JSON.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub metadata: Vec<MetadataItem>,
}
/// Copy-activity sink for an Azure Cognitive Search index.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSearchIndexSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<azure_search_index_sink::WriteBehavior>,
}
/// Namespacing module for the search-index `WriteBehavior` enum.
pub mod azure_search_index_sink {
    use super::*;
    /// Write mode; variant names serialize verbatim.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum WriteBehavior {
        Merge,
        Upload,
    }
}
/// Copy-activity sink for a generic ODBC destination.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OdbcSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    /// SQL executed before the copy starts.
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
}
/// Copy-activity sink for Informix.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InformixSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
}
/// Copy-activity sink for Microsoft Access.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftAccessSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "preCopyScript", default, skip_serializing_if = "Option::is_none")]
    pub pre_copy_script: Option<serde_json::Value>,
}
/// Write behavior for the Dynamics-family sinks; `Upsert` is the only mode the
/// schema defines, and the variant name serializes verbatim.
/// `Copy`, `Eq` and `Hash` are added to the generated derive set — always
/// sound for a unit-variant enum (clippy: `derive_partial_eq_without_eq`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum DynamicsSinkWriteBehavior {
    Upsert,
}
/// Copy-activity sink for Dynamics; `write_behavior` is required (no Option).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior")]
    pub write_behavior: DynamicsSinkWriteBehavior,
    #[serde(rename = "ignoreNullValues", default, skip_serializing_if = "Option::is_none")]
    pub ignore_null_values: Option<serde_json::Value>,
    #[serde(rename = "alternateKeyName", default, skip_serializing_if = "Option::is_none")]
    pub alternate_key_name: Option<serde_json::Value>,
}
/// Copy-activity sink for Dynamics CRM; identical shape to `DynamicsSink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsCrmSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior")]
    pub write_behavior: DynamicsSinkWriteBehavior,
    #[serde(rename = "ignoreNullValues", default, skip_serializing_if = "Option::is_none")]
    pub ignore_null_values: Option<serde_json::Value>,
    #[serde(rename = "alternateKeyName", default, skip_serializing_if = "Option::is_none")]
    pub alternate_key_name: Option<serde_json::Value>,
}
/// Copy-activity sink for Common Data Service for Apps; identical shape to `DynamicsSink`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommonDataServiceForAppsSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior")]
    pub write_behavior: DynamicsSinkWriteBehavior,
    #[serde(rename = "ignoreNullValues", default, skip_serializing_if = "Option::is_none")]
    pub ignore_null_values: Option<serde_json::Value>,
    #[serde(rename = "alternateKeyName", default, skip_serializing_if = "Option::is_none")]
    pub alternate_key_name: Option<serde_json::Value>,
}
/// Copy-activity sink for Azure Data Explorer (Kusto); either a named ingestion
/// mapping or an inline JSON mapping may be supplied.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataExplorerSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "ingestionMappingName", default, skip_serializing_if = "Option::is_none")]
    pub ingestion_mapping_name: Option<serde_json::Value>,
    #[serde(rename = "ingestionMappingAsJson", default, skip_serializing_if = "Option::is_none")]
    pub ingestion_mapping_as_json: Option<serde_json::Value>,
    #[serde(rename = "flushImmediately", default, skip_serializing_if = "Option::is_none")]
    pub flush_immediately: Option<serde_json::Value>,
}
/// Copy-activity sink for Salesforce; upserts key on `external_id_field_name`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<salesforce_sink::WriteBehavior>,
    #[serde(rename = "externalIdFieldName", default, skip_serializing_if = "Option::is_none")]
    pub external_id_field_name: Option<serde_json::Value>,
    #[serde(rename = "ignoreNullValues", default, skip_serializing_if = "Option::is_none")]
    pub ignore_null_values: Option<serde_json::Value>,
}
/// Namespacing module for the Salesforce `WriteBehavior` enum.
pub mod salesforce_sink {
    use super::*;
    /// Write mode; variant names serialize verbatim.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum WriteBehavior {
        Insert,
        Upsert,
    }
}
/// Copy-activity sink writing to Salesforce Service Cloud; structurally
/// identical to `SalesforceSink` but with its own scoped `WriteBehavior` enum,
/// as emitted per-type by the code generator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceServiceCloudSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<salesforce_service_cloud_sink::WriteBehavior>,
    #[serde(rename = "externalIdFieldName", default, skip_serializing_if = "Option::is_none")]
    pub external_id_field_name: Option<serde_json::Value>,
    #[serde(rename = "ignoreNullValues", default, skip_serializing_if = "Option::is_none")]
    pub ignore_null_values: Option<serde_json::Value>,
}
/// Namespace for `SalesforceServiceCloudSink`-scoped enums.
pub mod salesforce_service_cloud_sink {
    use super::*;
    /// Allowed values of the `writeBehavior` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum WriteBehavior {
        Insert,
        Upsert,
    }
}
/// Copy-activity sink writing to MongoDB Atlas; only adds an optional,
/// untyped `writeBehavior` property on top of the shared `CopySink` fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbAtlasSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
}
/// Copy-activity sink writing to MongoDB (v2 connector); same shape as
/// `MongoDbAtlasSink` — shared `CopySink` fields plus an optional untyped
/// `writeBehavior`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbV2Sink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
}
/// Copy-activity sink writing to Cosmos DB via the MongoDB API; same shape as
/// the other MongoDB-family sinks above.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbMongoDbApiSink {
    #[serde(flatten)]
    pub copy_sink: CopySink,
    #[serde(rename = "writeBehavior", default, skip_serializing_if = "Option::is_none")]
    pub write_behavior: Option<serde_json::Value>,
}
/// Base envelope for copy-activity translators. The `type` discriminator is
/// carried as a plain `String` and the concrete translator (e.g.
/// `TabularTranslator`) flattens this struct into itself.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CopyTranslator {
    #[serde(rename = "type")]
    pub type_: String,
}
/// Column/schema mapping translator for copy activities. Flattens the
/// `CopyTranslator` discriminator; every mapping property is optional and
/// untyped (`serde_json::Value`) except `typeConversionSettings`, which has a
/// dedicated model below.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TabularTranslator {
    #[serde(flatten)]
    pub copy_translator: CopyTranslator,
    #[serde(rename = "columnMappings", default, skip_serializing_if = "Option::is_none")]
    pub column_mappings: Option<serde_json::Value>,
    #[serde(rename = "schemaMapping", default, skip_serializing_if = "Option::is_none")]
    pub schema_mapping: Option<serde_json::Value>,
    #[serde(rename = "collectionReference", default, skip_serializing_if = "Option::is_none")]
    pub collection_reference: Option<serde_json::Value>,
    #[serde(rename = "mapComplexValuesToString", default, skip_serializing_if = "Option::is_none")]
    pub map_complex_values_to_string: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub mappings: Option<serde_json::Value>,
    #[serde(rename = "typeConversion", default, skip_serializing_if = "Option::is_none")]
    pub type_conversion: Option<serde_json::Value>,
    #[serde(rename = "typeConversionSettings", default, skip_serializing_if = "Option::is_none")]
    pub type_conversion_settings: Option<TypeConversionSettings>,
}
/// Type-conversion options referenced by `TabularTranslator`
/// (`typeConversionSettings`). All properties optional; format fields are
/// untyped since the API accepts literals or expressions — TODO confirm.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TypeConversionSettings {
    #[serde(rename = "allowDataTruncation", default, skip_serializing_if = "Option::is_none")]
    pub allow_data_truncation: Option<serde_json::Value>,
    #[serde(rename = "treatBooleanAsNumber", default, skip_serializing_if = "Option::is_none")]
    pub treat_boolean_as_number: Option<serde_json::Value>,
    #[serde(rename = "dateTimeFormat", default, skip_serializing_if = "Option::is_none")]
    pub date_time_format: Option<serde_json::Value>,
    #[serde(rename = "dateTimeOffsetFormat", default, skip_serializing_if = "Option::is_none")]
    pub date_time_offset_format: Option<serde_json::Value>,
    #[serde(rename = "timeSpanFormat", default, skip_serializing_if = "Option::is_none")]
    pub time_span_format: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub culture: Option<serde_json::Value>,
}
/// HDInsight Hive activity: the shared `ExecutionActivity` envelope plus the
/// required `typeProperties` payload below.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightHiveActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: HdInsightHiveActivityTypeProperties,
}
/// `typeProperties` for `HdInsightHiveActivity`. Vec-valued fields serialize
/// as JSON arrays and are dropped entirely when empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightHiveActivityTypeProperties {
    #[serde(rename = "storageLinkedServices", default, skip_serializing_if = "Vec::is_empty")]
    pub storage_linked_services: Vec<LinkedServiceReference>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub arguments: Vec<serde_json::Value>,
    #[serde(rename = "getDebugInfo", default, skip_serializing_if = "Option::is_none")]
    pub get_debug_info: Option<HdInsightActivityDebugInfoOption>,
    #[serde(rename = "scriptPath", default, skip_serializing_if = "Option::is_none")]
    pub script_path: Option<serde_json::Value>,
    #[serde(rename = "scriptLinkedService", default, skip_serializing_if = "Option::is_none")]
    pub script_linked_service: Option<LinkedServiceReference>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub defines: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub variables: Vec<serde_json::Value>,
    // NOTE(review): typed as i64 while most duration-like fields in this file
    // are serde_json::Value — matches the generator's spec-driven output,
    // but verify against the current API definition.
    #[serde(rename = "queryTimeout", default, skip_serializing_if = "Option::is_none")]
    pub query_timeout: Option<i64>,
}
/// Allowed values for the `getDebugInfo` property shared by all HDInsight
/// activity models in this file; serialized as the variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HdInsightActivityDebugInfoOption {
    None,
    Always,
    Failure,
}
/// HDInsight Pig activity: `ExecutionActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightPigActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: HdInsightPigActivityTypeProperties,
}
/// `typeProperties` for `HdInsightPigActivity`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightPigActivityTypeProperties {
    #[serde(rename = "storageLinkedServices", default, skip_serializing_if = "Vec::is_empty")]
    pub storage_linked_services: Vec<LinkedServiceReference>,
    // NOTE(review): `arguments` is Option<Value> here while the sibling Hive/
    // MapReduce models use Vec<Value>; this mirrors a spec-level type change
    // for Pig — confirm against the REST API definition before "fixing".
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub arguments: Option<serde_json::Value>,
    #[serde(rename = "getDebugInfo", default, skip_serializing_if = "Option::is_none")]
    pub get_debug_info: Option<HdInsightActivityDebugInfoOption>,
    #[serde(rename = "scriptPath", default, skip_serializing_if = "Option::is_none")]
    pub script_path: Option<serde_json::Value>,
    #[serde(rename = "scriptLinkedService", default, skip_serializing_if = "Option::is_none")]
    pub script_linked_service: Option<LinkedServiceReference>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub defines: Option<serde_json::Value>,
}
/// HDInsight MapReduce activity: `ExecutionActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightMapReduceActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: HdInsightMapReduceActivityTypeProperties,
}
/// `typeProperties` for `HdInsightMapReduceActivity`. `className` and
/// `jarFilePath` are required (no `skip_serializing_if`); the rest are
/// optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightMapReduceActivityTypeProperties {
    #[serde(rename = "storageLinkedServices", default, skip_serializing_if = "Vec::is_empty")]
    pub storage_linked_services: Vec<LinkedServiceReference>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub arguments: Vec<serde_json::Value>,
    #[serde(rename = "getDebugInfo", default, skip_serializing_if = "Option::is_none")]
    pub get_debug_info: Option<HdInsightActivityDebugInfoOption>,
    #[serde(rename = "className")]
    pub class_name: serde_json::Value,
    #[serde(rename = "jarFilePath")]
    pub jar_file_path: serde_json::Value,
    #[serde(rename = "jarLinkedService", default, skip_serializing_if = "Option::is_none")]
    pub jar_linked_service: Option<LinkedServiceReference>,
    #[serde(rename = "jarLibs", default, skip_serializing_if = "Vec::is_empty")]
    pub jar_libs: Vec<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub defines: Option<serde_json::Value>,
}
/// HDInsight streaming (Hadoop streaming) activity: `ExecutionActivity`
/// envelope plus required `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightStreamingActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: HdInsightStreamingActivityTypeProperties,
}
/// `typeProperties` for `HdInsightStreamingActivity`. `mapper`, `reducer`,
/// `input`, `output` and `filePaths` are required; everything else is
/// optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightStreamingActivityTypeProperties {
    #[serde(rename = "storageLinkedServices", default, skip_serializing_if = "Vec::is_empty")]
    pub storage_linked_services: Vec<LinkedServiceReference>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub arguments: Vec<serde_json::Value>,
    #[serde(rename = "getDebugInfo", default, skip_serializing_if = "Option::is_none")]
    pub get_debug_info: Option<HdInsightActivityDebugInfoOption>,
    pub mapper: serde_json::Value,
    pub reducer: serde_json::Value,
    pub input: serde_json::Value,
    pub output: serde_json::Value,
    #[serde(rename = "filePaths")]
    pub file_paths: Vec<serde_json::Value>,
    #[serde(rename = "fileLinkedService", default, skip_serializing_if = "Option::is_none")]
    pub file_linked_service: Option<LinkedServiceReference>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub combiner: Option<serde_json::Value>,
    #[serde(rename = "commandEnvironment", default, skip_serializing_if = "Vec::is_empty")]
    pub command_environment: Vec<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub defines: Option<serde_json::Value>,
}
/// HDInsight Spark activity: `ExecutionActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightSparkActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: HdInsightSparkActivityTypeProperties,
}
/// `typeProperties` for `HdInsightSparkActivity`. `rootPath` and
/// `entryFilePath` are required; note `className` is a plain `Option<String>`
/// here (no expression support), unlike most string-like fields in this file.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightSparkActivityTypeProperties {
    #[serde(rename = "rootPath")]
    pub root_path: serde_json::Value,
    #[serde(rename = "entryFilePath")]
    pub entry_file_path: serde_json::Value,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub arguments: Vec<serde_json::Value>,
    #[serde(rename = "getDebugInfo", default, skip_serializing_if = "Option::is_none")]
    pub get_debug_info: Option<HdInsightActivityDebugInfoOption>,
    #[serde(rename = "sparkJobLinkedService", default, skip_serializing_if = "Option::is_none")]
    pub spark_job_linked_service: Option<LinkedServiceReference>,
    #[serde(rename = "className", default, skip_serializing_if = "Option::is_none")]
    pub class_name: Option<String>,
    #[serde(rename = "proxyUser", default, skip_serializing_if = "Option::is_none")]
    pub proxy_user: Option<serde_json::Value>,
    #[serde(rename = "sparkConfig", default, skip_serializing_if = "Option::is_none")]
    pub spark_config: Option<serde_json::Value>,
}
/// Execute-SSIS-package activity: `ExecutionActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExecuteSsisPackageActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: ExecuteSsisPackageActivityTypeProperties,
}
/// `typeProperties` for `ExecuteSsisPackageActivity`. `packageLocation` and
/// `connectVia` (integration runtime reference) are required; parameter and
/// connection-manager maps stay untyped JSON objects.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExecuteSsisPackageActivityTypeProperties {
    #[serde(rename = "packageLocation")]
    pub package_location: SsisPackageLocation,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub runtime: Option<serde_json::Value>,
    #[serde(rename = "loggingLevel", default, skip_serializing_if = "Option::is_none")]
    pub logging_level: Option<serde_json::Value>,
    #[serde(rename = "environmentPath", default, skip_serializing_if = "Option::is_none")]
    pub environment_path: Option<serde_json::Value>,
    #[serde(rename = "executionCredential", default, skip_serializing_if = "Option::is_none")]
    pub execution_credential: Option<SsisExecutionCredential>,
    #[serde(rename = "connectVia")]
    pub connect_via: IntegrationRuntimeReference,
    #[serde(rename = "projectParameters", default, skip_serializing_if = "Option::is_none")]
    pub project_parameters: Option<serde_json::Value>,
    #[serde(rename = "packageParameters", default, skip_serializing_if = "Option::is_none")]
    pub package_parameters: Option<serde_json::Value>,
    #[serde(rename = "projectConnectionManagers", default, skip_serializing_if = "Option::is_none")]
    pub project_connection_managers: Option<serde_json::Value>,
    #[serde(rename = "packageConnectionManagers", default, skip_serializing_if = "Option::is_none")]
    pub package_connection_managers: Option<serde_json::Value>,
    #[serde(rename = "propertyOverrides", default, skip_serializing_if = "Option::is_none")]
    pub property_overrides: Option<serde_json::Value>,
    #[serde(rename = "logLocation", default, skip_serializing_if = "Option::is_none")]
    pub log_location: Option<SsisLogLocation>,
}
/// Location descriptor for an SSIS package; every field is optional and the
/// `type` discriminator is the scoped enum below.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisPackageLocation {
    #[serde(rename = "packagePath", default, skip_serializing_if = "Option::is_none")]
    pub package_path: Option<serde_json::Value>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<ssis_package_location::Type>,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SsisPackageLocationTypeProperties>,
}
/// Namespace for `SsisPackageLocation`-scoped enums.
pub mod ssis_package_location {
    use super::*;
    /// Package location kinds; `Ssisdb` keeps the wire spelling `"SSISDB"`
    /// via an explicit rename, the others serialize as the variant name.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        #[serde(rename = "SSISDB")]
        Ssisdb,
        File,
        InlinePackage,
        PackageStore,
    }
}
/// `typeProperties` payload of `SsisPackageLocation`. Secrets use the
/// polymorphic `SecretBase` model; all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisPackageLocationTypeProperties {
    #[serde(rename = "packagePassword", default, skip_serializing_if = "Option::is_none")]
    pub package_password: Option<SecretBase>,
    #[serde(rename = "accessCredential", default, skip_serializing_if = "Option::is_none")]
    pub access_credential: Option<SsisAccessCredential>,
    #[serde(rename = "configurationPath", default, skip_serializing_if = "Option::is_none")]
    pub configuration_path: Option<serde_json::Value>,
    #[serde(rename = "configurationAccessCredential", default, skip_serializing_if = "Option::is_none")]
    pub configuration_access_credential: Option<SsisAccessCredential>,
    #[serde(rename = "packageName", default, skip_serializing_if = "Option::is_none")]
    pub package_name: Option<String>,
    #[serde(rename = "packageContent", default, skip_serializing_if = "Option::is_none")]
    pub package_content: Option<serde_json::Value>,
    #[serde(rename = "packageLastModifiedDate", default, skip_serializing_if = "Option::is_none")]
    pub package_last_modified_date: Option<String>,
    #[serde(rename = "childPackages", default, skip_serializing_if = "Vec::is_empty")]
    pub child_packages: Vec<SsisChildPackage>,
}
/// Empty marker model: the spec defines an SSIS connection manager as a free-
/// form object, so the generator emits a fieldless struct.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisConnectionManager {}
/// Value wrapper for a single SSIS execution parameter; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisExecutionParameter {
    pub value: serde_json::Value,
}
/// A property override applied when executing an SSIS package; `isSensitive`
/// is optional and omitted when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisPropertyOverride {
    pub value: serde_json::Value,
    #[serde(rename = "isSensitive", default, skip_serializing_if = "Option::is_none")]
    pub is_sensitive: Option<bool>,
}
/// Windows credential used to run an SSIS package. All fields are required;
/// the password is the concrete `SecureString` type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisExecutionCredential {
    pub domain: serde_json::Value,
    #[serde(rename = "userName")]
    pub user_name: serde_json::Value,
    pub password: SecureString,
}
/// Credential used to access SSIS package/log locations. Same shape as
/// `SsisExecutionCredential`, but the password is the polymorphic
/// `SecretBase` (e.g. may reference a key-vault secret).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisAccessCredential {
    pub domain: serde_json::Value,
    #[serde(rename = "userName")]
    pub user_name: serde_json::Value,
    pub password: SecretBase,
}
/// Embedded child package for inline SSIS package execution; `packagePath`
/// and `packageContent` are required, the rest optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisChildPackage {
    #[serde(rename = "packagePath")]
    pub package_path: serde_json::Value,
    #[serde(rename = "packageName", default, skip_serializing_if = "Option::is_none")]
    pub package_name: Option<String>,
    #[serde(rename = "packageContent")]
    pub package_content: serde_json::Value,
    #[serde(rename = "packageLastModifiedDate", default, skip_serializing_if = "Option::is_none")]
    pub package_last_modified_date: Option<String>,
}
/// Log location for SSIS package execution; all three fields are required and
/// `type` currently admits only `File`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisLogLocation {
    #[serde(rename = "logPath")]
    pub log_path: serde_json::Value,
    #[serde(rename = "type")]
    pub type_: ssis_log_location::Type,
    #[serde(rename = "typeProperties")]
    pub type_properties: SsisLogLocationTypeProperties,
}
/// Namespace for `SsisLogLocation`-scoped enums.
pub mod ssis_log_location {
    use super::*;
    /// Log location kind; only `"File"` is defined by the spec.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        File,
    }
}
/// `typeProperties` payload of `SsisLogLocation`; both fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SsisLogLocationTypeProperties {
    #[serde(rename = "accessCredential", default, skip_serializing_if = "Option::is_none")]
    pub access_credential: Option<SsisAccessCredential>,
    #[serde(rename = "logRefreshInterval", default, skip_serializing_if = "Option::is_none")]
    pub log_refresh_interval: Option<serde_json::Value>,
}
/// Custom activity: `ExecutionActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: CustomActivityTypeProperties,
}
/// `typeProperties` for `CustomActivity`. Only `command` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomActivityTypeProperties {
    pub command: serde_json::Value,
    #[serde(rename = "resourceLinkedService", default, skip_serializing_if = "Option::is_none")]
    pub resource_linked_service: Option<LinkedServiceReference>,
    #[serde(rename = "folderPath", default, skip_serializing_if = "Option::is_none")]
    pub folder_path: Option<serde_json::Value>,
    #[serde(rename = "referenceObjects", default, skip_serializing_if = "Option::is_none")]
    pub reference_objects: Option<CustomActivityReferenceObject>,
    #[serde(rename = "extendedProperties", default, skip_serializing_if = "Option::is_none")]
    pub extended_properties: Option<serde_json::Value>,
    #[serde(rename = "retentionTimeInDays", default, skip_serializing_if = "Option::is_none")]
    pub retention_time_in_days: Option<serde_json::Value>,
    #[serde(rename = "autoUserSpecification", default, skip_serializing_if = "Option::is_none")]
    pub auto_user_specification: Option<serde_json::Value>,
}
/// Linked-service and dataset references handed to a `CustomActivity`;
/// empty lists are omitted from the serialized JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomActivityReferenceObject {
    #[serde(rename = "linkedServices", default, skip_serializing_if = "Vec::is_empty")]
    pub linked_services: Vec<LinkedServiceReference>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub datasets: Vec<DatasetReference>,
}
/// SQL Server stored-procedure activity: `ExecutionActivity` envelope plus
/// required `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlServerStoredProcedureActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: SqlServerStoredProcedureActivityTypeProperties,
}
/// `typeProperties` for `SqlServerStoredProcedureActivity`; the procedure
/// name is required, its parameter map optional and untyped.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlServerStoredProcedureActivityTypeProperties {
    #[serde(rename = "storedProcedureName")]
    pub stored_procedure_name: serde_json::Value,
    #[serde(rename = "storedProcedureParameters", default, skip_serializing_if = "Option::is_none")]
    pub stored_procedure_parameters: Option<serde_json::Value>,
}
/// Execute-pipeline activity. Unlike the execution activities above, this one
/// flattens `ControlActivity` (it orchestrates rather than executes work).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExecutePipelineActivity {
    #[serde(flatten)]
    pub control_activity: ControlActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: ExecutePipelineActivityTypeProperties,
}
/// `typeProperties` for `ExecutePipelineActivity`; the target pipeline
/// reference is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExecutePipelineActivityTypeProperties {
    pub pipeline: PipelineReference,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<ParameterValueSpecification>,
    #[serde(rename = "waitOnCompletion", default, skip_serializing_if = "Option::is_none")]
    pub wait_on_completion: Option<bool>,
}
/// Delete activity: `ExecutionActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeleteActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: DeleteActivityTypeProperties,
}
/// `typeProperties` for `DeleteActivity`; only the target `dataset` reference
/// is required. `maxConcurrentConnections` is a concrete `i64`, not an
/// expression value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeleteActivityTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recursive: Option<serde_json::Value>,
    #[serde(rename = "maxConcurrentConnections", default, skip_serializing_if = "Option::is_none")]
    pub max_concurrent_connections: Option<i64>,
    #[serde(rename = "enableLogging", default, skip_serializing_if = "Option::is_none")]
    pub enable_logging: Option<serde_json::Value>,
    #[serde(rename = "logStorageSettings", default, skip_serializing_if = "Option::is_none")]
    pub log_storage_settings: Option<LogStorageSettings>,
    pub dataset: DatasetReference,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
}
/// Azure Data Explorer control-command activity: `ExecutionActivity` envelope
/// plus required `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataExplorerCommandActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureDataExplorerCommandActivityTypeProperties,
}
/// `typeProperties` for `AzureDataExplorerCommandActivity`; the command text
/// is required, its timeout optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataExplorerCommandActivityTypeProperties {
    pub command: serde_json::Value,
    #[serde(rename = "commandTimeout", default, skip_serializing_if = "Option::is_none")]
    pub command_timeout: Option<serde_json::Value>,
}
/// Lookup activity: `ExecutionActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LookupActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: LookupActivityTypeProperties,
}
/// `typeProperties` for `LookupActivity`; `source` and `dataset` are
/// required, `firstRowOnly` optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LookupActivityTypeProperties {
    pub source: CopySource,
    pub dataset: DatasetReference,
    #[serde(rename = "firstRowOnly", default, skip_serializing_if = "Option::is_none")]
    pub first_row_only: Option<serde_json::Value>,
}
/// HTTP method accepted by `WebActivity`; renames keep the uppercase wire
/// form (`"GET"`, `"POST"`, …).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum WebActivityMethod {
    #[serde(rename = "GET")]
    Get,
    #[serde(rename = "POST")]
    Post,
    #[serde(rename = "PUT")]
    Put,
    #[serde(rename = "DELETE")]
    Delete,
}
/// Web (HTTP call) activity: `ExecutionActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: WebActivityTypeProperties,
}
/// Authentication options for `WebActivity`. Every field is optional; which
/// combination is meaningful depends on `type` — presumably e.g. pfx vs
/// username/password vs MSI resource — verify against the REST API spec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebActivityAuthentication {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pfx: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<serde_json::Value>,
    #[serde(rename = "userTenant", default, skip_serializing_if = "Option::is_none")]
    pub user_tenant: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// `typeProperties` for `WebActivity`; `method` and `url` are required.
/// Dataset/linked-service lists are dropped from the JSON when empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebActivityTypeProperties {
    pub method: WebActivityMethod,
    pub url: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub headers: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub body: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub authentication: Option<WebActivityAuthentication>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub datasets: Vec<DatasetReference>,
    #[serde(rename = "linkedServices", default, skip_serializing_if = "Vec::is_empty")]
    pub linked_services: Vec<LinkedServiceReference>,
    #[serde(rename = "connectVia", default, skip_serializing_if = "Option::is_none")]
    pub connect_via: Option<IntegrationRuntimeReference>,
}
/// Get-metadata activity: `ExecutionActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetMetadataActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: GetMetadataActivityTypeProperties,
}
/// `typeProperties` for `GetMetadataActivity`; the `dataset` reference is
/// required, `fieldList` is omitted when empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetMetadataActivityTypeProperties {
    pub dataset: DatasetReference,
    #[serde(rename = "fieldList", default, skip_serializing_if = "Vec::is_empty")]
    pub field_list: Vec<serde_json::Value>,
    #[serde(rename = "storeSettings", default, skip_serializing_if = "Option::is_none")]
    pub store_settings: Option<StoreReadSettings>,
    #[serde(rename = "formatSettings", default, skip_serializing_if = "Option::is_none")]
    pub format_settings: Option<FormatReadSettings>,
}
/// If-condition control activity: `ControlActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IfConditionActivity {
    #[serde(flatten)]
    pub control_activity: ControlActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: IfConditionActivityTypeProperties,
}
/// `typeProperties` for `IfConditionActivity`; the boolean `expression` is
/// required, and either branch's activity list may be empty (then omitted).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IfConditionActivityTypeProperties {
    pub expression: Expression,
    #[serde(rename = "ifTrueActivities", default, skip_serializing_if = "Vec::is_empty")]
    pub if_true_activities: Vec<Activity>,
    #[serde(rename = "ifFalseActivities", default, skip_serializing_if = "Vec::is_empty")]
    pub if_false_activities: Vec<Activity>,
}
/// Switch control activity: `ControlActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SwitchActivity {
    #[serde(flatten)]
    pub control_activity: ControlActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: SwitchActivityTypeProperties,
}
/// `typeProperties` for `SwitchActivity`; the switched-on `Expression` is
/// required, `cases` and `defaultActivities` are omitted when empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SwitchActivityTypeProperties {
    pub on: Expression,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub cases: Vec<SwitchCase>,
    #[serde(rename = "defaultActivities", default, skip_serializing_if = "Vec::is_empty")]
    pub default_activities: Vec<Activity>,
}
/// One branch of a `SwitchActivity`: the case's string value plus the
/// activities to run when it matches.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SwitchCase {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub activities: Vec<Activity>,
}
/// For-each control activity: `ControlActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ForEachActivity {
    #[serde(flatten)]
    pub control_activity: ControlActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: ForEachActivityTypeProperties,
}
/// `typeProperties` for `ForEachActivity`; `items` (the collection
/// expression) and `activities` are required — note `activities` is always
/// serialized, even when empty, unlike other activity lists in this file.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ForEachActivityTypeProperties {
    #[serde(rename = "isSequential", default, skip_serializing_if = "Option::is_none")]
    pub is_sequential: Option<bool>,
    #[serde(rename = "batchCount", default, skip_serializing_if = "Option::is_none")]
    pub batch_count: Option<i64>,
    pub items: Expression,
    pub activities: Vec<Activity>,
}
/// Azure ML batch-execution activity: `ExecutionActivity` envelope plus
/// required `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlBatchExecutionActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureMlBatchExecutionActivityTypeProperties,
}
/// `typeProperties` for `AzureMlBatchExecutionActivity`; all three maps are
/// optional and untyped.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlBatchExecutionActivityTypeProperties {
    #[serde(rename = "globalParameters", default, skip_serializing_if = "Option::is_none")]
    pub global_parameters: Option<serde_json::Value>,
    #[serde(rename = "webServiceOutputs", default, skip_serializing_if = "Option::is_none")]
    pub web_service_outputs: Option<serde_json::Value>,
    #[serde(rename = "webServiceInputs", default, skip_serializing_if = "Option::is_none")]
    pub web_service_inputs: Option<serde_json::Value>,
}
/// File reference used by Azure ML web-service inputs/outputs: a path plus
/// the linked service that hosts it; both required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlWebServiceFile {
    #[serde(rename = "filePath")]
    pub file_path: serde_json::Value,
    #[serde(rename = "linkedServiceName")]
    pub linked_service_name: LinkedServiceReference,
}
/// Azure ML update-resource activity: `ExecutionActivity` envelope plus
/// required `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlUpdateResourceActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureMlUpdateResourceActivityTypeProperties,
}
/// `typeProperties` for `AzureMlUpdateResourceActivity`; all three fields
/// are required (no `skip_serializing_if`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlUpdateResourceActivityTypeProperties {
    #[serde(rename = "trainedModelName")]
    pub trained_model_name: serde_json::Value,
    #[serde(rename = "trainedModelLinkedServiceName")]
    pub trained_model_linked_service_name: LinkedServiceReference,
    #[serde(rename = "trainedModelFilePath")]
    pub trained_model_file_path: serde_json::Value,
}
/// Azure ML execute-pipeline activity: `ExecutionActivity` envelope plus
/// required `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlExecutePipelineActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureMlExecutePipelineActivityTypeProperties,
}
/// `typeProperties` for `AzureMlExecutePipelineActivity`; every field is
/// optional and untyped (literal-or-expression values).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlExecutePipelineActivityTypeProperties {
    #[serde(rename = "mlPipelineId", default, skip_serializing_if = "Option::is_none")]
    pub ml_pipeline_id: Option<serde_json::Value>,
    #[serde(rename = "mlPipelineEndpointId", default, skip_serializing_if = "Option::is_none")]
    pub ml_pipeline_endpoint_id: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<serde_json::Value>,
    #[serde(rename = "experimentName", default, skip_serializing_if = "Option::is_none")]
    pub experiment_name: Option<serde_json::Value>,
    #[serde(rename = "mlPipelineParameters", default, skip_serializing_if = "Option::is_none")]
    pub ml_pipeline_parameters: Option<serde_json::Value>,
    #[serde(rename = "dataPathAssignments", default, skip_serializing_if = "Option::is_none")]
    pub data_path_assignments: Option<serde_json::Value>,
    #[serde(rename = "mlParentRunId", default, skip_serializing_if = "Option::is_none")]
    pub ml_parent_run_id: Option<serde_json::Value>,
    #[serde(rename = "continueOnStepFailure", default, skip_serializing_if = "Option::is_none")]
    pub continue_on_step_failure: Option<serde_json::Value>,
}
/// Empty marker model (free-form object in the spec).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlPipelineParameters {}
/// Empty marker model (free-form object in the spec).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataPathAssignments {}
/// Data Lake Analytics U-SQL activity: `ExecutionActivity` envelope plus
/// required `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataLakeAnalyticsUsqlActivity {
    #[serde(flatten)]
    pub execution_activity: ExecutionActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: DataLakeAnalyticsUsqlActivityTypeProperties,
}
/// `typeProperties` for `DataLakeAnalyticsUsqlActivity`; `scriptPath` and
/// `scriptLinkedService` are required, the rest optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataLakeAnalyticsUsqlActivityTypeProperties {
    #[serde(rename = "scriptPath")]
    pub script_path: serde_json::Value,
    #[serde(rename = "scriptLinkedService")]
    pub script_linked_service: LinkedServiceReference,
    #[serde(rename = "degreeOfParallelism", default, skip_serializing_if = "Option::is_none")]
    pub degree_of_parallelism: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub priority: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<serde_json::Value>,
    #[serde(rename = "runtimeVersion", default, skip_serializing_if = "Option::is_none")]
    pub runtime_version: Option<serde_json::Value>,
    #[serde(rename = "compilationMode", default, skip_serializing_if = "Option::is_none")]
    pub compilation_mode: Option<serde_json::Value>,
}
/// Wait control activity: `ControlActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WaitActivity {
    #[serde(flatten)]
    pub control_activity: ControlActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: WaitActivityTypeProperties,
}
/// `typeProperties` for `WaitActivity`; `waitTimeInSeconds` is required and
/// untyped (literal or expression).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WaitActivityTypeProperties {
    #[serde(rename = "waitTimeInSeconds")]
    pub wait_time_in_seconds: serde_json::Value,
}
/// Until control activity: `ControlActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UntilActivity {
    #[serde(flatten)]
    pub control_activity: ControlActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: UntilActivityTypeProperties,
}
/// `typeProperties` for `UntilActivity`; `expression` and `activities` are
/// required (the activity list is always serialized), `timeout` optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UntilActivityTypeProperties {
    pub expression: Expression,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timeout: Option<serde_json::Value>,
    pub activities: Vec<Activity>,
}
/// Validation control activity: `ControlActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ValidationActivity {
    #[serde(flatten)]
    pub control_activity: ControlActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: ValidationActivityTypeProperties,
}
/// `typeProperties` for `ValidationActivity`; only the `dataset` reference is
/// required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ValidationActivityTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timeout: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sleep: Option<serde_json::Value>,
    #[serde(rename = "minimumSize", default, skip_serializing_if = "Option::is_none")]
    pub minimum_size: Option<serde_json::Value>,
    #[serde(rename = "childItems", default, skip_serializing_if = "Option::is_none")]
    pub child_items: Option<serde_json::Value>,
    pub dataset: DatasetReference,
}
/// Filter control activity: `ControlActivity` envelope plus required
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FilterActivity {
    #[serde(flatten)]
    pub control_activity: ControlActivity,
    #[serde(rename = "typeProperties")]
    pub type_properties: FilterActivityTypeProperties,
}
/// `typeProperties` for `FilterActivity`; both the input `items` and the
/// `condition` expression are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FilterActivityTypeProperties {
    pub items: Expression,
    pub condition: Expression,
}
/// Databricks notebook activity model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksNotebookActivity {
/// Common execution-activity fields, flattened into this JSON object.
#[serde(flatten)]
pub execution_activity: ExecutionActivity,
#[serde(rename = "typeProperties")]
pub type_properties: DatabricksNotebookActivityTypeProperties,
}
/// Type properties for [`DatabricksNotebookActivity`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksNotebookActivityTypeProperties {
/// Path of the notebook to run (required; free-form JSON value).
#[serde(rename = "notebookPath")]
pub notebook_path: serde_json::Value,
/// Optional parameters passed to the notebook.
#[serde(rename = "baseParameters", default, skip_serializing_if = "Option::is_none")]
pub base_parameters: Option<serde_json::Value>,
/// Libraries to attach; empty list is omitted from JSON.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub libraries: Vec<serde_json::Value>,
}
/// Databricks Spark JAR activity model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksSparkJarActivity {
#[serde(flatten)]
pub execution_activity: ExecutionActivity,
#[serde(rename = "typeProperties")]
pub type_properties: DatabricksSparkJarActivityTypeProperties,
}
/// Type properties for [`DatabricksSparkJarActivity`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksSparkJarActivityTypeProperties {
/// Fully-qualified main class name (required).
#[serde(rename = "mainClassName")]
pub main_class_name: serde_json::Value,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub parameters: Vec<serde_json::Value>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub libraries: Vec<serde_json::Value>,
}
/// Databricks Spark Python activity model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksSparkPythonActivity {
#[serde(flatten)]
pub execution_activity: ExecutionActivity,
#[serde(rename = "typeProperties")]
pub type_properties: DatabricksSparkPythonActivityTypeProperties,
}
/// Type properties for [`DatabricksSparkPythonActivity`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksSparkPythonActivityTypeProperties {
/// Path of the Python file to execute (required).
#[serde(rename = "pythonFile")]
pub python_file: serde_json::Value,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub parameters: Vec<serde_json::Value>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub libraries: Vec<serde_json::Value>,
}
/// Set-variable activity: assigns a value to a pipeline variable.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SetVariableActivity {
#[serde(flatten)]
pub control_activity: ControlActivity,
#[serde(rename = "typeProperties")]
pub type_properties: SetVariableActivityTypeProperties,
}
/// Type properties for [`SetVariableActivity`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SetVariableActivityTypeProperties {
/// Name of the variable to set; optional in this model.
#[serde(rename = "variableName", default, skip_serializing_if = "Option::is_none")]
pub variable_name: Option<String>,
/// Value to assign (free-form JSON).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<serde_json::Value>,
}
/// Append-variable activity: appends a value to an array pipeline variable.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AppendVariableActivity {
#[serde(flatten)]
pub control_activity: ControlActivity,
#[serde(rename = "typeProperties")]
pub type_properties: AppendVariableActivityTypeProperties,
}
/// Type properties for [`AppendVariableActivity`]; mirrors the set-variable shape.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AppendVariableActivityTypeProperties {
#[serde(rename = "variableName", default, skip_serializing_if = "Option::is_none")]
pub variable_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<serde_json::Value>,
}
/// HTTP method used when invoking an Azure Function; serialized as the
/// upper-case method name on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AzureFunctionActivityMethod {
#[serde(rename = "GET")]
Get,
#[serde(rename = "POST")]
Post,
#[serde(rename = "PUT")]
Put,
#[serde(rename = "DELETE")]
Delete,
#[serde(rename = "OPTIONS")]
Options,
#[serde(rename = "HEAD")]
Head,
#[serde(rename = "TRACE")]
Trace,
}
/// Azure Function activity model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFunctionActivity {
#[serde(flatten)]
pub execution_activity: ExecutionActivity,
#[serde(rename = "typeProperties")]
pub type_properties: AzureFunctionActivityTypeProperties,
}
/// Type properties for [`AzureFunctionActivity`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFunctionActivityTypeProperties {
/// HTTP method for the function call (required).
pub method: AzureFunctionActivityMethod,
/// Name of the function to invoke (required).
#[serde(rename = "functionName")]
pub function_name: serde_json::Value,
/// Optional request headers.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub headers: Option<serde_json::Value>,
/// Optional request body.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub body: Option<serde_json::Value>,
}
/// WebHook activity model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebHookActivity {
#[serde(flatten)]
pub control_activity: ControlActivity,
#[serde(rename = "typeProperties")]
pub type_properties: WebHookActivityTypeProperties,
}
/// HTTP method for the webhook call; only `POST` is modeled here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum WebHookActivityMethod {
#[serde(rename = "POST")]
Post,
}
/// Type properties for [`WebHookActivity`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebHookActivityTypeProperties {
pub method: WebHookActivityMethod,
/// Target endpoint URL (required).
pub url: serde_json::Value,
/// Optional timeout; note this one is a `String`, unlike the JSON-valued
/// timeouts on other activities in this file.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub timeout: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub headers: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub body: Option<serde_json::Value>,
/// Optional authentication settings for the call.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub authentication: Option<WebActivityAuthentication>,
#[serde(rename = "reportStatusOnCallBack", default, skip_serializing_if = "Option::is_none")]
pub report_status_on_call_back: Option<serde_json::Value>,
}
/// Execute-data-flow activity model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExecuteDataFlowActivity {
#[serde(flatten)]
pub execution_activity: ExecutionActivity,
#[serde(rename = "typeProperties")]
pub type_properties: ExecuteDataFlowActivityTypeProperties,
}
/// Execute-wrangling-dataflow (Power Query) activity model. Unlike its
/// siblings it flattens the base [`Activity`] directly and carries its own
/// optional `policy`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExecuteWranglingDataflowActivity {
#[serde(flatten)]
pub activity: Activity,
#[serde(rename = "typeProperties")]
pub type_properties: ExecutePowerQueryActivityTypeProperties,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub policy: Option<ActivityPolicy>,
}
/// Type properties for [`ExecuteDataFlowActivity`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExecuteDataFlowActivityTypeProperties {
/// Reference to the data flow to run (required).
#[serde(rename = "dataFlow")]
pub data_flow: DataFlowReference,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub staging: Option<DataFlowStagingInfo>,
#[serde(rename = "integrationRuntime", default, skip_serializing_if = "Option::is_none")]
pub integration_runtime: Option<IntegrationRuntimeReference>,
/// Optional compute sizing for the run.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub compute: Option<execute_data_flow_activity_type_properties::Compute>,
#[serde(rename = "traceLevel", default, skip_serializing_if = "Option::is_none")]
pub trace_level: Option<serde_json::Value>,
#[serde(rename = "continueOnError", default, skip_serializing_if = "Option::is_none")]
pub continue_on_error: Option<serde_json::Value>,
#[serde(rename = "runConcurrently", default, skip_serializing_if = "Option::is_none")]
pub run_concurrently: Option<serde_json::Value>,
}
/// Nested types scoped to [`ExecuteDataFlowActivityTypeProperties`].
pub mod execute_data_flow_activity_type_properties {
use super::*;
/// Compute configuration for a data-flow run.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Compute {
#[serde(rename = "computeType", default, skip_serializing_if = "Option::is_none")]
pub compute_type: Option<serde_json::Value>,
#[serde(rename = "coreCount", default, skip_serializing_if = "Option::is_none")]
pub core_count: Option<serde_json::Value>,
}
}
/// Type properties for [`ExecuteWranglingDataflowActivity`]; extends the
/// execute-data-flow properties with Power Query sink mappings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExecutePowerQueryActivityTypeProperties {
#[serde(flatten)]
pub execute_data_flow_activity_type_properties: ExecuteDataFlowActivityTypeProperties,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sinks: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub queries: Vec<PowerQuerySinkMapping>,
}
/// Maps one Power Query query to the sinks its output is written to.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PowerQuerySinkMapping {
#[serde(rename = "queryName", default, skip_serializing_if = "Option::is_none")]
pub query_name: Option<String>,
#[serde(rename = "dataflowSinks", default, skip_serializing_if = "Vec::is_empty")]
pub dataflow_sinks: Vec<PowerQuerySink>,
}
/// Copy source reading from a SharePoint Online list.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharePointOnlineListSource {
/// Common copy-source fields, flattened into this JSON object.
#[serde(flatten)]
pub copy_source: CopySource,
/// Optional query to filter the source rows.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
/// Optional per-request HTTP timeout.
#[serde(rename = "httpRequestTimeout", default, skip_serializing_if = "Option::is_none")]
pub http_request_timeout: Option<serde_json::Value>,
}
/// Read-partitioning strategies per source type. Variants serialize as their
/// PascalCase names (no `#[serde(rename)]` needed).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SqlPartitionOption {
None,
PhysicalPartitionsOfTable,
DynamicRange,
}
/// Partition options for SAP HANA sources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SapHanaPartitionOption {
None,
PhysicalPartitionsOfTable,
SapHanaDynamicRange,
}
/// Partition options for SAP table sources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SapTablePartitionOption {
None,
PartitionOnInt,
PartitionOnCalendarYear,
PartitionOnCalendarMonth,
PartitionOnCalendarDate,
PartitionOnTime,
}
/// Partition options for Oracle sources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OraclePartitionOption {
None,
PhysicalPartitionsOfTable,
DynamicRange,
}
/// Partition options for Teradata sources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum TeradataPartitionOption {
None,
Hash,
DynamicRange,
}
/// Partition options for Netezza sources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum NetezzaPartitionOption {
None,
DataSlice,
DynamicRange,
}
/// A single name/value metadata entry; both sides are free-form JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetadataItem {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<serde_json::Value>,
}
/// Base data-flow resource; concrete kinds flatten this struct and are
/// discriminated by the `type` string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlow {
/// Discriminator string (JSON key `type`).
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub annotations: Vec<serde_json::Value>,
/// Optional display folder in the authoring UI.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub folder: Option<data_flow::Folder>,
}
/// Nested types scoped to [`DataFlow`].
pub mod data_flow {
use super::*;
/// Folder grouping for a data flow.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Folder {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
}
/// Mapping data flow: base [`DataFlow`] plus mapping-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MappingDataFlow {
#[serde(flatten)]
pub data_flow: DataFlow,
#[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
pub type_properties: Option<MappingDataFlowTypeProperties>,
}
/// Flowlet: a reusable data-flow fragment.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Flowlet {
#[serde(flatten)]
pub data_flow: DataFlow,
#[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
pub type_properties: Option<FlowletTypeProperties>,
}
/// Type properties for [`MappingDataFlow`]: sources, sinks, transformations
/// and the script in either single-string or line-split form.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MappingDataFlowTypeProperties {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub sources: Vec<DataFlowSource>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub sinks: Vec<DataFlowSink>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub transformations: Vec<Transformation>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub script: Option<String>,
#[serde(rename = "scriptLines", default, skip_serializing_if = "Vec::is_empty")]
pub script_lines: Vec<String>,
}
/// Wrangling (Power Query) data flow.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WranglingDataFlow {
#[serde(flatten)]
pub data_flow: DataFlow,
#[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
pub type_properties: Option<PowerQueryTypeProperties>,
}
/// Type properties for [`WranglingDataFlow`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PowerQueryTypeProperties {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub sources: Vec<PowerQuerySource>,
/// Power Query mashup script.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub script: Option<String>,
#[serde(rename = "documentLocale", default, skip_serializing_if = "Option::is_none")]
pub document_locale: Option<String>,
}
/// Type properties for [`Flowlet`]; same shape as the mapping data flow plus
/// an `additionalProperties` escape hatch.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FlowletTypeProperties {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub sources: Vec<DataFlowSource>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub sinks: Vec<DataFlowSink>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub transformations: Vec<Transformation>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub script: Option<String>,
#[serde(rename = "scriptLines", default, skip_serializing_if = "Vec::is_empty")]
pub script_lines: Vec<String>,
#[serde(rename = "additionalProperties", default, skip_serializing_if = "Option::is_none")]
pub additional_properties: Option<serde_json::Value>,
}
/// A named transformation step within a data flow.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Transformation {
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// Optional flowlet implementing this transformation.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub flowlet: Option<DataFlowReference>,
}
/// Source endpoint of a data flow: a [`Transformation`] bound to a dataset or
/// linked service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowSource {
#[serde(flatten)]
pub transformation: Transformation,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub dataset: Option<DatasetReference>,
#[serde(rename = "linkedService", default, skip_serializing_if = "Option::is_none")]
pub linked_service: Option<LinkedServiceReference>,
/// Linked service used to resolve the schema, when different from the data one.
#[serde(rename = "schemaLinkedService", default, skip_serializing_if = "Option::is_none")]
pub schema_linked_service: Option<LinkedServiceReference>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub flowlet: Option<DataFlowReference>,
}
/// Sink endpoint of a data flow; mirrors [`DataFlowSource`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFlowSink {
#[serde(flatten)]
pub transformation: Transformation,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub dataset: Option<DatasetReference>,
#[serde(rename = "linkedService", default, skip_serializing_if = "Option::is_none")]
pub linked_service: Option<LinkedServiceReference>,
#[serde(rename = "schemaLinkedService", default, skip_serializing_if = "Option::is_none")]
pub schema_linked_service: Option<LinkedServiceReference>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub flowlet: Option<DataFlowReference>,
}
/// Power Query source: a [`DataFlowSource`] plus its query script.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PowerQuerySource {
#[serde(flatten)]
pub data_flow_source: DataFlowSource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub script: Option<String>,
}
/// Power Query sink: a [`DataFlowSink`] plus its query script.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PowerQuerySink {
#[serde(flatten)]
pub data_flow_sink: DataFlowSink,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub script: Option<String>,
}
/// Base trigger resource; concrete trigger kinds flatten this struct and are
/// discriminated by the required `type` string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Trigger {
#[serde(rename = "type")]
pub type_: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// Current runtime state; read-only on the service side — TODO confirm.
#[serde(rename = "runtimeState", default, skip_serializing_if = "Option::is_none")]
pub runtime_state: Option<TriggerRuntimeState>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub annotations: Vec<serde_json::Value>,
}
/// Runtime state of a trigger; serializes as the PascalCase variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum TriggerRuntimeState {
Started,
Stopped,
Disabled,
}
/// Trigger that can start multiple pipelines.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MultiplePipelineTrigger {
#[serde(flatten)]
pub trigger: Trigger,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub pipelines: Vec<TriggerPipelineReference>,
}
/// Schedule trigger: fires pipelines on a recurrence schedule.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduleTrigger {
#[serde(flatten)]
pub multiple_pipeline_trigger: MultiplePipelineTrigger,
#[serde(rename = "typeProperties")]
pub type_properties: schedule_trigger::TypeProperties,
}
/// Nested types scoped to [`ScheduleTrigger`].
pub mod schedule_trigger {
use super::*;
/// Type properties: the recurrence definition (required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TypeProperties {
pub recurrence: ScheduleTriggerRecurrence,
}
}
/// Recurrence definition for a schedule trigger; every field is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduleTriggerRecurrence {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub frequency: Option<RecurrenceFrequency>,
/// Interval between occurrences, in units of `frequency`.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub interval: Option<i32>,
/// Timestamps kept as raw strings here — presumably ISO-8601; confirm
/// against the service spec before parsing.
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<String>,
#[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
pub end_time: Option<String>,
#[serde(rename = "timeZone", default, skip_serializing_if = "Option::is_none")]
pub time_zone: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub schedule: Option<RecurrenceSchedule>,
}
/// Recurrence frequency unit; serializes as the PascalCase variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RecurrenceFrequency {
NotSpecified,
Minute,
Hour,
Day,
Week,
Month,
Year,
}
/// Fine-grained schedule: which minutes/hours/days the trigger fires on.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecurrenceSchedule {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub minutes: Vec<i32>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub hours: Vec<i32>,
#[serde(rename = "weekDays", default, skip_serializing_if = "Vec::is_empty")]
pub week_days: Vec<String>,
#[serde(rename = "monthDays", default, skip_serializing_if = "Vec::is_empty")]
pub month_days: Vec<i32>,
#[serde(rename = "monthlyOccurrences", default, skip_serializing_if = "Vec::is_empty")]
pub monthly_occurrences: Vec<RecurrenceScheduleOccurrence>,
}
/// One monthly occurrence: e.g. the Nth given weekday of the month.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecurrenceScheduleOccurrence {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub day: Option<DayOfWeek>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub occurrence: Option<i32>,
}
/// Day of week; serializes as the PascalCase variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DayOfWeek {
Sunday,
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
}
/// Blob trigger: fires pipelines on changes under a blob folder path.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobTrigger {
#[serde(flatten)]
pub multiple_pipeline_trigger: MultiplePipelineTrigger,
#[serde(rename = "typeProperties")]
pub type_properties: blob_trigger::TypeProperties,
}
/// Nested types scoped to [`BlobTrigger`].
pub mod blob_trigger {
use super::*;
/// Type properties; all three fields are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TypeProperties {
#[serde(rename = "folderPath")]
pub folder_path: String,
/// Maximum number of parallel pipeline runs started by this trigger.
#[serde(rename = "maxConcurrency")]
pub max_concurrency: i64,
/// Storage account linked service the trigger watches.
#[serde(rename = "linkedService")]
pub linked_service: LinkedServiceReference,
}
}
/// Blob-events trigger: fires on Event Grid blob created/deleted events.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobEventsTrigger {
#[serde(flatten)]
pub multiple_pipeline_trigger: MultiplePipelineTrigger,
#[serde(rename = "typeProperties")]
pub type_properties: blob_events_trigger::TypeProperties,
}
/// Nested types scoped to [`BlobEventsTrigger`].
pub mod blob_events_trigger {
use super::*;
/// Type properties; `events` and `scope` are required, path filters optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TypeProperties {
#[serde(rename = "blobPathBeginsWith", default, skip_serializing_if = "Option::is_none")]
pub blob_path_begins_with: Option<String>,
#[serde(rename = "blobPathEndsWith", default, skip_serializing_if = "Option::is_none")]
pub blob_path_ends_with: Option<String>,
#[serde(rename = "ignoreEmptyBlobs", default, skip_serializing_if = "Option::is_none")]
pub ignore_empty_blobs: Option<bool>,
pub events: BlobEventTypes,
/// Resource scope the subscription is created on (e.g. a storage account id
/// — NOTE(review): exact format not visible here).
pub scope: String,
}
}
/// Event type names for a blob-events trigger.
pub type BlobEventTypes = Vec<String>;
/// Custom-events trigger: fires on custom Event Grid topic events.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomEventsTrigger {
#[serde(flatten)]
pub multiple_pipeline_trigger: MultiplePipelineTrigger,
#[serde(rename = "typeProperties")]
pub type_properties: custom_events_trigger::TypeProperties,
}
/// Nested types scoped to [`CustomEventsTrigger`].
pub mod custom_events_trigger {
use super::*;
/// Type properties; `events` and `scope` are required, subject filters optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TypeProperties {
#[serde(rename = "subjectBeginsWith", default, skip_serializing_if = "Option::is_none")]
pub subject_begins_with: Option<String>,
#[serde(rename = "subjectEndsWith", default, skip_serializing_if = "Option::is_none")]
pub subject_ends_with: Option<String>,
pub events: Vec<serde_json::Value>,
pub scope: String,
}
}
/// Tumbling-window trigger: fires a single pipeline per fixed-size time window.
/// Note it carries a single `pipeline`, unlike [`MultiplePipelineTrigger`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TumblingWindowTrigger {
#[serde(flatten)]
pub trigger: Trigger,
pub pipeline: TriggerPipelineReference,
#[serde(rename = "typeProperties")]
pub type_properties: tumbling_window_trigger::TypeProperties,
}
/// Nested types scoped to [`TumblingWindowTrigger`].
pub mod tumbling_window_trigger {
use super::*;
/// Type properties; window size (`frequency` × `interval`) and `startTime`
/// are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TypeProperties {
pub frequency: TumblingWindowFrequency,
pub interval: i32,
#[serde(rename = "startTime")]
pub start_time: String,
#[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
pub end_time: Option<String>,
/// Optional delay before a window's run starts.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub delay: Option<serde_json::Value>,
#[serde(rename = "maxConcurrency")]
pub max_concurrency: i64,
#[serde(rename = "retryPolicy", default, skip_serializing_if = "Option::is_none")]
pub retry_policy: Option<RetryPolicy>,
/// Other triggers/windows this trigger depends on.
#[serde(rename = "dependsOn", default, skip_serializing_if = "Vec::is_empty")]
pub depends_on: Vec<DependencyReference>,
}
}
/// Window-size unit for a tumbling-window trigger (narrower than
/// [`RecurrenceFrequency`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum TumblingWindowFrequency {
Minute,
Hour,
Month,
}
/// Retry policy for tumbling-window runs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RetryPolicy {
/// Retry count; free-form JSON value (may be an expression — TODO confirm).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<serde_json::Value>,
#[serde(rename = "intervalInSeconds", default, skip_serializing_if = "Option::is_none")]
pub interval_in_seconds: Option<i64>,
}
/// Reference to a trigger by name; the `type` field is a fixed discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerReference {
#[serde(rename = "type")]
pub type_: trigger_reference::Type,
#[serde(rename = "referenceName")]
pub reference_name: String,
}
/// Nested types scoped to [`TriggerReference`].
pub mod trigger_reference {
use super::*;
/// Single-variant discriminator: always serializes as `"TriggerReference"`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
TriggerReference,
}
}
/// Base dependency reference; concrete kinds flatten this and are
/// discriminated by the `type` string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DependencyReference {
#[serde(rename = "type")]
pub type_: String,
}
/// Dependency on another trigger.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TriggerDependencyReference {
#[serde(flatten)]
pub dependency_reference: DependencyReference,
#[serde(rename = "referenceTrigger")]
pub reference_trigger: TriggerReference,
}
/// Dependency on another tumbling-window trigger, with optional window
/// offset/size (kept as raw duration strings).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TumblingWindowTriggerDependencyReference {
#[serde(flatten)]
pub trigger_dependency_reference: TriggerDependencyReference,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub offset: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub size: Option<String>,
}
/// Self-dependency of a tumbling-window trigger on its own earlier windows;
/// here `offset` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SelfDependencyTumblingWindowTriggerReference {
#[serde(flatten)]
pub dependency_reference: DependencyReference,
pub offset: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub size: Option<String>,
}
/// Rerun trigger: re-executes a parent tumbling-window trigger over a
/// requested time range.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RerunTumblingWindowTrigger {
#[serde(flatten)]
pub trigger: Trigger,
#[serde(rename = "typeProperties")]
pub type_properties: rerun_tumbling_window_trigger::TypeProperties,
}
/// Nested types scoped to [`RerunTumblingWindowTrigger`].
pub mod rerun_tumbling_window_trigger {
use super::*;
/// Type properties; all fields are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TypeProperties {
/// The trigger being rerun; free-form JSON here rather than a typed
/// reference — NOTE(review): shape not constrained by this model.
#[serde(rename = "parentTrigger")]
pub parent_trigger: serde_json::Value,
#[serde(rename = "requestedStartTime")]
pub requested_start_time: String,
#[serde(rename = "requestedEndTime")]
pub requested_end_time: String,
#[serde(rename = "rerunConcurrency")]
pub rerun_concurrency: i64,
}
}
/// Chaining trigger: runs its pipeline after upstream pipeline runs sharing a
/// run dimension complete.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ChainingTrigger {
#[serde(flatten)]
pub trigger: Trigger,
pub pipeline: TriggerPipelineReference,
#[serde(rename = "typeProperties")]
pub type_properties: chaining_trigger::TypeProperties,
}
/// Nested types scoped to [`ChainingTrigger`].
pub mod chaining_trigger {
use super::*;
/// Type properties; both fields are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TypeProperties {
/// Upstream pipelines this trigger waits on.
#[serde(rename = "dependsOn")]
pub depends_on: Vec<PipelineReference>,
#[serde(rename = "runDimension")]
pub run_dimension: String,
}
}
/// Base type shared by all concrete dataset models below; the concrete
/// variants `#[serde(flatten)]` this struct and add their own `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Dataset {
    /// Type discriminator; serialized as "type".
    #[serde(rename = "type")]
    pub type_: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Free-form JSON describing the dataset structure; not typed here.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub structure: Option<serde_json::Value>,
    /// Free-form JSON describing the physical schema; not typed here.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    /// Required reference to the linked service backing this dataset.
    #[serde(rename = "linkedServiceName")]
    pub linked_service_name: LinkedServiceReference,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<ParameterDefinitionSpecification>,
    /// Arbitrary annotation values; omitted from JSON when empty.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub annotations: Vec<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub folder: Option<dataset::Folder>,
}
/// Companion module holding nested types for `Dataset`.
pub mod dataset {
    use super::*;
    /// Display folder the dataset is organized under; only a name.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Folder {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub name: Option<String>,
    }
}
/// Base type for file/blob location descriptors; concrete store-specific
/// locations below flatten this struct. `type_` is the wire discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetLocation {
    #[serde(rename = "type")]
    pub type_: String,
    /// Free-form JSON value (e.g. a literal or expression object); untyped here.
    #[serde(rename = "folderPath", default, skip_serializing_if = "Option::is_none")]
    pub folder_path: Option<serde_json::Value>,
    #[serde(rename = "fileName", default, skip_serializing_if = "Option::is_none")]
    pub file_name: Option<serde_json::Value>,
}
/// Azure Blob Storage location; adds an optional container on top of the base location.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobStorageLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub container: Option<serde_json::Value>,
}
/// Azure Blob FS (ADLS Gen2) location; adds an optional file system name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobFsLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
    #[serde(rename = "fileSystem", default, skip_serializing_if = "Option::is_none")]
    pub file_system: Option<serde_json::Value>,
}
/// Azure Data Lake Store location; no fields beyond the flattened base.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeStoreLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
}
/// Amazon S3 location; adds optional bucket name and object version.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3Location {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
    #[serde(rename = "bucketName", default, skip_serializing_if = "Option::is_none")]
    pub bucket_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<serde_json::Value>,
}
/// File server location; no fields beyond the flattened base.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileServerLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
}
/// Azure File Storage location; no fields beyond the flattened base.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFileStorageLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
}
/// S3-compatible object store location; adds optional bucket name and version.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3CompatibleLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
    #[serde(rename = "bucketName", default, skip_serializing_if = "Option::is_none")]
    pub bucket_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<serde_json::Value>,
}
/// Oracle Cloud Storage location; adds optional bucket name and version.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleCloudStorageLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
    #[serde(rename = "bucketName", default, skip_serializing_if = "Option::is_none")]
    pub bucket_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<serde_json::Value>,
}
/// Google Cloud Storage location; adds optional bucket name and version.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleCloudStorageLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
    #[serde(rename = "bucketName", default, skip_serializing_if = "Option::is_none")]
    pub bucket_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<serde_json::Value>,
}
/// FTP server location; no fields beyond the flattened base.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FtpServerLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
}
/// SFTP location; no fields beyond the flattened base.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SftpLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
}
/// HTTP server location; adds an optional relative URL.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpServerLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
    #[serde(rename = "relativeUrl", default, skip_serializing_if = "Option::is_none")]
    pub relative_url: Option<serde_json::Value>,
}
/// HDFS location; no fields beyond the flattened base.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdfsLocation {
    #[serde(flatten)]
    pub dataset_location: DatasetLocation,
}
/// Column element of a dataset structure: optional name and type, both
/// carried as free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetDataElement {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<serde_json::Value>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<serde_json::Value>,
}
/// Column element of a dataset's physical schema; same shape as
/// `DatasetDataElement` but kept as a distinct wire type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetSchemaDataElement {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<serde_json::Value>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<serde_json::Value>,
}
/// Base type for storage-format descriptors; concrete formats below flatten
/// this struct. `type_` is the wire discriminator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetStorageFormat {
    #[serde(rename = "type")]
    pub type_: String,
    /// Free-form JSON serializer descriptor; untyped here.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub serializer: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub deserializer: Option<serde_json::Value>,
}
/// Delimited-text storage format; all options are free-form JSON values
/// omitted from the payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TextFormat {
    #[serde(flatten)]
    pub dataset_storage_format: DatasetStorageFormat,
    #[serde(rename = "columnDelimiter", default, skip_serializing_if = "Option::is_none")]
    pub column_delimiter: Option<serde_json::Value>,
    #[serde(rename = "rowDelimiter", default, skip_serializing_if = "Option::is_none")]
    pub row_delimiter: Option<serde_json::Value>,
    #[serde(rename = "escapeChar", default, skip_serializing_if = "Option::is_none")]
    pub escape_char: Option<serde_json::Value>,
    #[serde(rename = "quoteChar", default, skip_serializing_if = "Option::is_none")]
    pub quote_char: Option<serde_json::Value>,
    #[serde(rename = "nullValue", default, skip_serializing_if = "Option::is_none")]
    pub null_value: Option<serde_json::Value>,
    #[serde(rename = "encodingName", default, skip_serializing_if = "Option::is_none")]
    pub encoding_name: Option<serde_json::Value>,
    #[serde(rename = "treatEmptyAsNull", default, skip_serializing_if = "Option::is_none")]
    pub treat_empty_as_null: Option<serde_json::Value>,
    #[serde(rename = "skipLineCount", default, skip_serializing_if = "Option::is_none")]
    pub skip_line_count: Option<serde_json::Value>,
    #[serde(rename = "firstRowAsHeader", default, skip_serializing_if = "Option::is_none")]
    pub first_row_as_header: Option<serde_json::Value>,
}
/// JSON storage format; all options are free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonFormat {
    #[serde(flatten)]
    pub dataset_storage_format: DatasetStorageFormat,
    #[serde(rename = "filePattern", default, skip_serializing_if = "Option::is_none")]
    pub file_pattern: Option<serde_json::Value>,
    #[serde(rename = "nestingSeparator", default, skip_serializing_if = "Option::is_none")]
    pub nesting_separator: Option<serde_json::Value>,
    #[serde(rename = "encodingName", default, skip_serializing_if = "Option::is_none")]
    pub encoding_name: Option<serde_json::Value>,
    #[serde(rename = "jsonNodeReference", default, skip_serializing_if = "Option::is_none")]
    pub json_node_reference: Option<serde_json::Value>,
    #[serde(rename = "jsonPathDefinition", default, skip_serializing_if = "Option::is_none")]
    pub json_path_definition: Option<serde_json::Value>,
}
/// Known values for `JsonFormat::file_pattern` (camelCase on the wire).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum JsonFormatFilePattern {
    #[serde(rename = "setOfObjects")]
    SetOfObjects,
    #[serde(rename = "arrayOfObjects")]
    ArrayOfObjects,
}
/// Avro storage format; no options beyond the flattened base.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvroFormat {
    #[serde(flatten)]
    pub dataset_storage_format: DatasetStorageFormat,
}
/// ORC storage format; no options beyond the flattened base.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OrcFormat {
    #[serde(flatten)]
    pub dataset_storage_format: DatasetStorageFormat,
}
/// Parquet storage format; no options beyond the flattened base.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParquetFormat {
    #[serde(flatten)]
    pub dataset_storage_format: DatasetStorageFormat,
}
/// Compression applied to a dataset; note `type_` is a free-form JSON value
/// here (unlike the `String` discriminators elsewhere).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatasetCompression {
    #[serde(rename = "type")]
    pub type_: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub level: Option<serde_json::Value>,
}
/// Known compression levels; serialized with the variant names as-is (no rename).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CompressionLevel {
    Optimal,
    Fastest,
}
/// Amazon S3 object dataset; flattens the base `Dataset` and adds required
/// S3-specific `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3Dataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: AmazonS3DatasetTypeProperties,
}
/// S3-specific dataset properties; `bucket_name` is required, everything
/// else is optional free-form JSON omitted when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3DatasetTypeProperties {
    #[serde(rename = "bucketName")]
    pub bucket_name: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub key: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub format: Option<DatasetStorageFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
}
/// Avro-file dataset; flattens the base `Dataset`; `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvroDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AvroDatasetTypeProperties>,
}
/// Avro dataset properties: required file location plus optional codec and level.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvroDatasetTypeProperties {
    pub location: DatasetLocation,
    #[serde(rename = "avroCompressionCodec", default, skip_serializing_if = "Option::is_none")]
    pub avro_compression_codec: Option<serde_json::Value>,
    /// Plain integer (not a JSON expression) unlike the codec field.
    #[serde(rename = "avroCompressionLevel", default, skip_serializing_if = "Option::is_none")]
    pub avro_compression_level: Option<i64>,
}
/// Known Avro compression codecs (lowercase on the wire).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AvroCompressionCodec {
    #[serde(rename = "none")]
    None,
    #[serde(rename = "deflate")]
    Deflate,
    #[serde(rename = "snappy")]
    Snappy,
    #[serde(rename = "xz")]
    Xz,
    #[serde(rename = "bzip2")]
    Bzip2,
}
/// Excel-file dataset; flattens the base `Dataset`; `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExcelDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<ExcelDatasetTypeProperties>,
}
/// Excel dataset properties: required file location; sheet/range options are
/// free-form JSON values omitted when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExcelDatasetTypeProperties {
    pub location: DatasetLocation,
    #[serde(rename = "sheetName", default, skip_serializing_if = "Option::is_none")]
    pub sheet_name: Option<serde_json::Value>,
    #[serde(rename = "sheetIndex", default, skip_serializing_if = "Option::is_none")]
    pub sheet_index: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub range: Option<serde_json::Value>,
    #[serde(rename = "firstRowAsHeader", default, skip_serializing_if = "Option::is_none")]
    pub first_row_as_header: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
    #[serde(rename = "nullValue", default, skip_serializing_if = "Option::is_none")]
    pub null_value: Option<serde_json::Value>,
}
/// Parquet-file dataset; flattens the base `Dataset`; `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParquetDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<ParquetDatasetTypeProperties>,
}
/// Parquet dataset properties: required file location plus optional codec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParquetDatasetTypeProperties {
    pub location: DatasetLocation,
    #[serde(rename = "compressionCodec", default, skip_serializing_if = "Option::is_none")]
    pub compression_codec: Option<serde_json::Value>,
}
/// Delimited-text (CSV-like) dataset; flattens the base `Dataset`;
/// `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DelimitedTextDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<DelimitedTextDatasetTypeProperties>,
}
/// Delimited-text options: required file location; all other fields are
/// free-form JSON values omitted when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DelimitedTextDatasetTypeProperties {
    pub location: DatasetLocation,
    #[serde(rename = "columnDelimiter", default, skip_serializing_if = "Option::is_none")]
    pub column_delimiter: Option<serde_json::Value>,
    #[serde(rename = "rowDelimiter", default, skip_serializing_if = "Option::is_none")]
    pub row_delimiter: Option<serde_json::Value>,
    #[serde(rename = "encodingName", default, skip_serializing_if = "Option::is_none")]
    pub encoding_name: Option<serde_json::Value>,
    #[serde(rename = "compressionCodec", default, skip_serializing_if = "Option::is_none")]
    pub compression_codec: Option<serde_json::Value>,
    #[serde(rename = "compressionLevel", default, skip_serializing_if = "Option::is_none")]
    pub compression_level: Option<serde_json::Value>,
    #[serde(rename = "quoteChar", default, skip_serializing_if = "Option::is_none")]
    pub quote_char: Option<serde_json::Value>,
    #[serde(rename = "escapeChar", default, skip_serializing_if = "Option::is_none")]
    pub escape_char: Option<serde_json::Value>,
    #[serde(rename = "firstRowAsHeader", default, skip_serializing_if = "Option::is_none")]
    pub first_row_as_header: Option<serde_json::Value>,
    #[serde(rename = "nullValue", default, skip_serializing_if = "Option::is_none")]
    pub null_value: Option<serde_json::Value>,
}
/// Known compression codec names (lowercase/camelCase on the wire).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CompressionCodec {
    #[serde(rename = "none")]
    None,
    #[serde(rename = "lzo")]
    Lzo,
    #[serde(rename = "bzip2")]
    Bzip2,
    #[serde(rename = "gzip")]
    Gzip,
    #[serde(rename = "deflate")]
    Deflate,
    #[serde(rename = "zipDeflate")]
    ZipDeflate,
    #[serde(rename = "snappy")]
    Snappy,
    #[serde(rename = "lz4")]
    Lz4,
    #[serde(rename = "tar")]
    Tar,
    #[serde(rename = "tarGZip")]
    TarGZip,
}
/// JSON-file dataset; flattens the base `Dataset`; `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<JsonDatasetTypeProperties>,
}
/// JSON dataset properties: required file location plus optional encoding
/// and compression.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonDatasetTypeProperties {
    pub location: DatasetLocation,
    #[serde(rename = "encodingName", default, skip_serializing_if = "Option::is_none")]
    pub encoding_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
}
/// XML-file dataset; flattens the base `Dataset`; `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XmlDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<XmlDatasetTypeProperties>,
}
/// XML dataset properties: required file location plus optional encoding,
/// null-value marker, and compression.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XmlDatasetTypeProperties {
    pub location: DatasetLocation,
    #[serde(rename = "encodingName", default, skip_serializing_if = "Option::is_none")]
    pub encoding_name: Option<serde_json::Value>,
    #[serde(rename = "nullValue", default, skip_serializing_if = "Option::is_none")]
    pub null_value: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
}
/// ORC-file dataset; flattens the base `Dataset`; `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OrcDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<OrcDatasetTypeProperties>,
}
/// ORC dataset properties: required file location plus optional codec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OrcDatasetTypeProperties {
    pub location: DatasetLocation,
    #[serde(rename = "orcCompressionCodec", default, skip_serializing_if = "Option::is_none")]
    pub orc_compression_codec: Option<serde_json::Value>,
}
/// Known ORC compression codecs (lowercase on the wire).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OrcCompressionCodec {
    #[serde(rename = "none")]
    None,
    #[serde(rename = "zlib")]
    Zlib,
    #[serde(rename = "snappy")]
    Snappy,
    #[serde(rename = "lzo")]
    Lzo,
}
/// Binary-file dataset; flattens the base `Dataset`; `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BinaryDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<BinaryDatasetTypeProperties>,
}
/// Binary dataset properties: required file location plus optional compression.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BinaryDatasetTypeProperties {
    pub location: DatasetLocation,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
}
/// Azure Blob dataset; flattens the base `Dataset`; `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AzureBlobDatasetTypeProperties>,
}
/// Azure Blob dataset properties; all fields optional free-form JSON values,
/// except `format`/`compression` which use the typed descriptors above.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobDatasetTypeProperties {
    #[serde(rename = "folderPath", default, skip_serializing_if = "Option::is_none")]
    pub folder_path: Option<serde_json::Value>,
    #[serde(rename = "tableRootLocation", default, skip_serializing_if = "Option::is_none")]
    pub table_root_location: Option<serde_json::Value>,
    #[serde(rename = "fileName", default, skip_serializing_if = "Option::is_none")]
    pub file_name: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub format: Option<DatasetStorageFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
}
/// Azure Table Storage dataset; flattens the base `Dataset`; `typeProperties`
/// is required here (unlike most file datasets above).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureTableDatasetTypeProperties,
}
/// Azure Table dataset properties: required table name as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureTableDatasetTypeProperties {
    #[serde(rename = "tableName")]
    pub table_name: serde_json::Value,
}
/// Azure SQL Database table dataset; flattens the base `Dataset`;
/// `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AzureSqlTableDatasetTypeProperties>,
}
/// Table identification: legacy combined `tableName` plus separate
/// `schema`/`table` fields; all optional free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Azure SQL Managed Instance table dataset; flattens the base `Dataset`;
/// `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlMiTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AzureSqlMiTableDatasetTypeProperties>,
}
/// Table identification: optional `tableName`, `schema`, and `table`
/// free-form JSON values (same shape as the Azure SQL variant above).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlMiTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Azure Synapse (SQL DW) table dataset; flattens the base `Dataset`;
/// `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlDwTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AzureSqlDwTableDatasetTypeProperties>,
}
/// Table identification: optional `tableName`, `schema`, and `table`
/// free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlDwTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Cassandra table dataset; flattens the base `Dataset`; `typeProperties`
/// is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CassandraTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<CassandraTableDatasetTypeProperties>,
}
/// Table identification: optional table name and keyspace as free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CassandraTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub keyspace: Option<serde_json::Value>,
}
/// Custom dataset: `typeProperties` is completely untyped
/// (`serde_json::Value`) rather than a dedicated properties struct.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<serde_json::Value>,
}
/// Cosmos DB (SQL API) collection dataset; flattens the base `Dataset`;
/// `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbSqlApiCollectionDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: CosmosDbSqlApiCollectionDatasetTypeProperties,
}
/// Required collection name as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbSqlApiCollectionDatasetTypeProperties {
    #[serde(rename = "collectionName")]
    pub collection_name: serde_json::Value,
}
/// DocumentDB collection dataset; flattens the base `Dataset`;
/// `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DocumentDbCollectionDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: DocumentDbCollectionDatasetTypeProperties,
}
/// Required collection name as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DocumentDbCollectionDatasetTypeProperties {
    #[serde(rename = "collectionName")]
    pub collection_name: serde_json::Value,
}
/// Dynamics entity dataset; flattens the base `Dataset`; `typeProperties`
/// is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsEntityDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<DynamicsEntityDatasetTypeProperties>,
}
/// Optional entity name as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsEntityDatasetTypeProperties {
    #[serde(rename = "entityName", default, skip_serializing_if = "Option::is_none")]
    pub entity_name: Option<serde_json::Value>,
}
/// Dynamics CRM entity dataset; same shape as `DynamicsEntityDataset`
/// but a distinct wire type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsCrmEntityDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<DynamicsCrmEntityDatasetTypeProperties>,
}
/// Optional entity name as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsCrmEntityDatasetTypeProperties {
    #[serde(rename = "entityName", default, skip_serializing_if = "Option::is_none")]
    pub entity_name: Option<serde_json::Value>,
}
/// Common Data Service for Apps entity dataset; same shape as the Dynamics
/// entity datasets but a distinct wire type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommonDataServiceForAppsEntityDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<CommonDataServiceForAppsEntityDatasetTypeProperties>,
}
/// Optional entity name as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommonDataServiceForAppsEntityDatasetTypeProperties {
    #[serde(rename = "entityName", default, skip_serializing_if = "Option::is_none")]
    pub entity_name: Option<serde_json::Value>,
}
/// Azure Data Lake Store dataset; flattens the base `Dataset`;
/// `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeStoreDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AzureDataLakeStoreDatasetTypeProperties>,
}
/// Path, format, and compression options; all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeStoreDatasetTypeProperties {
    #[serde(rename = "folderPath", default, skip_serializing_if = "Option::is_none")]
    pub folder_path: Option<serde_json::Value>,
    #[serde(rename = "fileName", default, skip_serializing_if = "Option::is_none")]
    pub file_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub format: Option<DatasetStorageFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
}
/// Azure Blob FS (ADLS Gen2) dataset; flattens the base `Dataset`;
/// `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobFsDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AzureBlobFsDatasetTypeProperties>,
}
/// Path, format, and compression options; all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobFsDatasetTypeProperties {
    #[serde(rename = "folderPath", default, skip_serializing_if = "Option::is_none")]
    pub folder_path: Option<serde_json::Value>,
    #[serde(rename = "fileName", default, skip_serializing_if = "Option::is_none")]
    pub file_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub format: Option<DatasetStorageFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
}
/// Office 365 dataset; flattens the base `Dataset`; `typeProperties`
/// is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Office365Dataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: Office365DatasetTypeProperties,
}
/// Required table name plus an optional predicate, both free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Office365DatasetTypeProperties {
    #[serde(rename = "tableName")]
    pub table_name: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub predicate: Option<serde_json::Value>,
}
/// File-share dataset; flattens the base `Dataset`; `typeProperties`
/// is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileShareDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<FileShareDatasetTypeProperties>,
}
/// Path, filter, time-window, format, and compression options; all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileShareDatasetTypeProperties {
    #[serde(rename = "folderPath", default, skip_serializing_if = "Option::is_none")]
    pub folder_path: Option<serde_json::Value>,
    #[serde(rename = "fileName", default, skip_serializing_if = "Option::is_none")]
    pub file_name: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeStart", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_start: Option<serde_json::Value>,
    #[serde(rename = "modifiedDatetimeEnd", default, skip_serializing_if = "Option::is_none")]
    pub modified_datetime_end: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub format: Option<DatasetStorageFormat>,
    #[serde(rename = "fileFilter", default, skip_serializing_if = "Option::is_none")]
    pub file_filter: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
}
/// MongoDB collection dataset; flattens the base `Dataset`; `typeProperties`
/// is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbCollectionDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: MongoDbCollectionDatasetTypeProperties,
}
/// Required collection name, serialized as "collectionName".
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbCollectionDatasetTypeProperties {
    #[serde(rename = "collectionName")]
    pub collection_name: serde_json::Value,
}
/// MongoDB Atlas collection dataset; note the property is plain
/// `collection` here, not "collectionName" as in `MongoDbCollectionDataset`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbAtlasCollectionDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: MongoDbAtlasCollectionDatasetTypeProperties,
}
/// Required collection reference as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbAtlasCollectionDatasetTypeProperties {
    pub collection: serde_json::Value,
}
/// MongoDB v2 collection dataset; uses the plain `collection` property
/// like the Atlas variant.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbV2CollectionDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: MongoDbV2CollectionDatasetTypeProperties,
}
/// Required collection reference as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbV2CollectionDatasetTypeProperties {
    pub collection: serde_json::Value,
}
/// Cosmos DB (MongoDB API) collection dataset; uses the plain `collection`
/// property like the MongoDB v2/Atlas variants.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbMongoDbApiCollectionDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: CosmosDbMongoDbApiCollectionDatasetTypeProperties,
}
/// Required collection reference as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbMongoDbApiCollectionDatasetTypeProperties {
    pub collection: serde_json::Value,
}
/// OData resource dataset; flattens the base `Dataset`; `typeProperties`
/// is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ODataResourceDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<ODataResourceDatasetTypeProperties>,
}
/// Optional resource path as a free-form JSON value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ODataResourceDatasetTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<serde_json::Value>,
}
/// Oracle table dataset; flattens the base `Dataset`; `typeProperties`
/// is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<OracleTableDatasetTypeProperties>,
}
/// Table identification: optional `tableName`, `schema`, and `table`
/// free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Amazon RDS for Oracle table dataset; flattens the base `Dataset`;
/// `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForOracleTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AmazonRdsForOracleTableDatasetTypeProperties>,
}
/// Table identification: optional `schema` and `table` only (no legacy
/// combined `tableName` field on this variant).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForOracleTableDatasetTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Teradata table dataset; flattens the base `Dataset`; `typeProperties`
/// is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TeradataTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<TeradataTableDatasetTypeProperties>,
}
/// Table identification: optional `database` and `table` free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TeradataTableDatasetTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub database: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Azure MySQL table dataset; flattens the base `Dataset`; note
/// `typeProperties` is required here even though both its fields are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMySqlTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureMySqlTableDatasetTypeProperties,
}
/// Table identification: optional `tableName` and `table` free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMySqlTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Amazon Redshift table dataset; flattens the base `Dataset`;
/// `typeProperties` is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRedshiftTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AmazonRedshiftTableDatasetTypeProperties>,
}
/// Table identification: optional `tableName`, `table`, and `schema`
/// free-form JSON values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRedshiftTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// IBM DB2 table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Db2TableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<Db2TableDatasetTypeProperties>,
}
/// DB2 dataset properties: legacy `tableName` plus the `schema`/`table` pair.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Db2TableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Generic relational table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RelationalTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<RelationalTableDatasetTypeProperties>,
}
/// Relational dataset properties: a single optional `tableName`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RelationalTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
}
/// Informix table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InformixTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<InformixTableDatasetTypeProperties>,
}
/// Informix dataset properties: a single optional `tableName`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InformixTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
}
/// ODBC table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OdbcTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<OdbcTableDatasetTypeProperties>,
}
/// ODBC dataset properties: a single optional `tableName`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OdbcTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
}
/// MySQL table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MySqlTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<MySqlTableDatasetTypeProperties>,
}
/// MySQL dataset properties: a single optional `tableName`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MySqlTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
}
/// PostgreSQL table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PostgreSqlTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<PostgreSqlTableDatasetTypeProperties>,
}
/// PostgreSQL dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PostgreSqlTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// Microsoft Access table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftAccessTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<MicrosoftAccessTableDatasetTypeProperties>,
}
/// Microsoft Access dataset properties: a single optional `tableName`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftAccessTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
}
/// Salesforce object dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SalesforceObjectDatasetTypeProperties>,
}
/// Salesforce dataset properties: the target object's API name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceObjectDatasetTypeProperties {
    #[serde(rename = "objectApiName", default, skip_serializing_if = "Option::is_none")]
    pub object_api_name: Option<serde_json::Value>,
}
/// Salesforce Service Cloud object dataset; mirrors `SalesforceObjectDataset`
/// but keeps its own properties type, as generated from the spec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceServiceCloudObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SalesforceServiceCloudObjectDatasetTypeProperties>,
}
/// Salesforce Service Cloud dataset properties: the object's API name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceServiceCloudObjectDatasetTypeProperties {
    #[serde(rename = "objectApiName", default, skip_serializing_if = "Option::is_none")]
    pub object_api_name: Option<serde_json::Value>,
}
/// Sybase table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SybaseTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SybaseTableDatasetTypeProperties>,
}
/// Sybase dataset properties: a single optional `tableName`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SybaseTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
}
/// SAP BW cube dataset. Has no type-specific properties at all — it is just
/// the flattened `Dataset` envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapBwCubeDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
}
/// SAP Cloud for Customer resource dataset; `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapCloudForCustomerResourceDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: SapCloudForCustomerResourceDatasetTypeProperties,
}
/// SAP Cloud for Customer dataset properties: the resource path (required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapCloudForCustomerResourceDatasetTypeProperties {
    pub path: serde_json::Value,
}
/// SAP ECC resource dataset; `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapEccResourceDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: SapEccResourceDatasetTypeProperties,
}
/// SAP ECC dataset properties: the resource path (required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapEccResourceDatasetTypeProperties {
    pub path: serde_json::Value,
}
/// SAP HANA table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapHanaTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SapHanaTableDatasetTypeProperties>,
}
/// SAP HANA dataset properties: optional schema and table names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapHanaTableDatasetTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// SAP Business Warehouse Open Hub dataset; `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapOpenHubTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: SapOpenHubTableDatasetTypeProperties,
}
/// Open Hub dataset properties: the destination name is required; the
/// exclude-last-request flag and base request id are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapOpenHubTableDatasetTypeProperties {
    #[serde(rename = "openHubDestinationName")]
    pub open_hub_destination_name: serde_json::Value,
    #[serde(rename = "excludeLastRequest", default, skip_serializing_if = "Option::is_none")]
    pub exclude_last_request: Option<serde_json::Value>,
    #[serde(rename = "baseRequestId", default, skip_serializing_if = "Option::is_none")]
    pub base_request_id: Option<serde_json::Value>,
}
/// SQL Server table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlServerTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SqlServerTableDatasetTypeProperties>,
}
/// SQL Server dataset properties: legacy `tableName` plus `schema`/`table`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlServerTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Amazon RDS for SQL Server table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForSqlServerTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AmazonRdsForSqlServerTableDatasetTypeProperties>,
}
/// RDS-for-SQL-Server dataset properties: optional `schema` and `table` only
/// (no legacy `tableName`, unlike `SqlServerTableDatasetTypeProperties`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForSqlServerTableDatasetTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// REST resource dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestResourceDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<RestResourceDatasetTypeProperties>,
}
/// REST dataset properties: relative URL, request method/body, extra headers
/// and pagination rules — all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestResourceDatasetTypeProperties {
    #[serde(rename = "relativeUrl", default, skip_serializing_if = "Option::is_none")]
    pub relative_url: Option<serde_json::Value>,
    #[serde(rename = "requestMethod", default, skip_serializing_if = "Option::is_none")]
    pub request_method: Option<serde_json::Value>,
    #[serde(rename = "requestBody", default, skip_serializing_if = "Option::is_none")]
    pub request_body: Option<serde_json::Value>,
    #[serde(rename = "additionalHeaders", default, skip_serializing_if = "Option::is_none")]
    pub additional_headers: Option<serde_json::Value>,
    #[serde(rename = "paginationRules", default, skip_serializing_if = "Option::is_none")]
    pub pagination_rules: Option<serde_json::Value>,
}
/// SAP table resource dataset; `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapTableResourceDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: SapTableResourceDatasetTypeProperties,
}
/// SAP table dataset properties: the table name is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapTableResourceDatasetTypeProperties {
    #[serde(rename = "tableName")]
    pub table_name: serde_json::Value,
}
/// Web table dataset; `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: WebTableDatasetTypeProperties,
}
/// Web table dataset properties: a required table `index` and optional `path`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebTableDatasetTypeProperties {
    pub index: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<serde_json::Value>,
}
/// Azure Search index dataset; `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSearchIndexDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureSearchIndexDatasetTypeProperties,
}
/// Azure Search dataset properties: the index name is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSearchIndexDatasetTypeProperties {
    #[serde(rename = "indexName")]
    pub index_name: serde_json::Value,
}
/// HTTP file dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<HttpDatasetTypeProperties>,
}
/// HTTP dataset properties: request shape (URL, method, body, headers) plus an
/// optional storage `format` and `compression` for the downloaded payload.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpDatasetTypeProperties {
    #[serde(rename = "relativeUrl", default, skip_serializing_if = "Option::is_none")]
    pub relative_url: Option<serde_json::Value>,
    #[serde(rename = "requestMethod", default, skip_serializing_if = "Option::is_none")]
    pub request_method: Option<serde_json::Value>,
    #[serde(rename = "requestBody", default, skip_serializing_if = "Option::is_none")]
    pub request_body: Option<serde_json::Value>,
    #[serde(rename = "additionalHeaders", default, skip_serializing_if = "Option::is_none")]
    pub additional_headers: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub format: Option<DatasetStorageFormat>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compression: Option<DatasetCompression>,
}
/// Shared properties type (single optional `tableName`) reused by the many
/// connector-specific object datasets below.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GenericDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
}
/// Amazon Marketplace Web Service object dataset; uses the shared
/// `GenericDatasetTypeProperties` (optional `tableName`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonMwsObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Azure Database for PostgreSQL table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzurePostgreSqlTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AzurePostgreSqlTableDatasetTypeProperties>,
}
/// Azure PostgreSQL dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzurePostgreSqlTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// Concur object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConcurObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Couchbase table dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CouchbaseTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Apache Drill table dataset with its own optional properties type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DrillTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<DrillDatasetTypeProperties>,
}
/// Drill dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DrillDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// Eloqua object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EloquaObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Google BigQuery object dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleBigQueryObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GoogleBigQueryDatasetTypeProperties>,
}
/// BigQuery dataset properties: legacy `tableName` plus `table` and the
/// BigQuery `dataset` name (BigQuery's analogue of a schema).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleBigQueryDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub dataset: Option<serde_json::Value>,
}
/// Greenplum table dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GreenplumTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GreenplumDatasetTypeProperties>,
}
/// Greenplum dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GreenplumDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// HBase object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HBaseObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Hive object dataset with its own optional properties type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HiveObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<HiveDatasetTypeProperties>,
}
/// Hive dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HiveDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// Hubspot object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HubspotObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Impala object dataset with its own optional properties type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImpalaObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<ImpalaDatasetTypeProperties>,
}
/// Impala dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImpalaDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// Jira object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JiraObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Magento object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MagentoObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// MariaDB table dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MariaDbTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Azure Database for MariaDB table dataset; same shape as `MariaDbTableDataset`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMariaDbTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Marketo object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MarketoObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// PayPal object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PaypalObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Apache Phoenix object dataset with its own optional properties type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PhoenixObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<PhoenixDatasetTypeProperties>,
}
/// Phoenix dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PhoenixDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// Presto object dataset with its own optional properties type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrestoObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<PrestoDatasetTypeProperties>,
}
/// Presto dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrestoDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// QuickBooks object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuickBooksObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// ServiceNow object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceNowObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Shopify object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShopifyObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Apache Spark object dataset with its own optional properties type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SparkObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SparkDatasetTypeProperties>,
}
/// Spark dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SparkDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// Square object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SquareObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Xero object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XeroObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Zoho object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ZohoObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Netezza table dataset with its own optional properties type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetezzaTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<NetezzaTableDatasetTypeProperties>,
}
/// Netezza dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetezzaTableDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// Vertica table dataset with its own optional properties type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VerticaTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<VerticaDatasetTypeProperties>,
}
/// Vertica dataset properties: legacy `tableName` plus `table`/`schema`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VerticaDatasetTypeProperties {
    #[serde(rename = "tableName", default, skip_serializing_if = "Option::is_none")]
    pub table_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
}
/// Salesforce Marketing Cloud object dataset; uses the shared
/// `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceMarketingCloudObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Responsys object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResponsysObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Dynamics AX resource dataset; `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsAxResourceDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: DynamicsAxResourceDatasetTypeProperties,
}
/// Dynamics AX dataset properties: the resource path (required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsAxResourceDatasetTypeProperties {
    pub path: serde_json::Value,
}
/// Oracle Service Cloud object dataset; uses the shared
/// `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleServiceCloudObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Azure Data Explorer (Kusto) table dataset; `typeProperties` is required,
/// though its single `table` field is itself optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataExplorerTableDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureDataExplorerDatasetTypeProperties,
}
/// Azure Data Explorer dataset properties: optional table name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataExplorerDatasetTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// Google AdWords object dataset; uses the shared `GenericDatasetTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleAdWordsObjectDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<GenericDatasetTypeProperties>,
}
/// Snowflake dataset; `typeProperties` is required, though both of its fields
/// are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnowflakeDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties")]
    pub type_properties: SnowflakeDatasetTypeProperties,
}
/// Snowflake dataset properties: optional schema and table names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnowflakeDatasetTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
}
/// SharePoint Online list dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharePointOnlineListResourceDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<SharePointOnlineListDatasetTypeProperties>,
}
/// SharePoint Online dataset properties: the list name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharePointOnlineListDatasetTypeProperties {
    #[serde(rename = "listName", default, skip_serializing_if = "Option::is_none")]
    pub list_name: Option<serde_json::Value>,
}
/// Azure Databricks Delta Lake dataset with optional `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksDeltaLakeDataset {
    #[serde(flatten)]
    pub dataset: Dataset,
    #[serde(rename = "typeProperties", default, skip_serializing_if = "Option::is_none")]
    pub type_properties: Option<AzureDatabricksDeltaLakeDatasetTypeProperties>,
}
/// Delta Lake dataset properties: optional table and database names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksDeltaLakeDatasetTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub database: Option<serde_json::Value>,
}
/// Base envelope shared by every linked-service variant below (each variant
/// merges these fields into its own JSON object via `#[serde(flatten)]`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedService {
    /// Discriminator for the concrete linked-service type; serialized as `type`
    /// (renamed because `type` is a Rust keyword).
    #[serde(rename = "type")]
    pub type_: String,
    /// Optional integration runtime used to connect; serialized as `connectVia`.
    #[serde(rename = "connectVia", default, skip_serializing_if = "Option::is_none")]
    pub connect_via: Option<IntegrationRuntimeReference>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<ParameterDefinitionSpecification>,
    /// Free-form annotations; omitted from JSON when empty and defaulted to
    /// empty when absent.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub annotations: Vec<serde_json::Value>,
}
/// Azure Storage linked service; `typeProperties` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureStorageLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureStorageLinkedServiceTypeProperties,
}
/// Azure Blob Storage linked service; uses its own richer properties type
/// (`AzureBlobStorageLinkedServiceTypeProperties`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobStorageLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureBlobStorageLinkedServiceTypeProperties,
}
/// Azure Table Storage linked service; note it reuses the generic
/// `AzureStorageLinkedServiceTypeProperties` rather than defining its own.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureTableStorageLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureStorageLinkedServiceTypeProperties,
}
/// Azure Storage connection settings: connection string (with the account key
/// optionally held in Key Vault) or SAS URI (with the SAS token optionally held
/// in Key Vault). All fields are optional at the type level.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureStorageLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(rename = "accountKey", default, skip_serializing_if = "Option::is_none")]
    pub account_key: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "sasUri", default, skip_serializing_if = "Option::is_none")]
    pub sas_uri: Option<serde_json::Value>,
    #[serde(rename = "sasToken", default, skip_serializing_if = "Option::is_none")]
    pub sas_token: Option<AzureKeyVaultSecretReference>,
    /// Encrypted credential blob; a plain `String` here, unlike the
    /// expression-capable `serde_json::Value` fields above.
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<String>,
}
/// Azure Blob Storage connection settings. Extends the generic storage
/// settings with service-endpoint, service-principal, tenant/cloud, account
/// kind and credential-reference fields. NOTE(review): connection string, SAS,
/// and service principal look like mutually exclusive auth options, but the
/// type does not enforce that — confirm against the service spec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobStorageLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(rename = "accountKey", default, skip_serializing_if = "Option::is_none")]
    pub account_key: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "sasUri", default, skip_serializing_if = "Option::is_none")]
    pub sas_uri: Option<serde_json::Value>,
    #[serde(rename = "sasToken", default, skip_serializing_if = "Option::is_none")]
    pub sas_token: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "serviceEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub service_endpoint: Option<String>,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    /// Service-principal secret, wrapped in the polymorphic `SecretBase`.
    #[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_key: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tenant: Option<serde_json::Value>,
    #[serde(rename = "azureCloudType", default, skip_serializing_if = "Option::is_none")]
    pub azure_cloud_type: Option<serde_json::Value>,
    #[serde(rename = "accountKind", default, skip_serializing_if = "Option::is_none")]
    pub account_kind: Option<String>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// Azure SQL Data Warehouse (Synapse) linked service: shared envelope plus
/// DW-specific `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlDwLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureSqlDwLinkedServiceTypeProperties,
}
/// Connection settings for Azure SQL Data Warehouse. `connectionString` is
/// the only required field; the password and service-principal fields are
/// optional alternatives layered on top of it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlDwLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    // Password referenced from Key Vault rather than stored inline.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_key: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tenant: Option<serde_json::Value>,
    #[serde(rename = "azureCloudType", default, skip_serializing_if = "Option::is_none")]
    pub azure_cloud_type: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// SQL Server linked service: shared envelope plus SQL-Server-specific
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlServerLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: SqlServerLinkedServiceTypeProperties,
}
/// Connection settings for SQL Server: a required connection string with
/// optional user name / password and Always Encrypted configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlServerLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(rename = "alwaysEncryptedSettings", default, skip_serializing_if = "Option::is_none")]
    pub always_encrypted_settings: Option<SqlAlwaysEncryptedProperties>,
}
/// Amazon RDS for SQL Server linked service: shared envelope plus
/// RDS-specific `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForSqlServerLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AmazonRdsForSqlServerLinkedServiceTypeProperties,
}
/// Connection settings for Amazon RDS for SQL Server — structurally identical
/// to `SqlServerLinkedServiceTypeProperties` (required connection string,
/// optional credentials and Always Encrypted settings).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForSqlServerLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(rename = "alwaysEncryptedSettings", default, skip_serializing_if = "Option::is_none")]
    pub always_encrypted_settings: Option<SqlAlwaysEncryptedProperties>,
}
/// Azure SQL Database linked service: shared envelope plus database-specific
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlDatabaseLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureSqlDatabaseLinkedServiceTypeProperties,
}
/// Connection settings for Azure SQL Database: required connection string
/// with optional Key Vault password, service-principal auth, Always
/// Encrypted settings, and a credential reference.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlDatabaseLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_key: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tenant: Option<serde_json::Value>,
    #[serde(rename = "azureCloudType", default, skip_serializing_if = "Option::is_none")]
    pub azure_cloud_type: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(rename = "alwaysEncryptedSettings", default, skip_serializing_if = "Option::is_none")]
    pub always_encrypted_settings: Option<SqlAlwaysEncryptedProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// Azure SQL Managed Instance linked service: shared envelope plus
/// MI-specific `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlMiLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureSqlMiLinkedServiceTypeProperties,
}
/// Connection settings for Azure SQL Managed Instance — same shape as
/// `AzureSqlDatabaseLinkedServiceTypeProperties` (required connection string,
/// optional password / service principal / Always Encrypted / credential).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSqlMiLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_key: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tenant: Option<serde_json::Value>,
    #[serde(rename = "azureCloudType", default, skip_serializing_if = "Option::is_none")]
    pub azure_cloud_type: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(rename = "alwaysEncryptedSettings", default, skip_serializing_if = "Option::is_none")]
    pub always_encrypted_settings: Option<SqlAlwaysEncryptedProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// SQL Always Encrypted configuration. The Key Vault authentication type is
/// required; the service-principal fields only apply when that type is
/// `ServicePrincipal`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SqlAlwaysEncryptedProperties {
    #[serde(rename = "alwaysEncryptedAkvAuthType")]
    pub always_encrypted_akv_auth_type: sql_always_encrypted_properties::AlwaysEncryptedAkvAuthType,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_key: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// Enum namespace for `SqlAlwaysEncryptedProperties` (generated-code
/// convention: per-struct module holding its closed string enums).
pub mod sql_always_encrypted_properties {
    use super::*;
    /// How the Always Encrypted column master key in AKV is authenticated.
    /// Variants serialize as their exact PascalCase names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AlwaysEncryptedAkvAuthType {
        ServicePrincipal,
        ManagedIdentity,
        UserAssignedManagedIdentity,
    }
}
/// Azure Batch linked service: shared envelope plus Batch-specific
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBatchLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureBatchLinkedServiceTypeProperties,
}
/// Connection settings for Azure Batch. Account name, batch URI, pool name
/// and a storage linked-service reference are required; only the access key,
/// encrypted credential and credential reference are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBatchLinkedServiceTypeProperties {
    #[serde(rename = "accountName")]
    pub account_name: serde_json::Value,
    #[serde(rename = "accessKey", default, skip_serializing_if = "Option::is_none")]
    pub access_key: Option<SecretBase>,
    #[serde(rename = "batchUri")]
    pub batch_uri: serde_json::Value,
    #[serde(rename = "poolName")]
    pub pool_name: serde_json::Value,
    // Required reference to another linked service (presumably the backing
    // storage account — confirm against the service spec).
    #[serde(rename = "linkedServiceName")]
    pub linked_service_name: LinkedServiceReference,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// Azure Key Vault linked service: shared envelope plus vault-specific
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureKeyVaultLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureKeyVaultLinkedServiceTypeProperties,
}
/// Connection settings for Azure Key Vault: the vault base URL is required,
/// the credential reference optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureKeyVaultLinkedServiceTypeProperties {
    #[serde(rename = "baseUrl")]
    pub base_url: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// Azure Cosmos DB linked service: shared envelope plus Cosmos-specific
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: CosmosDbLinkedServiceTypeProperties,
}
/// Connection settings for Cosmos DB. All fields are optional: either a
/// connection string, or an account endpoint + database + account key, or
/// service-principal authentication may be supplied.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(rename = "accountEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub account_endpoint: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub database: Option<serde_json::Value>,
    #[serde(rename = "accountKey", default, skip_serializing_if = "Option::is_none")]
    pub account_key: Option<SecretBase>,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    // Closed enum here (unlike the Dynamics structs below, which keep the
    // credential type as a raw JSON value).
    #[serde(rename = "servicePrincipalCredentialType", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_credential_type: Option<cosmos_db_linked_service_type_properties::ServicePrincipalCredentialType>,
    #[serde(rename = "servicePrincipalCredential", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_credential: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tenant: Option<serde_json::Value>,
    #[serde(rename = "azureCloudType", default, skip_serializing_if = "Option::is_none")]
    pub azure_cloud_type: Option<serde_json::Value>,
    #[serde(rename = "connectionMode", default, skip_serializing_if = "Option::is_none")]
    pub connection_mode: Option<cosmos_db_linked_service_type_properties::ConnectionMode>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enum namespace for `CosmosDbLinkedServiceTypeProperties`.
pub mod cosmos_db_linked_service_type_properties {
    use super::*;
    /// Kind of secret used for service-principal authentication.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ServicePrincipalCredentialType {
        ServicePrincipalKey,
        ServicePrincipalCert,
    }
    /// Cosmos DB connectivity mode; serializes as `Gateway` / `Direct`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ConnectionMode {
        Gateway,
        Direct,
    }
}
/// Dynamics linked service: shared envelope plus Dynamics-specific
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: DynamicsLinkedServiceTypeProperties,
}
/// Connection settings for Dynamics. Deployment and authentication type are
/// required but kept as raw JSON values (expressions allowed); the
/// `DynamicsDeploymentType` / `DynamicsAuthenticationType` enums below
/// enumerate the known literal values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsLinkedServiceTypeProperties {
    #[serde(rename = "deploymentType")]
    pub deployment_type: serde_json::Value,
    #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")]
    pub host_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    #[serde(rename = "serviceUri", default, skip_serializing_if = "Option::is_none")]
    pub service_uri: Option<serde_json::Value>,
    #[serde(rename = "organizationName", default, skip_serializing_if = "Option::is_none")]
    pub organization_name: Option<serde_json::Value>,
    #[serde(rename = "authenticationType")]
    pub authentication_type: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalCredentialType", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_credential_type: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalCredential", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_credential: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Known values for the Dynamics `deploymentType` property.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DynamicsDeploymentType {
    Online,
    OnPremisesWithIfd,
}
/// Known values for the Dynamics `authenticationType` property. Note the
/// explicit rename so the wire form stays `AADServicePrincipal`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DynamicsAuthenticationType {
    Office365,
    Ifd,
    #[serde(rename = "AADServicePrincipal")]
    AadServicePrincipal,
}
/// Dynamics CRM linked service: shared envelope plus CRM-specific
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsCrmLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: DynamicsCrmLinkedServiceTypeProperties,
}
/// Connection settings for Dynamics CRM — field-for-field the same shape as
/// `DynamicsLinkedServiceTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsCrmLinkedServiceTypeProperties {
    #[serde(rename = "deploymentType")]
    pub deployment_type: serde_json::Value,
    #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")]
    pub host_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    #[serde(rename = "serviceUri", default, skip_serializing_if = "Option::is_none")]
    pub service_uri: Option<serde_json::Value>,
    #[serde(rename = "organizationName", default, skip_serializing_if = "Option::is_none")]
    pub organization_name: Option<serde_json::Value>,
    #[serde(rename = "authenticationType")]
    pub authentication_type: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalCredentialType", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_credential_type: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalCredential", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_credential: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Top-level duplicate of the service-principal credential-type enum (also
/// defined inside `cosmos_db_linked_service_type_properties`); kept here as
/// the generator emitted it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ServicePrincipalCredentialType {
    ServicePrincipalKey,
    ServicePrincipalCert,
}
/// Common Data Service for Apps (Dataverse) linked service: shared envelope
/// plus service-specific `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommonDataServiceForAppsLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: CommonDataServiceForAppsLinkedServiceTypeProperties,
}
/// Connection settings for Common Data Service for Apps — same shape as the
/// Dynamics / Dynamics CRM type-properties structs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommonDataServiceForAppsLinkedServiceTypeProperties {
    #[serde(rename = "deploymentType")]
    pub deployment_type: serde_json::Value,
    #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")]
    pub host_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    #[serde(rename = "serviceUri", default, skip_serializing_if = "Option::is_none")]
    pub service_uri: Option<serde_json::Value>,
    #[serde(rename = "organizationName", default, skip_serializing_if = "Option::is_none")]
    pub organization_name: Option<serde_json::Value>,
    #[serde(rename = "authenticationType")]
    pub authentication_type: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalCredentialType", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_credential_type: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalCredential", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_credential: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// HDInsight linked service: shared envelope plus cluster-specific
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: HdInsightLinkedServiceTypeProperties,
}
/// Connection settings for an HDInsight cluster: the cluster URI is
/// required; credentials, storage/HCatalog linked-service references, the
/// ESP flag and file system are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightLinkedServiceTypeProperties {
    #[serde(rename = "clusterUri")]
    pub cluster_uri: serde_json::Value,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "linkedServiceName", default, skip_serializing_if = "Option::is_none")]
    pub linked_service_name: Option<LinkedServiceReference>,
    #[serde(rename = "hcatalogLinkedServiceName", default, skip_serializing_if = "Option::is_none")]
    pub hcatalog_linked_service_name: Option<LinkedServiceReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(rename = "isEspEnabled", default, skip_serializing_if = "Option::is_none")]
    pub is_esp_enabled: Option<serde_json::Value>,
    #[serde(rename = "fileSystem", default, skip_serializing_if = "Option::is_none")]
    pub file_system: Option<serde_json::Value>,
}
/// File server linked service: shared envelope plus file-server-specific
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileServerLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: FileServerLinkedServiceTypeProperties,
}
/// Connection settings for a file server: host is required; user id,
/// password and encrypted credential are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FileServerLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(rename = "userId", default, skip_serializing_if = "Option::is_none")]
    pub user_id: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Azure File Storage linked service: shared envelope plus file-share
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFileStorageLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureFileStorageLinkedServiceTypeProperties,
}
/// Connection settings for Azure File Storage. All fields are optional:
/// host/user/password, or connection string + account key, or SAS URI/token,
/// plus file share and snapshot selectors.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFileStorageLinkedServiceTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub host: Option<serde_json::Value>,
    #[serde(rename = "userId", default, skip_serializing_if = "Option::is_none")]
    pub user_id: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(rename = "accountKey", default, skip_serializing_if = "Option::is_none")]
    pub account_key: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "sasUri", default, skip_serializing_if = "Option::is_none")]
    pub sas_uri: Option<serde_json::Value>,
    #[serde(rename = "sasToken", default, skip_serializing_if = "Option::is_none")]
    pub sas_token: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "fileShare", default, skip_serializing_if = "Option::is_none")]
    pub file_share: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub snapshot: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Amazon S3-compatible storage linked service: shared envelope plus
/// S3-compatible `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3CompatibleLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AmazonS3CompatibleLinkedServiceTypeProperties,
}
/// Connection settings for S3-compatible storage: access key id/secret,
/// custom service URL, and a path-style addressing flag. All optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3CompatibleLinkedServiceTypeProperties {
    #[serde(rename = "accessKeyId", default, skip_serializing_if = "Option::is_none")]
    pub access_key_id: Option<serde_json::Value>,
    #[serde(rename = "secretAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub secret_access_key: Option<SecretBase>,
    #[serde(rename = "serviceUrl", default, skip_serializing_if = "Option::is_none")]
    pub service_url: Option<serde_json::Value>,
    #[serde(rename = "forcePathStyle", default, skip_serializing_if = "Option::is_none")]
    pub force_path_style: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Oracle Cloud Storage linked service: shared envelope plus storage
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleCloudStorageLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: OracleCloudStorageLinkedServiceTypeProperties,
}
/// Connection settings for Oracle Cloud Storage — S3-style access key /
/// secret / service URL, all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleCloudStorageLinkedServiceTypeProperties {
    #[serde(rename = "accessKeyId", default, skip_serializing_if = "Option::is_none")]
    pub access_key_id: Option<serde_json::Value>,
    #[serde(rename = "secretAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub secret_access_key: Option<SecretBase>,
    #[serde(rename = "serviceUrl", default, skip_serializing_if = "Option::is_none")]
    pub service_url: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Google Cloud Storage linked service: shared envelope plus storage
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleCloudStorageLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: GoogleCloudStorageLinkedServiceTypeProperties,
}
/// Connection settings for Google Cloud Storage — same S3-interop shape as
/// `OracleCloudStorageLinkedServiceTypeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleCloudStorageLinkedServiceTypeProperties {
    #[serde(rename = "accessKeyId", default, skip_serializing_if = "Option::is_none")]
    pub access_key_id: Option<serde_json::Value>,
    #[serde(rename = "secretAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub secret_access_key: Option<SecretBase>,
    #[serde(rename = "serviceUrl", default, skip_serializing_if = "Option::is_none")]
    pub service_url: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Oracle database linked service: shared envelope plus Oracle
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: OracleLinkedServiceTypeProperties,
}
/// Connection settings for Oracle: required connection string, optional
/// Key Vault password reference and encrypted credential.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Amazon RDS for Oracle linked service: shared envelope plus RDS-Oracle
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForOracleLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AmazonRdsForLinkedServiceTypeProperties,
}
/// Connection settings for Amazon RDS for Oracle. Unlike the plain Oracle
/// variant, the password is a generic `SecretBase` rather than a Key Vault
/// reference.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRdsForLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Azure Database for MySQL linked service: shared envelope plus MySQL
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMySqlLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureMySqlLinkedServiceTypeProperties,
}
/// Connection settings for Azure MySQL: required connection string, optional
/// Key Vault password reference and encrypted credential.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMySqlLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// MySQL linked service: shared envelope plus MySQL `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MySqlLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: MySqlLinkedServiceTypeProperties,
}
/// Connection settings for MySQL — same shape as the Azure MySQL variant
/// (required connection string, optional Key Vault password).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MySqlLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// PostgreSQL linked service: shared envelope plus PostgreSQL
/// `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PostgreSqlLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: PostgreSqlLinkedServiceTypeProperties,
}
/// Connection settings for PostgreSQL: required connection string, optional
/// Key Vault password reference and encrypted credential.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PostgreSqlLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Sybase linked service: shared envelope plus Sybase `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SybaseLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: SybaseLinkedServiceTypeProperties,
}
/// Connection settings for Sybase: server and database are required; schema,
/// authentication type, username and password are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SybaseLinkedServiceTypeProperties {
    pub server: serde_json::Value,
    pub database: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
    pub authentication_type: Option<sybase_linked_service_type_properties::AuthenticationType>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enum namespace for `SybaseLinkedServiceTypeProperties`.
pub mod sybase_linked_service_type_properties {
    use super::*;
    /// Sybase authentication mode; serializes as `Basic` / `Windows`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Basic,
        Windows,
    }
}
/// DB2 linked service: shared envelope plus DB2 `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Db2LinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: Db2LinkedServiceTypeProperties,
}
/// Connection settings for DB2. All fields are optional: either a full
/// connection string, or server/database plus credentials and the
/// package-collection / certificate fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Db2LinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub server: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub database: Option<serde_json::Value>,
    #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
    pub authentication_type: Option<db2_linked_service_type_properties::AuthenticationType>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "packageCollection", default, skip_serializing_if = "Option::is_none")]
    pub package_collection: Option<serde_json::Value>,
    #[serde(rename = "certificateCommonName", default, skip_serializing_if = "Option::is_none")]
    pub certificate_common_name: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enum namespace for `Db2LinkedServiceTypeProperties`.
pub mod db2_linked_service_type_properties {
    use super::*;
    /// DB2 authentication mode; only `Basic` is defined.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Basic,
    }
}
/// Teradata linked service: shared envelope plus Teradata `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TeradataLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: TeradataLinkedServiceTypeProperties,
}
/// Connection settings for Teradata: either a connection string or a server
/// name plus credentials. All fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TeradataLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub server: Option<serde_json::Value>,
    #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
    pub authentication_type: Option<teradata_linked_service_type_properties::AuthenticationType>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enum namespace for `TeradataLinkedServiceTypeProperties`.
pub mod teradata_linked_service_type_properties {
    use super::*;
    /// Teradata authentication mode; serializes as `Basic` / `Windows`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Basic,
        Windows,
    }
}
/// Azure ML (Studio) web-service linked service; flattens the shared
/// `LinkedService` envelope and adds Azure-ML-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: AzureMlLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureMlLinkedService`]. `ml_endpoint` and
/// `api_key` are required; all other fields are skipped when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlLinkedServiceTypeProperties {
#[serde(rename = "mlEndpoint")]
pub ml_endpoint: serde_json::Value,
#[serde(rename = "apiKey")]
pub api_key: SecretBase,
#[serde(rename = "updateResourceEndpoint", default, skip_serializing_if = "Option::is_none")]
pub update_resource_endpoint: Option<serde_json::Value>,
#[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
pub service_principal_id: Option<serde_json::Value>,
#[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
pub service_principal_key: Option<SecretBase>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tenant: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub authentication: Option<serde_json::Value>,
}
/// Azure ML Service (workspace) linked service; flattens the shared
/// `LinkedService` envelope and adds workspace-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlServiceLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: AzureMlServiceLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureMlServiceLinkedService`]. The subscription,
/// resource group and workspace identifiers are required; the service-principal
/// fields and `encrypted_credential` are optional and skipped when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMlServiceLinkedServiceTypeProperties {
#[serde(rename = "subscriptionId")]
pub subscription_id: serde_json::Value,
#[serde(rename = "resourceGroupName")]
pub resource_group_name: serde_json::Value,
#[serde(rename = "mlWorkspaceName")]
pub ml_workspace_name: serde_json::Value,
#[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
pub service_principal_id: Option<serde_json::Value>,
#[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
pub service_principal_key: Option<SecretBase>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tenant: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// ODBC linked service; flattens the shared `LinkedService` envelope and adds
/// ODBC-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OdbcLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: OdbcLinkedServiceTypeProperties,
}
/// Connection settings for [`OdbcLinkedService`]. Only `connection_string` is
/// required; the remaining fields are skipped from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OdbcLinkedServiceTypeProperties {
#[serde(rename = "connectionString")]
pub connection_string: serde_json::Value,
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub credential: Option<SecretBase>,
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// Informix linked service; flattens the shared `LinkedService` envelope and
/// adds Informix-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InformixLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: InformixLinkedServiceTypeProperties,
}
/// Connection settings for [`InformixLinkedService`] — same field shape as the
/// ODBC model: required `connection_string`, everything else optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InformixLinkedServiceTypeProperties {
#[serde(rename = "connectionString")]
pub connection_string: serde_json::Value,
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub credential: Option<SecretBase>,
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// Microsoft Access linked service; flattens the shared `LinkedService`
/// envelope and adds Access-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftAccessLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: MicrosoftAccessLinkedServiceTypeProperties,
}
/// Connection settings for [`MicrosoftAccessLinkedService`] — same field shape
/// as the ODBC model: required `connection_string`, everything else optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MicrosoftAccessLinkedServiceTypeProperties {
#[serde(rename = "connectionString")]
pub connection_string: serde_json::Value,
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub credential: Option<SecretBase>,
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// HDFS linked service; flattens the shared `LinkedService` envelope and adds
/// HDFS-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdfsLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: HdfsLinkedServiceTypeProperties,
}
/// Connection settings for [`HdfsLinkedService`]. Only `url` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdfsLinkedServiceTypeProperties {
pub url: serde_json::Value,
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
}
/// OData linked service; flattens the shared `LinkedService` envelope and adds
/// OData-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ODataLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: ODataLinkedServiceTypeProperties,
}
/// Connection settings for [`ODataLinkedService`]. Only `url` is required; the
/// remaining fields cover the different authentication modes (basic,
/// AAD service principal with key or embedded certificate, etc.) and are
/// skipped from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ODataLinkedServiceTypeProperties {
pub url: serde_json::Value,
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<o_data_linked_service_type_properties::AuthenticationType>,
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "authHeaders", default, skip_serializing_if = "Option::is_none")]
pub auth_headers: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tenant: Option<serde_json::Value>,
#[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
pub service_principal_id: Option<serde_json::Value>,
#[serde(rename = "azureCloudType", default, skip_serializing_if = "Option::is_none")]
pub azure_cloud_type: Option<serde_json::Value>,
#[serde(rename = "aadResourceId", default, skip_serializing_if = "Option::is_none")]
pub aad_resource_id: Option<serde_json::Value>,
#[serde(rename = "aadServicePrincipalCredentialType", default, skip_serializing_if = "Option::is_none")]
pub aad_service_principal_credential_type: Option<o_data_linked_service_type_properties::AadServicePrincipalCredentialType>,
#[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
pub service_principal_key: Option<SecretBase>,
#[serde(rename = "servicePrincipalEmbeddedCert", default, skip_serializing_if = "Option::is_none")]
pub service_principal_embedded_cert: Option<SecretBase>,
#[serde(rename = "servicePrincipalEmbeddedCertPassword", default, skip_serializing_if = "Option::is_none")]
pub service_principal_embedded_cert_password: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// Serialization helpers for `ODataLinkedServiceTypeProperties`.
pub mod o_data_linked_service_type_properties {
use super::*;
/// Authentication scheme for an OData connection; serialized by variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AuthenticationType {
Basic,
Anonymous,
Windows,
AadServicePrincipal,
ManagedServiceIdentity,
}
/// Which credential form an AAD service principal uses.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AadServicePrincipalCredentialType {
ServicePrincipalKey,
ServicePrincipalCert,
}
}
/// Web-table linked service; flattens the shared `LinkedService` envelope and
/// adds web-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: WebLinkedServiceTypeProperties,
}
/// Base connection settings for [`WebLinkedService`]: the endpoint URL and the
/// mandatory authentication scheme. The `Web*Authentication` structs below
/// flatten this struct and add the per-scheme credentials.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebLinkedServiceTypeProperties {
pub url: serde_json::Value,
#[serde(rename = "authenticationType")]
pub authentication_type: web_linked_service_type_properties::AuthenticationType,
}
/// Serialization helpers for `WebLinkedServiceTypeProperties`.
pub mod web_linked_service_type_properties {
use super::*;
/// Authentication scheme for a web linked service; serialized by variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AuthenticationType {
Basic,
Anonymous,
ClientCertificate,
}
}
/// Anonymous web authentication: just the base web properties, no credentials.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebAnonymousAuthentication {
#[serde(flatten)]
pub web_linked_service_type_properties: WebLinkedServiceTypeProperties,
}
/// Basic (username/password) web authentication layered on the base web
/// properties; both credential fields are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebBasicAuthentication {
#[serde(flatten)]
pub web_linked_service_type_properties: WebLinkedServiceTypeProperties,
pub username: serde_json::Value,
pub password: SecretBase,
}
/// Client-certificate web authentication layered on the base web properties;
/// the PFX blob and its password are both required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WebClientCertificateAuthentication {
#[serde(flatten)]
pub web_linked_service_type_properties: WebLinkedServiceTypeProperties,
pub pfx: SecretBase,
pub password: SecretBase,
}
/// Cassandra linked service; flattens the shared `LinkedService` envelope and
/// adds Cassandra-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CassandraLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: CassandraLinkedServiceTypeProperties,
}
/// Connection settings for [`CassandraLinkedService`]. Only `host` is
/// required; other fields are skipped from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CassandraLinkedServiceTypeProperties {
pub host: serde_json::Value,
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub port: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// MongoDB (v1) linked service; flattens the shared `LinkedService` envelope
/// and adds MongoDB-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: MongoDbLinkedServiceTypeProperties,
}
/// Connection settings for [`MongoDbLinkedService`]. `server` and
/// `database_name` are required; TLS options and credentials are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbLinkedServiceTypeProperties {
pub server: serde_json::Value,
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<mongo_db_linked_service_type_properties::AuthenticationType>,
#[serde(rename = "databaseName")]
pub database_name: serde_json::Value,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "authSource", default, skip_serializing_if = "Option::is_none")]
pub auth_source: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub port: Option<serde_json::Value>,
#[serde(rename = "enableSsl", default, skip_serializing_if = "Option::is_none")]
pub enable_ssl: Option<serde_json::Value>,
#[serde(rename = "allowSelfSignedServerCert", default, skip_serializing_if = "Option::is_none")]
pub allow_self_signed_server_cert: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// Serialization helpers for `MongoDbLinkedServiceTypeProperties`.
pub mod mongo_db_linked_service_type_properties {
use super::*;
/// Authentication scheme for a MongoDB connection; serialized by variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AuthenticationType {
Basic,
Anonymous,
}
}
/// MongoDB Atlas linked service; flattens the shared `LinkedService` envelope
/// and adds Atlas-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbAtlasLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: MongoDbAtlasLinkedServiceTypeProperties,
}
/// Connection settings for [`MongoDbAtlasLinkedService`]: connection string
/// and database name, both required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbAtlasLinkedServiceTypeProperties {
#[serde(rename = "connectionString")]
pub connection_string: serde_json::Value,
pub database: serde_json::Value,
}
/// MongoDB v2 linked service; flattens the shared `LinkedService` envelope and
/// adds v2-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbV2LinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: MongoDbV2LinkedServiceTypeProperties,
}
/// Connection settings for [`MongoDbV2LinkedService`]: connection string and
/// database name, both required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MongoDbV2LinkedServiceTypeProperties {
#[serde(rename = "connectionString")]
pub connection_string: serde_json::Value,
pub database: serde_json::Value,
}
/// Cosmos DB (MongoDB API) linked service; flattens the shared `LinkedService`
/// envelope and adds Cosmos-specific connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbMongoDbApiLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: CosmosDbMongoDbApiLinkedServiceTypeProperties,
}
/// Connection settings for [`CosmosDbMongoDbApiLinkedService`].
/// `connection_string` and `database` are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbMongoDbApiLinkedServiceTypeProperties {
// Wire name "isServerVersionAbove32" — flag for server version > 3.2
// (inferred from the name; TODO confirm against the service schema).
#[serde(rename = "isServerVersionAbove32", default, skip_serializing_if = "Option::is_none")]
pub is_server_version_above32: Option<serde_json::Value>,
#[serde(rename = "connectionString")]
pub connection_string: serde_json::Value,
pub database: serde_json::Value,
}
/// Azure Data Lake Store (Gen1) linked service; flattens the shared
/// `LinkedService` envelope and adds store-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeStoreLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: AzureDataLakeStoreLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureDataLakeStoreLinkedService`]. Only
/// `data_lake_store_uri` is required; service-principal details, account
/// identifiers and the optional `CredentialReference` are skipped when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeStoreLinkedServiceTypeProperties {
#[serde(rename = "dataLakeStoreUri")]
pub data_lake_store_uri: serde_json::Value,
#[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
pub service_principal_id: Option<serde_json::Value>,
#[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
pub service_principal_key: Option<SecretBase>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tenant: Option<serde_json::Value>,
#[serde(rename = "azureCloudType", default, skip_serializing_if = "Option::is_none")]
pub azure_cloud_type: Option<serde_json::Value>,
#[serde(rename = "accountName", default, skip_serializing_if = "Option::is_none")]
pub account_name: Option<serde_json::Value>,
#[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
pub subscription_id: Option<serde_json::Value>,
#[serde(rename = "resourceGroupName", default, skip_serializing_if = "Option::is_none")]
pub resource_group_name: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub credential: Option<CredentialReference>,
}
/// Azure Blob FS (ADLS Gen2) linked service; flattens the shared
/// `LinkedService` envelope and adds blob-FS-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobFsLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: AzureBlobFsLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureBlobFsLinkedService`]. Only `url` is
/// required; account key, service-principal details and the optional
/// `CredentialReference` are skipped when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureBlobFsLinkedServiceTypeProperties {
pub url: serde_json::Value,
#[serde(rename = "accountKey", default, skip_serializing_if = "Option::is_none")]
pub account_key: Option<serde_json::Value>,
#[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
pub service_principal_id: Option<serde_json::Value>,
#[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
pub service_principal_key: Option<SecretBase>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tenant: Option<serde_json::Value>,
#[serde(rename = "azureCloudType", default, skip_serializing_if = "Option::is_none")]
pub azure_cloud_type: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub credential: Option<CredentialReference>,
}
/// Office 365 linked service; flattens the shared `LinkedService` envelope and
/// adds Office-365-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Office365LinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: Office365LinkedServiceTypeProperties,
}
/// Connection settings for [`Office365LinkedService`]. Both tenant ids and the
/// service-principal id/key are required; only `encrypted_credential` is
/// optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Office365LinkedServiceTypeProperties {
#[serde(rename = "office365TenantId")]
pub office365_tenant_id: serde_json::Value,
#[serde(rename = "servicePrincipalTenantId")]
pub service_principal_tenant_id: serde_json::Value,
#[serde(rename = "servicePrincipalId")]
pub service_principal_id: serde_json::Value,
#[serde(rename = "servicePrincipalKey")]
pub service_principal_key: SecretBase,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// Salesforce linked service; flattens the shared `LinkedService` envelope and
/// adds Salesforce-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: SalesforceLinkedServiceTypeProperties,
}
/// Connection settings for [`SalesforceLinkedService`]; every field is
/// optional and skipped from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceLinkedServiceTypeProperties {
#[serde(rename = "environmentUrl", default, skip_serializing_if = "Option::is_none")]
pub environment_url: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "securityToken", default, skip_serializing_if = "Option::is_none")]
pub security_token: Option<SecretBase>,
#[serde(rename = "apiVersion", default, skip_serializing_if = "Option::is_none")]
pub api_version: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// Salesforce Service Cloud linked service; flattens the shared
/// `LinkedService` envelope and adds Service-Cloud-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceServiceCloudLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: SalesforceServiceCloudLinkedServiceTypeProperties,
}
/// Connection settings for [`SalesforceServiceCloudLinkedService`] — same
/// shape as the plain Salesforce model plus `extended_properties`; every field
/// is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceServiceCloudLinkedServiceTypeProperties {
#[serde(rename = "environmentUrl", default, skip_serializing_if = "Option::is_none")]
pub environment_url: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "securityToken", default, skip_serializing_if = "Option::is_none")]
pub security_token: Option<SecretBase>,
#[serde(rename = "apiVersion", default, skip_serializing_if = "Option::is_none")]
pub api_version: Option<serde_json::Value>,
#[serde(rename = "extendedProperties", default, skip_serializing_if = "Option::is_none")]
pub extended_properties: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// SAP Cloud for Customer linked service; flattens the shared `LinkedService`
/// envelope and adds SAP-C4C-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapCloudForCustomerLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: SapCloudForCustomerLinkedServiceTypeProperties,
}
/// Connection settings for [`SapCloudForCustomerLinkedService`]. Only `url` is
/// required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapCloudForCustomerLinkedServiceTypeProperties {
pub url: serde_json::Value,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// SAP ECC linked service; flattens the shared `LinkedService` envelope and
/// adds SAP-ECC-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapEccLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: SapEccLinkedServiceTypeProperties,
}
/// Connection settings for [`SapEccLinkedService`]. Only `url` is required.
// NOTE(review): unlike the sibling models, these fields are concrete `String`s
// rather than `serde_json::Value` — presumably mirroring the upstream schema;
// confirm before changing.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapEccLinkedServiceTypeProperties {
pub url: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<String>,
}
/// SAP Open Hub linked service; flattens the shared `LinkedService` envelope
/// and adds Open-Hub-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapOpenHubLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: SapOpenHubLinkedServiceTypeProperties,
}
/// Connection settings for [`SapOpenHubLinkedService`]; every field is
/// optional and skipped from JSON when `None`. Covers both direct-server
/// (`server`/`system_number`) and message-server (`message_server`/
/// `logon_group`) style connection fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapOpenHubLinkedServiceTypeProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub server: Option<serde_json::Value>,
#[serde(rename = "systemNumber", default, skip_serializing_if = "Option::is_none")]
pub system_number: Option<serde_json::Value>,
#[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
pub client_id: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub language: Option<serde_json::Value>,
#[serde(rename = "systemId", default, skip_serializing_if = "Option::is_none")]
pub system_id: Option<serde_json::Value>,
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "messageServer", default, skip_serializing_if = "Option::is_none")]
pub message_server: Option<serde_json::Value>,
#[serde(rename = "messageServerService", default, skip_serializing_if = "Option::is_none")]
pub message_server_service: Option<serde_json::Value>,
#[serde(rename = "logonGroup", default, skip_serializing_if = "Option::is_none")]
pub logon_group: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// REST service linked service; flattens the shared `LinkedService` envelope
/// and adds REST-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestServiceLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: RestServiceLinkedServiceTypeProperties,
}
/// Connection settings for [`RestServiceLinkedService`]. `url` and
/// `authentication_type` are required; the remaining fields supply credentials
/// for the chosen scheme and are skipped from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestServiceLinkedServiceTypeProperties {
pub url: serde_json::Value,
#[serde(rename = "enableServerCertificateValidation", default, skip_serializing_if = "Option::is_none")]
pub enable_server_certificate_validation: Option<serde_json::Value>,
#[serde(rename = "authenticationType")]
pub authentication_type: rest_service_linked_service_type_properties::AuthenticationType,
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "authHeaders", default, skip_serializing_if = "Option::is_none")]
pub auth_headers: Option<serde_json::Value>,
#[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
pub service_principal_id: Option<serde_json::Value>,
#[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
pub service_principal_key: Option<SecretBase>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tenant: Option<serde_json::Value>,
#[serde(rename = "azureCloudType", default, skip_serializing_if = "Option::is_none")]
pub azure_cloud_type: Option<serde_json::Value>,
#[serde(rename = "aadResourceId", default, skip_serializing_if = "Option::is_none")]
pub aad_resource_id: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub credential: Option<CredentialReference>,
}
/// Serialization helpers for `RestServiceLinkedServiceTypeProperties`.
pub mod rest_service_linked_service_type_properties {
use super::*;
/// Authentication scheme for a REST connection; serialized by variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AuthenticationType {
Anonymous,
Basic,
AadServicePrincipal,
ManagedServiceIdentity,
}
}
/// Amazon S3 linked service; flattens the shared `LinkedService` envelope and
/// adds S3-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3LinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: AmazonS3LinkedServiceTypeProperties,
}
/// Connection settings for [`AmazonS3LinkedService`]; every field is optional
/// and skipped from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonS3LinkedServiceTypeProperties {
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<serde_json::Value>,
#[serde(rename = "accessKeyId", default, skip_serializing_if = "Option::is_none")]
pub access_key_id: Option<serde_json::Value>,
#[serde(rename = "secretAccessKey", default, skip_serializing_if = "Option::is_none")]
pub secret_access_key: Option<SecretBase>,
#[serde(rename = "serviceUrl", default, skip_serializing_if = "Option::is_none")]
pub service_url: Option<serde_json::Value>,
#[serde(rename = "sessionToken", default, skip_serializing_if = "Option::is_none")]
pub session_token: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// Amazon Redshift linked service; flattens the shared `LinkedService`
/// envelope and adds Redshift-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRedshiftLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: AmazonRedshiftLinkedServiceTypeProperties,
}
/// Connection settings for [`AmazonRedshiftLinkedService`]. `server` and
/// `database` are required; credentials and `port` are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonRedshiftLinkedServiceTypeProperties {
pub server: serde_json::Value,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
pub database: serde_json::Value,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub port: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// Custom (user-defined) data-source linked service. Its `typeProperties`
/// payload is schemaless, so it is kept as raw JSON rather than a typed struct.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomDataSourceLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: serde_json::Value,
}
/// Azure Search linked service; flattens the shared `LinkedService` envelope
/// and adds search-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSearchLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: AzureSearchLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureSearchLinkedService`]. Only `url` is
/// required; the admin `key` and `encrypted_credential` are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureSearchLinkedServiceTypeProperties {
pub url: serde_json::Value,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub key: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
}
/// HTTP-server linked service; flattens the shared `LinkedService` envelope
/// and adds HTTP-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: HttpLinkedServiceTypeProperties,
}
/// Connection settings for [`HttpLinkedService`]. Only `url` is required; the
/// credential and certificate fields match the chosen `authentication_type`
/// and are skipped from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpLinkedServiceTypeProperties {
pub url: serde_json::Value,
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<http_linked_service_type_properties::AuthenticationType>,
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "authHeaders", default, skip_serializing_if = "Option::is_none")]
pub auth_headers: Option<serde_json::Value>,
#[serde(rename = "embeddedCertData", default, skip_serializing_if = "Option::is_none")]
pub embedded_cert_data: Option<serde_json::Value>,
#[serde(rename = "certThumbprint", default, skip_serializing_if = "Option::is_none")]
pub cert_thumbprint: Option<serde_json::Value>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
#[serde(rename = "enableServerCertificateValidation", default, skip_serializing_if = "Option::is_none")]
pub enable_server_certificate_validation: Option<serde_json::Value>,
}
/// Serialization helpers for `HttpLinkedServiceTypeProperties`.
pub mod http_linked_service_type_properties {
use super::*;
/// Authentication scheme for an HTTP connection; serialized by variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AuthenticationType {
Basic,
Anonymous,
Digest,
Windows,
ClientCertificate,
}
}
/// FTP-server linked service; flattens the shared `LinkedService` envelope and
/// adds FTP-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FtpServerLinkedService {
#[serde(flatten)]
pub linked_service: LinkedService,
#[serde(rename = "typeProperties")]
pub type_properties: FtpServerLinkedServiceTypeProperties,
}
/// Connection settings for [`FtpServerLinkedService`]. Only `host` is
/// required; credentials and TLS toggles are optional and skipped when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FtpServerLinkedServiceTypeProperties {
pub host: serde_json::Value,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub port: Option<serde_json::Value>,
#[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
pub authentication_type: Option<ftp_server_linked_service_type_properties::AuthenticationType>,
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<SecretBase>,
#[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
pub encrypted_credential: Option<serde_json::Value>,
#[serde(rename = "enableSsl", default, skip_serializing_if = "Option::is_none")]
pub enable_ssl: Option<serde_json::Value>,
#[serde(rename = "enableServerCertificateValidation", default, skip_serializing_if = "Option::is_none")]
pub enable_server_certificate_validation: Option<serde_json::Value>,
}
/// Serialization helpers for `FtpServerLinkedServiceTypeProperties`.
pub mod ftp_server_linked_service_type_properties {
use super::*;
/// Authentication scheme for an FTP connection; serialized by variant name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AuthenticationType {
Basic,
Anonymous,
}
}
/// SFTP server linked service. Flattens the shared `LinkedService` envelope;
/// connector-specific settings are serialized under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SftpServerLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: SftpServerLinkedServiceTypeProperties,
}
/// Connection settings for an SFTP server. `host` is required; optional
/// fields are omitted from the JSON payload when `None`. Key-based auth is
/// expressed via `privateKeyPath`/`privateKeyContent` plus `passPhrase`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SftpServerLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
    pub authentication_type: Option<sftp_server_linked_service_type_properties::AuthenticationType>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(rename = "privateKeyPath", default, skip_serializing_if = "Option::is_none")]
    pub private_key_path: Option<serde_json::Value>,
    #[serde(rename = "privateKeyContent", default, skip_serializing_if = "Option::is_none")]
    pub private_key_content: Option<SecretBase>,
    #[serde(rename = "passPhrase", default, skip_serializing_if = "Option::is_none")]
    pub pass_phrase: Option<SecretBase>,
    #[serde(rename = "skipHostKeyValidation", default, skip_serializing_if = "Option::is_none")]
    pub skip_host_key_validation: Option<serde_json::Value>,
    #[serde(rename = "hostKeyFingerprint", default, skip_serializing_if = "Option::is_none")]
    pub host_key_fingerprint: Option<serde_json::Value>,
}
/// Enumerations scoped to `SftpServerLinkedServiceTypeProperties`.
pub mod sftp_server_linked_service_type_properties {
    use super::*;
    /// Allowed values for the `authenticationType` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Basic,
        SshPublicKey,
        MultiFactor,
    }
}
/// SAP Business Warehouse (BW) linked service. Flattens the shared
/// `LinkedService` envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapBwLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: SapBwLinkedServiceTypeProperties,
}
/// Connection settings for SAP BW. `server`, `systemNumber` and `clientId`
/// are required; credential fields are omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapBwLinkedServiceTypeProperties {
    pub server: serde_json::Value,
    #[serde(rename = "systemNumber")]
    pub system_number: serde_json::Value,
    #[serde(rename = "clientId")]
    pub client_id: serde_json::Value,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// SAP HANA linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapHanaLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: SapHanaLinkedServiceProperties,
}
/// Connection settings for SAP HANA. Either `connectionString` or the
/// `server` + credential fields may be supplied; all are optional here and
/// omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapHanaLinkedServiceProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub server: Option<serde_json::Value>,
    #[serde(rename = "authenticationType", default, skip_serializing_if = "Option::is_none")]
    pub authentication_type: Option<sap_hana_linked_service_properties::AuthenticationType>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enumerations scoped to `SapHanaLinkedServiceProperties`.
pub mod sap_hana_linked_service_properties {
    use super::*;
    /// Allowed values for the `authenticationType` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Basic,
        Windows,
    }
}
/// Amazon Marketplace Web Services (MWS) linked service. Flattens the shared
/// `LinkedService` envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonMwsLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AmazonMwsLinkedServiceTypeProperties,
}
/// Connection settings for Amazon MWS. `endpoint`, `marketplaceID`,
/// `sellerID` and `accessKeyId` are required; secrets and TLS toggles are
/// omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmazonMwsLinkedServiceTypeProperties {
    pub endpoint: serde_json::Value,
    #[serde(rename = "marketplaceID")]
    pub marketplace_id: serde_json::Value,
    #[serde(rename = "sellerID")]
    pub seller_id: serde_json::Value,
    #[serde(rename = "mwsAuthToken", default, skip_serializing_if = "Option::is_none")]
    pub mws_auth_token: Option<SecretBase>,
    #[serde(rename = "accessKeyId")]
    pub access_key_id: serde_json::Value,
    #[serde(rename = "secretKey", default, skip_serializing_if = "Option::is_none")]
    pub secret_key: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Azure Database for PostgreSQL linked service. Flattens the shared
/// `LinkedService` envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzurePostgreSqlLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzurePostgreSqlLinkedServiceTypeProperties,
}
/// Connection settings for Azure PostgreSQL. The `password` field is an
/// Azure Key Vault secret reference (per its type); all fields are omitted
/// from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzurePostgreSqlLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Concur linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConcurLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: ConcurLinkedServiceTypeProperties,
}
/// Connection settings for Concur. `clientId` and `username` are required;
/// optional fields are omitted from the JSON payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConcurLinkedServiceTypeProperties {
    #[serde(rename = "connectionProperties", default, skip_serializing_if = "Option::is_none")]
    pub connection_properties: Option<serde_json::Value>,
    #[serde(rename = "clientId")]
    pub client_id: serde_json::Value,
    pub username: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Couchbase linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CouchbaseLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: CouchbaseLinkedServiceTypeProperties,
}
/// Connection settings for Couchbase. `credString` is an Azure Key Vault
/// secret reference (per its type); fields are omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CouchbaseLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(rename = "credString", default, skip_serializing_if = "Option::is_none")]
    pub cred_string: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Apache Drill linked service. Flattens the shared `LinkedService`
/// envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DrillLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: DrillLinkedServiceTypeProperties,
}
/// Connection settings for Drill. `pwd` (wire name unchanged) is an Azure
/// Key Vault secret reference; fields are omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DrillLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pwd: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Oracle Eloqua linked service. Flattens the shared `LinkedService`
/// envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EloquaLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: EloquaLinkedServiceTypeProperties,
}
/// Connection settings for Eloqua. `endpoint` and `username` are required;
/// optional fields are omitted from the JSON payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EloquaLinkedServiceTypeProperties {
    pub endpoint: serde_json::Value,
    pub username: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Google BigQuery linked service. Flattens the shared `LinkedService`
/// envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleBigQueryLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: GoogleBigQueryLinkedServiceTypeProperties,
}
/// Connection settings for BigQuery. `project` and `authenticationType` are
/// required. OAuth fields (`refreshToken`, `clientId`, `clientSecret`) and
/// service-account fields (`email`, `keyFilePath`) are both optional here —
/// presumably which set applies depends on `authenticationType`; confirm
/// against the service documentation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleBigQueryLinkedServiceTypeProperties {
    pub project: serde_json::Value,
    #[serde(rename = "additionalProjects", default, skip_serializing_if = "Option::is_none")]
    pub additional_projects: Option<serde_json::Value>,
    #[serde(rename = "requestGoogleDriveScope", default, skip_serializing_if = "Option::is_none")]
    pub request_google_drive_scope: Option<serde_json::Value>,
    #[serde(rename = "authenticationType")]
    pub authentication_type: google_big_query_linked_service_type_properties::AuthenticationType,
    #[serde(rename = "refreshToken", default, skip_serializing_if = "Option::is_none")]
    pub refresh_token: Option<SecretBase>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<serde_json::Value>,
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub email: Option<serde_json::Value>,
    #[serde(rename = "keyFilePath", default, skip_serializing_if = "Option::is_none")]
    pub key_file_path: Option<serde_json::Value>,
    #[serde(rename = "trustedCertPath", default, skip_serializing_if = "Option::is_none")]
    pub trusted_cert_path: Option<serde_json::Value>,
    #[serde(rename = "useSystemTrustStore", default, skip_serializing_if = "Option::is_none")]
    pub use_system_trust_store: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enumerations scoped to `GoogleBigQueryLinkedServiceTypeProperties`.
pub mod google_big_query_linked_service_type_properties {
    use super::*;
    /// Allowed values for the `authenticationType` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        ServiceAuthentication,
        UserAuthentication,
    }
}
/// Greenplum linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GreenplumLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: GreenplumLinkedServiceTypeProperties,
}
/// Connection settings for Greenplum. `pwd` is an Azure Key Vault secret
/// reference; fields are omitted from the JSON payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GreenplumLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pwd: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Apache HBase linked service. Flattens the shared `LinkedService`
/// envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HBaseLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: HBaseLinkedServiceTypeProperties,
}
/// Connection settings for HBase. `host` and `authenticationType` are
/// required; TLS/trust toggles and credentials are omitted from JSON when
/// `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HBaseLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    #[serde(rename = "httpPath", default, skip_serializing_if = "Option::is_none")]
    pub http_path: Option<serde_json::Value>,
    #[serde(rename = "authenticationType")]
    pub authentication_type: h_base_linked_service_type_properties::AuthenticationType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "enableSsl", default, skip_serializing_if = "Option::is_none")]
    pub enable_ssl: Option<serde_json::Value>,
    #[serde(rename = "trustedCertPath", default, skip_serializing_if = "Option::is_none")]
    pub trusted_cert_path: Option<serde_json::Value>,
    #[serde(rename = "allowHostNameCNMismatch", default, skip_serializing_if = "Option::is_none")]
    pub allow_host_name_cn_mismatch: Option<serde_json::Value>,
    #[serde(rename = "allowSelfSignedServerCert", default, skip_serializing_if = "Option::is_none")]
    pub allow_self_signed_server_cert: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enumerations scoped to `HBaseLinkedServiceTypeProperties`.
pub mod h_base_linked_service_type_properties {
    use super::*;
    /// Allowed values for the `authenticationType` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Anonymous,
        Basic,
    }
}
/// Apache Hive linked service. Flattens the shared `LinkedService`
/// envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HiveLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: HiveLinkedServiceTypeProperties,
}
/// Connection settings for Hive. `host` and `authenticationType` are
/// required; server type, Thrift transport, ZooKeeper discovery and TLS
/// options are all omitted from the JSON payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HiveLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    #[serde(rename = "serverType", default, skip_serializing_if = "Option::is_none")]
    pub server_type: Option<hive_linked_service_type_properties::ServerType>,
    #[serde(rename = "thriftTransportProtocol", default, skip_serializing_if = "Option::is_none")]
    pub thrift_transport_protocol: Option<hive_linked_service_type_properties::ThriftTransportProtocol>,
    #[serde(rename = "authenticationType")]
    pub authentication_type: hive_linked_service_type_properties::AuthenticationType,
    #[serde(rename = "serviceDiscoveryMode", default, skip_serializing_if = "Option::is_none")]
    pub service_discovery_mode: Option<serde_json::Value>,
    #[serde(rename = "zooKeeperNameSpace", default, skip_serializing_if = "Option::is_none")]
    pub zoo_keeper_name_space: Option<serde_json::Value>,
    #[serde(rename = "useNativeQuery", default, skip_serializing_if = "Option::is_none")]
    pub use_native_query: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "httpPath", default, skip_serializing_if = "Option::is_none")]
    pub http_path: Option<serde_json::Value>,
    #[serde(rename = "enableSsl", default, skip_serializing_if = "Option::is_none")]
    pub enable_ssl: Option<serde_json::Value>,
    #[serde(rename = "trustedCertPath", default, skip_serializing_if = "Option::is_none")]
    pub trusted_cert_path: Option<serde_json::Value>,
    #[serde(rename = "useSystemTrustStore", default, skip_serializing_if = "Option::is_none")]
    pub use_system_trust_store: Option<serde_json::Value>,
    #[serde(rename = "allowHostNameCNMismatch", default, skip_serializing_if = "Option::is_none")]
    pub allow_host_name_cn_mismatch: Option<serde_json::Value>,
    #[serde(rename = "allowSelfSignedServerCert", default, skip_serializing_if = "Option::is_none")]
    pub allow_self_signed_server_cert: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enumerations scoped to `HiveLinkedServiceTypeProperties`.
pub mod hive_linked_service_type_properties {
    use super::*;
    /// Allowed values for the `serverType` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ServerType {
        HiveServer1,
        HiveServer2,
        HiveThriftServer,
    }
    /// Allowed values for the `thriftTransportProtocol` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ThriftTransportProtocol {
        Binary,
        #[serde(rename = "SASL")]
        Sasl,
        // NOTE(review): the trailing space in "HTTP " looks intentional — generated
        // renames mirror the service spec verbatim; confirm before "fixing".
        #[serde(rename = "HTTP ")]
        Http,
    }
    /// Allowed values for the `authenticationType` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Anonymous,
        Username,
        UsernameAndPassword,
        #[serde(rename = "WindowsAzureHDInsightService")]
        WindowsAzureHdInsightService,
    }
}
/// HubSpot linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HubspotLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: HubspotLinkedServiceTypeProperties,
}
/// Connection settings for HubSpot. `clientId` is required; OAuth secrets
/// and TLS toggles are omitted from the JSON payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HubspotLinkedServiceTypeProperties {
    #[serde(rename = "clientId")]
    pub client_id: serde_json::Value,
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<SecretBase>,
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<SecretBase>,
    #[serde(rename = "refreshToken", default, skip_serializing_if = "Option::is_none")]
    pub refresh_token: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Apache Impala linked service. Flattens the shared `LinkedService`
/// envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImpalaLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: ImpalaLinkedServiceTypeProperties,
}
/// Connection settings for Impala. `host` and `authenticationType` are
/// required; credentials and TLS/trust toggles are omitted from JSON when
/// `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImpalaLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    #[serde(rename = "authenticationType")]
    pub authentication_type: impala_linked_service_type_properties::AuthenticationType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "enableSsl", default, skip_serializing_if = "Option::is_none")]
    pub enable_ssl: Option<serde_json::Value>,
    #[serde(rename = "trustedCertPath", default, skip_serializing_if = "Option::is_none")]
    pub trusted_cert_path: Option<serde_json::Value>,
    #[serde(rename = "useSystemTrustStore", default, skip_serializing_if = "Option::is_none")]
    pub use_system_trust_store: Option<serde_json::Value>,
    #[serde(rename = "allowHostNameCNMismatch", default, skip_serializing_if = "Option::is_none")]
    pub allow_host_name_cn_mismatch: Option<serde_json::Value>,
    #[serde(rename = "allowSelfSignedServerCert", default, skip_serializing_if = "Option::is_none")]
    pub allow_self_signed_server_cert: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enumerations scoped to `ImpalaLinkedServiceTypeProperties`.
pub mod impala_linked_service_type_properties {
    use super::*;
    /// Allowed values for the `authenticationType` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Anonymous,
        #[serde(rename = "SASLUsername")]
        SaslUsername,
        UsernameAndPassword,
    }
}
/// Jira linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JiraLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: JiraLinkedServiceTypeProperties,
}
/// Connection settings for Jira. `host` and `username` are required;
/// optional fields are omitted from the JSON payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JiraLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    pub username: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Magento linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MagentoLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: MagentoLinkedServiceTypeProperties,
}
/// Connection settings for Magento. `host` is required; the access token
/// and TLS toggles are omitted from the JSON payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MagentoLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// MariaDB linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MariaDbLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: MariaDbLinkedServiceTypeProperties,
}
/// Connection settings for MariaDB. `pwd` is an Azure Key Vault secret
/// reference; fields are omitted from the JSON payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MariaDbLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pwd: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Azure Database for MariaDB linked service. Flattens the shared
/// `LinkedService` envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMariaDbLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureMariaDbLinkedServiceTypeProperties,
}
/// Connection settings for Azure MariaDB — same shape as the on-premises
/// `MariaDbLinkedServiceTypeProperties` in this file. `pwd` is an Azure Key
/// Vault secret reference; fields are omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureMariaDbLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pwd: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Marketo linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MarketoLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: MarketoLinkedServiceTypeProperties,
}
/// Connection settings for Marketo. `endpoint` and `clientId` are required;
/// the client secret and TLS toggles are omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MarketoLinkedServiceTypeProperties {
    pub endpoint: serde_json::Value,
    #[serde(rename = "clientId")]
    pub client_id: serde_json::Value,
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// PayPal linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PaypalLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: PaypalLinkedServiceTypeProperties,
}
/// Connection settings for PayPal. `host` and `clientId` are required; the
/// client secret and TLS toggles are omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PaypalLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(rename = "clientId")]
    pub client_id: serde_json::Value,
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Apache Phoenix linked service. Flattens the shared `LinkedService`
/// envelope; settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PhoenixLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: PhoenixLinkedServiceTypeProperties,
}
/// Connection settings for Phoenix. `host` and `authenticationType` are
/// required; credentials and TLS/trust toggles are omitted from JSON when
/// `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PhoenixLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    #[serde(rename = "httpPath", default, skip_serializing_if = "Option::is_none")]
    pub http_path: Option<serde_json::Value>,
    #[serde(rename = "authenticationType")]
    pub authentication_type: phoenix_linked_service_type_properties::AuthenticationType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "enableSsl", default, skip_serializing_if = "Option::is_none")]
    pub enable_ssl: Option<serde_json::Value>,
    #[serde(rename = "trustedCertPath", default, skip_serializing_if = "Option::is_none")]
    pub trusted_cert_path: Option<serde_json::Value>,
    #[serde(rename = "useSystemTrustStore", default, skip_serializing_if = "Option::is_none")]
    pub use_system_trust_store: Option<serde_json::Value>,
    #[serde(rename = "allowHostNameCNMismatch", default, skip_serializing_if = "Option::is_none")]
    pub allow_host_name_cn_mismatch: Option<serde_json::Value>,
    #[serde(rename = "allowSelfSignedServerCert", default, skip_serializing_if = "Option::is_none")]
    pub allow_self_signed_server_cert: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enumerations scoped to `PhoenixLinkedServiceTypeProperties`.
pub mod phoenix_linked_service_type_properties {
    use super::*;
    /// Allowed values for the `authenticationType` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Anonymous,
        UsernameAndPassword,
        #[serde(rename = "WindowsAzureHDInsightService")]
        WindowsAzureHdInsightService,
    }
}
/// Presto linked service. Flattens the shared `LinkedService` envelope;
/// settings go under the `typeProperties` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrestoLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: PrestoLinkedServiceTypeProperties,
}
/// Connection settings for Presto. `host`, `serverVersion`, `catalog` and
/// `authenticationType` are required; credentials, TLS/trust toggles and
/// `timeZoneID` are omitted from the JSON payload when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrestoLinkedServiceTypeProperties {
    pub host: serde_json::Value,
    #[serde(rename = "serverVersion")]
    pub server_version: serde_json::Value,
    pub catalog: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<serde_json::Value>,
    #[serde(rename = "authenticationType")]
    pub authentication_type: presto_linked_service_type_properties::AuthenticationType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "enableSsl", default, skip_serializing_if = "Option::is_none")]
    pub enable_ssl: Option<serde_json::Value>,
    #[serde(rename = "trustedCertPath", default, skip_serializing_if = "Option::is_none")]
    pub trusted_cert_path: Option<serde_json::Value>,
    #[serde(rename = "useSystemTrustStore", default, skip_serializing_if = "Option::is_none")]
    pub use_system_trust_store: Option<serde_json::Value>,
    #[serde(rename = "allowHostNameCNMismatch", default, skip_serializing_if = "Option::is_none")]
    pub allow_host_name_cn_mismatch: Option<serde_json::Value>,
    #[serde(rename = "allowSelfSignedServerCert", default, skip_serializing_if = "Option::is_none")]
    pub allow_self_signed_server_cert: Option<serde_json::Value>,
    #[serde(rename = "timeZoneID", default, skip_serializing_if = "Option::is_none")]
    pub time_zone_id: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Enumerations scoped to `PrestoLinkedServiceTypeProperties`.
pub mod presto_linked_service_type_properties {
    use super::*;
    /// Allowed values for the `authenticationType` property.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Anonymous,
        #[serde(rename = "LDAP")]
        Ldap,
    }
}
/// Linked service for QuickBooks.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuickBooksLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// QuickBooks-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: QuickBooksLinkedServiceTypeProperties,
}
/// Connection settings for [`QuickBooksLinkedService`].
///
/// All fields are optional and are omitted from the serialized output when
/// `None`; `serde_json::Value` fields carry arbitrary JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuickBooksLinkedServiceTypeProperties {
    /// Free-form bag of connection properties (JSON property `connectionProperties`).
    #[serde(rename = "connectionProperties", default, skip_serializing_if = "Option::is_none")]
    pub connection_properties: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub endpoint: Option<serde_json::Value>,
    #[serde(rename = "companyId", default, skip_serializing_if = "Option::is_none")]
    pub company_id: Option<serde_json::Value>,
    #[serde(rename = "consumerKey", default, skip_serializing_if = "Option::is_none")]
    pub consumer_key: Option<serde_json::Value>,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "consumerSecret", default, skip_serializing_if = "Option::is_none")]
    pub consumer_secret: Option<SecretBase>,
    /// Secret value; see `SecretBase`.
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<SecretBase>,
    /// Secret value; see `SecretBase`.
    #[serde(rename = "accessTokenSecret", default, skip_serializing_if = "Option::is_none")]
    pub access_token_secret: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for ServiceNow.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceNowLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// ServiceNow-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: ServiceNowLinkedServiceTypeProperties,
}
/// Connection settings for [`ServiceNowLinkedService`].
///
/// `endpoint` and `authentication_type` are required; every other field is
/// optional and omitted from the serialized output when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceNowLinkedServiceTypeProperties {
    /// Required: ServiceNow endpoint.
    pub endpoint: serde_json::Value,
    /// Required authentication mode; see [`service_now_linked_service_type_properties::AuthenticationType`].
    #[serde(rename = "authenticationType")]
    pub authentication_type: service_now_linked_service_type_properties::AuthenticationType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<serde_json::Value>,
    /// Secret value; see `SecretBase`.
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Companion enums for [`ServiceNowLinkedServiceTypeProperties`].
pub mod service_now_linked_service_type_properties {
    use super::*;
    /// Authentication mode for a ServiceNow connection.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Basic,
        OAuth2,
    }
}
/// Linked service for Shopify.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShopifyLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Shopify-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: ShopifyLinkedServiceTypeProperties,
}
/// Connection settings for [`ShopifyLinkedService`].
///
/// `host` is required; the remaining fields are optional and omitted from
/// the serialized output when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShopifyLinkedServiceTypeProperties {
    /// Required: Shopify host.
    pub host: serde_json::Value,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for a Spark server.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SparkLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Spark-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: SparkLinkedServiceTypeProperties,
}
/// Connection settings for [`SparkLinkedService`].
///
/// `host`, `port`, and `authentication_type` are required; every other field
/// is optional and omitted from the serialized output when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SparkLinkedServiceTypeProperties {
    /// Required: Spark host.
    pub host: serde_json::Value,
    /// Required: Spark port.
    pub port: serde_json::Value,
    /// Thrift server flavor; see [`spark_linked_service_type_properties::ServerType`].
    #[serde(rename = "serverType", default, skip_serializing_if = "Option::is_none")]
    pub server_type: Option<spark_linked_service_type_properties::ServerType>,
    /// Thrift transport; see [`spark_linked_service_type_properties::ThriftTransportProtocol`].
    #[serde(rename = "thriftTransportProtocol", default, skip_serializing_if = "Option::is_none")]
    pub thrift_transport_protocol: Option<spark_linked_service_type_properties::ThriftTransportProtocol>,
    /// Required authentication mode; see [`spark_linked_service_type_properties::AuthenticationType`].
    #[serde(rename = "authenticationType")]
    pub authentication_type: spark_linked_service_type_properties::AuthenticationType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<serde_json::Value>,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "httpPath", default, skip_serializing_if = "Option::is_none")]
    pub http_path: Option<serde_json::Value>,
    #[serde(rename = "enableSsl", default, skip_serializing_if = "Option::is_none")]
    pub enable_ssl: Option<serde_json::Value>,
    #[serde(rename = "trustedCertPath", default, skip_serializing_if = "Option::is_none")]
    pub trusted_cert_path: Option<serde_json::Value>,
    #[serde(rename = "useSystemTrustStore", default, skip_serializing_if = "Option::is_none")]
    pub use_system_trust_store: Option<serde_json::Value>,
    #[serde(rename = "allowHostNameCNMismatch", default, skip_serializing_if = "Option::is_none")]
    pub allow_host_name_cn_mismatch: Option<serde_json::Value>,
    #[serde(rename = "allowSelfSignedServerCert", default, skip_serializing_if = "Option::is_none")]
    pub allow_self_signed_server_cert: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Companion enums for [`SparkLinkedServiceTypeProperties`].
pub mod spark_linked_service_type_properties {
    use super::*;
    /// Spark Thrift server flavor.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ServerType {
        SharkServer,
        SharkServer2,
        SparkThriftServer,
    }
    /// Thrift transport protocol for the connection.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ThriftTransportProtocol {
        Binary,
        #[serde(rename = "SASL")]
        Sasl,
        // NOTE(review): the trailing space in "HTTP " appears deliberate — it
        // mirrors the service-side enum value in generated code. Do not "fix"
        // it without confirming against the REST API specification.
        #[serde(rename = "HTTP ")]
        Http,
    }
    /// Authentication mode for a Spark connection.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        Anonymous,
        Username,
        UsernameAndPassword,
        #[serde(rename = "WindowsAzureHDInsightService")]
        WindowsAzureHdInsightService,
    }
}
/// Linked service for Square.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SquareLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Square-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: SquareLinkedServiceTypeProperties,
}
/// Connection settings for [`SquareLinkedService`].
///
/// All fields are optional and are omitted from the serialized output when
/// `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SquareLinkedServiceTypeProperties {
    /// Free-form bag of connection properties (JSON property `connectionProperties`).
    #[serde(rename = "connectionProperties", default, skip_serializing_if = "Option::is_none")]
    pub connection_properties: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub host: Option<serde_json::Value>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<serde_json::Value>,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<SecretBase>,
    #[serde(rename = "redirectUri", default, skip_serializing_if = "Option::is_none")]
    pub redirect_uri: Option<serde_json::Value>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Xero.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XeroLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Xero-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: XeroLinkedServiceTypeProperties,
}
/// Connection settings for [`XeroLinkedService`].
///
/// All fields are optional and are omitted from the serialized output when
/// `None`. Note that here `consumerKey` is itself a secret (`SecretBase`),
/// unlike the QuickBooks model where it is a plain value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XeroLinkedServiceTypeProperties {
    /// Free-form bag of connection properties (JSON property `connectionProperties`).
    #[serde(rename = "connectionProperties", default, skip_serializing_if = "Option::is_none")]
    pub connection_properties: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub host: Option<serde_json::Value>,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "consumerKey", default, skip_serializing_if = "Option::is_none")]
    pub consumer_key: Option<SecretBase>,
    /// Secret value; see `SecretBase`.
    #[serde(rename = "privateKey", default, skip_serializing_if = "Option::is_none")]
    pub private_key: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Zoho.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ZohoLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Zoho-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: ZohoLinkedServiceTypeProperties,
}
/// Connection settings for [`ZohoLinkedService`].
///
/// All fields are optional and are omitted from the serialized output when
/// `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ZohoLinkedServiceTypeProperties {
    /// Free-form bag of connection properties (JSON property `connectionProperties`).
    #[serde(rename = "connectionProperties", default, skip_serializing_if = "Option::is_none")]
    pub connection_properties: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub endpoint: Option<serde_json::Value>,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Vertica.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VerticaLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Vertica-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: VerticaLinkedServiceTypeProperties,
}
/// Connection settings for [`VerticaLinkedService`].
///
/// All fields are optional; `pwd` references a password stored in Azure Key
/// Vault rather than carrying it inline.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VerticaLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    /// Key Vault reference for the connection password.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pwd: Option<AzureKeyVaultSecretReference>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Netezza.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetezzaLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Netezza-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: NetezzaLinkedServiceTypeProperties,
}
/// Connection settings for [`NetezzaLinkedService`].
///
/// Mirrors [`VerticaLinkedServiceTypeProperties`]: all fields optional, with
/// the password supplied via an Azure Key Vault reference.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetezzaLinkedServiceTypeProperties {
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<serde_json::Value>,
    /// Key Vault reference for the connection password.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pwd: Option<AzureKeyVaultSecretReference>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Salesforce Marketing Cloud.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceMarketingCloudLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Service-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: SalesforceMarketingCloudLinkedServiceTypeProperties,
}
/// Connection settings for [`SalesforceMarketingCloudLinkedService`].
///
/// All fields are optional and are omitted from the serialized output when
/// `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SalesforceMarketingCloudLinkedServiceTypeProperties {
    /// Free-form bag of connection properties (JSON property `connectionProperties`).
    #[serde(rename = "connectionProperties", default, skip_serializing_if = "Option::is_none")]
    pub connection_properties: Option<serde_json::Value>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<serde_json::Value>,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for an on-demand HDInsight cluster.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightOnDemandLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Cluster provisioning settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: HdInsightOnDemandLinkedServiceTypeProperties,
}
/// Provisioning settings for [`HdInsightOnDemandLinkedService`].
///
/// Required fields (cluster size, TTL, version, storage linked service, host
/// subscription, tenant, resource group) are plain values; everything else is
/// optional and omitted when `None`. `Vec` fields are omitted when empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightOnDemandLinkedServiceTypeProperties {
    /// Required: number of worker/data nodes in the cluster.
    #[serde(rename = "clusterSize")]
    pub cluster_size: serde_json::Value,
    /// Required: idle time before the on-demand cluster is released.
    #[serde(rename = "timeToLive")]
    pub time_to_live: serde_json::Value,
    /// Required: HDInsight version.
    pub version: serde_json::Value,
    /// Required: storage linked service used by the cluster.
    #[serde(rename = "linkedServiceName")]
    pub linked_service_name: LinkedServiceReference,
    /// Required: subscription that hosts the cluster.
    #[serde(rename = "hostSubscriptionId")]
    pub host_subscription_id: serde_json::Value,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_key: Option<SecretBase>,
    /// Required: AAD tenant for the service principal.
    pub tenant: serde_json::Value,
    /// Required: resource group the cluster is created in.
    #[serde(rename = "clusterResourceGroup")]
    pub cluster_resource_group: serde_json::Value,
    #[serde(rename = "clusterNamePrefix", default, skip_serializing_if = "Option::is_none")]
    pub cluster_name_prefix: Option<serde_json::Value>,
    #[serde(rename = "clusterUserName", default, skip_serializing_if = "Option::is_none")]
    pub cluster_user_name: Option<serde_json::Value>,
    /// Secret value; see `SecretBase`.
    #[serde(rename = "clusterPassword", default, skip_serializing_if = "Option::is_none")]
    pub cluster_password: Option<SecretBase>,
    #[serde(rename = "clusterSshUserName", default, skip_serializing_if = "Option::is_none")]
    pub cluster_ssh_user_name: Option<serde_json::Value>,
    /// Secret value; see `SecretBase`.
    #[serde(rename = "clusterSshPassword", default, skip_serializing_if = "Option::is_none")]
    pub cluster_ssh_password: Option<SecretBase>,
    /// Extra storage accounts registered with the cluster; omitted when empty.
    #[serde(rename = "additionalLinkedServiceNames", default, skip_serializing_if = "Vec::is_empty")]
    pub additional_linked_service_names: Vec<LinkedServiceReference>,
    #[serde(rename = "hcatalogLinkedServiceName", default, skip_serializing_if = "Option::is_none")]
    pub hcatalog_linked_service_name: Option<LinkedServiceReference>,
    #[serde(rename = "clusterType", default, skip_serializing_if = "Option::is_none")]
    pub cluster_type: Option<serde_json::Value>,
    #[serde(rename = "sparkVersion", default, skip_serializing_if = "Option::is_none")]
    pub spark_version: Option<serde_json::Value>,
    // The *Configuration fields below are free-form JSON bags, one per
    // Hadoop-ecosystem component, applied to the provisioned cluster.
    #[serde(rename = "coreConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub core_configuration: Option<serde_json::Value>,
    #[serde(rename = "hBaseConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub h_base_configuration: Option<serde_json::Value>,
    #[serde(rename = "hdfsConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub hdfs_configuration: Option<serde_json::Value>,
    #[serde(rename = "hiveConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub hive_configuration: Option<serde_json::Value>,
    #[serde(rename = "mapReduceConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub map_reduce_configuration: Option<serde_json::Value>,
    #[serde(rename = "oozieConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub oozie_configuration: Option<serde_json::Value>,
    #[serde(rename = "stormConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub storm_configuration: Option<serde_json::Value>,
    #[serde(rename = "yarnConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub yarn_configuration: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(rename = "headNodeSize", default, skip_serializing_if = "Option::is_none")]
    pub head_node_size: Option<serde_json::Value>,
    #[serde(rename = "dataNodeSize", default, skip_serializing_if = "Option::is_none")]
    pub data_node_size: Option<serde_json::Value>,
    #[serde(rename = "zookeeperNodeSize", default, skip_serializing_if = "Option::is_none")]
    pub zookeeper_node_size: Option<serde_json::Value>,
    /// Custom script actions run on cluster nodes; omitted when empty.
    #[serde(rename = "scriptActions", default, skip_serializing_if = "Vec::is_empty")]
    pub script_actions: Vec<ScriptAction>,
    #[serde(rename = "virtualNetworkId", default, skip_serializing_if = "Option::is_none")]
    pub virtual_network_id: Option<serde_json::Value>,
    #[serde(rename = "subnetName", default, skip_serializing_if = "Option::is_none")]
    pub subnet_name: Option<serde_json::Value>,
    /// Credential reference used instead of an inline service principal.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// Custom script to run on HDInsight cluster nodes
/// (see `HdInsightOnDemandLinkedServiceTypeProperties::script_actions`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScriptAction {
    /// User-provided name of the script action.
    pub name: String,
    /// URI of the script to execute.
    pub uri: String,
    /// Node roles the script applies to; carried as arbitrary JSON.
    pub roles: serde_json::Value,
    /// Optional parameters passed to the script.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<String>,
}
/// HDInsight node roles.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HdiNodeTypes {
    Headnode,
    Workernode,
    Zookeeper,
}
/// Linked service for Azure Data Lake Analytics.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeAnalyticsLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Account and auth settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureDataLakeAnalyticsLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureDataLakeAnalyticsLinkedService`].
///
/// `account_name` and `tenant` are required; the rest are optional and
/// omitted from the serialized output when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataLakeAnalyticsLinkedServiceTypeProperties {
    /// Required: Data Lake Analytics account name.
    #[serde(rename = "accountName")]
    pub account_name: serde_json::Value,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_key: Option<SecretBase>,
    /// Required: AAD tenant for the service principal.
    pub tenant: serde_json::Value,
    #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub subscription_id: Option<serde_json::Value>,
    #[serde(rename = "resourceGroupName", default, skip_serializing_if = "Option::is_none")]
    pub resource_group_name: Option<serde_json::Value>,
    #[serde(rename = "dataLakeAnalyticsUri", default, skip_serializing_if = "Option::is_none")]
    pub data_lake_analytics_uri: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Azure Databricks.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Workspace and cluster settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureDatabricksLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureDatabricksLinkedService`].
///
/// Only `domain` is required. The `existing_cluster_id` / `instance_pool_id`
/// fields and the `new_cluster_*` family describe alternative ways of
/// selecting or provisioning the compute; which combination is valid is
/// enforced by the service, not by this model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksLinkedServiceTypeProperties {
    /// Required: Databricks workspace domain/URL.
    pub domain: serde_json::Value,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub authentication: Option<serde_json::Value>,
    #[serde(rename = "workspaceResourceId", default, skip_serializing_if = "Option::is_none")]
    pub workspace_resource_id: Option<serde_json::Value>,
    /// ID of an existing interactive cluster to attach to.
    #[serde(rename = "existingClusterId", default, skip_serializing_if = "Option::is_none")]
    pub existing_cluster_id: Option<serde_json::Value>,
    /// ID of an existing instance pool to run on.
    #[serde(rename = "instancePoolId", default, skip_serializing_if = "Option::is_none")]
    pub instance_pool_id: Option<serde_json::Value>,
    // Settings for provisioning a new job cluster per run.
    #[serde(rename = "newClusterVersion", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_version: Option<serde_json::Value>,
    #[serde(rename = "newClusterNumOfWorker", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_num_of_worker: Option<serde_json::Value>,
    #[serde(rename = "newClusterNodeType", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_node_type: Option<serde_json::Value>,
    #[serde(rename = "newClusterSparkConf", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_spark_conf: Option<serde_json::Value>,
    #[serde(rename = "newClusterSparkEnvVars", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_spark_env_vars: Option<serde_json::Value>,
    #[serde(rename = "newClusterCustomTags", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_custom_tags: Option<serde_json::Value>,
    #[serde(rename = "newClusterLogDestination", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_log_destination: Option<serde_json::Value>,
    #[serde(rename = "newClusterDriverNodeType", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_driver_node_type: Option<serde_json::Value>,
    #[serde(rename = "newClusterInitScripts", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_init_scripts: Option<serde_json::Value>,
    #[serde(rename = "newClusterEnableElasticDisk", default, skip_serializing_if = "Option::is_none")]
    pub new_cluster_enable_elastic_disk: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(rename = "policyId", default, skip_serializing_if = "Option::is_none")]
    pub policy_id: Option<serde_json::Value>,
    /// Credential reference used instead of an inline access token.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// Linked service for Azure Databricks Delta Lake.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksDeltaLakeLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Delta Lake connection settings (JSON property `typeProperties`).
    // NOTE(review): "Detlta" is a typo baked into the generated type name;
    // renaming it would be a breaking public-API change, so it is kept as-is.
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureDatabricksDetltaLakeLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureDatabricksDeltaLakeLinkedService`].
///
/// NOTE(review): the misspelled name ("Detlta") is carried over from code
/// generation and is part of the public API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDatabricksDetltaLakeLinkedServiceTypeProperties {
    /// Required: Databricks workspace domain/URL.
    pub domain: serde_json::Value,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<SecretBase>,
    #[serde(rename = "clusterId", default, skip_serializing_if = "Option::is_none")]
    pub cluster_id: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Oracle Responsys.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResponsysLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Responsys-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: ResponsysLinkedServiceTypeProperties,
}
/// Connection settings for [`ResponsysLinkedService`].
///
/// `endpoint` and `client_id` are required; the rest are optional and
/// omitted from the serialized output when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResponsysLinkedServiceTypeProperties {
    /// Required: Responsys endpoint.
    pub endpoint: serde_json::Value,
    /// Required: client ID for authentication.
    #[serde(rename = "clientId")]
    pub client_id: serde_json::Value,
    /// Secret value; see `SecretBase` for the supported secret representations.
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<SecretBase>,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Dynamics AX.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsAxLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Dynamics AX connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: DynamicsAxLinkedServiceTypeProperties,
}
/// Connection settings for [`DynamicsAxLinkedService`].
///
/// Unlike most sibling models, nearly everything here is required —
/// including the service-principal key (a non-optional `SecretBase`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DynamicsAxLinkedServiceTypeProperties {
    /// Required: Dynamics AX instance URL.
    pub url: serde_json::Value,
    /// Required: service principal (application) ID.
    #[serde(rename = "servicePrincipalId")]
    pub service_principal_id: serde_json::Value,
    /// Required secret value; see `SecretBase`.
    #[serde(rename = "servicePrincipalKey")]
    pub service_principal_key: SecretBase,
    /// Required: AAD tenant for the service principal.
    pub tenant: serde_json::Value,
    /// Required: AAD resource to request tokens for.
    #[serde(rename = "aadResourceId")]
    pub aad_resource_id: serde_json::Value,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Oracle Service Cloud.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleServiceCloudLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// Service-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: OracleServiceCloudLinkedServiceTypeProperties,
}
/// Connection settings for [`OracleServiceCloudLinkedService`].
///
/// `host`, `username`, and `password` are required (`password` is a
/// non-optional `SecretBase`); the rest are optional and omitted when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OracleServiceCloudLinkedServiceTypeProperties {
    /// Required: Oracle Service Cloud host.
    pub host: serde_json::Value,
    /// Required: user name for authentication.
    pub username: serde_json::Value,
    /// Required secret value; see `SecretBase`.
    pub password: SecretBase,
    #[serde(rename = "useEncryptedEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub use_encrypted_endpoints: Option<serde_json::Value>,
    #[serde(rename = "useHostVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_host_verification: Option<serde_json::Value>,
    #[serde(rename = "usePeerVerification", default, skip_serializing_if = "Option::is_none")]
    pub use_peer_verification: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Linked service for Google AdWords.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleAdWordsLinkedService {
    /// Common linked-service fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub linked_service: LinkedService,
    /// AdWords-specific connection settings (JSON property `typeProperties`).
    #[serde(rename = "typeProperties")]
    pub type_properties: GoogleAdWordsLinkedServiceTypeProperties,
}
/// Connection settings for [`GoogleAdWordsLinkedService`].
///
/// `client_customer_id`, `developer_token`, and `authentication_type` are
/// required; the rest are optional and omitted when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleAdWordsLinkedServiceTypeProperties {
    /// Required: AdWords client customer ID (JSON property `clientCustomerID`
    /// — note the all-caps `ID` suffix on the wire).
    #[serde(rename = "clientCustomerID")]
    pub client_customer_id: serde_json::Value,
    /// Required secret value; see `SecretBase`.
    #[serde(rename = "developerToken")]
    pub developer_token: SecretBase,
    /// Required authentication mode; see
    /// [`google_ad_words_linked_service_type_properties::AuthenticationType`].
    #[serde(rename = "authenticationType")]
    pub authentication_type: google_ad_words_linked_service_type_properties::AuthenticationType,
    /// Secret value; see `SecretBase`.
    #[serde(rename = "refreshToken", default, skip_serializing_if = "Option::is_none")]
    pub refresh_token: Option<SecretBase>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<serde_json::Value>,
    /// Secret value; see `SecretBase`.
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<SecretBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub email: Option<serde_json::Value>,
    #[serde(rename = "keyFilePath", default, skip_serializing_if = "Option::is_none")]
    pub key_file_path: Option<serde_json::Value>,
    #[serde(rename = "trustedCertPath", default, skip_serializing_if = "Option::is_none")]
    pub trusted_cert_path: Option<serde_json::Value>,
    #[serde(rename = "useSystemTrustStore", default, skip_serializing_if = "Option::is_none")]
    pub use_system_trust_store: Option<serde_json::Value>,
    /// Opaque encrypted-credential blob (JSON property `encryptedCredential`).
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Companion enums for [`GoogleAdWordsLinkedServiceTypeProperties`].
pub mod google_ad_words_linked_service_type_properties {
    use super::*;
    /// Authentication mode for a Google AdWords connection.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AuthenticationType {
        ServiceAuthentication,
        UserAuthentication,
    }
}
/// Serde model for an SAP Table linked service resource; common fields are
/// flattened, service-specific settings live under `typeProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapTableLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: SapTableLinkedServiceTypeProperties,
}
/// Connection settings for [`SapTableLinkedService`]. Every field is optional
/// and loosely typed (`serde_json::Value`); the `snc*` fields configure SAP
/// Secure Network Communication.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SapTableLinkedServiceTypeProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub server: Option<serde_json::Value>,
    #[serde(rename = "systemNumber", default, skip_serializing_if = "Option::is_none")]
    pub system_number: Option<serde_json::Value>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub language: Option<serde_json::Value>,
    #[serde(rename = "systemId", default, skip_serializing_if = "Option::is_none")]
    pub system_id: Option<serde_json::Value>,
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<SecretBase>,
    #[serde(rename = "messageServer", default, skip_serializing_if = "Option::is_none")]
    pub message_server: Option<serde_json::Value>,
    #[serde(rename = "messageServerService", default, skip_serializing_if = "Option::is_none")]
    pub message_server_service: Option<serde_json::Value>,
    #[serde(rename = "sncMode", default, skip_serializing_if = "Option::is_none")]
    pub snc_mode: Option<serde_json::Value>,
    #[serde(rename = "sncMyName", default, skip_serializing_if = "Option::is_none")]
    pub snc_my_name: Option<serde_json::Value>,
    #[serde(rename = "sncPartnerName", default, skip_serializing_if = "Option::is_none")]
    pub snc_partner_name: Option<serde_json::Value>,
    #[serde(rename = "sncLibraryPath", default, skip_serializing_if = "Option::is_none")]
    pub snc_library_path: Option<serde_json::Value>,
    #[serde(rename = "sncQop", default, skip_serializing_if = "Option::is_none")]
    pub snc_qop: Option<serde_json::Value>,
    #[serde(rename = "logonGroup", default, skip_serializing_if = "Option::is_none")]
    pub logon_group: Option<serde_json::Value>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Serde model for an Azure Data Explorer (Kusto) linked service resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataExplorerLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureDataExplorerLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureDataExplorerLinkedService`]: `endpoint` and
/// `database` are required; service-principal and tenant settings are
/// optional, as is an explicit `credential` reference.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureDataExplorerLinkedServiceTypeProperties {
    pub endpoint: serde_json::Value,
    #[serde(rename = "servicePrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_id: Option<serde_json::Value>,
    #[serde(rename = "servicePrincipalKey", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_key: Option<SecretBase>,
    pub database: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tenant: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
}
/// Serde model for an Azure Function linked service resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFunctionLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: AzureFunctionLinkedServiceTypeProperties,
}
/// Connection settings for [`AzureFunctionLinkedService`]: only
/// `functionAppUrl` is required; the function key, credential reference and
/// authentication settings are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureFunctionLinkedServiceTypeProperties {
    #[serde(rename = "functionAppUrl")]
    pub function_app_url: serde_json::Value,
    #[serde(rename = "functionKey", default, skip_serializing_if = "Option::is_none")]
    pub function_key: Option<SecretBase>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credential: Option<CredentialReference>,
    #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub authentication: Option<serde_json::Value>,
}
/// Serde model for a Snowflake linked service resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnowflakeLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: SnowflakeLinkedServiceTypeProperties,
}
/// Connection settings for [`SnowflakeLinkedService`]: a required connection
/// string plus an optional Key Vault reference for the password.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnowflakeLinkedServiceTypeProperties {
    #[serde(rename = "connectionString")]
    pub connection_string: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<AzureKeyVaultSecretReference>,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Serde model for a SharePoint Online List linked service resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharePointOnlineListLinkedService {
    #[serde(flatten)]
    pub linked_service: LinkedService,
    #[serde(rename = "typeProperties")]
    pub type_properties: SharePointOnlineListLinkedServiceTypeProperties,
}
/// Connection settings for [`SharePointOnlineListLinkedService`]: site URL,
/// tenant and service principal credentials are all required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharePointOnlineListLinkedServiceTypeProperties {
    #[serde(rename = "siteUrl")]
    pub site_url: serde_json::Value,
    #[serde(rename = "tenantId")]
    pub tenant_id: serde_json::Value,
    #[serde(rename = "servicePrincipalId")]
    pub service_principal_id: serde_json::Value,
    #[serde(rename = "servicePrincipalKey")]
    pub service_principal_key: SecretBase,
    #[serde(rename = "encryptedCredential", default, skip_serializing_if = "Option::is_none")]
    pub encrypted_credential: Option<serde_json::Value>,
}
/// Serde model for a managed private endpoint; every field is optional and
/// omitted from the serialized form when absent.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedPrivateEndpoint {
    #[serde(rename = "connectionState", default, skip_serializing_if = "Option::is_none")]
    pub connection_state: Option<ConnectionStateProperties>,
    // Fully qualified domain names; an empty list is omitted when serializing.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub fqdns: Vec<String>,
    #[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
    pub group_id: Option<String>,
    #[serde(rename = "isReserved", default, skip_serializing_if = "Option::is_none")]
    pub is_reserved: Option<bool>,
    #[serde(rename = "privateLinkResourceId", default, skip_serializing_if = "Option::is_none")]
    pub private_link_resource_id: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
}
/// Connection state of a [`ManagedPrivateEndpoint`] (approval status,
/// description and any actions required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnectionStateProperties {
    #[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")]
    pub actions_required: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}
/// Serde model for a managed virtual network; both fields are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedVirtualNetwork {
    #[serde(rename = "vNetId", default, skip_serializing_if = "Option::is_none")]
    pub v_net_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub alias: Option<String>,
}
/// Generic credential definition. `type_` is serialized as `type` and
/// presumably discriminates concrete credential kinds — TODO confirm how
/// callers dispatch on it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Credential {
    #[serde(rename = "type")]
    pub type_: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub annotations: Vec<serde_json::Value>,
}
| 47.739197 | 128 | 0.73295 |
d9fc25d88bc8423eebfed1d8a8451be98e4807f9
| 35,909 |
mod tags;
mod html;
mod injection;
#[cfg(test)]
mod tests;
use hir::{Local, Name, Semantics, VariantDef};
use ide_db::{
defs::{classify_name, classify_name_ref, Definition, NameClass, NameRefClass},
RootDatabase,
};
use rustc_hash::FxHashMap;
use syntax::{
ast::{self, HasFormatSpecifier},
AstNode, AstToken, Direction, NodeOrToken, SyntaxElement,
SyntaxKind::{self, *},
SyntaxNode, SyntaxToken, TextRange, WalkEvent, T,
};
use crate::FileId;
use ast::FormatSpecifier;
pub(crate) use html::highlight_as_html;
pub use tags::{Highlight, HighlightModifier, HighlightModifiers, HighlightTag};
/// A single highlighted span of the file, ready to be handed to the client.
#[derive(Debug, Clone)]
pub struct HighlightedRange {
    // Text range the highlight applies to.
    pub range: TextRange,
    // Tag plus modifiers to render for this range.
    pub highlight: Highlight,
    // Hash of a local binding's name and shadow count (see `calc_binding_hash`
    // below); `None` for highlights that are not bindings.
    pub binding_hash: Option<u64>,
}
// Feature: Semantic Syntax Highlighting
//
// rust-analyzer highlights the code semantically.
// For example, `bar` in `foo::Bar` might be colored differently depending on whether `Bar` is an enum or a trait.
// rust-analyzer does not specify colors directly; instead, it assigns a tag (like `struct`) and a set of modifiers (like `declaration`) to each token.
// It's up to the client to map those to specific colors.
//
// The general rule is that a reference to an entity gets colored the same way as the entity itself.
// We also give a special modifier to `mut` and `&mut` local variables.
/// Computes semantic highlighting for one file.
///
/// * `range_to_highlight` — if `Some`, only elements intersecting this range
///   are highlighted (viewport-sized requests); `None` highlights the whole
///   file.
/// * `syntactic_name_ref_highlighting` — when semantic resolution of a name
///   reference fails, fall back to a purely syntactic classification.
///
/// Returns the flattened, sorted, non-overlapping list of highlighted ranges.
pub(crate) fn highlight(
    db: &RootDatabase,
    file_id: FileId,
    range_to_highlight: Option<TextRange>,
    syntactic_name_ref_highlighting: bool,
) -> Vec<HighlightedRange> {
    let _p = profile::span("highlight");
    let sema = Semantics::new(db);
    // Determine the root based on the given range.
    let (root, range_to_highlight) = {
        let source_file = sema.parse(file_id);
        match range_to_highlight {
            Some(range) => {
                let node = match source_file.syntax().covering_element(range) {
                    NodeOrToken::Node(it) => it,
                    NodeOrToken::Token(it) => it.parent(),
                };
                (node, range)
            }
            None => (source_file.syntax().clone(), source_file.syntax().text_range()),
        }
    };
    let mut bindings_shadow_count: FxHashMap<Name, u32> = FxHashMap::default();
    // We use a stack for the DFS traversal below.
    // When we leave a node, we use it to flatten the highlighted ranges.
    let mut stack = HighlightedRangeStack::new();
    let mut current_macro_call: Option<ast::MacroCall> = None;
    let mut format_string: Option<SyntaxElement> = None;
    // Walk all nodes, keeping track of whether we are inside a macro or not.
    // If in macro, expand it first and highlight the expanded code.
    for event in root.preorder_with_tokens() {
        match &event {
            WalkEvent::Enter(_) => stack.push(),
            WalkEvent::Leave(_) => stack.pop(),
        };
        let event_range = match &event {
            WalkEvent::Enter(it) => it.text_range(),
            WalkEvent::Leave(it) => it.text_range(),
        };
        // Element outside of the viewport, no need to highlight
        if range_to_highlight.intersect(event_range).is_none() {
            continue;
        }
        // Track "inside macro" state
        match event.clone().map(|it| it.into_node().and_then(ast::MacroCall::cast)) {
            WalkEvent::Enter(Some(mc)) => {
                current_macro_call = Some(mc.clone());
                if let Some(range) = macro_call_range(&mc) {
                    stack.add(HighlightedRange {
                        range,
                        highlight: HighlightTag::Macro.into(),
                        binding_hash: None,
                    });
                }
                // Highlight the name being defined by a `macro_rules!` as well.
                if let Some(name) = mc.is_macro_rules() {
                    if let Some((highlight, binding_hash)) = highlight_element(
                        &sema,
                        &mut bindings_shadow_count,
                        syntactic_name_ref_highlighting,
                        name.syntax().clone().into(),
                    ) {
                        stack.add(HighlightedRange {
                            range: name.syntax().text_range(),
                            highlight,
                            binding_hash,
                        });
                    }
                }
                continue;
            }
            WalkEvent::Leave(Some(mc)) => {
                assert!(current_macro_call == Some(mc));
                current_macro_call = None;
                format_string = None;
            }
            _ => (),
        }
        // Check for Rust code in documentation
        match &event {
            WalkEvent::Leave(NodeOrToken::Node(node)) => {
                if let Some((doctest, range_mapping, new_comments)) =
                    injection::extract_doc_comments(node)
                {
                    injection::highlight_doc_comment(
                        doctest,
                        range_mapping,
                        new_comments,
                        &mut stack,
                    );
                }
            }
            _ => (),
        }
        // Only highlight on Enter; Leave events were fully handled above.
        let element = match event {
            WalkEvent::Enter(it) => it,
            WalkEvent::Leave(_) => continue,
        };
        let range = element.text_range();
        let element_to_highlight = if current_macro_call.is_some() && element.kind() != COMMENT {
            // Inside a macro -- expand it first
            let token = match element.clone().into_token() {
                Some(it) if it.parent().kind() == TOKEN_TREE => it,
                _ => continue,
            };
            let token = sema.descend_into_macros(token.clone());
            let parent = token.parent();
            // Check if macro takes a format string and remember it for highlighting later.
            // The macros that accept a format string expand to a compiler builtin macros
            // `format_args` and `format_args_nl`.
            if let Some(name) = parent
                .parent()
                .and_then(ast::MacroCall::cast)
                .and_then(|mc| mc.path())
                .and_then(|p| p.segment())
                .and_then(|s| s.name_ref())
            {
                match name.text().as_str() {
                    "format_args" | "format_args_nl" => {
                        format_string = parent
                            .children_with_tokens()
                            .filter(|t| t.kind() != WHITESPACE)
                            .nth(1)
                            .filter(|e| {
                                ast::String::can_cast(e.kind())
                                    || ast::RawString::can_cast(e.kind())
                            })
                    }
                    _ => {}
                }
            }
            // We only care Name and Name_ref
            match (token.kind(), parent.kind()) {
                (IDENT, NAME) | (IDENT, NAME_REF) => parent.into(),
                _ => token.into(),
            }
        } else {
            element.clone()
        };
        // Raw strings may contain injected Rust code; if so, highlight it and
        // skip the plain string-literal highlighting.
        if let Some(token) = element.as_token().cloned().and_then(ast::RawString::cast) {
            let expanded = element_to_highlight.as_token().unwrap().clone();
            if injection::highlight_injection(&mut stack, &sema, token, expanded).is_some() {
                continue;
            }
        }
        let is_format_string = format_string.as_ref() == Some(&element_to_highlight);
        if let Some((highlight, binding_hash)) = highlight_element(
            &sema,
            &mut bindings_shadow_count,
            syntactic_name_ref_highlighting,
            element_to_highlight.clone(),
        ) {
            stack.add(HighlightedRange { range, highlight, binding_hash });
            if let Some(string) =
                element_to_highlight.as_token().cloned().and_then(ast::String::cast)
            {
                if is_format_string {
                    stack.push();
                    string.lex_format_specifier(|piece_range, kind| {
                        if let Some(highlight) = highlight_format_specifier(kind) {
                            stack.add(HighlightedRange {
                                range: piece_range + range.start(),
                                highlight: highlight.into(),
                                binding_hash: None,
                            });
                        }
                    });
                    stack.pop();
                }
                // Highlight escape sequences
                if let Some(char_ranges) = string.char_ranges() {
                    stack.push();
                    for (piece_range, _) in char_ranges.iter().filter(|(_, char)| char.is_ok()) {
                        if string.text()[piece_range.start().into()..].starts_with('\\') {
                            stack.add(HighlightedRange {
                                range: piece_range + range.start(),
                                highlight: HighlightTag::EscapeSequence.into(),
                                binding_hash: None,
                            });
                        }
                    }
                    stack.pop_and_inject(None);
                }
            } else if let Some(string) =
                element_to_highlight.as_token().cloned().and_then(ast::RawString::cast)
            {
                if is_format_string {
                    stack.push();
                    string.lex_format_specifier(|piece_range, kind| {
                        if let Some(highlight) = highlight_format_specifier(kind) {
                            stack.add(HighlightedRange {
                                range: piece_range + range.start(),
                                highlight: highlight.into(),
                                binding_hash: None,
                            });
                        }
                    });
                    stack.pop();
                }
            }
        }
    }
    stack.flattened()
}
/// One layer of highlighted ranges per node currently being visited during the
/// DFS traversal in `highlight`.
#[derive(Debug)]
struct HighlightedRangeStack {
    stack: Vec<Vec<HighlightedRange>>,
}
/// We use a stack to implement the flattening logic for the highlighted
/// syntax ranges.
impl HighlightedRangeStack {
    /// Starts with a single, empty top-level layer.
    fn new() -> Self {
        Self { stack: vec![Vec::new()] }
    }
    /// Opens a fresh layer for a newly-entered node.
    fn push(&mut self) {
        self.stack.push(Vec::new());
    }
    /// Flattens the highlighted ranges.
    ///
    /// For example `#[cfg(feature = "foo")]` contains the nested ranges:
    /// 1) parent-range: Attribute [0, 23)
    /// 2) child-range: String [16, 21)
    ///
    /// The following code implements the flattening, for our example this results to:
    /// `[Attribute [0, 16), String [16, 21), Attribute [21, 23)]`
    fn pop(&mut self) {
        let children = self.stack.pop().unwrap();
        let prev = self.stack.last_mut().unwrap();
        let needs_flattening = !children.is_empty()
            && !prev.is_empty()
            && prev.last().unwrap().range.contains_range(children.first().unwrap().range);
        if !needs_flattening {
            prev.extend(children);
        } else {
            let mut parent = prev.pop().unwrap();
            // Carve each child out of the parent, keeping the non-empty
            // leftovers of the parent on either side.
            for ele in children {
                assert!(parent.range.contains_range(ele.range));
                let cloned = Self::intersect(&mut parent, &ele);
                if !parent.range.is_empty() {
                    prev.push(parent);
                }
                prev.push(ele);
                parent = cloned;
            }
            if !parent.range.is_empty() {
                prev.push(parent);
            }
        }
    }
    /// Intersects the `HighlightedRange` `parent` with `child`.
    /// `parent` is mutated in place, becoming the range before `child`.
    /// Returns the range (of the same type as `parent`) *after* `child`.
    fn intersect(parent: &mut HighlightedRange, child: &HighlightedRange) -> HighlightedRange {
        assert!(parent.range.contains_range(child.range));
        let mut cloned = parent.clone();
        parent.range = TextRange::new(parent.range.start(), child.range.start());
        cloned.range = TextRange::new(child.range.end(), cloned.range.end());
        cloned
    }
    /// Remove the `HighlightRange` of `parent` that's currently covered by `child`.
    fn intersect_partial(parent: &mut HighlightedRange, child: &HighlightedRange) {
        // `child` starts inside `parent` but ends past it.
        assert!(
            parent.range.start() <= child.range.start()
                && parent.range.end() >= child.range.start()
                && child.range.end() > parent.range.end()
        );
        parent.range = TextRange::new(parent.range.start(), child.range.start());
    }
    /// Similar to `pop`, but can modify arbitrary prior ranges (where `pop`)
    /// can only modify the last range currently on the stack.
    /// Can be used to do injections that span multiple ranges, like the
    /// doctest injection below.
    /// If `overwrite_parent` is non-optional, the highlighting of the parent range
    /// is overwritten with the argument.
    ///
    /// Note that `pop` can be simulated by `pop_and_inject(false)` but the
    /// latter is computationally more expensive.
    fn pop_and_inject(&mut self, overwrite_parent: Option<Highlight>) {
        let mut children = self.stack.pop().unwrap();
        let prev = self.stack.last_mut().unwrap();
        children.sort_by_key(|range| range.range.start());
        prev.sort_by_key(|range| range.range.start());
        for child in children {
            if let Some(idx) =
                prev.iter().position(|parent| parent.range.contains_range(child.range))
            {
                // Child fully contained in one parent range: split the parent
                // around the child.
                if let Some(tag) = overwrite_parent {
                    prev[idx].highlight = tag;
                }
                let cloned = Self::intersect(&mut prev[idx], &child);
                let insert_idx = if prev[idx].range.is_empty() {
                    prev.remove(idx);
                    idx
                } else {
                    idx + 1
                };
                prev.insert(insert_idx, child);
                if !cloned.range.is_empty() {
                    prev.insert(insert_idx + 1, cloned);
                }
            } else {
                // Child only partially overlaps a parent range (or none at all).
                let maybe_idx =
                    prev.iter().position(|parent| parent.range.contains(child.range.start()));
                match (overwrite_parent, maybe_idx) {
                    (Some(_), Some(idx)) => {
                        Self::intersect_partial(&mut prev[idx], &child);
                        let insert_idx = if prev[idx].range.is_empty() {
                            prev.remove(idx);
                            idx
                        } else {
                            idx + 1
                        };
                        prev.insert(insert_idx, child);
                    }
                    (_, None) => {
                        // No overlap: keep `prev` sorted by insertion position.
                        let idx = prev
                            .binary_search_by_key(&child.range.start(), |range| range.range.start())
                            .unwrap_or_else(|x| x);
                        prev.insert(idx, child);
                    }
                    _ => {
                        unreachable!("child range should be completely contained in parent range");
                    }
                }
            }
        }
    }
    /// Records `range` in the layer of the node currently being visited.
    fn add(&mut self, range: HighlightedRange) {
        self.stack
            .last_mut()
            .expect("during DFS traversal, the stack must not be empty")
            .push(range)
    }
    /// Consumes the stack after the traversal, returning the final sorted,
    /// pairwise-disjoint ranges.
    fn flattened(mut self) -> Vec<HighlightedRange> {
        assert_eq!(
            self.stack.len(),
            1,
            "after DFS traversal, the stack should only contain a single element"
        );
        let mut res = self.stack.pop().unwrap();
        res.sort_by_key(|range| range.range.start());
        // Check that ranges are sorted and disjoint
        assert!(res
            .iter()
            .zip(res.iter().skip(1))
            .all(|(left, right)| left.range.end() <= right.range.start()));
        res
    }
}
/// Maps one lexed piece of a format specifier (inside a `format_args!`
/// template string) to its highlight tag: punctuation-like pieces become
/// `FormatSpecifier`, widths/precisions `NumericLiteral`, and named arguments
/// `Local`. Currently always returns `Some` — the `Option` leaves room for
/// unhighlighted pieces.
fn highlight_format_specifier(kind: FormatSpecifier) -> Option<HighlightTag> {
    Some(match kind {
        FormatSpecifier::Open
        | FormatSpecifier::Close
        | FormatSpecifier::Colon
        | FormatSpecifier::Fill
        | FormatSpecifier::Align
        | FormatSpecifier::Sign
        | FormatSpecifier::NumberSign
        | FormatSpecifier::DollarSign
        | FormatSpecifier::Dot
        | FormatSpecifier::Asterisk
        | FormatSpecifier::QuestionMark => HighlightTag::FormatSpecifier,
        FormatSpecifier::Integer | FormatSpecifier::Zero => HighlightTag::NumericLiteral,
        FormatSpecifier::Identifier => HighlightTag::Local,
    })
}
/// Text range of a macro call's "name" part: the final path segment extended
/// over any following `!` or identifier sibling (so it covers `foo!` in
/// `foo!(…)`, and presumably the declared name in `macro_rules! foo` — the
/// IDENT case). Returns `None` when the call has no resolvable path segment.
fn macro_call_range(macro_call: &ast::MacroCall) -> Option<TextRange> {
    let path = macro_call.path()?;
    let name_ref = path.segment()?.name_ref()?;
    let range_start = name_ref.syntax().text_range().start();
    let mut range_end = name_ref.syntax().text_range().end();
    for sibling in path.syntax().siblings_with_tokens(Direction::Next) {
        match sibling.kind() {
            T![!] | IDENT => range_end = sibling.text_range().end(),
            _ => (),
        }
    }
    Some(TextRange::new(range_start, range_end))
}
/// Returns true if the parent nodes of `node` all match the `SyntaxKind`s in `kinds` exactly.
///
/// Walks up one ancestor per expected kind; fails early on the first mismatch
/// or if the tree ends before all kinds are consumed.
fn parents_match(mut node: NodeOrToken<SyntaxNode, SyntaxToken>, mut kinds: &[SyntaxKind]) -> bool {
    while let (Some(parent), [kind, rest @ ..]) = (&node.parent(), kinds) {
        if parent.kind() != *kind {
            return false;
        }
        // FIXME: Would be nice to get parent out of the match, but binding by-move and by-value
        // in the same pattern is unstable: rust-lang/rust#68354.
        node = node.parent().unwrap().into();
        kinds = rest;
    }
    // Only true if we matched all expected kinds
    kinds.is_empty()
}
/// Whether `node` is an lvalue used directly as a call argument
/// (`PATH_SEGMENT → PATH → PATH_EXPR → ARG_LIST` ancestry) while the local's
/// type is not `Copy` — such a use moves, i.e. consumes, the local.
fn is_consumed_lvalue(
    node: NodeOrToken<SyntaxNode, SyntaxToken>,
    local: &Local,
    db: &RootDatabase,
) -> bool {
    // When lvalues are passed as arguments and they're not Copy, then mark them as Consuming.
    parents_match(node, &[PATH_SEGMENT, PATH, PATH_EXPR, ARG_LIST]) && !local.ty(db).is_copy(db)
}
/// Computes the highlight (and, for local bindings, a hash) for a single
/// syntax element, or `None` if the element carries no highlighting of its
/// own.
///
/// `bindings_shadow_count` tracks, per binding name, how many times the name
/// has been (re)declared; it is cleared whenever a new `fn` is entered, so
/// shadowed bindings within one function hash differently.
fn highlight_element(
    sema: &Semantics<RootDatabase>,
    bindings_shadow_count: &mut FxHashMap<Name, u32>,
    syntactic_name_ref_highlighting: bool,
    element: SyntaxElement,
) -> Option<(Highlight, Option<u64>)> {
    let db = sema.db;
    let mut binding_hash = None;
    let highlight: Highlight = match element.kind() {
        // Entering a new function: reset the shadowing counters.
        FN => {
            bindings_shadow_count.clear();
            return None;
        }
        // Highlight definitions depending on the "type" of the definition.
        NAME => {
            let name = element.into_node().and_then(ast::Name::cast).unwrap();
            let name_kind = classify_name(sema, &name);
            if let Some(NameClass::Definition(Definition::Local(local))) = &name_kind {
                if let Some(name) = local.name(db) {
                    let shadow_count = bindings_shadow_count.entry(name.clone()).or_default();
                    *shadow_count += 1;
                    binding_hash = Some(calc_binding_hash(&name, *shadow_count))
                }
            };
            match name_kind {
                Some(NameClass::ExternCrate(_)) => HighlightTag::Module.into(),
                Some(NameClass::Definition(def)) => {
                    highlight_def(db, def) | HighlightModifier::Definition
                }
                Some(NameClass::ConstReference(def)) => highlight_def(db, def),
                Some(NameClass::FieldShorthand { field, .. }) => {
                    let mut h = HighlightTag::Field.into();
                    // Union field accesses are unsafe.
                    if let Definition::Field(field) = field {
                        if let VariantDef::Union(_) = field.parent_def(db) {
                            h |= HighlightModifier::Unsafe;
                        }
                    }
                    h
                }
                None => highlight_name_by_syntax(name) | HighlightModifier::Definition,
            }
        }
        // Highlight references like the definitions they resolve to
        NAME_REF if element.ancestors().any(|it| it.kind() == ATTR) => {
            Highlight::from(HighlightTag::Function) | HighlightModifier::Attribute
        }
        NAME_REF => {
            let name_ref = element.into_node().and_then(ast::NameRef::cast).unwrap();
            highlight_func_by_name_ref(sema, &name_ref).unwrap_or_else(|| {
                match classify_name_ref(sema, &name_ref) {
                    Some(name_kind) => match name_kind {
                        NameRefClass::ExternCrate(_) => HighlightTag::Module.into(),
                        NameRefClass::Definition(def) => {
                            if let Definition::Local(local) = &def {
                                if let Some(name) = local.name(db) {
                                    let shadow_count =
                                        bindings_shadow_count.entry(name.clone()).or_default();
                                    binding_hash = Some(calc_binding_hash(&name, *shadow_count))
                                }
                            };
                            let mut h = highlight_def(db, def);
                            if let Definition::Local(local) = &def {
                                if is_consumed_lvalue(name_ref.syntax().clone().into(), local, db) {
                                    h |= HighlightModifier::Consuming;
                                }
                            }
                            // Union field accesses are unsafe.
                            if let Some(parent) = name_ref.syntax().parent() {
                                if matches!(parent.kind(), FIELD_EXPR | RECORD_PAT_FIELD) {
                                    if let Definition::Field(field) = def {
                                        if let VariantDef::Union(_) = field.parent_def(db) {
                                            h |= HighlightModifier::Unsafe;
                                        }
                                    }
                                }
                            }
                            h
                        }
                        NameRefClass::FieldShorthand { .. } => HighlightTag::Field.into(),
                    },
                    None if syntactic_name_ref_highlighting => {
                        highlight_name_ref_by_syntax(name_ref, sema)
                    }
                    None => HighlightTag::UnresolvedReference.into(),
                }
            })
        }
        // Simple token-based highlighting
        COMMENT => {
            let comment = element.into_token().and_then(ast::Comment::cast)?;
            let h = HighlightTag::Comment;
            match comment.kind().doc {
                Some(_) => h | HighlightModifier::Documentation,
                None => h.into(),
            }
        }
        STRING | RAW_STRING | RAW_BYTE_STRING | BYTE_STRING => HighlightTag::StringLiteral.into(),
        ATTR => HighlightTag::Attribute.into(),
        INT_NUMBER | FLOAT_NUMBER => HighlightTag::NumericLiteral.into(),
        BYTE => HighlightTag::ByteLiteral.into(),
        CHAR => HighlightTag::CharLiteral.into(),
        QUESTION => Highlight::new(HighlightTag::Operator) | HighlightModifier::ControlFlow,
        LIFETIME => {
            let h = Highlight::new(HighlightTag::Lifetime);
            match element.parent().map(|it| it.kind()) {
                Some(LIFETIME_PARAM) | Some(LABEL) => h | HighlightModifier::Definition,
                _ => h,
            }
        }
        p if p.is_punct() => match p {
            T![&] => {
                let h = HighlightTag::Operator.into();
                let is_unsafe = element
                    .parent()
                    .and_then(ast::RefExpr::cast)
                    .map(|ref_expr| sema.is_unsafe_ref_expr(&ref_expr))
                    .unwrap_or(false);
                if is_unsafe {
                    h | HighlightModifier::Unsafe
                } else {
                    h
                }
            }
            T![::] | T![->] | T![=>] | T![..] | T![=] | T![@] => HighlightTag::Operator.into(),
            T![!] if element.parent().and_then(ast::MacroCall::cast).is_some() => {
                HighlightTag::Macro.into()
            }
            T![*] if element.parent().and_then(ast::PtrType::cast).is_some() => {
                HighlightTag::Keyword.into()
            }
            T![*] if element.parent().and_then(ast::PrefixExpr::cast).is_some() => {
                // Dereference of a raw pointer is unsafe.
                let prefix_expr = element.parent().and_then(ast::PrefixExpr::cast)?;
                let expr = prefix_expr.expr()?;
                let ty = sema.type_of_expr(&expr)?;
                if ty.is_raw_ptr() {
                    HighlightTag::Operator | HighlightModifier::Unsafe
                } else if let Some(ast::PrefixOp::Deref) = prefix_expr.op_kind() {
                    HighlightTag::Operator.into()
                } else {
                    HighlightTag::Punctuation.into()
                }
            }
            // Prefix `-` is highlighted like part of a numeric literal.
            T![-] if element.parent().and_then(ast::PrefixExpr::cast).is_some() => {
                HighlightTag::NumericLiteral.into()
            }
            _ if element.parent().and_then(ast::PrefixExpr::cast).is_some() => {
                HighlightTag::Operator.into()
            }
            _ if element.parent().and_then(ast::BinExpr::cast).is_some() => {
                HighlightTag::Operator.into()
            }
            _ if element.parent().and_then(ast::RangeExpr::cast).is_some() => {
                HighlightTag::Operator.into()
            }
            _ if element.parent().and_then(ast::RangePat::cast).is_some() => {
                HighlightTag::Operator.into()
            }
            _ if element.parent().and_then(ast::RestPat::cast).is_some() => {
                HighlightTag::Operator.into()
            }
            _ if element.parent().and_then(ast::Attr::cast).is_some() => {
                HighlightTag::Attribute.into()
            }
            _ => HighlightTag::Punctuation.into(),
        },
        k if k.is_keyword() => {
            let h = Highlight::new(HighlightTag::Keyword);
            match k {
                T![break]
                | T![continue]
                | T![else]
                | T![if]
                | T![loop]
                | T![match]
                | T![return]
                | T![while]
                | T![in] => h | HighlightModifier::ControlFlow,
                // `for` is control flow, except in `impl Trait for Type`.
                T![for] if !is_child_of_impl(&element) => h | HighlightModifier::ControlFlow,
                T![unsafe] => h | HighlightModifier::Unsafe,
                T![true] | T![false] => HighlightTag::BoolLiteral.into(),
                T![self] => {
                    let self_param_is_mut = element
                        .parent()
                        .and_then(ast::SelfParam::cast)
                        .and_then(|p| p.mut_token())
                        .is_some();
                    let self_path = &element
                        .parent()
                        .as_ref()
                        .and_then(SyntaxNode::parent)
                        .and_then(ast::Path::cast)
                        .and_then(|p| sema.resolve_path(&p));
                    let mut h = HighlightTag::SelfKeyword.into();
                    if self_param_is_mut
                        || matches!(self_path,
                            Some(hir::PathResolution::Local(local))
                                if local.is_self(db)
                                    && (local.is_mut(db) || local.ty(db).is_mutable_reference())
                        )
                    {
                        h |= HighlightModifier::Mutable
                    }
                    if let Some(hir::PathResolution::Local(local)) = self_path {
                        if is_consumed_lvalue(element, &local, db) {
                            h |= HighlightModifier::Consuming;
                        }
                    }
                    h
                }
                T![ref] => element
                    .parent()
                    .and_then(ast::IdentPat::cast)
                    .and_then(|ident_pat| {
                        if sema.is_unsafe_ident_pat(&ident_pat) {
                            Some(HighlightModifier::Unsafe)
                        } else {
                            None
                        }
                    })
                    .map(|modifier| h | modifier)
                    .unwrap_or(h),
                _ => h,
            }
        }
        _ => return None,
    };
    return Some((highlight, binding_hash));
    // Hash of (binding name, shadow count); gives every shadowing of a name a
    // distinct, stable value.
    fn calc_binding_hash(name: &Name, shadow_count: u32) -> u64 {
        fn hash<T: std::hash::Hash + std::fmt::Debug>(x: T) -> u64 {
            use std::{collections::hash_map::DefaultHasher, hash::Hasher};
            let mut hasher = DefaultHasher::new();
            x.hash(&mut hasher);
            hasher.finish()
        }
        hash((name, shadow_count))
    }
}
/// Whether the direct parent of `element` is an `impl` block.
fn is_child_of_impl(element: &SyntaxElement) -> bool {
    element.parent().map_or(false, |parent| parent.kind() == IMPL)
}
/// If `name_ref` is the name of a method-call expression, highlights it via
/// [`highlight_method_call`]; otherwise returns `None`.
fn highlight_func_by_name_ref(
    sema: &Semantics<RootDatabase>,
    name_ref: &ast::NameRef,
) -> Option<Highlight> {
    let method_call = name_ref.syntax().parent().and_then(ast::MethodCallExpr::cast)?;
    highlight_method_call(sema, &method_call)
}
/// Highlights a method call as a function, adding `unsafe`, `mutable` or
/// `consuming` modifiers derived from the resolved method's signature and
/// receiver; `None` when the call cannot be resolved.
fn highlight_method_call(
    sema: &Semantics<RootDatabase>,
    method_call: &ast::MethodCallExpr,
) -> Option<Highlight> {
    // `method_call` is already a reference; no extra borrow needed.
    let func = sema.resolve_method_call(method_call)?;
    let mut h = HighlightTag::Function.into();
    if func.is_unsafe(sema.db) || sema.is_unsafe_method_call(method_call) {
        h |= HighlightModifier::Unsafe;
    }
    if let Some(self_param) = func.self_param(sema.db) {
        match self_param.access(sema.db) {
            hir::Access::Shared => (),
            hir::Access::Exclusive => h |= HighlightModifier::Mutable,
            hir::Access::Owned => {
                // Taking `self` by value consumes the receiver unless it is `Copy`.
                if let Some(receiver_ty) =
                    method_call.receiver().and_then(|it| sema.type_of_expr(&it))
                {
                    if !receiver_ty.is_copy(sema.db) {
                        h |= HighlightModifier::Consuming
                    }
                }
            }
        }
    }
    Some(h)
}
/// Maps a resolved definition to its base highlight, attaching `unsafe` /
/// `mutable` modifiers where the definition itself warrants them (unsafe
/// functions, `static mut`, `mut` or `&mut` locals).
fn highlight_def(db: &RootDatabase, def: Definition) -> Highlight {
    match def {
        Definition::Macro(_) => HighlightTag::Macro,
        Definition::Field(_) => HighlightTag::Field,
        Definition::ModuleDef(def) => match def {
            hir::ModuleDef::Module(_) => HighlightTag::Module,
            hir::ModuleDef::Function(func) => {
                let mut h = HighlightTag::Function.into();
                if func.is_unsafe(db) {
                    h |= HighlightModifier::Unsafe;
                }
                return h;
            }
            hir::ModuleDef::Adt(hir::Adt::Struct(_)) => HighlightTag::Struct,
            hir::ModuleDef::Adt(hir::Adt::Enum(_)) => HighlightTag::Enum,
            hir::ModuleDef::Adt(hir::Adt::Union(_)) => HighlightTag::Union,
            hir::ModuleDef::EnumVariant(_) => HighlightTag::EnumVariant,
            hir::ModuleDef::Const(_) => HighlightTag::Constant,
            hir::ModuleDef::Trait(_) => HighlightTag::Trait,
            hir::ModuleDef::TypeAlias(_) => HighlightTag::TypeAlias,
            hir::ModuleDef::BuiltinType(_) => HighlightTag::BuiltinType,
            hir::ModuleDef::Static(s) => {
                // Accessing a `static mut` is unsafe, so mark it both mutable and unsafe.
                let mut h = Highlight::new(HighlightTag::Static);
                if s.is_mut(db) {
                    h |= HighlightModifier::Mutable;
                    h |= HighlightModifier::Unsafe;
                }
                return h;
            }
        },
        Definition::SelfType(_) => HighlightTag::SelfType,
        Definition::TypeParam(_) => HighlightTag::TypeParam,
        Definition::Local(local) => {
            let tag =
                if local.is_param(db) { HighlightTag::ValueParam } else { HighlightTag::Local };
            let mut h = Highlight::new(tag);
            if local.is_mut(db) || local.ty(db).is_mutable_reference() {
                h |= HighlightModifier::Mutable;
            }
            return h;
        }
    }
    .into()
}
/// Fallback highlighting for a `Name`, based purely on the kind of its parent
/// syntax node; used when semantic classification yields nothing.
fn highlight_name_by_syntax(name: ast::Name) -> Highlight {
    let default = HighlightTag::UnresolvedReference;
    let parent = match name.syntax().parent() {
        Some(it) => it,
        _ => return default.into(),
    };
    let tag = match parent.kind() {
        STRUCT => HighlightTag::Struct,
        ENUM => HighlightTag::Enum,
        UNION => HighlightTag::Union,
        TRAIT => HighlightTag::Trait,
        TYPE_ALIAS => HighlightTag::TypeAlias,
        TYPE_PARAM => HighlightTag::TypeParam,
        RECORD_FIELD => HighlightTag::Field,
        MODULE => HighlightTag::Module,
        FN => HighlightTag::Function,
        CONST => HighlightTag::Constant,
        STATIC => HighlightTag::Static,
        VARIANT => HighlightTag::EnumVariant,
        IDENT_PAT => HighlightTag::Local,
        _ => default,
    };
    tag.into()
}
/// Fallback highlighting for a `NameRef`, based on the surrounding syntax
/// (used when semantic resolution fails and syntactic fallback is enabled).
fn highlight_name_ref_by_syntax(name: ast::NameRef, sema: &Semantics<RootDatabase>) -> Highlight {
    let default = HighlightTag::UnresolvedReference;
    let parent = match name.syntax().parent() {
        Some(it) => it,
        _ => return default.into(),
    };
    match parent.kind() {
        METHOD_CALL_EXPR => {
            return ast::MethodCallExpr::cast(parent)
                .and_then(|method_call| highlight_method_call(sema, &method_call))
                .unwrap_or_else(|| HighlightTag::Function.into());
        }
        FIELD_EXPR => {
            let h = HighlightTag::Field;
            // Accessing a union field is unsafe.
            let is_union = ast::FieldExpr::cast(parent)
                .and_then(|field_expr| {
                    let field = sema.resolve_field(&field_expr)?;
                    Some(matches!(field.parent_def(sema.db), VariantDef::Union(_)))
                })
                .unwrap_or(false);
            if is_union {
                h | HighlightModifier::Unsafe
            } else {
                h.into()
            }
        }
        PATH_SEGMENT => {
            let path = match parent.parent().and_then(ast::Path::cast) {
                Some(it) => it,
                _ => return default.into(),
            };
            let expr = match path.syntax().parent().and_then(ast::PathExpr::cast) {
                Some(it) => it,
                _ => {
                    // within path, decide whether it is module or adt by checking for uppercase name
                    return if name.text().chars().next().unwrap_or_default().is_uppercase() {
                        HighlightTag::Struct
                    } else {
                        HighlightTag::Module
                    }
                    .into();
                }
            };
            let parent = match expr.syntax().parent() {
                Some(it) => it,
                None => return default.into(),
            };
            match parent.kind() {
                CALL_EXPR => HighlightTag::Function.into(),
                _ => {
                    // Same uppercase heuristic: type-like vs. constant.
                    if name.text().chars().next().unwrap_or_default().is_uppercase() {
                        HighlightTag::Struct
                    } else {
                        HighlightTag::Constant
                    }
                    .into()
                }
            }
        }
        _ => default.into(),
    }
}
| 38.989142 | 148 | 0.498148 |
4813f386865a7c2680e785c0b5cba754b1a5601a
| 975 |
use std::collections::HashSet;
use std::fs;
fn main() {
    // Each blank-line-separated block is a group; each whitespace-separated
    // token within it is one person's set of answered questions.
    let input = fs::read_to_string("input").expect("Error reading the file");
    let groups: Vec<Vec<HashSet<char>>> = input
        .split("\n\n")
        .map(|group| {
            group
                .split_whitespace()
                .map(|person| person.chars().collect())
                .collect()
        })
        .collect();
    // Part 1: questions answered by *anyone* in the group (set union).
    let first: usize = groups
        .iter()
        .map(|group| {
            let mut union: HashSet<char> = HashSet::new();
            for person in group {
                union.extend(person.iter().cloned());
            }
            union.len()
        })
        .sum();
    println!("First: {}", first);
    // Part 2: questions answered by *everyone* in the group (set intersection,
    // seeded with the full alphabet).
    let second: usize = groups
        .iter()
        .map(|group| {
            let mut common: HashSet<char> = ('a'..='z').collect();
            for person in group {
                common = common.intersection(person).cloned().collect();
            }
            common.len()
        })
        .sum();
    println!("Second: {}", second);
}
| 25.657895 | 79 | 0.441026 |
03e06c890c8c165c455bac571dbe5e40572a7350
| 184 |
use anyhow::Result;
/// Puzzle year this solver targets.
pub const YEAR: u32 = 2015;
/// Puzzle day this solver targets.
pub const DAY: u32 = 24;
/// Part one solution. Currently a stub: the input is unused and a
/// placeholder value of 1 is returned.
pub fn part_one(_: &str) -> Result<u32> {
    Ok(1)
}
/// Part two solution. Currently a stub: the input is unused and a
/// placeholder value of 2 is returned.
pub fn part_two(_: &str) -> Result<u32> {
    Ok(2)
}
| 14.153846 | 41 | 0.586957 |
d65575d72c81bda0292183fd6ce2ad63c8c84903
| 747 |
//! Available styles applicable to windows.
bitflags! {
    /// Available styles applicable to windows.
    ///
    /// Values are bit flags and may be combined with `|`, except for
    /// `NONE` and `FULLSCREEN` as noted below.
    #[repr(C)]
    pub struct Style: u32 {
        /// No decorations (cannot be combined with other flags).
        const NONE = 0;
        /// Title bar and fixed border.
        const TITLEBAR = 1;
        /// Title bar, resizable border, and maximize button.
        const RESIZE = 2;
        /// Title bar and close button.
        const CLOSE = 4;
        /// Fullscreen mode (ignores other flags).
        const FULLSCREEN = 8;
        /// Default window style: title bar, resizable border, and close button.
        const DEFAULT = 1 | 2 | 4;
    }
}
// `Style::default()` yields `DEFAULT`, i.e. TITLEBAR | RESIZE | CLOSE.
impl Default for Style {
    fn default() -> Self {
        Self::DEFAULT
    }
}
| 27.666667 | 80 | 0.578313 |
eb7b310a8bf07ed55fb3566062c3dbd8f3ffbe36
| 882 |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that attempt to move `&mut` pointer while pointee is borrowed
// yields an error.
//
// Example from src/librustc_borrowck/borrowck/README.md
// `foo` demonstrates that a `&mut` pointer cannot be moved while a shared
// reborrow of its pointee (`p`) is still live; the move of `t0` must be
// rejected by the borrow checker (see the `//~ ERROR` annotation).
fn foo(t0: &mut isize) {
    let p: &isize = &*t0; // Freezes `*t0`
    let t1 = t0; //~ ERROR cannot move out of `t0`
    *t1 = 22;
    p.use_ref();
}
fn main() {
}
// Helper trait giving every type no-op `use_ref`/`use_mut` methods so the
// test can keep borrows alive at specific points without extra machinery.
trait Fake { fn use_mut(&mut self) { } fn use_ref(&self) { } }
impl<T> Fake for T { }
| 29.4 | 69 | 0.673469 |
7541c2b054ba556dc42b2a3c1295fce7dd3bc585
| 6,450 |
//! Implement a concrete type to build channels.
use crate::Error;
use tor_linkspec::{ChanTarget, OwnedChanTarget};
use tor_llcrypto::pk;
use tor_rtcompat::{tls::TlsConnector, Runtime, TlsProvider};
use async_trait::async_trait;
use futures::task::SpawnExt;
use std::sync::Arc;
/// TLS-based channel builder.
///
/// This is a separate type so that we can keep our channel management
/// code network-agnostic.
pub(crate) struct ChanBuilder<R: Runtime> {
    /// Asynchronous runtime for TLS, TCP, spawning, and timeouts.
    runtime: R,
    /// Object to build TLS connections.
    ///
    /// Obtained once from the runtime's `TlsProvider` implementation at
    /// construction time.
    tls_connector: <R as TlsProvider>::Connector,
}
impl<R: Runtime> ChanBuilder<R> {
/// Construct a new ChanBuilder.
pub(crate) fn new(runtime: R) -> Self {
let tls_connector = runtime.tls_connector();
ChanBuilder {
runtime,
tls_connector,
}
}
}
#[async_trait]
impl<R: Runtime> crate::mgr::ChannelFactory for ChanBuilder<R> {
    type Channel = tor_proto::channel::Channel;
    type BuildSpec = OwnedChanTarget;
    /// Build a channel to `target`, bounding the whole negotiation with a
    /// (currently hard-coded) five-second timeout.
    async fn build_channel(&self, target: &Self::BuildSpec) -> crate::Result<Arc<Self::Channel>> {
        use tor_rtcompat::SleepProviderExt;
        // TODO: make this an option. And make a better value.
        let five_seconds = std::time::Duration::new(5, 0);
        self.runtime
            .timeout(five_seconds, self.build_channel_notimeout(target))
            .await?
    }
}
impl<R: Runtime> ChanBuilder<R> {
    /// As build_channel, but don't include a timeout.
    async fn build_channel_notimeout(
        &self,
        target: &OwnedChanTarget,
    ) -> crate::Result<Arc<tor_proto::channel::Channel>> {
        use tor_proto::channel::ChannelBuilder;
        use tor_rtcompat::tls::CertifiedConn;
        // 1. Negotiate the TLS connection.
        // TODO: This just uses the first address. Instead we could be smarter,
        // or use "happy eyeballs, or whatever. Maybe we will want to
        // refactor as we do so?
        let addr = target
            .addrs()
            .get(0)
            .ok_or_else(|| Error::UnusableTarget("No addresses for chosen relay".into()))?;
        tracing::info!("Negotiating TLS with {}", addr);
        // TODO: add a random hostname here if it will be used for SNI?
        let tls = self
            .tls_connector
            .connect_unvalidated(addr, "ignored")
            .await?;
        // The peer must present a certificate; it is checked below.
        let peer_cert = tls
            .peer_certificate()?
            .ok_or(Error::Internal("TLS connection with no peer certificate"))?;
        // 2. Set up the channel.
        let mut builder = ChannelBuilder::new();
        builder.set_declared_addr(*addr);
        let chan = builder.launch(tls).connect().await?;
        let now = self.runtime.wallclock();
        // Verify the relay's identity against `target` before finishing.
        let chan = chan.check(target, &peer_cert, Some(now))?;
        let (chan, reactor) = chan.finish().await?;
        // 3. Launch a task to run the channel reactor.
        // The reactor runs until the channel closes; its result is discarded.
        self.runtime.spawn(async {
            let _ = reactor.run().await;
        })?;
        Ok(chan)
    }
}
impl crate::mgr::AbstractChannel for tor_proto::channel::Channel {
    type Ident = pk::ed25519::Ed25519Identity;
    /// Identify the channel by the relay's Ed25519 identity key.
    fn ident(&self) -> &Self::Ident {
        self.peer_ed25519_id()
    }
    /// A channel is usable as long as it has not started closing.
    fn is_usable(&self) -> bool {
        !self.is_closing()
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::{
        mgr::{AbstractChannel, ChannelFactory},
        Result,
    };
    use pk::ed25519::Ed25519Identity;
    use pk::rsa::RsaIdentity;
    use std::net::SocketAddr;
    use std::time::{Duration, SystemTime};
    use tor_proto::channel::Channel;
    use tor_rtcompat::{test_with_one_runtime, TcpListener};
    use tor_rtmock::{io::LocalStream, net::MockNetwork, MockSleepRuntime};
    // Make sure that the builder can build a real channel. To test
    // this out, we set up a listener that pretends to have the right
    // IP, fake the current time, and use a canned response from
    // [`testing::msgs`] crate.
    #[test]
    fn build_ok() -> Result<()> {
        use crate::testing::msgs;
        // Canned addresses/identities for the fake relay.
        let orport: SocketAddr = msgs::ADDR.parse().unwrap();
        let ed: Ed25519Identity = msgs::ED_ID.into();
        let rsa: RsaIdentity = msgs::RSA_ID.into();
        let client_addr = "192.0.2.17".parse().unwrap();
        let tls_cert = msgs::X509_CERT.into();
        let target = OwnedChanTarget::new(vec![orport], ed, rsa);
        let now = SystemTime::UNIX_EPOCH + Duration::new(msgs::NOW, 0);
        test_with_one_runtime!(|rt| async move {
            // Stub out the internet so that this connection can work.
            let network = MockNetwork::new();
            // Set up a client runtime with a given IP
            let client_rt = network
                .builder()
                .add_address(client_addr)
                .runtime(rt.clone());
            // Mock the current time too
            let client_rt = MockSleepRuntime::new(client_rt);
            // Set up a relay runtime with a different IP
            let relay_rt = network
                .builder()
                .add_address(orport.ip())
                .runtime(rt.clone());
            // open a fake TLS listener and be ready to handle a request.
            let lis = relay_rt.mock_net().listen_tls(&orport, tls_cert).unwrap();
            // Tell the client to believe in a different timestamp.
            client_rt.jump_to(now);
            // Create the channelbuilder that we want to test.
            let builder = ChanBuilder::new(client_rt);
            // Run client and relay sides concurrently.
            let (r1, r2): (Result<Arc<Channel>>, Result<LocalStream>) = futures::join!(
                async {
                    // client-side: build a channel!
                    builder.build_channel(&target).await
                },
                async {
                    // relay-side: accept the channel
                    // (and pretend to know what we're doing).
                    let (mut con, addr) = lis.accept().await?;
                    assert_eq!(client_addr, addr.ip());
                    crate::testing::answer_channel_req(&mut con).await?;
                    Ok(con)
                }
            );
            let chan = r1.unwrap();
            assert_eq!(chan.ident(), &ed);
            assert!(chan.is_usable());
            r2.unwrap();
            Ok(())
        })
    }
    // TODO: Write tests for timeout logic, once there is smarter logic.
}
| 33.769634 | 98 | 0.582946 |
610531c7b4c9e8fad03dcd8703b3fbe66565b542
| 362 |
#![feature(const_raw_ptr_deref)]
use std::mem;
// Make sure we error with the right kind of error on a too large slice.
// Evaluating `TEST` fabricates a raw slice pointer whose length field is
// `usize::MAX` and dereferences it; const evaluation must reject the
// oversized slice (see the `//~ ERROR` annotation) rather than accept it.
const TEST: () = { unsafe {
    let slice: *const [u8] = mem::transmute((1usize, usize::MAX));
    let _val = &*slice; //~ ERROR: evaluation of constant value failed
    //~| slice is bigger than largest supported object
} };
fn main() {}
| 27.846154 | 72 | 0.657459 |
898d68eacaf7fbf018f8c8bdf848d7a1f1b8cf1f
| 7,583 |
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use anyhow::{bail, Result};
use move_core_types::{
account_address::AccountAddress, identifier::Identifier, language_storage::ModuleId,
};
use move_model::{
ast::Value,
model::{GlobalEnv, ModuleEnv, NamedConstantEnv},
symbol::Symbol,
};
use serde::{Deserialize, Serialize};
use std::{
collections::BTreeMap,
convert::TryFrom,
fs::File,
io::{Read, Write},
path::Path,
rc::Rc,
};
/// Options controlling error-map generation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrmapOptions {
    /// The constant prefix that determines if a constant is an error or not
    pub error_prefix: String,
    /// The module ID of the error category module
    pub error_category_module: ModuleId,
    /// In which file to store the output
    pub output_file: String,
}
/// Human-readable description of a single error constant.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorDescription {
    /// The constant name of error e.g., ECANT_PAY_DEPOSIT
    pub code_name: String,
    /// The code description. This is generated from the doc comments on the constant.
    pub code_description: String,
}
/// A fully-resolved explanation of an abort code: its category and reason.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorContext {
    /// The error category e.g., INVALID_ARGUMENT
    pub category: ErrorDescription,
    /// The error reason e.g., ECANT_PAY_DEPOSIT
    pub reason: ErrorDescription,
}
/// The complete error map: global categories plus per-module abort codes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorMapping {
    /// The set of error categories and their descriptions
    pub error_categories: BTreeMap<u64, ErrorDescription>,
    /// The set of modules, and the module-specific errors
    pub module_error_maps: BTreeMap<ModuleId, BTreeMap<u64, ErrorDescription>>,
}
// Defaults: prefix "E", category module 0x1::Errors, output file "errmap".
impl Default for ErrmapOptions {
    fn default() -> Self {
        Self {
            error_prefix: "E".to_string(),
            error_category_module: ModuleId::new(
                AccountAddress::from_hex_literal("0x1").unwrap(),
                Identifier::new("Errors").unwrap(),
            ),
            output_file: "errmap".to_string(),
        }
    }
}
impl Default for ErrorMapping {
fn default() -> Self {
Self {
error_categories: BTreeMap::new(),
module_error_maps: BTreeMap::new(),
}
}
}
impl ErrorMapping {
    /// Register the description for an error category id.
    ///
    /// Fails if `category_id` is already present.
    pub fn add_error_category(
        &mut self,
        category_id: u64,
        description: ErrorDescription,
    ) -> Result<()> {
        if let Some(previous_entry) = self.error_categories.insert(category_id, description) {
            bail!(format!(
                "Entry for category {} already taken by: {:#?}",
                category_id, previous_entry
            ))
        }
        Ok(())
    }
    /// Register the description for `abort_code` within `module_id`.
    ///
    /// Fails if that abort code already has an entry for the module.
    pub fn add_module_error(
        &mut self,
        module_id: ModuleId,
        abort_code: u64,
        description: ErrorDescription,
    ) -> Result<()> {
        let module_error_map = self.module_error_maps.entry(module_id.clone()).or_default();
        if let Some(previous_entry) = module_error_map.insert(abort_code, description) {
            bail!(format!(
                "Duplicate entry for abort code {} found in {}, previous entry: {:#?}",
                abort_code, module_id, previous_entry
            ))
        }
        Ok(())
    }
    /// Deserialize an error mapping from a BCS-encoded file.
    /// Panics if the file cannot be opened, read, or decoded.
    pub fn from_file<P: AsRef<Path>>(path: P) -> Self {
        let mut bytes = Vec::new();
        File::open(path).unwrap().read_to_end(&mut bytes).unwrap();
        bcs::from_bytes(&bytes).unwrap()
    }
    /// Serialize this error mapping to a BCS-encoded file.
    /// Panics if the file cannot be created or written.
    pub fn to_file<P: AsRef<Path>>(&self, path: P) {
        let bytes = bcs::to_bytes(self).unwrap();
        let mut file = File::create(path).unwrap();
        file.write_all(&bytes).unwrap();
    }
    /// Explain an abort code: the low byte selects the category, the
    /// remaining (shifted) bits select the module-specific reason.
    /// Returns `None` if either part is unknown.
    pub fn get_explanation(&self, module: &ModuleId, output_code: u64) -> Option<ErrorContext> {
        let category = output_code & 0xFFu64;
        let reason_code = output_code >> 8;
        self.error_categories.get(&category).and_then(|category| {
            self.module_error_maps.get(module).and_then(|module_map| {
                module_map.get(&reason_code).map(|reason| ErrorContext {
                    category: category.clone(),
                    reason: reason.clone(),
                })
            })
        })
    }
}
/// Walks a compiled `GlobalEnv` and emits an [`ErrorMapping`].
pub struct ErrmapGen<'env> {
    /// Options for error map generation
    options: &'env ErrmapOptions,
    /// Input definitions
    env: &'env GlobalEnv,
    /// Output error mapping
    output: ErrorMapping,
}
impl<'env> ErrmapGen<'env> {
    /// Create a generator over `env` with the given options.
    pub fn new(env: &'env GlobalEnv, options: &'env ErrmapOptions) -> Self {
        Self {
            options,
            env,
            output: ErrorMapping::default(),
        }
    }
    /// Write the accumulated mapping to `options.output_file`.
    pub fn save_result(&self) {
        self.output.to_file(&self.options.output_file);
    }
    /// Build the error map from all non-script, non-dependency modules.
    pub fn gen(&mut self) {
        for module in self.env.get_modules() {
            if !module.is_script_module() && !module.is_dependency() {
                self.build_error_map(&module).unwrap()
            }
        }
    }
    /// Route a module either to category collection (the designated error
    /// category module) or to per-module error collection.
    fn build_error_map(&mut self, module: &ModuleEnv<'_>) -> Result<()> {
        let module_id = self.get_module_id_for_name(module);
        if module_id == self.options.error_category_module {
            self.build_error_categories(module)?
        } else {
            self.build_error_map_for_module(&module_id, module)?
        }
        Ok(())
    }
    /// Record every named constant of the category module as an error category.
    fn build_error_categories(&mut self, module: &ModuleEnv<'_>) -> Result<()> {
        for named_constant in module.get_named_constants() {
            let name = self.name_string(named_constant.get_name());
            let error_category = self.get_abort_code(&named_constant)?;
            self.output.add_error_category(
                error_category,
                ErrorDescription {
                    code_name: name.to_string(),
                    code_description: named_constant.get_doc().to_string(),
                },
            )?
        }
        Ok(())
    }
    /// Record constants whose names carry the configured error prefix as
    /// module-specific abort codes.
    fn build_error_map_for_module(
        &mut self,
        module_id: &ModuleId,
        module: &ModuleEnv<'_>,
    ) -> Result<()> {
        for named_constant in module.get_named_constants() {
            let name = self.name_string(named_constant.get_name());
            if name.starts_with(&self.options.error_prefix) {
                let abort_code = self.get_abort_code(&named_constant)?;
                self.output.add_module_error(
                    module_id.clone(),
                    abort_code,
                    ErrorDescription {
                        code_name: name.to_string(),
                        code_description: named_constant.get_doc().to_string(),
                    },
                )?
            }
        }
        Ok(())
    }
    /// Extract the u64 abort code from a constant; non-numeric or
    /// out-of-range values are errors.
    fn get_abort_code(&self, constant: &NamedConstantEnv<'_>) -> Result<u64> {
        match constant.get_value() {
            Value::Number(big_int) => u64::try_from(big_int).map_err(|err| err.into()),
            x => bail!(
                "Invalid abort code constant {} found for code {}",
                x,
                self.name_string(constant.get_name())
            ),
        }
    }
    /// Convert a model module name into a core-types `ModuleId`.
    fn get_module_id_for_name(&self, module: &ModuleEnv<'_>) -> ModuleId {
        let name = module.get_name();
        let addr = AccountAddress::from_hex_literal(&format!("0x{:x}", name.addr())).unwrap();
        let name = Identifier::new(self.name_string(name.name()).to_string()).unwrap();
        ModuleId::new(addr, name)
    }
    /// Resolve a symbol to its interned string form.
    fn name_string(&self, symbol: Symbol) -> Rc<String> {
        self.env.symbol_pool().string(symbol)
    }
}
| 31.995781 | 96 | 0.58407 |
112e885377f952b32aa5c3e19a0b0f02ed51bff1
| 317 |
/// Return the Hamming distance between the strings,
/// or None if the lengths are mismatched.
///
/// Lengths are compared in *characters*, not bytes: the previous byte-length
/// check let multi-byte UTF-8 strings of equal byte length but different
/// character counts slip through, silently truncating the comparison via
/// `zip`. Character counts make the mismatch check match what is compared.
pub fn hamming_distance(s1: &str, s2: &str) -> Option<usize> {
    if s1.chars().count() != s2.chars().count() {
        return None;
    }
    // Pair characters position-by-position and count the differing pairs.
    Some(
        s1.chars()
            .zip(s2.chars())
            .filter(|(c1, c2)| c1 != c2)
            .count(),
    )
}
| 28.818182 | 84 | 0.586751 |
d94e30d2d6b12969893038695005384fcdc8f7ca
| 901 |
//! Account utility functions
use solana_program::{
account_info::AccountInfo,
entrypoint::ProgramResult,
program::{invoke_signed},
pubkey::Pubkey,
rent::Rent,
system_instruction,
};
/// Creates Program Derived Address for the given seeds.
///
/// Funds the new account with its rent-exempt minimum (at least 1 lamport),
/// allocates `space` bytes, and assigns it to `owner`, signing with the
/// provided PDA seeds.
pub fn create_pda_account<'a>(
    payer: &AccountInfo<'a>,
    rent: &Rent,
    space: usize,
    owner: &Pubkey,
    system_program: &AccountInfo<'a>,
    new_pda_account: &AccountInfo<'a>,
    new_pda_signer_seeds: &[&[u8]],
) -> ProgramResult {
    // Never fund with zero lamports, even for zero-sized accounts.
    let lamports = 1.max(rent.minimum_balance(space));
    let create_ix = system_instruction::create_account(
        payer.key,
        new_pda_account.key,
        lamports,
        space as u64,
        owner,
    );
    let account_infos = [
        payer.clone(),
        new_pda_account.clone(),
        system_program.clone(),
    ];
    invoke_signed(&create_ix, &account_infos, &[new_pda_signer_seeds])
}
| 23.710526 | 55 | 0.587125 |
093182b8a7519dfab8d05289fc5c6fc8cae05fb3
| 3,774 |
/*
Advent of Code 2020
Caleb Stanford
Utilities
*/
use std::collections::HashSet;
use std::fmt::Debug;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::str::FromStr;
/* Parsing */
/// Read `filepath` and parse each line into a `T`.
///
/// Panics if the file cannot be opened, a line cannot be read, or a line
/// fails to parse.
pub fn file_to_vec_parsed<T>(filepath: &str) -> Vec<T>
where
    T: FromStr,
    <T as FromStr>::Err: Debug,
{
    let reader = BufReader::new(File::open(filepath).unwrap());
    reader
        .lines()
        .map(|line| line.unwrap().parse().unwrap())
        .collect()
}
/// Read `filepath` into a vector of raw line strings.
pub fn file_to_vec(filepath: &str) -> Vec<String> {
    file_to_vec_parsed(filepath)
}
/// Like `file_to_vec`, but with an empty-line ("") terminator appended.
pub fn file_to_vec_el(filepath: &str) -> Vec<String> {
    let mut lines = file_to_vec(filepath);
    lines.push(String::new());
    lines
}
/// Split `line` on whitespace into owned words.
pub fn line_to_words(line: &str) -> Vec<String> {
    line.split_whitespace().map(String::from).collect()
}
/// Collapse an iterator that must yield exactly two items into a `(T, T)`.
///
/// Panics if the iterator yields fewer or more than two items.
pub fn iter_to_pair<T, I>(mut elems: I) -> (T, T)
where
    I: Iterator<Item = T>,
    T: Debug + PartialEq,
{
    let first = elems.next().unwrap();
    let second = elems.next().unwrap();
    assert_eq!(elems.next(), None);
    (first, second)
}
/* Useful iterators */
/// Cartesian product of two iterators, yielding `(t, u)` pairs in row-major
/// order: every `u` for the first `t`, then every `u` for the second, ...
pub fn iter_prod<T, IterT, U, IterU>(
    iter_t: IterT,
    iter_u: IterU,
) -> impl Iterator<Item = (T, U)>
where
    T: Clone,
    IterT: Iterator<Item = T>,
    IterU: Iterator<Item = U> + Clone,
{
    iter_t.flat_map(move |left| {
        let rights = iter_u.clone();
        rights.map(move |right| (left.clone(), right))
    })
}
/// All integer coordinates of the rectangle `[x0, x1] x [y0, y1]`, inclusive.
pub fn iter_rectangle(
    x0: isize,
    y0: isize,
    x1: isize,
    y1: isize,
) -> impl Iterator<Item = (isize, isize)> {
    iter_prod(x0..=x1, y0..=y1)
}
/* Validation */
// Check if a list of integers contains every number from 1 to n, for some n.
pub fn unique_1_to_n<'a, I: Iterator<Item = &'a usize>>(ints: I) -> bool {
    let mut seen = HashSet::new();
    let mut max_seen = 0;
    for &value in ints {
        // Zero is never part of 1..=n, and a repeat disqualifies immediately
        // (`insert` returns false when the value was already present).
        if value == 0 || !seen.insert(value) {
            return false;
        }
        max_seen = max_seen.max(value);
    }
    // Distinct positive values whose maximum equals their count are 1..=n.
    max_seen == seen.len()
}
// Version that checks between 0 and n instead
pub fn unique_0_to_n<'a, I: Iterator<Item = &'a usize>>(ints: I) -> bool {
    let mut seen = HashSet::new();
    let mut max_seen: Option<usize> = None;
    for &value in ints {
        if !seen.insert(value) {
            return false;
        }
        max_seen = max_seen.max(Some(value));
    }
    // n + 1 distinct values with maximum n are exactly 0..=n.
    max_seen.map_or(0, |m| m + 1) == seen.len()
}
// Weaker version that only checks uniqueness
// NOTE(review): this also rejects zero, which the comment above does not
// promise — confirm whether callers rely on the zero check.
pub fn unique<'a, I: Iterator<Item = &'a usize>>(ints: I) -> bool {
    let mut seen = HashSet::new();
    for &value in ints {
        if value == 0 || !seen.insert(value) {
            return false;
        }
    }
    true
}
/* Unit tests */
#[cfg(test)]
mod tests {
    use super::*;
    // Spot checks for `unique_1_to_n` covering the accept cases (empty,
    // permutations of 1..=n) and reject cases (zero, duplicates, gaps,
    // wrong maximum).
    #[test]
    fn test_unique_1_to_n() {
        assert!(unique_1_to_n([].iter()));
        assert!(unique_1_to_n([1].iter()));
        assert!(unique_1_to_n([1, 2].iter()));
        assert!(unique_1_to_n([2, 1].iter()));
        assert!(unique_1_to_n([1, 2, 3, 4, 5].iter()));
        assert!(unique_1_to_n([5, 2, 4, 1, 3].iter()));
        assert!(unique_1_to_n([1, 2, 5, 4, 3].iter()));
        assert!(!unique_1_to_n([0].iter()));
        assert!(!unique_1_to_n([2].iter()));
        assert!(!unique_1_to_n([1, 1].iter()));
        assert!(!unique_1_to_n([1, 3].iter()));
        assert!(!unique_1_to_n([3, 2].iter()));
        assert!(!unique_1_to_n([5, 5].iter()));
        assert!(!unique_1_to_n([1, 2, 0].iter()));
        assert!(!unique_1_to_n([1, 2, 4, 4, 5].iter()));
        assert!(!unique_1_to_n([1, 2, 3, 4, 6].iter()));
    }
}
| 25.849315 | 77 | 0.572072 |
2f10cfe0eaa64c248c4fdd0ae194e3a1cc8a9d18
| 6,513 |
use crate::error::Error::ConnectionClosed;
use crate::{request::Span, Error, Request};
/// Total size limit for all headers combined.
const MAX_HEADER_SIZE: usize = 8192;
/// Parse a raw HTTP request into a Request struct.
/// # Errors
/// Errors if the connection is unexpectedly closed or if the headers are too large.
pub fn parse(buffer: Vec<u8>) -> Result<Request, Error> {
    // Cursor into `buffer`; every macro below reads and advances it.
    let mut pos = 0;
    // True if at least `$size` bytes remain; errors out of `parse` otherwise.
    macro_rules! peek {
        ($size:expr) => {
            if buffer[pos..].len() < $size {
                return Err(ConnectionClosed);
            } else {
                true
            }
        };
    }
    // Skip over any ASCII whitespace (including CR/LF).
    macro_rules! consume_whitespace {
        () => {
            while peek!(1) && buffer[pos].is_ascii_whitespace() {
                pos += 1;
            }
        };
    }
    // Skip whitespace but stop at (and keep) the end-of-line bytes.
    macro_rules! consume_whitespace_to_eol {
        () => {
            while buffer.get(pos).is_some()
                && buffer.get(pos).unwrap().is_ascii_whitespace()
                && (buffer[pos] != b'\r' && buffer[pos] != b'\n')
            {
                if !peek!(1) {
                    break;
                }
                pos += 1;
            }
        };
    }
    // Consume the literal `$word`, evaluating `$error` (default: false) on
    // mismatch; yields true on success.
    macro_rules! consume {
        ($word:expr) => {
            consume!($word, false)
        };
        ($word:expr, $error:expr) => {
            let mut found = true;
            for c in $word.bytes() {
                if buffer.len() <= pos {
                    return Err(ConnectionClosed);
                } else if buffer[pos] == c {
                    pos += 1;
                } else {
                    found = false;
                    break;
                }
            }
            if found {
                true
            } else {
                $error
            }
        };
    }
    // Consume a line ending: either a bare LF or a CRLF pair.
    macro_rules! consume_eol {
        () => {
            consume_eol!(false)
        };
        ($error:expr) => {
            if (peek!(1) && buffer[pos] == b'\n') {
                pos += 1;
                true
            } else if (peek!(2) && &buffer[pos..pos + 2] == b"\r\n") {
                pos += 2;
                true
            } else {
                $error
            }
        };
    }
    // Request line: METHOD SP PATH SP HTTP/1.1 CRLF
    consume_whitespace!();
    let method = parse_method(&buffer, &mut pos)?;
    consume!(" ", return Err(Error::ParseError));
    let path = parse_path(&buffer, &mut pos)?;
    consume!(" ", return Err(Error::ParseError));
    consume!("HTTP/1.1", return Err(Error::ParseVersion));
    consume_eol!(return Err(Error::ExpectedCRLF));
    // Expecting Headers but if there's another EOL we're done and we can just return the struct
    if consume_eol!() {
        return Ok(Request::new(method, path, Vec::new(), Span::new(), buffer));
    }
    // Parse headers
    let start = pos;
    let mut headers = Vec::with_capacity(16);
    let mut content_length = 0;
    loop {
        let name = parse_header_name(&buffer, &mut pos)?;
        consume!(":");
        consume_whitespace_to_eol!();
        let value = parse_header_value(&buffer, &mut pos);
        // Remember Content-Length (case-insensitively) to size the body span.
        if name.from_buf(&buffer).to_ascii_lowercase() == "content-length" {
            content_length = value.from_buf(&buffer).parse().unwrap_or(0);
        }
        headers.push((name, value));
        consume_eol!(return Err(ConnectionClosed));
        // A blank line terminates the header section.
        if consume_eol!() {
            break;
        }
        // Guard against unbounded header sections.
        if pos - start > MAX_HEADER_SIZE {
            return Err(Error::ParseHeaderValue);
        }
    }
    // The body (if any) is the Content-Length bytes after the headers.
    let body = if content_length > 0 {
        Span(pos, pos + content_length)
    } else {
        Span::new()
    };
    if body.1 > buffer.len() {
        return Err(ConnectionClosed);
    }
    Ok(Request::new(method, path, headers, body, buffer))
}
/// Parse the HTTP method at `*pos`, advancing `pos` past it on success and
/// returning its span. Unrecognized methods yield `Error::UnknownHTTPMethod`;
/// a buffer too short to decide yields `Error::ParseError`.
fn parse_method(buffer: &[u8], pos: &mut usize) -> Result<Span, Error> {
    let start = *pos;
    if let Some(bytes) = buffer.get(start..start + 3) {
        // Dispatch on the first three bytes, then confirm the full name.
        let size = match bytes {
            b"GET" | b"PUT" => 3,
            b"HEA" | b"POS" => match buffer.get(start..start + 4) {
                Some(bytes) => match bytes {
                    b"HEAD" | b"POST" => 4,
                    _ => 0,
                },
                None => return Err(Error::ParseError),
            },
            b"PAT" | b"TRA" => match buffer.get(start..start + 5) {
                Some(bytes) => match bytes {
                    b"PATCH" | b"TRACE" => 5,
                    _ => 0,
                },
                None => return Err(Error::ParseError),
            },
            // Bug fix: this arm previously inspected `buffer[0..6]` from the
            // absolute start of the buffer instead of the current parse
            // position, so DELETE requests preceded by whitespace (which
            // `parse` consumes before calling us) were rejected.
            b"DEL" => match buffer.get(start..start + 6) {
                Some(bytes) => match bytes {
                    b"DELETE" => 6,
                    _ => 0,
                },
                None => return Err(Error::ParseError),
            },
            b"CON" | b"OPT" => match buffer.get(start..start + 7) {
                Some(bytes) => match bytes {
                    b"CONNECT" | b"OPTIONS" => 7,
                    _ => 0,
                },
                None => return Err(Error::ParseError),
            },
            _ => 0,
        };
        if size == 0 {
            return Err(Error::UnknownHTTPMethod("?".into()));
        }
        *pos += size;
        return Ok(Span(start, start + size));
    }
    Err(Error::ParseError)
}
fn parse_path(buffer: &[u8], pos: &mut usize) -> Result<Span, Error> {
let start = *pos;
let end = buffer[start..].iter().position(|c| *c == b' ');
let end = end.unwrap_or(0);
if end == 0 {
return Err(Error::ParsePath);
};
*pos += end;
Ok(Span(start, start + end))
}
/// Scan a header name, stopping at `:` (exclusive); `pos` is left on the `:`.
///
/// Whitespace or CR/LF inside the name, or reaching the end of the buffer
/// before a `:`, is a parse error.
fn parse_header_name(buffer: &[u8], pos: &mut usize) -> Result<Span, Error> {
    let start = *pos;
    loop {
        match buffer.get(*pos) {
            Some(bytes) => match bytes {
                b':' => break,
                b'\r' | b'\n' | b' ' | b'\t' => return Err(Error::ParseHeaderName),
                _ => {
                    *pos += 1;
                    // End of buffer without a ':' terminator is an error.
                    if *pos == buffer.len() {
                        return Err(Error::ParseHeaderName);
                    }
                }
            },
            None => return Err(Error::ParseError),
        }
    }
    let end = *pos;
    Ok(Span(start, end))
}
/// Consume bytes up to (not including) the next CR/LF or the end of the
/// buffer, returning their span. Never fails.
fn parse_header_value(buffer: &[u8], pos: &mut usize) -> Span {
    let start = *pos;
    while let Some(&c) = buffer.get(*pos) {
        if c == b'\r' || c == b'\n' {
            break;
        }
        *pos += 1;
    }
    Span(start, *pos)
}
| 28.565789 | 96 | 0.442346 |
612fe2ff9a70f983c597e6e2edcaa417ac9f666b
| 20,344 |
use std;
use std::fmt;
use std::ffi::{OsStr, OsString};
use std::os::unix::ffi::{OsStrExt, OsStringExt};
use std::borrow::Cow;
use std::error::Error;
use libc::c_ulong;
use libc::{MS_RDONLY, MS_NOSUID, MS_NODEV, MS_NOEXEC, MS_SYNCHRONOUS};
use libc::{MS_MANDLOCK, MS_DIRSYNC, MS_NOATIME, MS_NODIRATIME};
use libc::{MS_RELATIME, MS_STRICTATIME};
/// Error produced while parsing a single mountinfo row (message only).
#[derive(Debug)]
pub struct ParseRowError(String);
impl fmt::Display for ParseRowError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Parse error: {}", self.0)
    }
}
impl Error for ParseRowError {
    // NOTE: `Error::description` is deprecated in modern Rust; kept as-is
    // for compatibility with the era this code targets.
    fn description(&self) -> &str {
        return &self.0;
    }
}
/// A row-level parse error enriched with the row number and the offending
/// row text, for user-facing diagnostics.
#[derive(Debug)]
pub struct ParseError {
    // What went wrong.
    msg: String,
    // 1-based row number within the input.
    row_num: usize,
    // The raw row text (lossily decoded).
    row: String,
}
impl ParseError {
    /// Bundle a message with its row number and raw row text.
    pub fn new(msg: String, row_num: usize, row: String) -> ParseError {
        ParseError {
            msg: msg,
            row_num: row_num,
            row: row,
        }
    }
}
impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Parse error at line {}: {}\n{}",
            self.row_num, self.description(), self.row)
    }
}
impl Error for ParseError {
    fn description(&self) -> &str {
        return &self.msg;
    }
}
// TODO: make public api some day
/// Iterator over the rows of a `/proc/.../mountinfo` buffer, yielding one
/// parsed `MountPoint` (or error) per non-comment row.
#[allow(dead_code)]
pub struct MountInfoParser<'a> {
    // Remaining unparsed bytes.
    data: &'a [u8],
    // Number of rows consumed so far (used in error reports).
    row_num: usize,
    // Set once the final (newline-less) row has been handled.
    exhausted: bool,
}
#[allow(dead_code)]
impl<'a> MountInfoParser<'a> {
    /// Create a parser over the raw bytes of a mountinfo file.
    pub fn new(data: &'a [u8]) -> MountInfoParser<'a> {
        MountInfoParser {
            data: data,
            row_num: 0,
            exhausted: false,
        }
    }
}
/// One parsed mountinfo row. Fields mirror the columns documented in
/// proc(5) for `/proc/[pid]/mountinfo`.
pub struct MountPoint<'a> {
    /// Unique id of this mount.
    pub mount_id: c_ulong,
    /// Id of the parent mount.
    pub parent_id: c_ulong,
    /// Major number of the st_dev of files on this filesystem.
    pub major: c_ulong,
    /// Minor number of the st_dev of files on this filesystem.
    pub minor: c_ulong,
    /// Root of the mount within the filesystem (octal escapes decoded).
    pub root: Cow<'a, OsStr>,
    /// Mount point relative to the process's root (octal escapes decoded).
    pub mount_point: Cow<'a, OsStr>,
    /// Per-mount options, comma-separated (see `get_flags`).
    pub mount_options: Cow<'a, OsStr>,
    // TODO: we might need some enum which will have three states:
    // empty, single Cow<OsStr> value or a vector Vec<Cow<OsStr>>
    /// Optional fields (everything before the "- " separator), unsplit.
    pub optional_fields: Cow<'a, OsStr>,
    /// Filesystem type.
    pub fstype: Cow<'a, OsStr>,
    /// Filesystem-specific source (e.g. device).
    pub mount_source: Cow<'a, OsStr>,
    /// Per-superblock options, comma-separated.
    pub super_options: Cow<'a, OsStr>,
}
impl<'a> MountPoint<'a> {
    /// Translate the textual per-mount options into `MS_*` mount flag bits.
    /// Options with no corresponding flag (e.g. `rw`) contribute nothing.
    pub fn get_flags(&self) -> c_ulong {
        // Option-name -> flag lookup table.
        let table: &[(&[u8], c_ulong)] = &[
            (b"ro", MS_RDONLY),
            (b"nosuid", MS_NOSUID),
            (b"nodev", MS_NODEV),
            (b"noexec", MS_NOEXEC),
            (b"mand", MS_MANDLOCK),
            (b"sync", MS_SYNCHRONOUS),
            (b"dirsync", MS_DIRSYNC),
            (b"noatime", MS_NOATIME),
            (b"nodiratime", MS_NODIRATIME),
            (b"relatime", MS_RELATIME),
            (b"strictatime", MS_STRICTATIME),
        ];
        let mut flags = 0;
        for opt in self.mount_options.as_bytes().split(|&c| c == b',') {
            if let Some(&(_, flag)) = table.iter().find(|&&(name, _)| name == opt) {
                flags |= flag;
            }
        }
        flags
    }
}
impl<'a> Iterator for MountInfoParser<'a> {
    type Item = Result<MountPoint<'a>, ParseError>;
    /// Yield the next parsed mount point, silently skipping comment and
    /// blank rows. The final row may lack a trailing newline; `exhausted`
    /// marks that it has been handled.
    fn next(&mut self) -> Option<Self::Item> {
        if self.exhausted {
            return None;
        }
        loop {
            match self.data.iter().position(|c| *c == b'\n') {
                Some(ix) => {
                    self.row_num += 1;
                    let row = &self.data[..ix];
                    self.data = &self.data[ix + 1..];
                    let res = match parse_mount_point(row) {
                        // Comment/blank row: keep scanning.
                        Ok(None) => continue,
                        Ok(Some(v)) => Ok(v),
                        Err(e) => Err(ParseError::new(e.0, self.row_num,
                            String::from_utf8_lossy(row).into_owned())),
                    };
                    return Some(res);
                },
                None => {
                    // No more newlines: parse whatever remains as the last row.
                    self.exhausted = true;
                    let res = match parse_mount_point(self.data) {
                        Ok(None) => return None,
                        Ok(Some(v)) => Ok(v),
                        Err(e) => Err(ParseError::new(e.0, self.row_num,
                            String::from_utf8_lossy(self.data).into_owned())),
                    };
                    return Some(res);
                },
            }
        }
    }
}
/// Parse one mountinfo row into a `MountPoint`.
///
/// Returns `Ok(None)` for comment/blank rows; any extra trailing fields
/// beyond `super_options` are ignored.
pub fn parse_mount_point<'a>(row: &'a [u8])
    -> Result<Option<MountPoint<'a>>, ParseRowError>
{
    let row = rstrip_cr(&row);
    if is_comment_line(row) {
        return Ok(None);
    }
    // Fields in mountinfo column order; `row` shrinks as each is consumed.
    let (mount_id, row) = try!(parse_int(row));
    let (parent_id, row) = try!(parse_int(row));
    let (major, minor, row) = try!(parse_major_minor(row));
    let (root, row) = try!(parse_os_str(row));
    let (mount_point, row) = try!(parse_os_str(row));
    let (mount_options, row) = try!(parse_os_str(row));
    let (optional_fields, row) = try!(parse_optional(row));
    let (fstype, row) = try!(parse_os_str(row));
    let (mount_source, row) = try!(parse_os_str(row));
    let (super_options, _) = try!(parse_os_str(row));
    // TODO: should we ignore extra fields?
    Ok(Some(MountPoint {
        mount_id: mount_id,
        parent_id: parent_id,
        major: major,
        minor: minor,
        root: root,
        mount_point: mount_point,
        mount_options: mount_options,
        optional_fields: optional_fields,
        fstype: fstype,
        mount_source: mount_source,
        super_options: super_options,
    }))
}
/// True when `row` should be skipped: it is empty, or its first byte that is
/// not a space/tab is `#`. A row of *only* spaces/tabs is not a comment.
fn is_comment_line(row: &[u8]) -> bool {
    if row.is_empty() {
        return true;
    }
    match row.iter().find(|&&c| c != b' ' && c != b'\t') {
        Some(&b'#') => true,
        _ => false,
    }
}
/// Strip a single trailing carriage return from `row`, if present.
fn rstrip_cr(row: &[u8]) -> &[u8] {
    match row.split_last() {
        Some((&b'\r', head)) => head,
        _ => row,
    }
}
/// Consume one field from `data`: skip leading spaces, then split around the
/// first occurrence of `delimit`, returning `(field, rest)`.
fn parse_field<'a>(data: &'a [u8], delimit: &'a [u8])
    -> Result<(&'a [u8], &'a [u8]), ParseRowError>
{
    if data.is_empty() {
        return Err(ParseRowError(format!("Expected more fields")));
    }
    let data = lstrip_whitespaces(data);
    Ok(split_by(data, delimit))
}
/// Parse a space-terminated field as an `OsStr`, decoding `\NNN` escapes.
fn parse_os_str<'a>(data: &'a [u8])
    -> Result<(Cow<'a, OsStr>, &'a [u8]), ParseRowError>
{
    let (field, tail) = try!(parse_field(data, b" "));
    Ok((unescape_octals(OsStr::from_bytes(field)), tail))
}
/// Parse a space-terminated field as a base-10 `c_ulong`.
fn parse_int(data: &[u8])
    -> Result<(c_ulong, &[u8]), ParseRowError>
{
    let (field, tail) = try!(parse_field(data, b" "));
    // The field must be valid UTF-8 before it can be parsed as a number.
    let v = try!(std::str::from_utf8(field).map_err(|e| {
        ParseRowError(format!("Cannot parse integer {:?}: {}",
            String::from_utf8_lossy(field).into_owned(), e))}));
    let v = try!(c_ulong::from_str_radix(v, 10).map_err(|e| {
        ParseRowError(format!("Cannot parse integer {:?}: {}",
            String::from_utf8_lossy(field).into_owned(), e))}));
    Ok((v, tail))
}
/// Parse the `major:minor` device-number field.
fn parse_major_minor(data: &[u8])
    -> Result<(c_ulong, c_ulong, &[u8]), ParseRowError>
{
    let (major_field, data) = try!(parse_field(data, b":"));
    let (minor_field, tail) = try!(parse_field(data, b" "));
    let (major, _) = try!(parse_int(major_field));
    let (minor, _) = try!(parse_int(minor_field));
    Ok((major, minor, tail))
}
/// Parse the optional-fields section: everything up to the `"- "` separator,
/// right-trimmed, with `\NNN` escapes decoded.
fn parse_optional<'a>(data: &'a [u8])
    -> Result<(Cow<'a, OsStr>, &'a [u8]), ParseRowError>
{
    let (field, tail) = try!(parse_field(data, b"- "));
    let field = rstrip_whitespaces(field);
    Ok((unescape_octals(OsStr::from_bytes(field)), tail))
}
/// Drop leading space bytes (ASCII spaces only, not tabs).
fn lstrip_whitespaces(v: &[u8]) -> &[u8] {
    let start = v.iter().position(|&c| c != b' ').unwrap_or(v.len());
    &v[start..]
}
/// Drop trailing space bytes (ASCII spaces only, not tabs).
fn rstrip_whitespaces(v: &[u8]) -> &[u8] {
    let end = v.iter().rposition(|&c| c != b' ').map_or(0, |i| i + 1);
    &v[..end]
}
/// Split `v` around the first occurrence of `needle`, excluding the needle
/// itself. When `needle` is absent or longer than `v`, returns `(v, empty)`;
/// an empty needle splits at the very start.
fn split_by<'a, 'b>(v: &'a [u8], needle: &'b [u8]) -> (&'a [u8], &'a [u8]) {
    if needle.len() <= v.len() {
        for i in 0..=(v.len() - needle.len()) {
            if v[i..].starts_with(needle) {
                return (&v[..i], &v[i + needle.len()..]);
            }
        }
    }
    (&v[0..], &v[0..0])
}
/// Replace `\NNN` octal escapes in `s` with their byte values.
///
/// Borrows `s` unchanged when it contains no escapes; otherwise allocates a
/// new `OsString` with every escape decoded.
fn unescape_octals(s: &OsStr) -> Cow<OsStr> {
    // First pass: locate the first escape (if any) without allocating.
    let (mut i, has_escapes) = {
        let bytes = s.as_bytes();
        let mut i = 0;
        while i < bytes.len() {
            if is_octal_encoding(&bytes[i..]) {
                break;
            }
            i += 1;
        }
        (i, i < bytes.len())
    };
    if !has_escapes {
        return Cow::Borrowed(s);
    }
    // Second pass: copy the clean prefix, then decode the remainder.
    let mut v: Vec<u8> = vec!();
    let bytes = s.as_bytes();
    v.extend_from_slice(&bytes[..i]);
    while i < bytes.len() {
        if is_octal_encoding(&bytes[i..]) {
            // Skip the backslash; the next three bytes are octal digits.
            let c = parse_octal(&bytes[i + 1..]);
            v.push(c);
            i += 4;
        } else {
            v.push(bytes[i]);
            i += 1;
        }
    }
    Cow::Owned(OsString::from_vec(v))
}
fn is_octal_encoding(v: &[u8]) -> bool {
v.len() >= 4 && v[0] == b'\\'
&& is_oct(v[1]) && is_oct(v[2]) && is_oct(v[3])
}
/// True for the ASCII octal digits `'0'..='7'`.
fn is_oct(c: u8) -> bool {
    // Range-contains is the clippy-recommended form of the double compare.
    (b'0'..=b'7').contains(&c)
}
/// Decodes three octal-digit bytes into one byte value.
///
/// Only the low three bits of each digit are kept, so overflowing escapes
/// such as `\777` silently wrap instead of erroring (matching mount(8)).
fn parse_octal(v: &[u8]) -> u8 {
    let high = (v[0] & 7) << 6;
    let mid = (v[1] & 7) << 3;
    let low = v[2] & 7;
    high | mid | low
}
// Unit tests covering the octal-escape helpers and full mountinfo rows:
// happy path, comment lines, optional fields, kernel escaping, non-UTF-8
// paths, CR/LF line endings and the error cases.
#[cfg(test)]
mod test {
    use std::path::Path;
    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;
    use libc::{MS_NOSUID, MS_NODEV, MS_NOEXEC, MS_RELATIME};
    use super::{MountInfoParser, ParseError};
    use super::{is_octal_encoding, parse_octal, unescape_octals};
    #[test]
    fn test_is_octal_encoding() {
        assert!(is_octal_encoding(b"\\000"));
        assert!(is_octal_encoding(b"\\123"));
        assert!(is_octal_encoding(b"\\777"));
        assert!(!is_octal_encoding(b""));
        assert!(!is_octal_encoding(b"\\"));
        assert!(!is_octal_encoding(b"000"));
        assert!(!is_octal_encoding(b"\\00"));
        assert!(!is_octal_encoding(b"\\800"));
    }
    #[test]
    fn test_parse_octal() {
        assert_eq!(parse_octal(b"000"), 0);
        assert_eq!(parse_octal(b"123"), 83);
        assert_eq!(parse_octal(b"377"), 255);
        // mount utility just ignores overflowing
        assert_eq!(parse_octal(b"777"), 255);
    }
    #[test]
    fn test_unescape_octals() {
        assert_eq!(unescape_octals(OsStr::new("\\000")), OsStr::from_bytes(b"\x00"));
        // Incomplete escapes (fewer than 3 digits) are left untouched.
        assert_eq!(unescape_octals(OsStr::new("\\00")), OsStr::new("\\00"));
        assert_eq!(unescape_octals(OsStr::new("test\\040data")), OsStr::new("test data"));
    }
    #[test]
    fn test_mount_info_parser_proc() {
        let content = b"19 24 0:4 / /proc rw,nosuid,nodev,noexec,relatime shared:12 - proc proc rw";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_id, 19);
        assert_eq!(mount_point.parent_id, 24);
        assert_eq!(mount_point.major, 0);
        assert_eq!(mount_point.minor, 4);
        assert_eq!(mount_point.root, Path::new("/"));
        assert_eq!(mount_point.mount_point, Path::new("/proc"));
        assert_eq!(mount_point.mount_options, OsStr::new("rw,nosuid,nodev,noexec,relatime"));
        assert_eq!(mount_point.optional_fields, OsStr::new("shared:12"));
        assert_eq!(mount_point.fstype, OsStr::new("proc"));
        assert_eq!(mount_point.mount_source, OsStr::new("proc"));
        assert_eq!(mount_point.super_options, OsStr::new("rw"));
        assert_eq!(mount_point.get_flags(), MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME);
        assert!(parser.next().is_none());
    }
    // Lines starting with '#' are skipped, but '#' inside a field is data.
    #[test]
    fn test_mount_info_parser_comment() {
        let content = b"# Test comment\n\
                        \t # Another shifted comment\n\
                        19 24 0:4 / /#proc rw,nosuid,nodev,noexec,relatime shared:12 - proc proc rw";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_id, 19);
        assert_eq!(mount_point.parent_id, 24);
        assert_eq!(mount_point.major, 0);
        assert_eq!(mount_point.minor, 4);
        assert_eq!(mount_point.root, Path::new("/"));
        assert_eq!(mount_point.mount_point, Path::new("/#proc"));
        assert_eq!(mount_point.mount_options, OsStr::new("rw,nosuid,nodev,noexec,relatime"));
        assert_eq!(mount_point.optional_fields, OsStr::new("shared:12"));
        assert_eq!(mount_point.fstype, OsStr::new("proc"));
        assert_eq!(mount_point.mount_source, OsStr::new("proc"));
        assert_eq!(mount_point.super_options, OsStr::new("rw"));
        assert_eq!(mount_point.get_flags(), MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME);
        assert!(parser.next().is_none());
    }
    #[test]
    fn test_mount_info_parser_missing_optional_fields() {
        let content = b"335 294 0:56 / /proc rw,relatime - proc proc rw";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_id, 335);
        assert_eq!(mount_point.parent_id, 294);
        assert_eq!(mount_point.major, 0);
        assert_eq!(mount_point.minor, 56);
        assert_eq!(mount_point.root, Path::new("/"));
        assert_eq!(mount_point.mount_point, Path::new("/proc"));
        assert_eq!(mount_point.mount_options, OsStr::new("rw,relatime"));
        assert_eq!(mount_point.optional_fields, OsStr::new(""));
        assert_eq!(mount_point.fstype, OsStr::new("proc"));
        assert_eq!(mount_point.mount_source, OsStr::new("proc"));
        assert_eq!(mount_point.super_options, OsStr::new("rw"));
        assert_eq!(mount_point.get_flags(), MS_RELATIME);
        assert!(parser.next().is_none());
    }
    #[test]
    fn test_mount_info_parser_more_optional_fields() {
        let content = b"335 294 0:56 / /proc rw,relatime shared:12 master:1 - proc proc rw";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_id, 335);
        assert_eq!(mount_point.parent_id, 294);
        assert_eq!(mount_point.major, 0);
        assert_eq!(mount_point.minor, 56);
        assert_eq!(mount_point.root, Path::new("/"));
        assert_eq!(mount_point.mount_point, Path::new("/proc"));
        assert_eq!(mount_point.mount_options, OsStr::new("rw,relatime"));
        assert_eq!(mount_point.optional_fields, OsStr::new("shared:12 master:1"));
        assert_eq!(mount_point.fstype, OsStr::new("proc"));
        assert_eq!(mount_point.mount_source, OsStr::new("proc"));
        assert_eq!(mount_point.super_options, OsStr::new("rw"));
        assert_eq!(mount_point.get_flags(), MS_RELATIME);
        assert!(parser.next().is_none());
    }
    // \040, \011, \012 and \134 decode to space, tab, newline and backslash.
    #[test]
    fn test_mount_info_parser_escaping() {
        let content = br"76 24 8:6 / /home/my\040super\011name\012\134 rw,relatime shared:29 - ext4 /dev/sda1 rw,data=ordered";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_id, 76);
        assert_eq!(mount_point.parent_id, 24);
        assert_eq!(mount_point.major, 8);
        assert_eq!(mount_point.minor, 6);
        assert_eq!(mount_point.root, Path::new("/"));
        assert_eq!(mount_point.mount_point, Path::new("/home/my super\tname\n\\"));
        assert_eq!(mount_point.mount_options, OsStr::new("rw,relatime"));
        assert_eq!(mount_point.optional_fields, OsStr::new("shared:29"));
        assert_eq!(mount_point.fstype, OsStr::new("ext4"));
        assert_eq!(mount_point.mount_source, OsStr::new("/dev/sda1"));
        assert_eq!(mount_point.super_options, OsStr::new("rw,data=ordered"));
        assert_eq!(mount_point.get_flags(), MS_RELATIME);
        assert!(parser.next().is_none());
    }
    // Paths are OsStr, so arbitrary non-UTF-8 bytes must survive parsing.
    #[test]
    fn test_mount_info_parser_non_utf8() {
        let content = b"22 24 0:19 / /\xff rw shared:5 - tmpfs tmpfs rw,mode=755";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_point, Path::new(OsStr::from_bytes(b"/\xff")));
        assert_eq!(mount_point.mount_options, OsStr::new("rw"));
        assert_eq!(mount_point.fstype, OsStr::new("tmpfs"));
        assert_eq!(mount_point.mount_source, OsStr::new("tmpfs"));
        assert_eq!(mount_point.get_flags(), 0);
        assert!(parser.next().is_none());
    }
    // Blank lines and CR / CRLF terminators between rows must be tolerated.
    #[test]
    fn test_mount_info_parser_crlf() {
        let content = b"26 20 0:21 / /tmp rw shared:4 - tmpfs tmpfs rw\r\n\
                        \n\
                        \r\n\
                        27 22 0:22 / /tmp rw,nosuid,nodev shared:6 - tmpfs tmpfs rw\r";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_point, Path::new("/tmp"));
        assert_eq!(mount_point.mount_options, OsStr::new("rw"));
        assert_eq!(mount_point.super_options, OsStr::new("rw"));
        assert_eq!(mount_point.get_flags(), 0);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_point, Path::new("/tmp"));
        assert_eq!(mount_point.mount_options, OsStr::new("rw,nosuid,nodev"));
        assert_eq!(mount_point.super_options, OsStr::new("rw"));
        assert_eq!(mount_point.get_flags(), MS_NOSUID | MS_NODEV);
        assert!(parser.next().is_none());
    }
    #[test]
    fn test_mount_info_parser_incomplete_row() {
        let content = b"19 24 0:4 / /proc rw,relatime shared:12 - proc proc";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_info_res = parser.next().unwrap();
        assert!(mount_info_res.is_err());
        match mount_info_res {
            Err(ParseError {ref msg, ..}) => {
                assert_eq!(msg, "Expected more fields");
            },
            _ => panic!("Expected incomplete row error")
        }
        assert!(parser.next().is_none());
    }
    #[test]
    fn test_mount_info_parser_invalid_int() {
        let content = b"19 24b 0:4 / /proc rw,relatime - proc proc rw";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_info_res = parser.next().unwrap();
        assert!(mount_info_res.is_err());
        match mount_info_res {
            Err(ParseError {ref msg, ..}) => {
                assert!(msg.starts_with("Cannot parse integer \"24b\":"));
            },
            _ => panic!("Expected invalid row error")
        }
        assert!(parser.next().is_none());
    }
    #[test]
    fn test_mount_info_parser_overflowed_int() {
        let content = b"111111111111111111111";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_info_res = parser.next().unwrap();
        assert!(mount_info_res.is_err());
        match mount_info_res {
            Err(ParseError {ref msg, ..}) => {
                assert!(msg.starts_with("Cannot parse integer \"111111111111111111111\""));
            },
            _ => panic!("Expected invalid row error")
        }
        assert!(parser.next().is_none());
    }
    // A backslash not followed by 3 octal digits is passed through verbatim.
    #[test]
    fn test_mount_info_parser_invalid_escape() {
        let content = b"19 24 0:4 / /proc\\1 rw,relatime - proc proc rw";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_point, Path::new("/proc\\1"));
        assert!(parser.next().is_none());
    }
    // \400 wraps to 0x00 because only 3 bits of each digit are kept.
    #[test]
    fn test_mount_info_parser_overflowed_escape() {
        let content = b"19 24 0:4 / /proc\\400 rw,nosuid,nodev,noexec,relatime - proc proc rw";
        let mut parser = MountInfoParser::new(&content[..]);
        let mount_point = parser.next().unwrap().unwrap();
        assert_eq!(mount_point.mount_point, Path::new(OsStr::from_bytes(b"/proc\x00")));
        assert!(parser.next().is_none());
    }
}
| 35.504363 | 127 | 0.568521 |
d64a7aab7bc8e5b624d90925189842253a2a3136
| 2,311 |
use super::*;
use proptest::strategy::Strategy;
// Property test: for any list on the left and any right-hand term that is
// neither a list nor a bitstring, `native(left, right)` must be false.
#[test]
fn without_list_or_bitstring_returns_false() {
    run!(
        |arc_process| {
            (
                strategy::term::is_list(arc_process.clone()),
                strategy::term(arc_process.clone())
                    .prop_filter("Right cannot be a list or bitstring", |right| {
                        !(right.is_list() || right.is_bitstring())
                    }),
            )
        },
        |(left, right)| {
            prop_assert_eq!(native(left, right), false.into());
            Ok(())
        },
    );
}
// The empty list (NIL) on the right: `native` must return false.
#[test]
fn with_empty_list_right_returns_false() {
    is_equal_or_less_than(|_, _| Term::NIL, false);
}
// Right operand is the improper list `[0 | 0]`, which is lesser than the
// left operand built by the helper below, so `native` must return false.
#[test]
fn with_lesser_list_right_returns_false() {
    is_equal_or_less_than(
        |_, process| {
            process
                .cons(process.integer(0).unwrap(), process.integer(0).unwrap())
                .unwrap()
        },
        false,
    );
}
// Right operand is the exact same term as the left: `native` must be true.
#[test]
fn with_same_list_right_returns_true() {
    is_equal_or_less_than(|left, _| left, true);
}
// Right operand is a freshly allocated `[0 | 1]` — equal by value but not
// by identity to the left operand — and `native` must still return true.
#[test]
fn with_same_value_list_right_returns_true() {
    is_equal_or_less_than(
        |_, process| {
            process
                .cons(process.integer(0).unwrap(), process.integer(1).unwrap())
                .unwrap()
        },
        true,
    );
}
// Right operand `[0 | 2]` is greater than the left operand, so `native`
// must return true.
#[test]
fn with_greater_list_right_returns_true() {
    is_equal_or_less_than(
        |_, process| {
            process
                .cons(process.integer(0).unwrap(), process.integer(2).unwrap())
                .unwrap()
        },
        true,
    );
}
// Property test: `native(list, bitstring)` must be true for every
// (list, bitstring) pair generated by the strategies.
#[test]
fn with_bitstring_right_returns_true() {
    run!(
        |arc_process| {
            (
                strategy::term::is_list(arc_process.clone()),
                strategy::term::is_bitstring(arc_process.clone()),
            )
        },
        |(left, right)| {
            prop_assert_eq!(native(left, right), true.into());
            Ok(())
        },
    );
}
// Shared driver for the single-case tests above: fixes the left operand to
// `[0 | 1]` (built on the process heap) and delegates to the parent
// module's `is_equal_or_less_than` helper with the given right-operand
// constructor and the expected boolean result.
fn is_equal_or_less_than<R>(right: R, expected: bool)
where
    R: FnOnce(Term, &Process) -> Term,
{
    super::is_equal_or_less_than(
        |process| {
            process
                .cons(process.integer(0).unwrap(), process.integer(1).unwrap())
                .unwrap()
        },
        right,
        expected,
    );
}
| 22.656863 | 81 | 0.512765 |
b9bf4ba13af807d702424c524773a8dd4cc1b10f
| 85,691 |
pub mod flags_register;
pub mod instruction;
pub mod registers;
use self::instruction::{
ADDHLTarget, ArithmeticTarget, BitPosition, IncDecTarget, Indirect, Instruction, JumpTest,
LoadByteSource, LoadByteTarget, LoadType, LoadWordTarget, PrefixTarget, StackTarget,
};
use self::registers::Registers;
use crate::memory_bus::{MemoryBus, LCDSTAT_VECTOR, TIMER_VECTOR, VBLANK_VECTOR};
/// # Macros
///
/// The following are macros for generating repetitive code needed for processing CPU
/// instructions. For more information on macros read [the chapter in the Rust book](https://doc.rust-lang.org/book/second-edition/appendix-04-macros.html).
// Macro for changing the CPU based on the value of a 8 bit register
macro_rules! manipulate_8bit_register {
    // NOTE: in every pattern, `$work` names a method on the CPU
    // (`$self.$work(...)`) that receives the current register value — plus a
    // `BitPosition` in the `@` variants — and whose return value is written
    // back only when a `$setter` register is given.
    // Macro pattern for getting a value from a register and doing some work on that value
    //
    // # Example Usage:
    // ``` rust
    // manipulate_8bit_register!(self, a => print_register)
    // ```
    //
    // This above reads register `a` and then calls the method `print_register` with the
    // value from `a`
    ( $self:ident : $getter:ident => $work:ident) => {
        {
            let value = $self.registers.$getter;
            $self.$work(value)
        }
    };
    // Macro pattern for getting a value from a register and doing some work on that value and
    // writting it back to the register
    //
    // # Example Usage:
    // ``` rust
    // manipulate_8bit_register!(self, a => increment => d)
    // ```
    //
    // This above reads register `a` and then calls the method `increment` with the
    // value from `a` and then writes the result of `increment` into register `d`
    ( $self:ident : $getter:ident => $work:ident => $setter:ident) => {
        {
            let result = manipulate_8bit_register!($self: $getter => $work);
            $self.registers.$setter = result;
        }
    };
    // Macro pattern for getting a value from a register and doing some work on that value at a
    // specific bit pattern
    //
    // # Example Usage:
    // ``` rust
    // manipulate_8bit_register!(self, a => increment @ BitPosition::B2)
    // ```
    //
    // This above reads register `a` and then calls the method `increment` with the
    // value from `a` and the bit position marker `B2`
    ( $self:ident : ( $register:ident @ $bit_position:ident ) => $work:ident ) => {
        {
            let value = $self.registers.$register;
            $self.$work(value, $bit_position)
        }
    };
    // Macro pattern for getting a value from a register and doing some work on that value at a
    // specific bit pattern and writting it back to the register
    //
    // # Example Usage:
    // ``` rust
    // manipulate_8bit_register!(self, a => increment @ BitPosition::B2 => c)
    // ```
    //
    // This above reads register `a` and then calls the method `increment` with the
    // value from `a` and the bit position marker `B2` and then writes the result of the
    // call to `increment` into the register `c`.
    ( $self:ident : ( $getter:ident @ $bit_position:ident ) => $work:ident => $setter:ident) => {
        {
            let result = manipulate_8bit_register!($self: ( $getter @ $bit_position ) => $work);
            $self.registers.$setter = result;
        }
    };
}
// Macro for changing the value of a 16 bit register through some CPU method
// Arguments:
// * self (a.k.a the CPU)
// * a method for getting a register,
// * a method for changing register's value,
// * a method for setting a register,
//
// The macro gets the value from the register, performs work on that value and then sets the value back in the
// register
// Reads a 16 bit register via `$getter`, transforms the value with the CPU
// method `$work`, and stores the result back through `$setter`.
macro_rules! manipulate_16bit_register {
    ( $self:ident : $getter:ident => $work:ident => $setter:ident ) => {{
        let value = $self.registers.$getter();
        let new_value = $self.$work(value);
        $self.registers.$setter(new_value);
    }};
}
macro_rules! arithmetic_instruction {
    // Macro pattern for matching a register and then manipulating the register
    //
    // # Example Usage:
    // ``` rust
    // arithmetic_instruction!(register, self.foo)
    // ```
    //
    // The above matches a register and then calls the function `foo` to do work on the value
    // in that register.
    ( $register:ident, $self:ident.$work:ident) => {
        {
            match $register {
                ArithmeticTarget::A => manipulate_8bit_register!($self: a => $work),
                ArithmeticTarget::B => manipulate_8bit_register!($self: b => $work),
                ArithmeticTarget::C => manipulate_8bit_register!($self: c => $work),
                ArithmeticTarget::D => manipulate_8bit_register!($self: d => $work),
                ArithmeticTarget::E => manipulate_8bit_register!($self: e => $work),
                ArithmeticTarget::H => manipulate_8bit_register!($self: h => $work),
                ArithmeticTarget::L => manipulate_8bit_register!($self: l => $work),
                ArithmeticTarget::D8 => {
                    // Immediate operand: read the byte following the opcode.
                    let value = $self.read_next_byte();
                    $self.$work(value);
                }
                ArithmeticTarget::HLI => {
                    // Memory operand: read the byte at address HL.
                    let value = $self.bus.read_byte($self.registers.get_hl());
                    $self.$work(value);
                }
            };
            // The expansion evaluates to (next_pc, cycles): D8 consumes an
            // extra operand byte, and D8/HLI cost 8 cycles instead of 4.
            match $register {
                ArithmeticTarget::D8 => ($self.pc.wrapping_add(2), 8),
                ArithmeticTarget::HLI => ($self.pc.wrapping_add(1), 8),
                _ => ($self.pc.wrapping_add(1), 4)
            }
        }
    };
    // Macro pattern for matching a register and then manipulating the register and writing the
    // value back to the a register
    //
    // # Example Usage:
    // ``` rust
    // arithmetic_instruction!(register, self.foo => a)
    // ```
    //
    // The above matches a register and then calls the function `foo` to do work on the value
    // in that register and writes the result of `foo` into the a register.
    ( $register:ident, $self:ident.$work:ident => a) => {
        {
            match $register {
                ArithmeticTarget::A => manipulate_8bit_register!($self: a => $work => a),
                ArithmeticTarget::B => manipulate_8bit_register!($self: b => $work => a),
                ArithmeticTarget::C => manipulate_8bit_register!($self: c => $work => a),
                ArithmeticTarget::D => manipulate_8bit_register!($self: d => $work => a),
                ArithmeticTarget::E => manipulate_8bit_register!($self: e => $work => a),
                ArithmeticTarget::H => manipulate_8bit_register!($self: h => $work => a),
                ArithmeticTarget::L => manipulate_8bit_register!($self: l => $work => a),
                ArithmeticTarget::D8 => {
                    // Immediate operand; result always lands in register A.
                    let value = $self.read_next_byte();
                    let result = $self.$work(value);
                    $self.registers.a = result;
                }
                ArithmeticTarget::HLI => {
                    // Memory operand at HL; result always lands in register A.
                    let value = $self.bus.read_byte($self.registers.get_hl());
                    let result = $self.$work(value);
                    $self.registers.a = result;
                }
            };
            // (next_pc, cycles) — same accounting as the pattern above.
            match $register {
                ArithmeticTarget::D8 => ($self.pc.wrapping_add(2), 8),
                ArithmeticTarget::HLI => ($self.pc.wrapping_add(1), 8),
                _ => ($self.pc.wrapping_add(1), 4)
            }
        }
    };
}
macro_rules! prefix_instruction {
    // NOTE: all expansions return (pc + 2, cycles) because every 0xCB-prefixed
    // instruction is two bytes long; (HL) operands cost 16 cycles, others 8.
    // Macro pattern for matching a register and then manipulating the register and writing the
    // value back to the a register
    //
    // # Example Usage:
    // ``` rust
    // prefix_instruction!(register, self.foo => a)
    // ```
    //
    // The above matches a register and then calls the function `foo` to do work on the value
    // in that register and writes the result of `foo` into the `a` register.
    ( $register:ident, $self:ident.$work:ident => reg) => {
        {
            match $register {
                PrefixTarget::A => manipulate_8bit_register!($self: a => $work => a),
                PrefixTarget::B => manipulate_8bit_register!($self: b => $work => b),
                PrefixTarget::C => manipulate_8bit_register!($self: c => $work => c),
                PrefixTarget::D => manipulate_8bit_register!($self: d => $work => d),
                PrefixTarget::E => manipulate_8bit_register!($self: e => $work => e),
                PrefixTarget::H => manipulate_8bit_register!($self: h => $work => h),
                PrefixTarget::L => manipulate_8bit_register!($self: l => $work => l),
                PrefixTarget::HLI => {
                    // Read-modify-write of the byte at address HL.
                    let hl = $self.registers.get_hl();
                    let value = $self.bus.read_byte(hl);
                    let result = $self.$work(value);
                    $self.bus.write_byte(hl, result);
                }
            }
            let cycles = match $register {
                PrefixTarget::HLI => 16,
                _ => 8
            };
            ($self.pc.wrapping_add(2), cycles)
        }
    };
    // Macro pattern for matching a register and then manipulating the register at a specific bit
    // location and writing the value back to the a register
    //
    // # Example Usage:
    // ``` rust
    // prefix_instruction!(register, (self.foo @ bit_position) => a)
    // ```
    //
    // The above matches a register and then calls the function `foo` to do work on the value
    // in that register at the bit position `bit_position` and writes the result of `foo` into the `a` register.
    ( $register:ident, ( $self:ident.$work:ident @ $bit_position:ident ) => reg) => {
        {
            match $register {
                PrefixTarget::A => manipulate_8bit_register!($self: (a @ $bit_position) => $work => a),
                PrefixTarget::B => manipulate_8bit_register!($self: (b @ $bit_position) => $work => b),
                PrefixTarget::C => manipulate_8bit_register!($self: (c @ $bit_position) => $work => c),
                PrefixTarget::D => manipulate_8bit_register!($self: (d @ $bit_position) => $work => d),
                PrefixTarget::E => manipulate_8bit_register!($self: (e @ $bit_position) => $work => e),
                PrefixTarget::H => manipulate_8bit_register!($self: (h @ $bit_position) => $work => h),
                PrefixTarget::L => manipulate_8bit_register!($self: (l @ $bit_position) => $work => l),
                PrefixTarget::HLI => {
                    // Read-modify-write of the byte at address HL.
                    let hl = $self.registers.get_hl();
                    let value = $self.bus.read_byte(hl);
                    let result = $self.$work(value, $bit_position);
                    $self.bus.write_byte(hl, result);
                }
            }
            let cycles = match $register {
                PrefixTarget::HLI => 16,
                _ => 8
            };
            ($self.pc.wrapping_add(2), cycles)
        }
    };
    // Macro pattern for matching a register and then manipulating the register at a specific bit
    // location
    //
    // # Example Usage:
    // ``` rust
    // prefix_instruction!(register, (self.foo @ bit_position))
    // ```
    //
    // The above matches a register and then calls the function `foo` to do work on the value
    // in that register at the bit position `bit_position`
    ( $register:ident, $self:ident.$work:ident @ $bit_position:ident ) => {
        {
            match $register {
                PrefixTarget::A => manipulate_8bit_register!($self: (a @ $bit_position) => $work),
                PrefixTarget::B => manipulate_8bit_register!($self: (b @ $bit_position) => $work),
                PrefixTarget::C => manipulate_8bit_register!($self: (c @ $bit_position) => $work),
                PrefixTarget::D => manipulate_8bit_register!($self: (d @ $bit_position) => $work),
                PrefixTarget::E => manipulate_8bit_register!($self: (e @ $bit_position) => $work),
                PrefixTarget::H => manipulate_8bit_register!($self: (h @ $bit_position) => $work),
                PrefixTarget::L => manipulate_8bit_register!($self: (l @ $bit_position) => $work),
                PrefixTarget::HLI => {
                    // Read-only access to the byte at address HL (e.g. BIT).
                    let value = $self.bus.read_byte($self.registers.get_hl());
                    $self.$work(value, $bit_position);
                }
            }
            let cycles = match $register {
                PrefixTarget::HLI => 16,
                _ => 8
            };
            ($self.pc.wrapping_add(2), cycles)
        }
    };
}
#[cfg_attr(feature = "serialize", derive(Serialize))]
pub struct CPU {
    /// The CPU's register file (A, B, C, D, E, H, L and the flags).
    pub registers: Registers,
    /// Program counter: address of the next instruction to fetch.
    pc: u16,
    /// Stack pointer, used by `push`/interrupt dispatch.
    sp: u16,
    /// Memory bus connecting the CPU to ROM, RAM and the other hardware.
    pub bus: MemoryBus,
    /// While true, `step` does not advance the PC; cleared again as soon as
    /// the bus reports a pending interrupt.
    is_halted: bool,
    /// Master interrupt-enable flag: gates interrupt dispatch in `step` and
    /// is cleared by `interrupt` while a handler runs.
    interrupts_enabled: bool,
}
impl CPU {
pub fn new(boot_rom: Option<Vec<u8>>, game_rom: Vec<u8>) -> CPU {
CPU {
registers: Registers::new(),
pc: 0x0,
sp: 0x00,
bus: MemoryBus::new(boot_rom, game_rom),
is_halted: false,
interrupts_enabled: true,
}
}
    /// Runs one fetch/decode/execute cycle, then services pending
    /// interrupts, and returns the number of cycles consumed (including a
    /// 12 cycle penalty when an interrupt was dispatched).
    pub fn step(&mut self) -> u8 {
        // 0xCB is the prefix marker: the actual opcode is the next byte.
        let mut instruction_byte = self.bus.read_byte(self.pc);
        let prefixed = instruction_byte == 0xCB;
        if prefixed {
            instruction_byte = self.read_next_byte();
        }
        let (next_pc, mut cycles) =
            if let Some(instruction) = Instruction::from_byte(instruction_byte, prefixed) {
                self.execute(instruction)
            } else {
                // Unknown opcode: abort with a descriptive message
                // (e.g. "0x100: Unknown instruction found - 0xcbff").
                let description = format!(
                    "0x{}{:x}",
                    if prefixed { "cb" } else { "" },
                    instruction_byte
                );
                panic!(
                    "0x{:x}: Unknown instruction found - {}",
                    self.pc, description
                )
            };
        // Let the hardware behind the bus observe the elapsed cycles.
        self.bus.step(cycles);
        // A pending interrupt wakes a halted CPU.
        if self.bus.has_interrupt() {
            self.is_halted = false;
        }
        // While halted the PC stays frozen.
        if !self.is_halted {
            self.pc = next_pc;
        }
        // Interrupt dispatch, highest priority first: VBlank, LCD STAT,
        // Timer. Each flag is acknowledged (cleared) before jumping.
        // NOTE(review): if several flags are set at once, every matching
        // handler is dispatched back-to-back within this single step;
        // confirm that is the intended behaviour.
        let mut interrupted = false;
        if self.interrupts_enabled {
            if self.bus.interrupt_enable.vblank && self.bus.interrupt_flag.vblank {
                interrupted = true;
                self.bus.interrupt_flag.vblank = false;
                self.interrupt(VBLANK_VECTOR)
            }
            if self.bus.interrupt_enable.lcdstat && self.bus.interrupt_flag.lcdstat {
                interrupted = true;
                self.bus.interrupt_flag.lcdstat = false;
                self.interrupt(LCDSTAT_VECTOR)
            }
            if self.bus.interrupt_enable.timer && self.bus.interrupt_flag.timer {
                interrupted = true;
                self.bus.interrupt_flag.timer = false;
                self.interrupt(TIMER_VECTOR)
            }
        }
        // NOTE(review): `interrupt` already advances the bus by 12 cycles;
        // confirm this extra +12 in the return value is not double counted.
        if interrupted {
            cycles += 12;
        }
        cycles
    }
    /// Dispatches an interrupt: disables further interrupts, pushes the
    /// current PC onto the stack, jumps to the handler at `location`, and
    /// advances the bus-side hardware by the 12 cycles this takes.
    fn interrupt(&mut self, location: u16) {
        self.interrupts_enabled = false;
        // Save the return address before redirecting execution.
        self.push(self.pc);
        self.pc = location;
        self.bus.step(12);
    }
pub fn execute(&mut self, instruction: Instruction) -> (u16, u8) {
// OPCodes Map: http://pastraiser.com/cpu/gameboy/gameboy_opcodes.html
// OPCodes Explanation: https://web.archive.org/web/20181009131634/http://www.chrisantonellis.com/files/gameboy/gb-instructions.txt
match instruction {
Instruction::INC(target) => {
// DESCRIPTION: (increment) - increment the value in a specific register by 1
// WHEN: target is 16 bit register
// PC: +1
// Cycles: 12
// Z:- S:- H:- C:-
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:0 H:? C:-
match target {
IncDecTarget::A => manipulate_8bit_register!(self: a => inc_8bit => a),
IncDecTarget::B => manipulate_8bit_register!(self: b => inc_8bit => b),
IncDecTarget::C => manipulate_8bit_register!(self: c => inc_8bit => c),
IncDecTarget::D => manipulate_8bit_register!(self: d => inc_8bit => d),
IncDecTarget::E => manipulate_8bit_register!(self: e => inc_8bit => e),
IncDecTarget::H => manipulate_8bit_register!(self: h => inc_8bit => h),
IncDecTarget::L => manipulate_8bit_register!(self: l => inc_8bit => l),
IncDecTarget::HLI => {
let hl = self.registers.get_hl();
let amount = self.bus.read_byte(hl);
let result = self.inc_8bit(amount);
self.bus.write_byte(hl, result);
}
IncDecTarget::BC => {
manipulate_16bit_register!(self: get_bc => inc_16bit => set_bc)
}
IncDecTarget::DE => {
manipulate_16bit_register!(self: get_de => inc_16bit => set_de)
}
IncDecTarget::HL => {
manipulate_16bit_register!(self: get_hl => inc_16bit => set_hl)
}
IncDecTarget::SP => {
let amount = self.sp;
let result = self.inc_16bit(amount);
self.sp = result;
}
};
let cycles = match target {
IncDecTarget::BC | IncDecTarget::DE | IncDecTarget::HL | IncDecTarget::SP => 8,
IncDecTarget::HLI => 12,
_ => 4,
};
(self.pc.wrapping_add(1), cycles)
}
Instruction::DEC(target) => {
// DESCRIPTION: (decrement) - decrement the value in a specific register by 1
// WHEN: target is 16 bit register
// PC: +1
// Cycles: 12
// Z:- S:- H:- C:-
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:0 H:? C:-
match target {
IncDecTarget::A => manipulate_8bit_register!(self: a => dec_8bit => a),
IncDecTarget::B => manipulate_8bit_register!(self: b => dec_8bit => b),
IncDecTarget::C => manipulate_8bit_register!(self: c => dec_8bit => c),
IncDecTarget::D => manipulate_8bit_register!(self: d => dec_8bit => d),
IncDecTarget::E => manipulate_8bit_register!(self: e => dec_8bit => e),
IncDecTarget::H => manipulate_8bit_register!(self: h => dec_8bit => h),
IncDecTarget::L => manipulate_8bit_register!(self: l => dec_8bit => l),
IncDecTarget::HLI => {
let hl = self.registers.get_hl();
let amount = self.bus.read_byte(hl);
let result = self.dec_8bit(amount);
self.bus.write_byte(hl, result);
}
IncDecTarget::BC => {
manipulate_16bit_register!(self: get_bc => dec_16bit => set_bc)
}
IncDecTarget::DE => {
manipulate_16bit_register!(self: get_de => dec_16bit => set_de)
}
IncDecTarget::HL => {
manipulate_16bit_register!(self: get_hl => dec_16bit => set_hl)
}
IncDecTarget::SP => {
let amount = self.sp;
let result = self.dec_16bit(amount);
self.sp = result;
}
};
let cycles = match target {
IncDecTarget::BC | IncDecTarget::DE | IncDecTarget::HL | IncDecTarget::SP => 8,
IncDecTarget::HLI => 12,
_ => 4,
};
(self.pc.wrapping_add(1), cycles)
}
Instruction::ADD(register) => {
// DESCRIPTION: (add) - add the value stored in a specific register
// with the value in the A register
// WHEN: target is D8
// PC:+2
// Cycles: 8
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:0 H:? C:?
arithmetic_instruction!(register, self.add_without_carry => a)
}
Instruction::ADDHL(register) => {
// DESCRIPTION: (add) - add the value stored in a specific register
// with the value in the HL register
// PC:+1
// Cycles: 8
// Z:- S:0 H:? C:?
let value = match register {
ADDHLTarget::BC => self.registers.get_bc(),
ADDHLTarget::DE => self.registers.get_de(),
ADDHLTarget::HL => self.registers.get_hl(),
ADDHLTarget::SP => self.sp,
};
let result = self.add_hl(value);
self.registers.set_hl(result);
(self.pc.wrapping_add(1), 8)
}
Instruction::ADDSP => {
// DESCRIPTION: (add stack pointer) - add a one byte signed number to
// the value stored in the stack pointer register
// PC:+2
// Cycles: 16
// Z:0 S:0 H:? C:?
// First cast the byte as signed with `as i8` then extend it to 16 bits
// with `as i16` and then stop treating it like a signed integer with
// `as u16`
let value = self.read_next_byte() as i8 as i16 as u16;
let result = self.sp.wrapping_add(value);
// Half and whole carry are computed at the nibble and byte level instead
// of the byte and word level like you might expect for 16 bit values
let half_carry_mask = 0xF;
self.registers.f.half_carry =
(self.sp & half_carry_mask) + (value & half_carry_mask) > half_carry_mask;
let carry_mask = 0xff;
self.registers.f.carry = (self.sp & carry_mask) + (value & carry_mask) > carry_mask;
self.registers.f.zero = false;
self.registers.f.subtract = false;
self.sp = result;
(self.pc.wrapping_add(2), 16)
}
Instruction::ADC(register) => {
// DESCRIPTION: (add with carry) - add the value stored in a specific
// register with the value in the A register and the value in the carry flag
// WHEN: target is D8
// PC:+2
// Cycles: 8
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:0 H:? C:?
arithmetic_instruction!(register, self.add_with_carry => a)
}
Instruction::SUB(register) => {
// DESCRIPTION: (subtract) - subtract the value stored in a specific register
// with the value in the A register
// WHEN: target is D8
// PC:+2
// Cycles: 8
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:1 H:? C:?
arithmetic_instruction!(register, self.sub_without_carry => a)
}
Instruction::SBC(register) => {
// DESCRIPTION: (subtract) - subtract the value stored in a specific register
// with the value in the A register and the value in the carry flag
// WHEN: target is D8
// PC:+2
// Cycles: 8
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:1 H:? C:?
arithmetic_instruction!(register, self.sub_with_carry => a)
}
Instruction::AND(register) => {
// DESCRIPTION: (AND) - do a bitwise and on the value in a specific
// register and the value in the A register
// WHEN: target is D8
// PC:+2
// Cycles: 8
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:0 H:1 C:0
arithmetic_instruction!(register, self.and => a)
}
Instruction::OR(register) => {
// DESCRIPTION: (OR) - do a bitwise or on the value in a specific
// register and the value in the A register
// WHEN: target is D8
// PC:+2
// Cycles: 8
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:0 H:0 C:0
arithmetic_instruction!(register, self.or => a)
}
Instruction::XOR(register) => {
// DESCRIPTION: (XOR) - do a bitwise xor on the value in a specific
// register and the value in the A register
// WHEN: target is D8
// PC:+2
// Cycles: 8
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:0 H:0 C:0
arithmetic_instruction!(register, self.xor => a)
}
Instruction::CP(register) => {
// DESCRIPTION: (compare) - just like SUB except the result of the
// subtraction is not stored back into A
// WHEN: target is D8
// PC:+2
// Cycles: 8
// WHEN: target is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC: +1
// Cycles: 4
// Z:? S:1 H:? C:?
arithmetic_instruction!(register, self.compare)
}
Instruction::CCF => {
// DESCRIPTION: (complement carry flag) - toggle the value of the carry flag
// PC:+1
// Cycles: 4
// Z:- S:0 H:0 C:?
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = !self.registers.f.carry;
(self.pc.wrapping_add(1), 4)
}
Instruction::SCF => {
// DESCRIPTION: (set carry flag) - set the carry flag to true
// PC:+1
// Cycles: 4
// Z:- S:0 H:0 C:1
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = true;
(self.pc.wrapping_add(1), 4)
}
Instruction::RRA => {
// DESCRIPTION: (rotate right A register) - bit rotate A register right through the carry flag
// PC:+1
// Cycles: 4
// Z:0 S:0 H:0 C:?
manipulate_8bit_register!(self: a => rotate_right_through_carry_retain_zero => a);
(self.pc.wrapping_add(1), 4)
}
Instruction::RLA => {
// DESCRIPTION: (rotate left A register) - bit rotate A register left through the carry flag
// PC:+1
// Cycles: 4
// Z:0 S:0 H:0 C:?
manipulate_8bit_register!(self: a => rotate_left_through_carry_retain_zero => a);
(self.pc.wrapping_add(1), 4)
}
Instruction::RRCA => {
// DESCRIPTION: (rotate right A register) - bit rotate A register right (not through the carry flag)
// PC:+1
// Cycles: 4
// Z:0 S:0 H:0 C:?
manipulate_8bit_register!(self: a => rotate_right_retain_zero => a);
(self.pc.wrapping_add(1), 4)
}
Instruction::RLCA => {
// DESCRIPTION: (rotate left A register) - bit rotate A register left (not through the carry flag)
// PC:+1
// Cycles: 4
// Z:0 S:0 H:0 C:?
manipulate_8bit_register!(self: a => rotate_left_retain_zero => a);
(self.pc.wrapping_add(1), 4)
}
Instruction::CPL => {
// DESCRIPTION: (complement) - toggle every bit of the A register
// PC:+1
// Cycles: 4
// Z:- S:1 H:1 C:-
manipulate_8bit_register!(self: a => complement => a);
(self.pc.wrapping_add(1), 4)
}
Instruction::DAA => {
// PC:+1
// Cycles: 4
// Z:? S:- H:0 C:?
manipulate_8bit_register!(self: a => decimal_adjust => a);
(self.pc.wrapping_add(1), 4)
}
Instruction::BIT(register, bit_position) => {
// DESCRIPTION: (bit test) - test to see if a specific bit of a specific register is set
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:? S:0 H:1 C:-
prefix_instruction!(register, self.bit_test @ bit_position)
}
Instruction::RES(register, bit_position) => {
// DESCRIPTION: (bit reset) - set a specific bit of a specific register to 0
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:- S:- H:- C:-
prefix_instruction!(register, (self.reset_bit @ bit_position) => reg)
}
Instruction::SET(register, bit_position) => {
// DESCRIPTION: (bit set) - set a specific bit of a specific register to 1
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:- S:- H:- C:-
prefix_instruction!(register, (self.set_bit @ bit_position) => reg)
}
Instruction::SRL(register) => {
// DESCRIPTION: (shift right logical) - bit shift a specific register right by 1
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:? S:0 H:0 C:?
prefix_instruction!(register, self.shift_right_logical => reg)
}
Instruction::RR(register) => {
// DESCRIPTION: (rotate right) - bit rotate a specific register right by 1 through the carry flag
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:? S:0 H:0 C:?
prefix_instruction!(register, self.rotate_right_through_carry_set_zero => reg)
}
Instruction::RL(register) => {
// DESCRIPTION: (rotate left) - bit rotate a specific register left by 1 through the carry flag
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:? S:0 H:0 C:?
prefix_instruction!(register, self.rotate_left_through_carry_set_zero => reg)
}
Instruction::RRC(register) => {
// DESCRIPTION: (rotate right) - bit rotate a specific register right by 1 (not through the carry flag)
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:? S:0 H:0 C:?
prefix_instruction!(register, self.rotate_right_set_zero => reg)
}
Instruction::RLC(register) => {
// DESCRIPTION: (rotate left) - bit rotate a specific register left by 1 (not through the carry flag)
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:? S:0 H:0 C:?
prefix_instruction!(register, self.rotate_left_set_zero => reg)
}
Instruction::SRA(register) => {
// DESCRIPTION: (shift right arithmetic) - arithmetic shift a specific register right by 1
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:? S:0 H:0 C:?
prefix_instruction!(register, self.shift_right_arithmetic => reg)
}
Instruction::SLA(register) => {
// DESCRIPTION: (shift left arithmetic) - arithmetic shift a specific register left by 1
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:? S:0 H:0 C:?
prefix_instruction!(register, self.shift_left_arithmetic => reg)
}
Instruction::SWAP(register) => {
// DESCRIPTION: switch upper and lower nibble of a specific register
// PC:+2
// WHEN: target is (HL):
// Cycles: 16
// ELSE:
// Cycles: 8
// Z:? S:0 H:0 C:0
prefix_instruction!(register, self.swap_nibbles => reg)
}
Instruction::JP(test) => {
// DESCRIPTION: conditionally jump to the address stored in the next word in memory
// PC:?/+3
// Cycles: 16/12
// Z:- N:- H:- C:-
let jump_condition = match test {
JumpTest::NotZero => !self.registers.f.zero,
JumpTest::NotCarry => !self.registers.f.carry,
JumpTest::Zero => self.registers.f.zero,
JumpTest::Carry => self.registers.f.carry,
JumpTest::Always => true,
};
self.jump(jump_condition)
}
Instruction::JR(test) => {
// DESCRIPTION: conditionally jump to the address that is N bytes away in memory
// where N is the next byte in memory interpreted as a signed byte
// PC:?/+2
// Cycles: 16/12
// Z:- N:- H:- C:-
let jump_condition = match test {
JumpTest::NotZero => !self.registers.f.zero,
JumpTest::NotCarry => !self.registers.f.carry,
JumpTest::Zero => self.registers.f.zero,
JumpTest::Carry => self.registers.f.carry,
JumpTest::Always => true,
};
self.jump_relative(jump_condition)
}
Instruction::JPI => {
// DESCRIPTION: jump to the address stored in HL
// 1
// PC:HL
// Cycles: 4
// Z:- N:- H:- C:-
(self.registers.get_hl(), 4)
}
Instruction::LD(load_type) => {
match load_type {
// DESCRIPTION: load byte store in a particular register into another
// particular register
// WHEN: source is d8
// PC:+2
// Cycles: 8
// WHEN: source is (HL)
// PC:+1
// Cycles: 8
// ELSE:
// PC:+1
// Cycles: 4
// Z:- N:- H:- C:-
LoadType::Byte(target, source) => {
let source_value = match source {
LoadByteSource::A => self.registers.a,
LoadByteSource::B => self.registers.b,
LoadByteSource::C => self.registers.c,
LoadByteSource::D => self.registers.d,
LoadByteSource::E => self.registers.e,
LoadByteSource::H => self.registers.h,
LoadByteSource::L => self.registers.l,
LoadByteSource::D8 => self.read_next_byte(),
LoadByteSource::HLI => self.bus.read_byte(self.registers.get_hl()),
};
match target {
LoadByteTarget::A => self.registers.a = source_value,
LoadByteTarget::B => self.registers.b = source_value,
LoadByteTarget::C => self.registers.c = source_value,
LoadByteTarget::D => self.registers.d = source_value,
LoadByteTarget::E => self.registers.e = source_value,
LoadByteTarget::H => self.registers.h = source_value,
LoadByteTarget::L => self.registers.l = source_value,
LoadByteTarget::HLI => {
self.bus.write_byte(self.registers.get_hl(), source_value)
}
};
match source {
LoadByteSource::D8 => (self.pc.wrapping_add(2), 8),
LoadByteSource::HLI => (self.pc.wrapping_add(1), 8),
_ => (self.pc.wrapping_add(1), 4),
}
}
// DESCRIPTION: load next word in memory into a particular register
// PC:+3
// Cycles: 12
// Z:- N:- H:- C:-
LoadType::Word(target) => {
let word = self.read_next_word();
match target {
LoadWordTarget::BC => self.registers.set_bc(word),
LoadWordTarget::DE => self.registers.set_de(word),
LoadWordTarget::HL => self.registers.set_hl(word),
LoadWordTarget::SP => self.sp = word,
};
(self.pc.wrapping_add(3), 12)
}
// DESCRIPTION: load a particular value stored at the source address into A
// WHEN: source is word indirect
// PC:+3
// Cycles: 16
// ELSE:
// PC:+1
// Cycles: 8
// Z:- N:- H:- C:-
LoadType::AFromIndirect(source) => {
self.registers.a = match source {
Indirect::BCIndirect => self.bus.read_byte(self.registers.get_bc()),
Indirect::DEIndirect => self.bus.read_byte(self.registers.get_de()),
Indirect::HLIndirectMinus => {
let hl = self.registers.get_hl();
self.registers.set_hl(hl.wrapping_sub(1));
self.bus.read_byte(hl)
}
Indirect::HLIndirectPlus => {
let hl = self.registers.get_hl();
self.registers.set_hl(hl.wrapping_add(1));
self.bus.read_byte(hl)
}
Indirect::WordIndirect => self.bus.read_byte(self.read_next_word()),
Indirect::LastByteIndirect => {
self.bus.read_byte(0xFF00 + self.registers.c as u16)
}
};
match source {
Indirect::WordIndirect => (self.pc.wrapping_add(3), 16),
_ => (self.pc.wrapping_add(1), 8),
}
}
// DESCRIPTION: load the A register into memory at the source address
// WHEN: instruction.source is word indirect
// PC:+3
// Cycles: 16
// ELSE:
// PC:+1
// Cycles: 8
// Z:- N:- H:- C:-
LoadType::IndirectFromA(target) => {
let a = self.registers.a;
match target {
Indirect::BCIndirect => {
let bc = self.registers.get_bc();
self.bus.write_byte(bc, a)
}
Indirect::DEIndirect => {
let de = self.registers.get_de();
self.bus.write_byte(de, a)
}
Indirect::HLIndirectMinus => {
let hl = self.registers.get_hl();
self.registers.set_hl(hl.wrapping_sub(1));
self.bus.write_byte(hl, a);
}
Indirect::HLIndirectPlus => {
let hl = self.registers.get_hl();
self.registers.set_hl(hl.wrapping_add(1));
self.bus.write_byte(hl, a);
}
Indirect::WordIndirect => {
let word = self.read_next_word();
self.bus.write_byte(word, a);
}
Indirect::LastByteIndirect => {
let c = self.registers.c as u16;
self.bus.write_byte(0xFF00 + c, a);
}
};
match target {
Indirect::WordIndirect => (self.pc.wrapping_add(3), 16),
_ => (self.pc.wrapping_add(1), 8),
}
}
// DESCRIPTION: Load the value in A into memory location located at 0xFF plus
// an offset stored as the next byte in memory
// PC:+2
// Cycles: 12
// Z:- N:- H:- C:-
LoadType::ByteAddressFromA => {
let offset = self.read_next_byte() as u16;
self.bus.write_byte(0xFF00 + offset, self.registers.a);
(self.pc.wrapping_add(2), 12)
}
// DESCRIPTION: Load the value located at 0xFF plus an offset stored as the next byte in memory into A
// PC:+2
// Cycles: 12
// Z:- N:- H:- C:-
LoadType::AFromByteAddress => {
let offset = self.read_next_byte() as u16;
self.registers.a = self.bus.read_byte(0xFF00 + offset);
(self.pc.wrapping_add(2), 12)
}
// DESCRIPTION: Load the value in HL into SP
// PC:+1
// Cycles: 8
// Z:- N:- H:- C:-
LoadType::SPFromHL => {
self.sp = self.registers.get_hl();
(self.pc.wrapping_add(1), 8)
}
// DESCRIPTION: Load memory address with the contents of SP
// PC:+3
// Cycles: 20
// Z:- N:- H:- C:-
LoadType::IndirectFromSP => {
let address = self.read_next_word();
let sp = self.sp;
self.bus.write_byte(address, (sp & 0xFF) as u8);
self.bus
.write_byte(address.wrapping_add(1), ((sp & 0xFF00) >> 8) as u8);
(self.pc.wrapping_add(3), 20)
}
// DESCRIPTION: load HL with SP plus some specified byte
// PC:+2
// Cycles: 12
// Z:0 N:0 H:? C:?
LoadType::HLFromSPN => {
let value = self.read_next_byte() as i8 as i16 as u16;
let result = self.sp.wrapping_add(value);
self.registers.set_hl(result);
self.registers.f.zero = false;
self.registers.f.subtract = false;
// Half and whole carry are computed at the nibble and byte level instead
// of the byte and word level like you might expect for 16 bit values
self.registers.f.half_carry = (self.sp & 0xF) + (value & 0xF) > 0xF;
self.registers.f.carry = (self.sp & 0xFF) + (value & 0xFF) > 0xFF;
(self.pc.wrapping_add(2), 12)
}
}
}
Instruction::PUSH(target) => {
// DESCRIPTION: push a value from a given register on to the stack
// PC:+1
// Cycles: 16
// Z:- N:- H:- C:-
let value = match target {
StackTarget::AF => self.registers.get_af(),
StackTarget::BC => self.registers.get_bc(),
StackTarget::DE => self.registers.get_de(),
StackTarget::HL => self.registers.get_hl(),
};
self.push(value);
(self.pc.wrapping_add(1), 16)
}
Instruction::POP(target) => {
// DESCRIPTION: pop a value from the stack and store it in a given register
// PC:+1
// Cycles: 12
// WHEN: target is AF
// Z:? N:? H:? C:?
// ELSE:
// Z:- N:- H:- C:-
let result = self.pop();
match target {
StackTarget::AF => self.registers.set_af(result),
StackTarget::BC => self.registers.set_bc(result),
StackTarget::DE => self.registers.set_de(result),
StackTarget::HL => self.registers.set_hl(result),
};
(self.pc.wrapping_add(1), 12)
}
Instruction::CALL(test) => {
// DESCRIPTION: Conditionally PUSH the would be instruction on to the
// stack and then jump to a specific address
// PC:?/+3
// Cycles: 24/12
// Z:- N:- H:- C:-
let jump_condition = match test {
JumpTest::NotZero => !self.registers.f.zero,
JumpTest::NotCarry => !self.registers.f.carry,
JumpTest::Zero => self.registers.f.zero,
JumpTest::Carry => self.registers.f.carry,
JumpTest::Always => true,
};
self.call(jump_condition)
}
Instruction::RET(test) => {
// DESCRIPTION: Conditionally POP two bytes from the stack and jump to that address
// PC:?/+1
// WHEN: condition is 'always'
// Cycles: 16/8
// ELSE:
// Cycles: 20/8
// Z:- N:- H:- C:-
let jump_condition = match test {
JumpTest::NotZero => !self.registers.f.zero,
JumpTest::NotCarry => !self.registers.f.carry,
JumpTest::Zero => self.registers.f.zero,
JumpTest::Carry => self.registers.f.carry,
JumpTest::Always => true,
};
let next_pc = self.return_(jump_condition);
let cycles = if jump_condition && test == JumpTest::Always {
16
} else if jump_condition {
20
} else {
8
};
(next_pc, cycles)
}
Instruction::RETI => {
// PC:?
// Cycles: 16
// Z:- N:- H:- C:-
self.interrupts_enabled = true;
(self.pop(), 16)
}
Instruction::RST(loc) => {
// PC:?
// Cycles: 24
// Z:- N:- H:- C:-
self.rst();
(loc.to_hex(), 24)
}
Instruction::NOP => {
// PC:+1
// Cycles: 4
// Z:- N:- H:- C:-
(self.pc.wrapping_add(1), 4)
}
Instruction::HALT => {
// PC:+1
// Cycles: 4
// Z:- N:- H:- C:-
self.is_halted = true;
(self.pc.wrapping_add(1), 4)
}
Instruction::DI => {
// PC:+1
// Cycles: 4
// Z:- N:- H:- C:-
self.interrupts_enabled = false;
(self.pc.wrapping_add(1), 4)
}
Instruction::EI => {
// PC:+1
// Cycles: 4
// Z:- N:- H:- C:-
self.interrupts_enabled = true;
(self.pc.wrapping_add(1), 4)
}
}
}
#[inline(always)]
fn push(&mut self, value: u16) {
self.sp = self.sp.wrapping_sub(1);
self.bus.write_byte(self.sp, ((value & 0xFF00) >> 8) as u8);
self.sp = self.sp.wrapping_sub(1);
self.bus.write_byte(self.sp, (value & 0xFF) as u8);
}
#[inline(always)]
fn pop(&mut self) -> u16 {
let lsb = self.bus.read_byte(self.sp) as u16;
self.sp = self.sp.wrapping_add(1);
let msb = self.bus.read_byte(self.sp) as u16;
self.sp = self.sp.wrapping_add(1);
(msb << 8) | lsb
}
#[inline(always)]
fn read_next_word(&self) -> u16 {
// Gameboy is little endian so read pc + 2 as most significant bit
// and pc + 1 as least significant bit
((self.bus.read_byte(self.pc + 2) as u16) << 8) | (self.bus.read_byte(self.pc + 1) as u16)
}
#[inline(always)]
fn read_next_byte(&self) -> u8 {
self.bus.read_byte(self.pc + 1)
}
#[inline(always)]
fn inc_8bit(&mut self, value: u8) -> u8 {
let new_value = value.wrapping_add(1);
self.registers.f.zero = new_value == 0;
self.registers.f.subtract = false;
// Half Carry is set if the lower nibble of the value is equal to 0xF.
// If the nibble is equal to 0xF (0b1111) that means incrementing the value
// by 1 would cause a carry from the lower nibble to the upper nibble.
self.registers.f.half_carry = value & 0xF == 0xF;
new_value
}
#[inline(always)]
fn inc_16bit(&mut self, value: u16) -> u16 {
value.wrapping_add(1)
}
#[inline(always)]
fn dec_8bit(&mut self, value: u8) -> u8 {
let new_value = value.wrapping_sub(1);
self.registers.f.zero = new_value == 0;
self.registers.f.subtract = true;
// Half Carry is set if the lower nibble of the value is equal to 0x0.
// If the nibble is equal to 0x0 (0b0000) that means decrementing the value
// by 1 would cause a carry from the upper nibble to the lower nibble.
self.registers.f.half_carry = value & 0xF == 0x0;
new_value
}
#[inline(always)]
fn dec_16bit(&mut self, value: u16) -> u16 {
value.wrapping_sub(1)
}
#[inline(always)]
fn add_without_carry(&mut self, value: u8) -> u8 {
self.add(value, false)
}
#[inline(always)]
fn add_with_carry(&mut self, value: u8) -> u8 {
self.add(value, true)
}
#[inline(always)]
fn add(&mut self, value: u8, add_carry: bool) -> u8 {
let additional_carry = if add_carry && self.registers.f.carry {
1
} else {
0
};
let (add, carry) = self.registers.a.overflowing_add(value);
let (add2, carry2) = add.overflowing_add(additional_carry);
self.registers.f.zero = add2 == 0;
self.registers.f.subtract = false;
self.registers.f.carry = carry || carry2;
// Half Carry is set if adding the lower nibbles of the value and register A
// together (plus the optional carry bit) result in a value bigger the 0xF.
// If the result is larger than 0xF than the addition caused a carry from
// the lower nibble to the upper nibble.
self.registers.f.half_carry =
((self.registers.a & 0xF) + (value & 0xF) + additional_carry) > 0xF;
add2
}
#[inline(always)]
fn add_hl(&mut self, value: u16) -> u16 {
let hl = self.registers.get_hl();
let (result, carry) = hl.overflowing_add(value);
self.registers.f.carry = carry;
self.registers.f.subtract = false;
// Half carry tests if we flow over the 11th bit i.e. does adding the two
// numbers together cause the 11th bit to flip
let mask = 0b111_1111_1111; // mask out bits 11-15
self.registers.f.half_carry = (value & mask) + (hl & mask) > mask;
result
}
#[inline(always)]
fn sub_without_carry(&mut self, value: u8) -> u8 {
self.sub(value, false)
}
#[inline(always)]
fn sub_with_carry(&mut self, value: u8) -> u8 {
self.sub(value, true)
}
#[inline(always)]
fn sub(&mut self, value: u8, sub_carry: bool) -> u8 {
let additional_carry = if sub_carry && self.registers.f.carry {
1
} else {
0
};
let (sub, carry) = self.registers.a.overflowing_sub(value);
let (sub2, carry2) = sub.overflowing_sub(additional_carry);
self.registers.f.zero = sub2 == 0;
self.registers.f.subtract = true;
self.registers.f.carry = carry || carry2;
// Half Carry is set if subtracting the lower nibbles of the value (and the
// optional carry bit) with register a will result in a value lower than 0x0.
// To avoid underflowing in this test, we can check if the lower nibble of a
// is less than the lower nibble of the value (with the additional carry)
self.registers.f.half_carry = (self.registers.a & 0xF) < (value & 0xF) + additional_carry;
sub2
}
#[inline(always)]
fn and(&mut self, value: u8) -> u8 {
let new_value = self.registers.a & value;
self.registers.f.zero = new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = true;
self.registers.f.carry = false;
new_value
}
#[inline(always)]
fn or(&mut self, value: u8) -> u8 {
let new_value = self.registers.a | value;
self.registers.f.zero = new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = false;
new_value
}
#[inline(always)]
fn xor(&mut self, value: u8) -> u8 {
let new_value = self.registers.a ^ value;
self.registers.f.zero = new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = false;
new_value
}
#[inline(always)]
fn compare(&mut self, value: u8) {
self.registers.f.zero = self.registers.a == value;
self.registers.f.subtract = true;
// Half Carry is set if subtracting the lower nibbles of the value with register
// a will result in a value lower than 0x0. To avoid underflowing in this test,
// we can check if the lower nibble of a is less than the lower nibble of the value
self.registers.f.half_carry = (self.registers.a & 0xF) < (value & 0xF);
self.registers.f.carry = self.registers.a < value;
}
    #[inline(always)]
    fn decimal_adjust(&mut self, value: u8) -> u8 {
        // DAA: adjust A after a BCD addition or subtraction so the result
        // reads as valid packed binary-coded decimal. Which correction is
        // applied depends on the subtract/half-carry/carry flags left by the
        // previous arithmetic instruction.
        // huge help from: https://github.com/Gekkio/mooneye-gb/blob/754403792d60821e12835ba454d7e8b66553ed22/core/src/cpu/mod.rs#L812-L846
        let flags = self.registers.f;
        let mut carry = false;
        // After an addition: fix each nibble upward independently.
        let result = if !flags.subtract {
            let mut result = value;
            if flags.carry || value > 0x99 {
                carry = true;
                result = result.wrapping_add(0x60);
            }
            if flags.half_carry || value & 0x0F > 0x09 {
                result = result.wrapping_add(0x06);
            }
            result
        } else if flags.carry {
            // After a subtraction with a full borrow: in wrapping arithmetic
            // 0xA0 == -0x60 and 0x9A == -0x66 (full + half borrow).
            carry = true;
            let add = if flags.half_carry { 0x9A } else { 0xA0 };
            value.wrapping_add(add)
        } else if flags.half_carry {
            // Half borrow only: 0xFA == -0x06 in wrapping arithmetic.
            value.wrapping_add(0xFA)
        } else {
            value
        };
        self.registers.f.zero = result == 0;
        self.registers.f.half_carry = false;
        self.registers.f.carry = carry;
        result
    }
#[inline(always)]
fn rotate_right_through_carry_retain_zero(&mut self, value: u8) -> u8 {
self.rotate_right_through_carry(value, false)
}
#[inline(always)]
fn rotate_right_through_carry_set_zero(&mut self, value: u8) -> u8 {
self.rotate_right_through_carry(value, true)
}
#[inline(always)]
fn rotate_right_through_carry(&mut self, value: u8, set_zero: bool) -> u8 {
let carry_bit = if self.registers.f.carry { 1 } else { 0 } << 7;
let new_value = carry_bit | (value >> 1);
self.registers.f.zero = set_zero && new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = value & 0b1 == 0b1;
new_value
}
#[inline(always)]
fn rotate_left_through_carry_retain_zero(&mut self, value: u8) -> u8 {
self.rotate_left_through_carry(value, false)
}
#[inline(always)]
fn rotate_left_through_carry_set_zero(&mut self, value: u8) -> u8 {
self.rotate_left_through_carry(value, true)
}
#[inline(always)]
fn rotate_left_through_carry(&mut self, value: u8, set_zero: bool) -> u8 {
let carry_bit = if self.registers.f.carry { 1 } else { 0 };
let new_value = (value << 1) | carry_bit;
self.registers.f.zero = set_zero && new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = (value & 0x80) == 0x80;
new_value
}
#[inline(always)]
fn rotate_right_set_zero(&mut self, value: u8) -> u8 {
self.rotate_right(value, true)
}
#[inline(always)]
fn rotate_right_retain_zero(&mut self, value: u8) -> u8 {
self.rotate_right(value, false)
}
#[inline(always)]
fn rotate_left_set_zero(&mut self, value: u8) -> u8 {
self.rotate_left(value, true)
}
#[inline(always)]
fn rotate_left_retain_zero(&mut self, value: u8) -> u8 {
self.rotate_left(value, false)
}
#[inline(always)]
fn rotate_left(&mut self, value: u8, set_zero: bool) -> u8 {
let carry = (value & 0x80) >> 7;
let new_value = value.rotate_left(1) | carry;
self.registers.f.zero = set_zero && new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = carry == 0x01;
new_value
}
#[inline(always)]
fn rotate_right(&mut self, value: u8, set_zero: bool) -> u8 {
let new_value = value.rotate_right(1);
self.registers.f.zero = set_zero && new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = value & 0b1 == 0b1;
new_value
}
#[inline(always)]
fn complement(&mut self, value: u8) -> u8 {
let new_value = !value;
self.registers.f.subtract = true;
self.registers.f.half_carry = true;
new_value
}
#[inline(always)]
fn bit_test(&mut self, value: u8, bit_position: BitPosition) {
let bit_position: u8 = bit_position.into();
let result = (value >> bit_position) & 0b1;
self.registers.f.zero = result == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = true;
}
#[inline(always)]
fn reset_bit(&mut self, value: u8, bit_position: BitPosition) -> u8 {
let bit_position: u8 = bit_position.into();
value & !(1 << bit_position)
}
#[inline(always)]
fn set_bit(&mut self, value: u8, bit_position: BitPosition) -> u8 {
let bit_position: u8 = bit_position.into();
value | (1 << bit_position)
}
#[inline(always)]
fn shift_right_logical(&mut self, value: u8) -> u8 {
let new_value = value >> 1;
self.registers.f.zero = new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = value & 0b1 == 0b1;
new_value
}
#[inline(always)]
fn shift_right_arithmetic(&mut self, value: u8) -> u8 {
let msb = value & 0x80;
let new_value = msb | (value >> 1);
self.registers.f.zero = new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = value & 0b1 == 0b1;
new_value
}
#[inline(always)]
fn shift_left_arithmetic(&mut self, value: u8) -> u8 {
let new_value = value << 1;
self.registers.f.zero = new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = value & 0x80 == 0x80;
new_value
}
#[inline(always)]
fn swap_nibbles(&mut self, value: u8) -> u8 {
let new_value = ((value & 0xf) << 4) | ((value & 0xf0) >> 4);
self.registers.f.zero = new_value == 0;
self.registers.f.subtract = false;
self.registers.f.half_carry = false;
self.registers.f.carry = false;
new_value
}
#[inline(always)]
fn jump(&self, should_jump: bool) -> (u16, u8) {
if should_jump {
(self.read_next_word(), 16)
} else {
(self.pc.wrapping_add(3), 12)
}
}
#[inline(always)]
fn jump_relative(&self, should_jump: bool) -> (u16, u8) {
let next_step = self.pc.wrapping_add(2);
if should_jump {
let offset = self.read_next_byte() as i8;
let pc = if offset >= 0 {
next_step.wrapping_add(offset as u16)
} else {
next_step.wrapping_sub(offset.abs() as u16)
};
(pc, 16)
} else {
(next_step, 12)
}
}
#[inline(always)]
fn call(&mut self, should_jump: bool) -> (u16, u8) {
let next_pc = self.pc.wrapping_add(3);
if should_jump {
self.push(next_pc);
(self.read_next_word(), 24)
} else {
(next_pc, 12)
}
}
#[inline(always)]
fn return_(&mut self, should_jump: bool) -> u16 {
if should_jump {
self.pop()
} else {
self.pc.wrapping_add(1)
}
}
#[inline(always)]
fn rst(&mut self) {
self.push(self.pc.wrapping_add(1));
}
}
#[cfg(test)]
mod tests {
use super::*;
macro_rules! test_instruction {
( $instruction:expr, $( $($register:ident).* => $value:expr ),* ) => {
{
let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
$(
cpu.registers$(.$register)* = $value;
)*
cpu.execute($instruction);
cpu
}
};
}
macro_rules! check_flags {
( $cpu:ident, zero => $zero:ident, subtract => $subtract:ident, half_carry => $half_carry:ident, carry => $carry:ident ) => {{
let flags = $cpu.registers.f;
println!("Flags: {:?}", flags);
assert_eq!(flags.zero, $zero);
assert_eq!(flags.subtract, $subtract);
assert_eq!(flags.half_carry, $half_carry);
assert_eq!(flags.carry, $carry);
}};
}
// INC
#[test]
fn execute_inc_8bit_non_overflow() {
let cpu = test_instruction!(Instruction::INC(IncDecTarget::A), a => 0x7);
assert_eq!(cpu.registers.a, 0x8);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_inc_8bit_half_carry() {
let cpu = test_instruction!(Instruction::INC(IncDecTarget::A), a => 0xF);
assert_eq!(cpu.registers.a, 0x10);
check_flags!(cpu, zero => false, subtract => false, half_carry => true, carry => false);
}
#[test]
fn execute_inc_8bit_overflow() {
let cpu = test_instruction!(Instruction::INC(IncDecTarget::A), a => 0xFF);
assert_eq!(cpu.registers.a, 0x0);
check_flags!(cpu, zero => true, subtract => false, half_carry => true, carry => false);
}
#[test]
fn execute_inc_16bit_byte_overflow() {
let instruction = Instruction::INC(IncDecTarget::BC);
let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
cpu.registers.set_bc(0xFF);
cpu.execute(instruction);
assert_eq!(cpu.registers.get_bc(), 0x0100);
assert_eq!(cpu.registers.b, 0x01);
assert_eq!(cpu.registers.c, 0x00);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_inc_16bit_overflow() {
let instruction = Instruction::INC(IncDecTarget::BC);
let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
cpu.registers.set_bc(0xFFFF);
cpu.execute(instruction);
assert_eq!(cpu.registers.get_bc(), 0x0);
assert_eq!(cpu.registers.b, 0x00);
assert_eq!(cpu.registers.c, 0x00);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
// DEC
#[test]
fn execute_dec_8bit_non_overflow() {
let cpu = test_instruction!(Instruction::DEC(IncDecTarget::A), a => 0x7);
assert_eq!(cpu.registers.a, 0x6);
check_flags!(cpu, zero => false, subtract => true, half_carry => false, carry => false);
}
#[test]
fn execute_dec_8bit_half_carry() {
let cpu = test_instruction!(Instruction::DEC(IncDecTarget::A), a => 0x80);
assert_eq!(cpu.registers.a, 0x7f);
check_flags!(cpu, zero => false, subtract => true, half_carry => true, carry => false);
}
#[test]
fn execute_dec_8bit_underflow() {
let cpu = test_instruction!(Instruction::DEC(IncDecTarget::A), a => 0x0);
assert_eq!(cpu.registers.a, 0xFF);
check_flags!(cpu, zero => false, subtract => true, half_carry => true, carry => false);
}
#[test]
fn execute_dec_16bit_underflow() {
let instruction = Instruction::DEC(IncDecTarget::BC);
let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
cpu.registers.set_bc(0x0000);
cpu.execute(instruction);
assert_eq!(cpu.registers.get_bc(), 0xFFFF);
assert_eq!(cpu.registers.b, 0xFF);
assert_eq!(cpu.registers.c, 0xFF);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
// ADD
#[test]
fn execute_add_8bit_non_overflow_target_a() {
let cpu = test_instruction!(Instruction::ADD(ArithmeticTarget::A), a => 0x7);
assert_eq!(cpu.registers.a, 0xe);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_add_8bit_non_overflow_target_c() {
let cpu = test_instruction!(Instruction::ADD(ArithmeticTarget::C), a => 0x7, c => 0x3);
assert_eq!(cpu.registers.a, 0xa);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_add_8bit_non_overflow_target_c_with_carry() {
let cpu = test_instruction!(Instruction::ADD(ArithmeticTarget::C), a => 0x7, c => 0x3, f.carry => true);
assert_eq!(cpu.registers.a, 0xa);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_add_8bit_carry() {
let cpu = test_instruction!(Instruction::ADD(ArithmeticTarget::B), a => 0xFC, b => 0x9);
assert_eq!(cpu.registers.a, 0x05);
check_flags!(cpu, zero => false, subtract => false, half_carry => true, carry => true);
}
// ADDHL
#[test]
fn execute_add_hl() {
let cpu = test_instruction!(Instruction::ADDHL(ADDHLTarget::BC), b => 0x07, c => 0x00, h => 0x03, l => 0x00);
assert_eq!(cpu.registers.get_hl(), 0x0A00);
check_flags!(cpu, zero => false, subtract => false, half_carry => true, carry => false);
}
// ADC
#[test]
fn execute_addc_8bit_non_overflow_target_a_no_carry() {
let cpu = test_instruction!(Instruction::ADD(ArithmeticTarget::A), a => 0x7);
assert_eq!(cpu.registers.a, 0xe);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_addc_8bit_non_overflow_target_a_with_carry() {
let cpu =
test_instruction!(Instruction::ADC(ArithmeticTarget::A), a => 0x7, f.carry => true);
assert_eq!(cpu.registers.a, 0xf);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_addc_8bit_non_overflow_target_c_with_carry() {
let cpu = test_instruction!(Instruction::ADC(ArithmeticTarget::C), a => 0x7, c => 0x3, f.carry => true);
assert_eq!(cpu.registers.a, 0xb);
check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_addc_8bit_carry_with_carry() {
let cpu = test_instruction!(Instruction::ADC(ArithmeticTarget::B), a => 0xFC, b => 0x3, f.carry => true);
assert_eq!(cpu.registers.a, 0x00);
check_flags!(cpu, zero => true, subtract => false, half_carry => true, carry => true);
}
// SUB
// --- SUB: A <- A - source. Expected flags per the assertions below:
// Z when the result is zero, N always set, H on borrow from bit 4,
// C on full borrow.
#[test]
fn execute_sub_8bit_non_underflow_target_a() {
    // A - A is always zero.
    let cpu = test_instruction!(Instruction::SUB(ArithmeticTarget::A), a => 0x7);
    assert_eq!(cpu.registers.a, 0x0);
    check_flags!(cpu, zero => true, subtract => true, half_carry => false, carry => false);
}
#[test]
fn execute_sub_8bit_non_underflow_target_c() {
    // 0x7 - 0x3 = 0x4, no borrow anywhere.
    let cpu = test_instruction!(Instruction::SUB(ArithmeticTarget::C), a => 0x7, c => 0x3);
    assert_eq!(cpu.registers.a, 0x4);
    check_flags!(cpu, zero => false, subtract => true, half_carry => false, carry => false);
}
#[test]
fn execute_sub_8bit_non_overflow_target_c_with_carry() {
    // Same as above but with carry set on entry: SUB must ignore the
    // incoming carry flag (contrast with the SBC tests below).
    let cpu = test_instruction!(Instruction::SUB(ArithmeticTarget::C), a => 0x7, c => 0x3, f.carry => true);
    assert_eq!(cpu.registers.a, 0x4);
    check_flags!(cpu, zero => false, subtract => true, half_carry => false, carry => false);
}
#[test]
fn execute_sub_8bit_carry() {
    // 0x4 - 0x9 underflows: wraps to 0xFB with both half-carry (low
    // nibble borrows) and carry expected.
    let cpu = test_instruction!(Instruction::SUB(ArithmeticTarget::B), a => 0x4, b => 0x9);
    assert_eq!(cpu.registers.a, 0xFB);
    check_flags!(cpu, zero => false, subtract => true, half_carry => true, carry => true);
}
// --- SBC: A <- A - source - carry.
#[test]
fn execute_subc_8bit_non_overflow_target_a_no_carry() {
    // Carry clear, so this behaves like SUB A,A.
    let cpu = test_instruction!(Instruction::SBC(ArithmeticTarget::A), a => 0x7);
    assert_eq!(cpu.registers.a, 0x0);
    check_flags!(cpu, zero => true, subtract => true, half_carry => false, carry => false);
}
#[test]
fn execute_subc_8bit_non_overflow_target_a_with_carry() {
    // 0x7 - 0x7 - 1 underflows to 0xFF; half-carry and carry expected.
    let cpu =
        test_instruction!(Instruction::SBC(ArithmeticTarget::A), a => 0x7, f.carry => true);
    assert_eq!(cpu.registers.a, 0xFF);
    check_flags!(cpu, zero => false, subtract => true, half_carry => true, carry => true);
}
#[test]
fn execute_subc_8bit_non_overflow_target_c_with_carry() {
    // 0x7 - 0x3 - 1 = 0x3, no borrow.
    let cpu = test_instruction!(Instruction::SBC(ArithmeticTarget::C), a => 0x7, c => 0x3, f.carry => true);
    assert_eq!(cpu.registers.a, 0x3);
    check_flags!(cpu, zero => false, subtract => true, half_carry => false, carry => false);
}
// --- AND: the assertions expect half_carry always set (and N/C clear).
#[test]
fn execute_and_8bit() {
    let cpu = test_instruction!(Instruction::AND(ArithmeticTarget::A), a => 0x7);
    assert_eq!(cpu.registers.a, 0x7);
    check_flags!(cpu, zero => false, subtract => false, half_carry => true, carry => false);
}
#[test]
fn execute_and_8bit_with_zero() {
    // B defaults to 0 here, so A & B == 0 and Z is set.
    let cpu = test_instruction!(Instruction::AND(ArithmeticTarget::B), a => 0x8);
    assert_eq!(cpu.registers.a, 0x0);
    check_flags!(cpu, zero => true, subtract => false, half_carry => true, carry => false);
}
// --- OR: all flags except Z expected clear.
#[test]
fn execute_or_8bit() {
    let cpu = test_instruction!(Instruction::OR(ArithmeticTarget::A), a => 0x7);
    assert_eq!(cpu.registers.a, 0x7);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_or_8bit_with_zero() {
    let cpu = test_instruction!(Instruction::OR(ArithmeticTarget::B), a => 0x8);
    assert_eq!(cpu.registers.a, 0x8);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
// --- XOR.
#[test]
fn execute_xor_8bit() {
    // A ^ A is always zero.
    let cpu = test_instruction!(Instruction::XOR(ArithmeticTarget::A), a => 0b0000_0111);
    assert_eq!(cpu.registers.a, 0x0);
    check_flags!(cpu, zero => true, subtract => false, half_carry => false, carry => false);
}
#[test]
fn execute_xor_8bit_with_zero() {
    let cpu = test_instruction!(Instruction::XOR(ArithmeticTarget::B), a => 0x8);
    assert_eq!(cpu.registers.a, 0x8);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
// --- CP: compare — flags as for SUB, but A must be left unchanged.
#[test]
fn execute_cp_8bit_non_underflow_target_a() {
    let cpu = test_instruction!(Instruction::CP(ArithmeticTarget::A), a => 0x7);
    assert_eq!(cpu.registers.a, 0x7);
    check_flags!(cpu, zero => true, subtract => true, half_carry => false, carry => false);
}
#[test]
fn execute_cp_8bit_non_underflow_target_c() {
    let cpu = test_instruction!(Instruction::CP(ArithmeticTarget::C), a => 0x7, c => 0x3);
    assert_eq!(cpu.registers.a, 0x7);
    check_flags!(cpu, zero => false, subtract => true, half_carry => false, carry => false);
}
#[test]
fn execute_cp_8bit_non_overflow_target_c_with_carry() {
    // Like SUB, CP must ignore the incoming carry flag.
    let cpu = test_instruction!(Instruction::CP(ArithmeticTarget::C), a => 0x7, c => 0x3, f.carry => true);
    assert_eq!(cpu.registers.a, 0x7);
    check_flags!(cpu, zero => false, subtract => true, half_carry => false, carry => false);
}
#[test]
fn execute_cp_8bit_carry() {
    // 0x4 - 0x9 would underflow: flags reflect the borrow, A untouched.
    let cpu = test_instruction!(Instruction::CP(ArithmeticTarget::B), a => 0x4, b => 0x9);
    assert_eq!(cpu.registers.a, 0x4);
    check_flags!(cpu, zero => false, subtract => true, half_carry => true, carry => true);
}
// --- RRA: rotate A right through carry (old bit 0 -> carry, old carry -> bit 7).
#[test]
fn execute_rra_8bit() {
    // Carry starts clear: 0b1 rotates to 0, bit 0 lands in carry.
    let cpu = test_instruction!(Instruction::RRA, a => 0b1);
    assert_eq!(cpu.registers.a, 0x0);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
}
// --- RLA: rotate A left through carry.
#[test]
fn execute_rla_8bit() {
    // Bit 7 moves into carry; old carry (clear) fills bit 0.
    let cpu = test_instruction!(Instruction::RLA, a => 0x80);
    assert_eq!(cpu.registers.a, 0x0);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
}
// --- RRCA: rotate A right circularly (bit 0 -> bit 7 and -> carry);
// the preset carry flag must not affect the result.
#[test]
fn execute_rrca_8bit() {
    let cpu = test_instruction!(Instruction::RRCA, a => 0b1, f.carry => true);
    assert_eq!(cpu.registers.a, 0x80);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
}
// --- RLCA: rotate A left circularly (bit 7 -> bit 0 and -> carry).
#[test]
fn execute_rlca_8bit() {
    let cpu = test_instruction!(Instruction::RLCA, a => 0x80, f.carry => true);
    assert_eq!(cpu.registers.a, 0x1);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
}
// --- CPL: complement A; N and H expected set.
#[test]
fn execute_cpl_8bit() {
    let cpu = test_instruction!(Instruction::CPL, a => 0b1011_0100);
    assert_eq!(cpu.registers.a, 0b0100_1011);
    check_flags!(cpu, zero => false, subtract => true, half_carry => true, carry => false);
}
// --- BIT: Z reflects the complement of the tested bit; H expected set;
// the register itself is untouched.
#[test]
fn execute_bit_8bit() {
    // Bit 2 of 0b1011_0100 is 1 -> Z clear.
    let cpu =
        test_instruction!(Instruction::BIT(PrefixTarget::A, BitPosition::B2), a => 0b1011_0100);
    assert_eq!(cpu.registers.a, 0b1011_0100);
    check_flags!(cpu, zero => false, subtract => false, half_carry => true, carry => false);
    // Bit 1 is 0 -> Z set.
    let cpu =
        test_instruction!(Instruction::BIT(PrefixTarget::A, BitPosition::B1), a => 0b1011_0100);
    assert_eq!(cpu.registers.a, 0b1011_0100);
    check_flags!(cpu, zero => true, subtract => false, half_carry => true, carry => false);
}
// --- RES: clear the given bit; flags expected untouched (all clear here).
#[test]
fn execute_res_8bit() {
    // Clearing a set bit (bit 2) changes the value...
    let cpu =
        test_instruction!(Instruction::RES(PrefixTarget::A, BitPosition::B2), a => 0b1011_0100);
    assert_eq!(cpu.registers.a, 0b1011_0000);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
    // ...clearing an already-clear bit (bit 1) is a no-op.
    let cpu =
        test_instruction!(Instruction::RES(PrefixTarget::A, BitPosition::B1), a => 0b1011_0100);
    assert_eq!(cpu.registers.a, 0b1011_0100);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
// --- SET: set the given bit; flags expected untouched.
#[test]
fn execute_set_8bit() {
    // Setting an already-set bit (bit 2) is a no-op.
    let cpu =
        test_instruction!(Instruction::SET(PrefixTarget::A, BitPosition::B2), a => 0b1011_0100);
    assert_eq!(cpu.registers.a, 0b1011_0100);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
    let cpu =
        test_instruction!(Instruction::SET(PrefixTarget::A, BitPosition::B1), a => 0b1011_0100);
    assert_eq!(cpu.registers.a, 0b1011_0110);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
// --- SRL: logical shift right; old bit 0 -> carry, bit 7 becomes 0.
#[test]
fn execute_srl_8bit() {
    let cpu = test_instruction!(Instruction::SRL(PrefixTarget::A), a => 0b1011_0101);
    assert_eq!(cpu.registers.a, 0b0101_1010);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
}
// --- RR: rotate right through carry.
#[test]
fn execute_rr() {
    // Carry clear on entry: bit 7 of the result is 0.
    let cpu = test_instruction!(Instruction::RR(PrefixTarget::A), a => 0b1011_0101);
    assert_eq!(cpu.registers.a, 0b0101_1010);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
    // Carry set on entry: it is rotated into bit 7.
    let cpu =
        test_instruction!(Instruction::RR(PrefixTarget::A), a => 0b1011_0101, f.carry => true);
    assert_eq!(cpu.registers.a, 0b1101_1010);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
}
// --- RL: rotate left through carry.
#[test]
fn execute_rl() {
    let cpu = test_instruction!(Instruction::RL(PrefixTarget::A), a => 0b1011_0101);
    assert_eq!(cpu.registers.a, 0b0110_1010);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
    let cpu =
        test_instruction!(Instruction::RL(PrefixTarget::A), a => 0b1011_0101, f.carry => true);
    assert_eq!(cpu.registers.a, 0b0110_1011);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
}
// --- SRA: arithmetic shift right; bit 7 (sign) is preserved.
#[test]
fn execute_sra() {
    let cpu = test_instruction!(Instruction::SRA(PrefixTarget::A), a => 0b1011_0101);
    assert_eq!(cpu.registers.a, 0b1101_1010);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
}
// --- SLA: arithmetic shift left; bit 0 becomes 0, old bit 7 -> carry.
#[test]
fn execute_sla() {
    let cpu = test_instruction!(Instruction::SLA(PrefixTarget::A), a => 0b1011_0101);
    assert_eq!(cpu.registers.a, 0b0110_1010);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => true);
}
// --- SWAP: exchange high and low nibbles; all flags except Z clear.
#[test]
fn execute_swap() {
    let cpu = test_instruction!(Instruction::SWAP(PrefixTarget::A), a => 0b1011_0101);
    assert_eq!(cpu.registers.a, 0b0101_1011);
    check_flags!(cpu, zero => false, subtract => false, half_carry => false, carry => false);
}
// --- JP: absolute jump. The 16-bit target is read little-endian from
// the two bytes following the opcode.
#[test]
fn execute_jp() {
    let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
    cpu.pc = 0xF8;
    cpu.bus.write_byte(0xF9, 0xFC); // low byte of target
    cpu.bus.write_byte(0xFA, 0x02); // high byte of target
    // Condition holds: next_pc is the operand 0x02FC.
    let (next_pc, _) = cpu.execute(Instruction::JP(JumpTest::Always));
    assert_eq!(next_pc, 0x02FC);
    // Condition fails (carry is clear): fall through past the 3-byte
    // instruction to pc + 3.
    let (next_pc, _) = cpu.execute(Instruction::JP(JumpTest::Carry));
    assert_eq!(next_pc, 0xFB);
}
// --- JR: relative jump with a signed 8-bit offset applied after the
// 2-byte instruction.
#[test]
fn execute_jr() {
    let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
    cpu.pc = 0xF8;
    cpu.bus.write_byte(0xF9, 0x4);
    // 0xF8 + 2 + 4 = 0xFE.
    let (next_pc, _) = cpu.execute(Instruction::JR(JumpTest::Always));
    assert_eq!(next_pc, 0xFE);
    cpu.bus.write_byte(0xF9, 0xFC); // == -4
    // 0xF8 + 2 - 4 = 0xF6.
    let (next_pc, _) = cpu.execute(Instruction::JR(JumpTest::Always));
    assert_eq!(next_pc, 0xF6);
}
// LD a, (??) — load A from a memory address held in a register pair.
#[test]
fn execute_ld_a_indirect() {
    let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
    cpu.registers.set_bc(0xF9);
    cpu.bus.write_byte(0xF9, 0x4);
    cpu.execute(Instruction::LD(LoadType::AFromIndirect(
        Indirect::BCIndirect,
    )));
    assert_eq!(cpu.registers.a, 0x04);
    // HL+ variant must also post-increment HL.
    cpu.registers.set_hl(0xA1);
    cpu.bus.write_byte(0xA1, 0x9);
    cpu.execute(Instruction::LD(LoadType::AFromIndirect(
        Indirect::HLIndirectPlus,
    )));
    assert_eq!(cpu.registers.a, 0x09);
    assert_eq!(cpu.registers.get_hl(), 0xA2);
}
// LD ?, ? — register-to-register byte load leaves the source intact.
#[test]
fn execute_ld_byte() {
    let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
    cpu.registers.b = 0x4;
    cpu.execute(Instruction::LD(LoadType::Byte(
        LoadByteTarget::D,
        LoadByteSource::B,
    )));
    assert_eq!(cpu.registers.b, 0x4);
    assert_eq!(cpu.registers.d, 0x4);
}
// PUSH/POP — stack grows downward: high byte at sp-1, low byte at sp-2.
#[test]
fn execute_push_pop() {
    let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
    cpu.registers.b = 0x4;
    cpu.registers.c = 0x89;
    cpu.sp = 0x10;
    cpu.execute(Instruction::PUSH(StackTarget::BC));
    assert_eq!(cpu.bus.read_byte(0xF), 0x04);
    assert_eq!(cpu.bus.read_byte(0xE), 0x89);
    assert_eq!(cpu.sp, 0xE);
    // POP into a different pair restores the pushed word.
    cpu.execute(Instruction::POP(StackTarget::DE));
    assert_eq!(cpu.registers.d, 0x04);
    assert_eq!(cpu.registers.e, 0x89);
}
// -----------------------------------------------------------------------------
// Step — fetch/decode/execute from memory, including a CB-prefixed
// instruction which spans two bytes but counts as a single step.
#[test]
fn test_step() {
    let mut cpu = CPU::new(None, vec![0; 0xFFFF]);
    cpu.bus.write_byte(0, 0x23); //INC(HL)
    cpu.bus.write_byte(1, 0xB5); //OR(L)
    cpu.bus.write_byte(2, 0xCB); //PREFIX
    cpu.bus.write_byte(3, 0xe8); //SET(B, 5)
    for _ in 0..3 {
        cpu.step();
    }
    // After INC HL: h = 0, l = 1; after OR L: a = 0 | 1 = 1;
    // after SET 5,B: b = 0b0010_0000.
    assert_eq!(cpu.registers.h, 0b0);
    assert_eq!(cpu.registers.l, 0b1);
    assert_eq!(cpu.registers.a, 0b1);
    assert_eq!(cpu.registers.b, 0b0010_0000);
}
}
| 39.470751 | 156 | 0.506296 |
e610dbfe6910fbcdb81e6f783c1c471a7796cabf
| 12,395 |
use ctxt::get_ctxt;
use gc::root::Slot;
use gc::space::Space;
use gc::swiper::card::{CardEntry, CardTable};
use gc::swiper::crossing::{CrossingEntry, CrossingMap};
use gc::swiper::large::LargeSpace;
use gc::swiper::old::OldGen;
use gc::swiper::on_different_cards;
use gc::swiper::young::YoungGen;
use gc::swiper::CARD_SIZE;
use gc::{Address, Region};
use mem;
use object::{offset_of_array_data, Obj};
// Identifies the point of a collection cycle at which the heap verifier
// is running. The four variants are the cross product of {before, after}
// and {minor, full} collection.
#[derive(Copy, Clone)]
pub enum VerifierPhase {
    PreMinor,
    PostMinor,
    PreFull,
    PostFull,
}
impl VerifierPhase {
    // True for both phases that run before a collection starts.
    fn is_pre(self) -> bool {
        match self {
            VerifierPhase::PreMinor | VerifierPhase::PreFull => true,
            VerifierPhase::PostMinor | VerifierPhase::PostFull => false,
        }
    }
    // True only directly after a minor collection.
    fn is_post_minor(self) -> bool {
        if let VerifierPhase::PostMinor = self {
            true
        } else {
            false
        }
    }
    // True only directly before a full collection.
    fn is_pre_full(self) -> bool {
        if let VerifierPhase::PreFull = self {
            true
        } else {
            false
        }
    }
}
// Walks the whole heap and checks invariants (card table entries,
// crossing map entries, and that every reference points into a valid
// region). Constructed per verification pass; see `Verifier::new`.
pub struct Verifier<'a> {
    young: &'a YoungGen,
    old: &'a OldGen,
    card_table: &'a CardTable,
    crossing_map: &'a CrossingMap,
    rootset: &'a [Slot],
    large: &'a LargeSpace,
    perm_space: &'a Space,
    // Number of young-generation references seen on the card currently
    // being scanned; reset per card in `verify_card`.
    refs_to_young_gen: usize,
    // True while `verify_old` / `verify_large` are scanning their space;
    // controls card/crossing checks during object walks.
    in_old: bool,
    in_large: bool,
    old_active: Region,
    young_total: Region,
    eden_active: Region,
    from_active: Region,
    to_active: Region,
    reserved_area: Region,
    phase: VerifierPhase,
}
impl<'a> Verifier<'a> {
    // Builds a verifier for one pass; caches the active/total regions of
    // the generations up front so the checks below can use plain `Region`s.
    pub fn new(
        young: &'a YoungGen,
        old: &'a OldGen,
        card_table: &'a CardTable,
        crossing_map: &'a CrossingMap,
        rootset: &'a [Slot],
        large: &'a LargeSpace,
        perm_space: &'a Space,
        reserved_area: Region,
        phase: VerifierPhase,
    ) -> Verifier<'a> {
        Verifier {
            young: young,
            old: old,
            card_table: card_table,
            crossing_map: crossing_map,
            rootset: rootset,
            perm_space: perm_space,
            large: large,
            refs_to_young_gen: 0,
            in_old: false,
            in_large: false,
            eden_active: young.eden_active(),
            from_active: young.from_active(),
            to_active: young.to_active(),
            old_active: old.active(),
            young_total: young.total(),
            reserved_area: reserved_area,
            phase: phase,
        }
    }
    // Entry point: checks roots first, then every heap space.
    pub fn verify(&mut self) {
        self.verify_roots();
        self.verify_young();
        self.verify_old();
        self.verify_large();
    }
    fn verify_young(&mut self) {
        let region = self.young.eden_active();
        self.verify_objects(region, "young gen (eden)");
        let region = self.from_active.clone();
        self.verify_objects(region, "young gen (from)");
        let region = self.to_active.clone();
        // Outside of post-minor / pre-full phases nothing may live in
        // to-space.
        if !self.phase.is_post_minor() && !self.phase.is_pre_full() {
            assert!(region.size() == 0, "to-space should be empty.");
        }
        self.verify_objects(region, "young gen (to)");
    }
    fn verify_old(&mut self) {
        let region = self.old_active.clone();
        // `in_old` enables card-table and crossing-map checks while
        // walking objects.
        self.in_old = true;
        self.verify_objects(region, "old gen");
        self.in_old = false;
    }
    fn verify_large(&mut self) {
        self.in_large = true;
        // Each large object is verified as its own single-object region.
        self.large.visit_objects(|addr| {
            let object = unsafe { &mut *addr.to_mut_ptr::<Obj>() };
            let region = Region::new(addr, addr.offset(object.size()));
            self.verify_objects(region, "large space");
        });
        self.in_large = false;
    }
    fn verify_roots(&mut self) {
        // Roots have no containing object, hence the null container address.
        for root in self.rootset {
            self.verify_reference(*root, Address::null(), "root set");
        }
    }
    // Walks every object in `region`, dispatching to the array/non-array
    // verifier, and checks the trailing (partial) card at the region end.
    fn verify_objects(&mut self, region: Region, name: &str) {
        let mut curr = region.start;
        self.refs_to_young_gen = 0;
        if self.in_old {
            // we should start at card start
            assert!(self.card_table.is_aligned(curr));
            self.verify_crossing(curr, curr, false);
        }
        while curr < region.end {
            let object = unsafe { &mut *curr.to_mut_ptr::<Obj>() };
            // A null vtable pointer marks a filler word, not an object;
            // skip one pointer-width and continue.
            if object.header().vtblptr().is_null() {
                curr = curr.add_ptr(1);
                continue;
            }
            let next = if object.is_array_ref() {
                self.verify_array_ref(object, curr, name)
            } else {
                self.verify_object(object, curr, name)
            };
            curr = next;
        }
        assert!(curr == region.end, "object doesn't end at region end");
        if (self.in_old || self.in_large) && !self.card_table.is_aligned(curr) {
            self.verify_card(curr);
        }
    }
    // Reference arrays can span many cards, so the card check has to be
    // performed whenever an element crosses a card boundary (tracked via
    // the mutable `curr` cursor).
    fn verify_array_ref(&mut self, object: &mut Obj, mut curr: Address, name: &str) -> Address {
        let object_address = curr;
        object.visit_reference_fields(|element| {
            if (self.in_old || self.in_large) && on_different_cards(curr, element.address()) {
                self.verify_card(curr);
                curr = element.address();
            }
            self.verify_reference(element, object_address, name);
        });
        let next = object_address.offset(object.size());
        if self.in_old && on_different_cards(object_address, next) {
            self.verify_crossing(object_address, next, true);
        }
        next
    }
    fn verify_object(&mut self, object: &mut Obj, curr: Address, name: &str) -> Address {
        object.visit_reference_fields(|child| {
            self.verify_reference(child, curr, name);
        });
        let next = curr.offset(object.size());
        if (self.in_old || self.in_large) && on_different_cards(curr, next) {
            self.verify_card(curr);
            if self.in_old {
                self.verify_crossing(curr, next, false);
            }
        }
        next
    }
    // Compares the card-table entry for `curr` against the number of
    // young-generation references counted since the last card boundary,
    // then resets the counter.
    fn verify_card(&mut self, curr: Address) {
        let curr_card = self.card_table.card_idx(curr);
        let card_entry = self.card_table.get(curr_card);
        let expected_card_entry = if self.refs_to_young_gen > 0 {
            CardEntry::Dirty
        } else {
            CardEntry::Clean
        };
        // In the verify-phase before the collection the card's dirty-entry isn't
        // guaranteed to be exact. It could be `dirty` although this card doesn't
        // actually contain any references into the young generation. But it should never
        // be clean when there are actual references into the young generation.
        if self.phase.is_pre() && expected_card_entry == CardEntry::Clean {
            self.refs_to_young_gen = 0;
            return;
        }
        if card_entry != expected_card_entry {
            let card_text = match card_entry {
                CardEntry::Dirty => "dirty",
                CardEntry::Clean => "clean",
            };
            println!(
                "CARD: {} is marked {} but has {} reference(s).",
                curr_card.to_usize(),
                card_text,
                self.refs_to_young_gen
            );
            panic!("card table entry wrong.");
        }
        assert!(card_entry == expected_card_entry);
        self.refs_to_young_gen = 0;
    }
    // Checks the crossing-map entries for an object spanning the cards
    // between `old` (object start) and `new` (object end). Array objects
    // get `LeadingRefs`/`ArrayStart` entries, others `NoRefs`/`FirstObject`.
    fn verify_crossing(&mut self, old: Address, new: Address, array_ref: bool) {
        let new_card_idx = self.card_table.card_idx(new);
        let old_card_idx = self.card_table.card_idx(old);
        if new_card_idx == old_card_idx {
            return;
        }
        let new_card_start = self.card_table.to_address(new_card_idx);
        let old_card_end = self.card_table.to_address(old_card_idx).offset(CARD_SIZE);
        // Offset of the object end within its card, in words.
        let offset = new.offset_from(new_card_start);
        let offset_words = (offset / mem::ptr_width_usize()) as u8;
        let crossing_middle;
        let loop_start;
        if array_ref {
            let refs_per_card = (CARD_SIZE / mem::ptr_width_usize()) as u8;
            crossing_middle = CrossingEntry::LeadingRefs(refs_per_card);
            // If the array header itself crosses into the next card, that
            // card must carry an `ArrayStart` entry.
            if old.offset(offset_of_array_data() as usize) > old_card_end {
                let old_next = old_card_idx.to_usize() + 1;
                let crossing = self.crossing_map.get(old_next.into());
                let diff_words = old_card_end.offset_from(old) / mem::ptr_width_usize();
                assert!(crossing == CrossingEntry::ArrayStart(diff_words as u8));
                loop_start = old_card_idx.to_usize() + 2;
            } else {
                loop_start = old_card_idx.to_usize() + 1;
            }
            if new_card_idx.to_usize() >= loop_start {
                let crossing = self.crossing_map.get(new_card_idx);
                let expected = if offset_words > 0 {
                    CrossingEntry::LeadingRefs(offset_words)
                } else {
                    CrossingEntry::FirstObject(0)
                };
                assert!(crossing == expected, "array crossing at end not correct.");
            }
        } else {
            crossing_middle = CrossingEntry::NoRefs;
            loop_start = old_card_idx.to_usize() + 1;
            let crossing = self.crossing_map.get(new_card_idx);
            let expected = CrossingEntry::FirstObject(offset_words);
            assert!(crossing == expected, "crossing at end not correct.");
        }
        // All fully covered cards in between share the same entry.
        for c in loop_start..new_card_idx.to_usize() {
            assert!(
                self.crossing_map.get(c.into()) == crossing_middle,
                "middle crossing not correct."
            );
        }
    }
    // Validates a single reference slot: it must point into one of the
    // valid regions for the current phase, otherwise a diagnostic dump is
    // printed and the verifier panics.
    fn verify_reference(&mut self, slot: Slot, container_obj: Address, name: &str) {
        let reference = slot.get();
        if reference.is_null() {
            return;
        }
        // to-space is only a valid target right after a minor collection
        // or right before a full collection.
        if self.old_active.contains(reference)
            || self.eden_active.contains(reference)
            || self.from_active.contains(reference)
            || self.perm_space.contains(reference)
            || self.large.contains(reference)
            || (self.to_active.contains(reference)
                && (self.phase.is_post_minor() || self.phase.is_pre_full()))
        {
            let object = reference.to_obj();
            // Verify that the address is the start of an object,
            // for this access its size.
            // To make sure this isn't optimized out by the compiler,
            // make sure that the size doesn't equal 1.
            assert!(object.size() != 1, "object size shouldn't be 1");
            if self.young_total.contains(reference) {
                self.refs_to_young_gen += 1;
            }
            return;
        }
        // Invalid reference: dump the heap layout and the offending slot
        // before panicking.
        let perm_region = self.perm_space.used_region();
        println!(
            "PRM: {}; active: {} (size 0x{:x})",
            self.perm_space.total(),
            perm_region,
            perm_region.size(),
        );
        println!(
            "FRM: {}; active: {} (size 0x{:x})",
            self.young.from_total(),
            self.from_active,
            self.from_active.size(),
        );
        println!(
            "OLD: {}; active: {} (size 0x{:x})",
            self.old.total(),
            self.old_active,
            self.old_active.size(),
        );
        println!(
            "LRG: {}-{}",
            self.large.total().start,
            self.large.total().end
        );
        println!(
            "TTL: {}-{}",
            self.reserved_area.start, self.reserved_area.end
        );
        println!(
            "found invalid reference to {} in {} (at {}, in object {}).",
            reference,
            name,
            slot.address(),
            container_obj
        );
        if self.young.contains(reference)
            && !self.from_active.contains(reference)
            && !self.eden_active.contains(reference)
        {
            println!("reference points into young generation but not into the active semi-space.");
        }
        println!("try print object size and class:");
        let object = reference.to_obj();
        println!("\tsize {}", object.size());
        let cls = object.header().vtbl().class();
        println!("\tclass {}", cls.name(get_ctxt()));
        panic!("reference neither pointing into young nor old generation.");
    }
}
| 30.379902 | 99 | 0.547559 |
113cc025acf300dbef919ace20f122e54b3d4507
| 2,786 |
// Copyright © 2017-2018 Mozilla Foundation
//
// This program is made available under an ISC-style license. See the
// accompanying file LICENSE for details.
use callbacks::cubeb_device_changed_callback;
use channel::cubeb_channel_layout;
use device::cubeb_device;
use format::cubeb_sample_format;
use std::os::raw::{c_char, c_float, c_int, c_uint, c_void};
use std::{fmt, mem};
// Stream preference bit flags; values are distinct bits so they can be
// OR-ed together. Presumably mirrors the C definitions in cubeb.h —
// keep values in sync with the C headers.
cubeb_enum! {
    pub enum cubeb_stream_prefs {
        CUBEB_STREAM_PREF_NONE = 0x00,
        CUBEB_STREAM_PREF_LOOPBACK = 0x01,
        CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING = 0x02,
        CUBEB_STREAM_PREF_VOICE = 0x04,
    }
}
// Stream state values as reported to the state callback.
cubeb_enum! {
    pub enum cubeb_state {
        CUBEB_STATE_STARTED,
        CUBEB_STATE_STOPPED,
        CUBEB_STATE_DRAINED,
        CUBEB_STATE_ERROR,
    }
}
// Opaque stream handle: only ever used behind a raw pointer across FFI.
pub enum cubeb_stream {}
// Parameters describing a stream's sample format, rate, channel count,
// layout and preferences. `repr(C)` so the layout matches the C struct
// passed across the FFI boundary.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct cubeb_stream_params {
    pub format: cubeb_sample_format,
    pub rate: c_uint,
    pub channels: c_uint,
    pub layout: cubeb_channel_layout,
    pub prefs: cubeb_stream_prefs,
}
impl Default for cubeb_stream_params {
    fn default() -> Self {
        // All fields are C integer/enum types, so the all-zero bit
        // pattern is a valid value for each of them.
        unsafe { mem::zeroed() }
    }
}
// Explicit Debug impl to work around bug in ctest
// (a derived Debug would otherwise suffice; output lists every field).
impl fmt::Debug for cubeb_stream_params {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("cubeb_stream_params")
            .field("format", &self.format)
            .field("rate", &self.rate)
            .field("channels", &self.channels)
            .field("layout", &self.layout)
            .field("prefs", &self.prefs)
            .finish()
    }
}
// Raw C entry points for stream control. Signatures must match the cubeb
// C API exactly; all functions returning c_int use cubeb's error codes.
extern "C" {
    pub fn cubeb_stream_destroy(stream: *mut cubeb_stream);
    pub fn cubeb_stream_start(stream: *mut cubeb_stream) -> c_int;
    pub fn cubeb_stream_stop(stream: *mut cubeb_stream) -> c_int;
    pub fn cubeb_stream_get_position(stream: *mut cubeb_stream, position: *mut u64) -> c_int;
    pub fn cubeb_stream_get_latency(stream: *mut cubeb_stream, latency: *mut c_uint) -> c_int;
    pub fn cubeb_stream_get_input_latency(stream: *mut cubeb_stream, latency: *mut c_uint)
        -> c_int;
    pub fn cubeb_stream_set_volume(stream: *mut cubeb_stream, volume: c_float) -> c_int;
    pub fn cubeb_stream_set_name(stream: *mut cubeb_stream, name: *const c_char) -> c_int;
    pub fn cubeb_stream_get_current_device(
        stream: *mut cubeb_stream,
        device: *mut *mut cubeb_device,
    ) -> c_int;
    pub fn cubeb_stream_device_destroy(
        stream: *mut cubeb_stream,
        devices: *mut cubeb_device,
    ) -> c_int;
    pub fn cubeb_stream_register_device_changed_callback(
        stream: *mut cubeb_stream,
        device_changed_callback: cubeb_device_changed_callback,
    ) -> c_int;
    pub fn cubeb_stream_user_ptr(stream: *mut cubeb_stream) -> *mut c_void;
}
| 32.395349 | 94 | 0.68234 |
f801e3f21cb47dc9155e4c5bb05cfacd6a4c8e25
| 6,405 |
//! Busy handler (when the database is locked)
use std::mem;
use std::os::raw::{c_int, c_void};
use std::panic::catch_unwind;
use std::ptr;
use std::time::Duration;
use crate::ffi;
use crate::{Connection, InnerConnection, Result};
impl Connection {
    /// Set a busy handler that sleeps for a specified amount of time when a
    /// table is locked. The handler will sleep multiple times until at
    /// least "ms" milliseconds of sleeping have accumulated.
    ///
    /// Calling this routine with an argument equal to zero turns off all busy
    /// handlers.
    ///
    /// There can only be a single busy handler for a particular database
    /// connection at any given moment. If another busy handler was defined
    /// (using `busy_handler`) prior to calling this routine, that other
    /// busy handler is cleared.
    pub fn busy_timeout(&self, timeout: Duration) -> Result<()> {
        // Convert the Duration to whole milliseconds, panicking on
        // overflow ("too big") rather than silently truncating.
        let ms = timeout
            .as_secs()
            .checked_mul(1000)
            .and_then(|t| t.checked_add(timeout.subsec_millis().into()))
            .expect("too big");
        self.db.borrow_mut().busy_timeout(ms as i32)
    }
    /// Register a callback to handle `SQLITE_BUSY` errors.
    ///
    /// If the busy callback is `None`, then `SQLITE_BUSY` is returned
    /// immediately upon encountering the lock. The argument to the busy
    /// handler callback is the number of times that the
    /// busy handler has been invoked previously for the
    /// same locking event. If the busy callback returns `false`, then no
    /// additional attempts are made to access the
    /// database and `SQLITE_BUSY` is returned to the
    /// application. If the callback returns `true`, then another attempt
    /// is made to access the database and the cycle repeats.
    ///
    /// There can only be a single busy handler defined for each database
    /// connection. Setting a new busy handler clears any previously set
    /// handler. Note that calling `busy_timeout()` or evaluating `PRAGMA
    /// busy_timeout=N` will change the busy handler and thus
    /// clear any previously set busy handler.
    pub fn busy_handler(&self, callback: Option<fn(i32) -> bool>) -> Result<()> {
        // C trampoline: recovers the user's fn pointer from the user-data
        // pointer and translates bool -> c_int (1 = retry, 0 = give up).
        // A panic in the user callback is caught and treated as "give up"
        // so it cannot unwind across the FFI boundary.
        unsafe extern "C" fn busy_handler_callback(p_arg: *mut c_void, count: c_int) -> c_int {
            // NOTE(review): relies on fn-pointer <-> data-pointer
            // transmute round-tripping; this is the inverse of the
            // transmute performed in the registration below.
            let handler_fn: fn(i32) -> bool = mem::transmute(p_arg);
            if let Ok(true) = catch_unwind(|| handler_fn(count)) {
                1
            } else {
                0
            }
        }
        let mut c = self.db.borrow_mut();
        let r = match callback {
            Some(f) => unsafe {
                // The fn pointer itself is smuggled through the void*
                // user-data argument; no allocation is needed.
                ffi::sqlite3_busy_handler(c.db(), Some(busy_handler_callback), mem::transmute(f))
            },
            // None clears any registered handler.
            None => unsafe { ffi::sqlite3_busy_handler(c.db(), None, ptr::null_mut()) },
        };
        c.decode_result(r)
    }
}
impl InnerConnection {
    // Thin wrapper over sqlite3_busy_timeout; translates the C result
    // code into this crate's Result type.
    fn busy_timeout(&mut self, timeout: c_int) -> Result<()> {
        let r = unsafe { ffi::sqlite3_busy_timeout(self.db, timeout) };
        self.decode_result(r)
    }
}
#[cfg(test)]
mod test {
    use self::tempdir::TempDir;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::sync_channel;
    use std::thread;
    use std::time::Duration;
    use tempdir;

    use crate::{Connection, Error, ErrorCode, Result, TransactionBehavior, NO_PARAMS};

    // With no busy handler installed, a query against a database locked
    // by another connection must fail immediately with `SQLITE_BUSY`.
    #[test]
    fn test_default_busy() {
        let temp_dir = TempDir::new("test_default_busy").unwrap();
        let path = temp_dir.path().join("test.db3");
        let mut db1 = Connection::open(&path).unwrap();
        let tx1 = db1
            .transaction_with_behavior(TransactionBehavior::Exclusive)
            .unwrap();
        let db2 = Connection::open(&path).unwrap();
        let r: Result<()> = db2.query_row("PRAGMA schema_version", NO_PARAMS, |_| unreachable!());
        match r.unwrap_err() {
            Error::SqliteFailure(err, _) => {
                assert_eq!(err.code, ErrorCode::DatabaseBusy);
            }
            err => panic!("Unexpected error {}", err),
        }
        tx1.rollback().unwrap();
    }

    // `busy_timeout` should make the second connection retry long enough
    // for the exclusive transaction held on a background thread to end.
    #[test]
    #[ignore] // FIXME: unstable
    fn test_busy_timeout() {
        let temp_dir = TempDir::new("test_busy_timeout").unwrap();
        let path = temp_dir.path().join("test.db3");
        let db2 = Connection::open(&path).unwrap();
        db2.busy_timeout(Duration::from_secs(1)).unwrap();
        // Fix: the channel halves were previously bound with reversed
        // names (`rx` for the sender, `tx` for the receiver); renamed for
        // clarity. Behavior is unchanged.
        let (sender, receiver) = sync_channel(0);
        let child = thread::spawn(move || {
            let mut db1 = Connection::open(&path).unwrap();
            let tx1 = db1
                .transaction_with_behavior(TransactionBehavior::Exclusive)
                .unwrap();
            // Rendezvous: tell the main thread the lock is held.
            sender.send(1).unwrap();
            thread::sleep(Duration::from_millis(100));
            tx1.rollback().unwrap();
        });
        assert_eq!(receiver.recv().unwrap(), 1);
        let _ = db2
            .query_row("PRAGMA schema_version", NO_PARAMS, |row| {
                row.get::<_, i32>(0)
            })
            .expect("unexpected error");
        child.join().unwrap();
    }

    // A custom busy handler must be invoked while the database is locked
    // and, by returning `true`, allow the query to eventually succeed.
    #[test]
    #[ignore] // FIXME: unstable
    fn test_busy_handler() {
        lazy_static! {
            static ref CALLED: AtomicBool = AtomicBool::new(false);
        }
        fn busy_handler(_: i32) -> bool {
            CALLED.store(true, Ordering::Relaxed);
            thread::sleep(Duration::from_millis(100));
            true
        }

        let temp_dir = TempDir::new("test_busy_handler").unwrap();
        let path = temp_dir.path().join("test.db3");
        let db2 = Connection::open(&path).unwrap();
        db2.busy_handler(Some(busy_handler)).unwrap();
        // Same sender/receiver naming fix as in test_busy_timeout.
        let (sender, receiver) = sync_channel(0);
        let child = thread::spawn(move || {
            let mut db1 = Connection::open(&path).unwrap();
            let tx1 = db1
                .transaction_with_behavior(TransactionBehavior::Exclusive)
                .unwrap();
            sender.send(1).unwrap();
            thread::sleep(Duration::from_millis(100));
            tx1.rollback().unwrap();
        });
        assert_eq!(receiver.recv().unwrap(), 1);
        let _ = db2
            .query_row("PRAGMA schema_version", NO_PARAMS, |row| {
                row.get::<_, i32>(0)
            })
            .expect("unexpected error");
        assert!(CALLED.load(Ordering::Relaxed));
        child.join().unwrap();
    }
}
| 36.186441 | 98 | 0.582982 |
268fbcade8b1a45617846750726f99985552bdb6
| 1,637 |
extern crate failure;
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::HashMap;
use failure::{Error, format_err};
// One fabric claim from the puzzle input: an id plus a rectangle given
// by its top-left corner (x, y) and its width/height.
struct Square {
    id: i64,
    x: i64,
    y: i64,
    w: i64,
    h: i64,
}
impl Square {
fn parse(s: &str) -> Result<Square, Error> {
let el: Vec<_> = s.split(|x: char| !x.is_numeric())
.filter_map(|x| x.parse::<i64>().ok())
.collect();
if el.len() != 5 {
return Err(format_err!("error parsing: {}", s));
}
Ok(Square {
id: el[0],
x: el[1],
y: el[2],
w: el[3],
h: el[4],
})
}
}
fn main() -> Result<(), Error> {
    let file = BufReader::new(File::open("data/p3.txt")?);
    // Parse one claim per input line.
    let mut squares = vec![];
    for line in file.lines() {
        let sq = Square::parse(&line?)?;
        squares.push(sq);
    }
    // Map each covered cell to the id of its claim, or to the sentinel
    // -1 once a second claim touches it (ids from the input are positive,
    // so -1 cannot collide with a real id).
    let mut fabric = HashMap::new();
    for sq in &squares {
        for x in 0..sq.w {
            for y in 0..sq.h {
                let square = fabric.entry((x + sq.x, y + sq.y)).or_insert(sq.id);
                if *square != sq.id {
                    *square = -1;
                }
            }
        }
    }
    // Part 1: number of cells claimed by two or more squares.
    let count = fabric.values().filter(|&x| *x == -1).count();
    println!("Part 1: {}", count);
    // Part 2: a claim overlaps nothing iff every one of its w*h cells
    // still carries its own id — i.e. its id occurs exactly w*h times.
    let mut counts = HashMap::new();
    for x in fabric.values() {
        *counts.entry(x).or_insert(0) += 1;
    }
    for sq in &squares {
        if *counts.get(&sq.id).unwrap_or(&0) == (sq.w * sq.h) {
            println!("Part 2: {}", sq.id);
            break;
        }
    }
    Ok(())
}
| 20.721519 | 81 | 0.445327 |
2366b7e186122b6ce25dbd2fb1f81200af33ce33
| 23,491 |
mod supreme {
    //! Re-exported items from `nom_supreme`.
    //!
    //! This module collapses `nom_supreme` and exposes its items using similar
    //! names to `nom`. Parsers are used via this module such that they resemble
    //! the use of `nom` parsers. See the `parse` function.
    pub use nom_supreme::error::{BaseErrorKind, ErrorTree, Expectation, StackContext};
    // Renamed so call sites read like their `nom` counterparts.
    pub use nom_supreme::multi::{
        collect_separated_terminated as many1, parse_separated_terminated as fold1,
    };
    pub use nom_supreme::parser_ext::ParserExt;
    pub use nom_supreme::tag::complete::tag;
}
#[cfg(feature = "diagnostics-report")]
use miette::{self, Diagnostic, LabeledSpan, SourceCode};
use pori::{Located, Location, Stateful};
use std::borrow::Cow;
use std::fmt::{self, Display, Formatter};
use std::str::FromStr;
use thiserror::Error;
#[cfg(any(feature = "diagnostics-inspect", feature = "diagnostics-report"))]
use crate::diagnostics::Span;
use crate::token::{
Alternative, Archetype, Class, Evaluation, Literal, Repetition, Separator, Token, TokenKind,
Tokenized, Wildcard,
};
use crate::PATHS_ARE_CASE_INSENSITIVE;
// `Annotation` carries a source span when either diagnostics feature is
// enabled, and degenerates to the zero-sized `()` otherwise.
#[cfg(any(feature = "diagnostics-inspect", feature = "diagnostics-report"))]
pub type Annotation = Span;
#[cfg(all(
    not(feature = "diagnostics-inspect"),
    not(feature = "diagnostics-report"),
))]
pub type Annotation = ();
// Error kinds are instantiated with static context strings and boxed
// dynamic external errors.
type BaseErrorKind =
    supreme::BaseErrorKind<&'static str, Box<dyn std::error::Error + Send + Sync + 'static>>;
type StackContext = supreme::StackContext<&'static str>;
// Parser input: the expression text with location tracking, combined
// with `ParserState` (declared elsewhere in this file) via `Stateful`.
type Expression<'i> = Located<'i, str>;
type Input<'i> = Stateful<Expression<'i>, ParserState>;
type ErrorTree<'i> = supreme::ErrorTree<Input<'i>>;
type ErrorMode<'t> = nom::Err<ErrorTree<'t>>;
// A byte offset into the glob expression paired with a human-readable
// description of what went wrong there.
#[derive(Clone, Debug)]
struct ErrorLocation {
    location: usize,
    context: String,
}
impl<'e, 'i> From<ErrorEntry<'e, Input<'i>>> for ErrorLocation {
    fn from(entry: ErrorEntry<'e, Input<'i>>) -> Self {
        ErrorLocation {
            location: entry.input.location(),
            context: entry.context.to_string(),
        }
    }
}
// With reporting enabled an ErrorLocation maps onto a one-character
// labeled span for miette diagnostics.
#[cfg(feature = "diagnostics-report")]
impl From<ErrorLocation> for LabeledSpan {
    fn from(location: ErrorLocation) -> Self {
        let ErrorLocation { location, context } = location;
        LabeledSpan::new(Some(context), location, 1)
    }
}
impl Display for ErrorLocation {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "at offset {}: {}", self.location, self.context)
}
}
// One node visited during an error-tree traversal: its depth in the
// tree, the input position, and the kind/stack context found there.
#[derive(Clone, Debug)]
struct ErrorEntry<'e, I> {
    depth: usize,
    input: &'e I,
    context: ErrorContext<'e>,
}
// Borrowed view of either a base error kind or a stack context entry.
#[derive(Clone, Debug)]
enum ErrorContext<'e> {
    Kind(&'e BaseErrorKind),
    Stack(&'e StackContext),
}
impl Display for ErrorContext<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            ErrorContext::Kind(kind) => match kind {
                BaseErrorKind::Expected(_) | BaseErrorKind::Kind(_) => write!(f, "{}", kind),
                // Omit any "external error" prefix as seen in the `Display`
                // implementation of `BaseErrorKind`.
                BaseErrorKind::External(error) => write!(f, "{}", error),
            },
            ErrorContext::Stack(stack) => write!(f, "{}", stack),
        }
    }
}
// Traversal helpers over an `ErrorTree`: visit every node, and extract
// the locations at the shallowest and deepest tree depths.
trait ErrorTreeExt<I> {
    fn for_each<F>(&self, f: F)
    where
        F: FnMut(ErrorEntry<I>);
    fn bounding_error_locations(&self) -> (Vec<ErrorLocation>, Vec<ErrorLocation>);
}
impl<'i> ErrorTreeExt<Input<'i>> for ErrorTree<'i> {
    // Depth-first traversal. Base nodes are reported at their own depth;
    // stack contexts and alternation children are reported one level
    // deeper than their parent.
    fn for_each<F>(&self, mut f: F)
    where
        F: FnMut(ErrorEntry<Input<'i>>),
    {
        fn recurse<'i, F>(tree: &'_ ErrorTree<'i>, depth: usize, f: &mut F)
        where
            F: FnMut(ErrorEntry<Input<'i>>),
        {
            match tree {
                ErrorTree::Base {
                    ref location,
                    ref kind,
                } => f(ErrorEntry {
                    depth,
                    input: location,
                    context: ErrorContext::Kind(kind),
                }),
                ErrorTree::Stack {
                    ref base,
                    ref contexts,
                } => {
                    for (location, context) in contexts {
                        f(ErrorEntry {
                            depth: depth + 1,
                            input: location,
                            context: ErrorContext::Stack(context),
                        });
                    }
                    recurse(base, depth + 1, f);
                },
                ErrorTree::Alt(ref trees) => {
                    for tree in trees {
                        recurse(tree, depth + 1, f);
                    }
                },
            }
        }
        recurse(self, 0, &mut f);
    }
    // Returns (locations at the minimum depth, locations at the maximum
    // depth) seen during a full traversal.
    fn bounding_error_locations(&self) -> (Vec<ErrorLocation>, Vec<ErrorLocation>) {
        let mut min: Option<(usize, Vec<ErrorLocation>)> = None;
        let mut max: Option<(usize, Vec<ErrorLocation>)> = None;
        self.for_each(|entry| {
            // `write_if` tracks one bound: `reset` decides whether the
            // current entry establishes a strictly better depth (clearing
            // the accumulated locations), `push` whether the entry ties
            // the tracked depth and should be appended.
            let write_if =
                |locations: &mut Option<(_, _)>, push: fn(&_, &_) -> _, reset: fn(&_, &_) -> _| {
                    let locations = locations.get_or_insert_with(|| (entry.depth, vec![]));
                    if reset(&entry.depth, &locations.0) {
                        *locations = (entry.depth, vec![]);
                    }
                    if push(&entry.depth, &locations.0) {
                        locations.1.push(entry.clone().into());
                    }
                };
            write_if(&mut min, usize::eq, |&depth, &min| depth < min);
            write_if(&mut max, usize::eq, |&depth, &max| depth > max);
        });
        (
            min.map(|(_, locations)| locations).unwrap_or_default(),
            max.map(|(_, locations)| locations).unwrap_or_default(),
        )
    }
}
/// Describes errors that occur when parsing a glob expression.
///
/// Common examples of glob expressions that cannot be parsed are alternative
/// and repetition patterns with missing delimiters and ambiguous patterns, such
/// as `src/***/*.rs` or `{.local,.config/**/*.toml`.
///
/// When the `diagnostics-report` feature is enabled, this error implements the
/// [`Diagnostic`] trait and provides more detailed information about the parse
/// failure.
///
/// [`Diagnostic`]: miette::Diagnostic
#[derive(Clone, Debug, Error)]
#[error("failed to parse glob expression")]
pub struct ParseError<'t> {
    /// The glob expression that failed to parse.
    expression: Cow<'t, str>,
    /// The shallowest (lower bound) error location: where the failure begins.
    start: ErrorLocation,
    /// The deepest (upper bound) error locations, which carry more context.
    ends: Vec<ErrorLocation>,
}
impl<'t> ParseError<'t> {
    // Constructs a `ParseError` from a `nom` error for the given expression.
    //
    // Panics on `ErrorMode::Incomplete`: this parser only operates on
    // complete input, so incomplete errors indicate a bug rather than a
    // malformed expression.
    fn new(expression: &'t str, error: ErrorMode<'t>) -> Self {
        match error {
            ErrorMode::Incomplete(_) => {
                panic!("unexpected parse error: incomplete input")
            },
            ErrorMode::Error(error) | ErrorMode::Failure(error) => {
                // Use the shallowest location as the starting point of the
                // failure and keep all of the deepest locations for context.
                let (starts, ends) = error.bounding_error_locations();
                ParseError {
                    expression: expression.into(),
                    start: starts
                        .into_iter()
                        .next()
                        .expect("expected lower bound error location"),
                    ends,
                }
            },
        }
    }
    /// Clones any borrowed data into an owning instance.
    pub fn into_owned(self) -> ParseError<'static> {
        let ParseError {
            expression,
            start,
            ends,
        } = self;
        ParseError {
            expression: expression.into_owned().into(),
            start,
            ends,
        }
    }
    /// Gets the glob expression that failed to parse.
    pub fn expression(&self) -> &str {
        self.expression.as_ref()
    }
}
#[cfg(feature = "diagnostics-report")]
#[cfg_attr(docsrs, doc(cfg(feature = "diagnostics-report")))]
impl Diagnostic for ParseError<'_> {
    // Stable diagnostic code for this error.
    fn code<'a>(&'a self) -> Option<Box<dyn Display + 'a>> {
        Some(Box::new("wax::glob::parse"))
    }
    // The failed expression serves as the source code for span labels.
    fn source_code(&self) -> Option<&dyn SourceCode> {
        Some(&self.expression)
    }
    // Surfacing useful parsing errors is difficult. This code replaces any
    // lower bound errors with a simple label noting the beginning of the
    // parsing error. Details are discarded, because these are typically
    // top-level alternative errors that do not provide any useful insight.
    // Upper bound errors are labeled as-is, though they only sometimes provide
    // useful context.
    fn labels(&self) -> Option<Box<dyn Iterator<Item = LabeledSpan> + '_>> {
        Some(Box::new(
            // A single "starting here" label at the lower bound location.
            Some(LabeledSpan::new(
                Some(String::from("starting here")),
                self.start.location,
                1,
            ))
            .into_iter()
            // Each upper bound label spans from the start of the failure to
            // the respective end location (at least one character long).
            .chain(self.ends.iter().cloned().map(|end| {
                LabeledSpan::new(
                    Some(end.context),
                    self.start.location,
                    end.location.saturating_sub(self.start.location) + 1,
                )
            })),
        ))
    }
}
/// Mutable state threaded through the parser along with its input.
#[derive(Clone, Copy, Debug, Default)]
struct ParserState {
    /// The flags currently in effect (e.g., case sensitivity).
    flags: FlagState,
    /// Location at which the current (sub-)expression begins. Compared
    /// against the input location to detect the beginning of an expression.
    subexpression: usize,
}
/// The set of flags that may be toggled within a glob expression.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FlagState {
    /// Whether or not literals are matched case insensitively.
    is_case_insensitive: bool,
}
impl Default for FlagState {
    /// By default, case sensitivity follows the platform's path comparison
    /// behavior.
    fn default() -> Self {
        Self {
            is_case_insensitive: PATHS_ARE_CASE_INSENSITIVE,
        }
    }
}
/// A flag toggle parsed from a flag group such as `(?i)` or `(?-i)`.
#[derive(Clone, Copy, Debug)]
enum FlagToggle {
    /// Enables (`true`) or disables (`false`) case-insensitive matching.
    CaseInsensitive(bool),
}
/// Parses a glob expression into a sequence of annotated tokens.
///
/// An empty expression yields an empty token sequence. Any failure is
/// reported as a [`ParseError`] describing where the expression could not be
/// parsed.
pub fn parse(expression: &str) -> Result<Tokenized, ParseError> {
    use nom::bytes::complete as bytes;
    use nom::character::complete as character;
    use nom::{branch, combinator, multi, sequence, IResult, Parser};
    use supreme::ParserExt;
    use crate::token::parse::FlagToggle::CaseInsensitive;
    // Describes what a failed parser expected to find; used to build
    // external (custom) errors in the error tree.
    #[derive(Clone, Copy, Debug, Error)]
    #[error("expected {subject}")]
    struct Expectation {
        subject: &'static str,
    }
    type ParseResult<'i, O> = IResult<Input<'i>, O, ErrorTree<'i>>;
    // Zero-width parser that succeeds only at the beginning of an expression
    // or sub-expression, as recorded in the parser state by `glob`.
    fn boe(input: Input) -> ParseResult<Input> {
        if input.state.subexpression == input.location() {
            Ok((input, input))
        }
        else {
            Err(ErrorMode::Error(ErrorTree::Base {
                location: input,
                kind: BaseErrorKind::External(Box::new(Expectation {
                    subject: "beginning of expression",
                })),
            }))
        }
    }
    // Zero-width parser that always succeeds and yields the input unchanged.
    fn identity(input: Input) -> ParseResult<Input> {
        Ok((input, input))
    }
    // Parses zero or more flag groups such as `(?i)` and `(?-i)`. `toggle`
    // constructs the parser that is applied when each toggle is recognized.
    fn flags<'i, F>(
        mut toggle: impl FnMut(FlagToggle) -> F,
    ) -> impl FnMut(Input<'i>) -> ParseResult<'i, ()>
    where
        F: Parser<Input<'i>, (), ErrorTree<'i>>,
    {
        move |input| {
            let (input, _) = multi::many0(sequence::delimited(
                supreme::tag("(?"),
                multi::many1(branch::alt((
                    sequence::tuple((supreme::tag("i"), toggle(CaseInsensitive(true)))),
                    sequence::tuple((supreme::tag("-i"), toggle(CaseInsensitive(false)))),
                ))),
                supreme::tag(")"),
            ))(input)?;
            Ok((input, ()))
        }
    }
    // Parses flag groups and records their toggles in the parser state.
    // Explicit lifetimes prevent inference errors.
    #[allow(clippy::needless_lifetimes)]
    fn flags_with_state<'i>(input: Input<'i>) -> ParseResult<'i, ()> {
        flags(move |toggle| {
            move |mut input: Input<'i>| {
                match toggle {
                    CaseInsensitive(toggle) => {
                        input.state.flags.is_case_insensitive = toggle;
                    },
                }
                Ok((input, ()))
            }
        })(input)
    }
    // Parses flag groups but discards their toggles (state is untouched).
    // Explicit lifetimes prevent inference errors.
    #[allow(clippy::needless_lifetimes)]
    fn flags_without_state<'i>(input: Input<'i>) -> ParseResult<'i, ()> {
        flags(move |_| move |input: Input<'i>| Ok((input, ())))(input)
    }
    // Parses a literal token: a non-empty run of text with no structural
    // meaning. Structural characters may be escaped with a backslash, which
    // is transformed away here.
    fn literal(input: Input) -> ParseResult<TokenKind<Annotation>> {
        combinator::map(
            combinator::verify(
                bytes::escaped_transform(
                    bytes::is_not("/?*$:<>()[]{},\\"),
                    '\\',
                    branch::alt((
                        combinator::value("?", supreme::tag("?")),
                        combinator::value("*", supreme::tag("*")),
                        combinator::value("$", supreme::tag("$")),
                        combinator::value(":", supreme::tag(":")),
                        combinator::value("<", supreme::tag("<")),
                        combinator::value(">", supreme::tag(">")),
                        combinator::value("(", supreme::tag("(")),
                        combinator::value(")", supreme::tag(")")),
                        combinator::value("[", supreme::tag("[")),
                        combinator::value("]", supreme::tag("]")),
                        combinator::value("{", supreme::tag("{")),
                        combinator::value("}", supreme::tag("}")),
                        combinator::value(",", supreme::tag(",")),
                    )),
                ),
                // Reject empty literals.
                |text: &str| !text.is_empty(),
            ),
            move |text| {
                TokenKind::Literal(Literal {
                    text: text.into(),
                    // Capture the case sensitivity in effect at this point.
                    is_case_insensitive: input.state.flags.is_case_insensitive,
                })
            },
        )(input)
    }
    // Parses a path separator token (`/`).
    fn separator(input: Input) -> ParseResult<TokenKind<Annotation>> {
        combinator::value(TokenKind::Separator(Separator), supreme::tag("/"))(input)
    }
    // Parses a wildcard token: `?` (exactly one), `**` (tree), `*` (eager
    // zero-or-more), or `$` (lazy zero-or-more). `terminator` bounds the
    // zero-or-more and tree wildcards.
    fn wildcard<'i>(
        terminator: impl Clone + Parser<Input<'i>, Input<'i>, ErrorTree<'i>>,
    ) -> impl FnMut(Input<'i>) -> ParseResult<'i, TokenKind<'i, Annotation>> {
        branch::alt((
            combinator::map(supreme::tag("?"), |_| TokenKind::from(Wildcard::One))
                .context("exactly-one"),
            // A tree wildcard `**` must be preceded by a separator (rooted)
            // or the beginning of the expression, and followed by a
            // separator or the terminator. Flag groups may appear adjacent
            // to either side.
            combinator::map(
                sequence::tuple((
                    combinator::map(
                        branch::alt((
                            sequence::tuple((
                                combinator::value(true, supreme::tag("/")),
                                flags_with_state,
                            )),
                            sequence::tuple((combinator::value(false, boe), flags_with_state)),
                        )),
                        |(prefix, _)| prefix,
                    )
                    .context("prefix"),
                    sequence::terminated(
                        supreme::tag("**"),
                        branch::alt((
                            combinator::map(
                                sequence::tuple((flags_with_state, supreme::tag("/"))),
                                |(_, postfix)| postfix,
                            ),
                            terminator.clone(),
                        ))
                        .context("postfix"),
                    ),
                )),
                |(has_root, _)| Wildcard::Tree { has_root }.into(),
            )
            .context("tree"),
            // An eager zero-or-more wildcard `*` must not be followed by
            // another zero-or-more wildcard.
            combinator::map(
                sequence::terminated(
                    supreme::tag("*"),
                    branch::alt((
                        combinator::map(
                            combinator::peek(sequence::tuple((
                                flags_without_state,
                                bytes::is_not("*$").context("no terminating wildcard"),
                            ))),
                            |(_, right)| right,
                        ),
                        terminator.clone(),
                    )),
                ),
                |_| Wildcard::ZeroOrMore(Evaluation::Eager).into(),
            )
            .context("zero-or-more"),
            // A lazy zero-or-more wildcard `$` with the same adjacency rule.
            combinator::map(
                sequence::terminated(
                    supreme::tag("$"),
                    branch::alt((
                        combinator::map(
                            combinator::peek(sequence::tuple((
                                flags_without_state,
                                bytes::is_not("*$").context("no terminating wildcard"),
                            ))),
                            |(_, right)| right,
                        ),
                        terminator,
                    )),
                ),
                |_| Wildcard::ZeroOrMore(Evaluation::Lazy).into(),
            )
            .context("zero-or-more"),
        ))
    }
    // Parses a repetition token `<SUB-GLOB:BOUNDS>`.
    fn repetition(input: Input) -> ParseResult<TokenKind<Annotation>> {
        // Parses the bounds of a repetition as a lower bound and an optional
        // upper bound: a range `n,m` or `n,`, a converged count `n`, a bare
        // `:` (one or more), or nothing at all (zero or more).
        fn bounds(input: Input) -> ParseResult<(usize, Option<usize>)> {
            type BoundResult<T> = Result<T, <usize as FromStr>::Err>;
            branch::alt((
                sequence::preceded(
                    supreme::tag(":"),
                    branch::alt((
                        combinator::map_res(
                            sequence::separated_pair(
                                character::digit1,
                                supreme::tag(","),
                                combinator::opt(character::digit1),
                            ),
                            |(lower, upper): (Input, Option<_>)| -> BoundResult<_> {
                                let lower = lower.parse::<usize>()?;
                                let upper =
                                    upper.map(|upper| upper.parse::<usize>()).transpose()?;
                                Ok((lower, upper))
                            },
                        )
                        .context("range"),
                        combinator::map_res(character::digit1, |n: Input| -> BoundResult<_> {
                            let n = n.parse::<usize>()?;
                            Ok((n, Some(n)))
                        })
                        .context("converged"),
                        combinator::success((1, None)),
                    )),
                ),
                combinator::success((0, None)),
            ))(input)
        }
        combinator::map(
            sequence::delimited(
                supreme::tag("<"),
                sequence::tuple((
                    // The sub-glob is terminated by the bounds separator or
                    // the closing delimiter.
                    glob(move |input| {
                        combinator::peek(branch::alt((supreme::tag(":"), supreme::tag(">"))))(input)
                    })
                    .context("sub-glob"),
                    bounds.context("bounds"),
                )),
                supreme::tag(">"),
            ),
            |(tokens, (lower, upper))| {
                Repetition {
                    tokens,
                    lower,
                    upper,
                }
                .into()
            },
        )(input)
    }
    // Parses a character class token `[...]`, optionally negated with a
    // leading `!`, containing characters and character ranges.
    fn class(input: Input) -> ParseResult<TokenKind<Annotation>> {
        // Parses one or more archetypes: single characters or `a-z` style
        // ranges. `[`, `]`, and `-` must be escaped with a backslash.
        fn archetypes(input: Input) -> ParseResult<Vec<Archetype>> {
            let escaped_character = |input| {
                branch::alt((
                    character::none_of("[]-\\"),
                    branch::alt((
                        combinator::value('[', supreme::tag("\\[")),
                        combinator::value(']', supreme::tag("\\]")),
                        combinator::value('-', supreme::tag("\\-")),
                    )),
                ))(input)
            };
            multi::many1(branch::alt((
                combinator::map(
                    sequence::separated_pair(
                        escaped_character,
                        supreme::tag("-"),
                        escaped_character,
                    ),
                    Archetype::from,
                ),
                combinator::map(escaped_character, Archetype::from),
            )))(input)
        }
        combinator::map(
            sequence::delimited(
                supreme::tag("["),
                sequence::tuple((combinator::opt(supreme::tag("!")), archetypes)),
                supreme::tag("]"),
            ),
            |(negation, archetypes)| {
                Class {
                    is_negated: negation.is_some(),
                    archetypes,
                }
                .into()
            },
        )(input)
    }
    // Parses an alternative token `{...,...}` of comma-separated sub-globs.
    fn alternative(input: Input) -> ParseResult<TokenKind<Annotation>> {
        sequence::preceded(
            supreme::tag("{"),
            combinator::map(
                supreme::many1(
                    // Each sub-glob is terminated by a comma or the closing
                    // delimiter.
                    glob(move |input| {
                        combinator::peek(branch::alt((supreme::tag(","), supreme::tag("}"))))(input)
                    })
                    .context("sub-glob"),
                    supreme::tag(","),
                    supreme::tag("}"),
                ),
                |alternatives: Vec<Vec<_>>| Alternative::from(alternatives).into(),
            ),
        )(input)
    }
    // The core parser: parses one or more tokens terminated by `terminator`.
    // It is applied recursively to parse the sub-globs of repetitions and
    // alternatives.
    fn glob<'i>(
        terminator: impl 'i + Clone + Parser<Input<'i>, Input<'i>, ErrorTree<'i>>,
    ) -> impl Parser<Input<'i>, Vec<Token<'i, Annotation>>, ErrorTree<'i>> {
        // When diagnostics are enabled, annotate each token with its span in
        // the expression; otherwise, use the unit annotation.
        #[cfg(any(feature = "diagnostics-inspect", feature = "diagnostics-report"))]
        fn annotate<'i, F>(
            parser: F,
        ) -> impl FnMut(Input<'i>) -> ParseResult<'i, Token<'i, Annotation>>
        where
            F: 'i + Parser<Input<'i>, TokenKind<'i, Annotation>, ErrorTree<'i>>,
        {
            combinator::map(pori::span(parser), |(span, kind)| Token::new(kind, span))
        }
        #[cfg(all(
            not(feature = "diagnostics-inspect"),
            not(feature = "diagnostics-report"),
        ))]
        fn annotate<'i, F>(
            parser: F,
        ) -> impl FnMut(Input<'i>) -> ParseResult<'i, Token<'i, Annotation>>
        where
            F: 'i + Parser<Input<'i>, TokenKind<'i, Annotation>, ErrorTree<'i>>,
        {
            combinator::map(parser, |kind| Token::new(kind, ()))
        }
        move |mut input: Input<'i>| {
            // Record where this (sub-)expression begins so that `boe` can
            // detect it.
            input.state.subexpression = input.location();
            supreme::many1(
                branch::alt((
                    annotate(sequence::preceded(flags_with_state, literal)).context("literal"),
                    annotate(sequence::preceded(flags_with_state, repetition))
                        .context("repetition"),
                    annotate(sequence::preceded(flags_with_state, alternative))
                        .context("alternative"),
                    annotate(sequence::preceded(
                        flags_with_state,
                        wildcard(terminator.clone()),
                    ))
                    .context("wildcard"),
                    annotate(sequence::preceded(flags_with_state, class)).context("class"),
                    annotate(sequence::preceded(flags_with_state, separator)).context("separator"),
                )),
                identity,
                terminator.clone(),
            )
            .parse(input)
        }
    }
    if expression.is_empty() {
        // An empty expression trivially parses into no tokens.
        Ok(Tokenized {
            expression: expression.into(),
            tokens: vec![],
        })
    }
    else {
        let input = Input::new(Expression::from(expression), ParserState::default());
        // The top-level glob is terminated by (and must consume the input up
        // to) the end of the expression.
        let tokens = combinator::all_consuming(glob(combinator::eof))(input)
            .map(|(_, tokens)| tokens)
            .map_err(|error| ParseError::new(expression, error))?;
        Ok(Tokenized {
            expression: expression.into(),
            tokens,
        })
    }
}
| 35.484894 | 100 | 0.471031 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.