// --- file d6e50209f72ddf73471091869bd6d72ea42a31d6 ---
//! Error Reporting for Anonymous Region Lifetime Errors
//! where both regions are anonymous.
use crate::infer::error_reporting::nice_region_error::util::AnonymousParamInfo;
use crate::infer::error_reporting::nice_region_error::NiceRegionError;
use crate::util::common::ErrorReported;
use rustc_error_codes::*;
use rustc_errors::struct_span_err;
impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
/// Print the error message for lifetime errors when both of the regions concerned are anonymous.
///
/// Consider a case where we have
///
/// ```no_run
/// fn foo(x: &mut Vec<&u8>, y: &u8) {
/// x.push(y);
/// }
/// ```
///
/// The example gives
///
/// ```text
/// fn foo(x: &mut Vec<&u8>, y: &u8) {
/// --- --- these references are declared with different lifetimes...
/// x.push(y);
/// ^ ...but data from `y` flows into `x` here
/// ```
///
/// It has been extended for the case of structs too.
///
/// Consider the example
///
/// ```no_run
/// struct Ref<'a> { x: &'a u32 }
/// ```
///
/// ```text
/// fn foo(mut x: Vec<Ref>, y: Ref) {
/// --- --- these structs are declared with different lifetimes...
/// x.push(y);
/// ^ ...but data from `y` flows into `x` here
/// }
/// ```
///
/// It will later be extended to trait objects.
pub(super) fn try_report_anon_anon_conflict(&self) -> Option<ErrorReported> {
let (span, sub, sup) = self.regions();
// Determine whether the sub and sup consist of both anonymous (elided) regions.
let anon_reg_sup = self.tcx().is_suitable_region(sup)?;
let anon_reg_sub = self.tcx().is_suitable_region(sub)?;
let scope_def_id_sup = anon_reg_sup.def_id;
let bregion_sup = anon_reg_sup.boundregion;
let scope_def_id_sub = anon_reg_sub.def_id;
let bregion_sub = anon_reg_sub.boundregion;
let ty_sup = self.find_anon_type(sup, &bregion_sup)?;
let ty_sub = self.find_anon_type(sub, &bregion_sub)?;
debug!(
"try_report_anon_anon_conflict: found_param1={:?} sup={:?} br1={:?}",
ty_sub, sup, bregion_sup
);
debug!(
"try_report_anon_anon_conflict: found_param2={:?} sub={:?} br2={:?}",
ty_sup, sub, bregion_sub
);
let (ty_sup, ty_fndecl_sup) = ty_sup;
let (ty_sub, ty_fndecl_sub) = ty_sub;
let AnonymousParamInfo { param: anon_param_sup, .. } =
self.find_param_with_region(sup, sup)?;
let AnonymousParamInfo { param: anon_param_sub, .. } =
self.find_param_with_region(sub, sub)?;
let sup_is_ret_type =
self.is_return_type_anon(scope_def_id_sup, bregion_sup, ty_fndecl_sup);
let sub_is_ret_type =
self.is_return_type_anon(scope_def_id_sub, bregion_sub, ty_fndecl_sub);
let span_label_var1 = match anon_param_sup.pat.simple_ident() {
Some(simple_ident) => format!(" from `{}`", simple_ident),
None => String::new(),
};
let span_label_var2 = match anon_param_sub.pat.simple_ident() {
Some(simple_ident) => format!(" into `{}`", simple_ident),
None => String::new(),
};
let (span_1, span_2, main_label, span_label) = match (sup_is_ret_type, sub_is_ret_type) {
(None, None) => {
let (main_label_1, span_label_1) = if ty_sup.hir_id == ty_sub.hir_id {
(
"this type is declared with multiple lifetimes...".to_owned(),
"...but data with one lifetime flows into the other here".to_owned(),
)
} else {
(
"these two types are declared with different lifetimes...".to_owned(),
format!("...but data{} flows{} here", span_label_var1, span_label_var2),
)
};
(ty_sup.span, ty_sub.span, main_label_1, span_label_1)
}
(Some(ret_span), _) => (
ty_sub.span,
ret_span,
"this parameter and the return type are declared \
with different lifetimes..."
.to_owned(),
format!("...but data{} is returned here", span_label_var1),
),
(_, Some(ret_span)) => (
ty_sup.span,
ret_span,
"this parameter and the return type are declared \
with different lifetimes..."
.to_owned(),
format!("...but data{} is returned here", span_label_var1),
),
};
struct_span_err!(self.tcx().sess, span, E0623, "lifetime mismatch")
.span_label(span_1, main_label)
.span_label(span_2, String::new())
.span_label(span, span_label)
.emit();
Some(ErrorReported)
}
}

// --- file e52c72301096b18b889348b1f2b31d68c60e4e78 ---
use super::*;
use crate::support::int;
use crate::support::CxxVTable;
use crate::support::FieldOffset;
use crate::support::Opaque;
use crate::support::RustVTable;
use crate::support::UniquePtr;
// class Channel {
// public:
// virtual ~Channel() = default;
// virtual void sendResponse(int callId,
// std::unique_ptr<StringBuffer> message) = 0;
// virtual void sendNotification(std::unique_ptr<StringBuffer> message) = 0;
// virtual void flushProtocolNotifications() = 0;
// };
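// On the Rust side this abstract class is mirrored in two directions:
// outgoing calls go through the raw `extern "C"` bindings just below, while
// virtual calls arriving from C++ enter the `__BASE__` shims, which use the
// offsets and Rust vtable stored in `ChannelBase` to reconstruct a
// `&mut dyn ChannelImpl` and dispatch to the embedder's implementation.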
extern "C" {
fn v8_inspector__V8Inspector__Channel__BASE__CONSTRUCT(
buf: &mut std::mem::MaybeUninit<Channel>,
) -> ();
fn v8_inspector__V8Inspector__Channel__sendResponse(
this: &mut Channel,
call_id: int,
message: UniquePtr<StringBuffer>,
) -> ();
fn v8_inspector__V8Inspector__Channel__sendNotification(
this: &mut Channel,
message: UniquePtr<StringBuffer>,
) -> ();
fn v8_inspector__V8Inspector__Channel__flushProtocolNotifications(
this: &mut Channel,
) -> ();
}
#[no_mangle]
pub unsafe extern "C" fn v8_inspector__V8Inspector__Channel__BASE__sendResponse(
this: &mut Channel,
call_id: int,
message: UniquePtr<StringBuffer>,
) {
ChannelBase::dispatch_mut(this).send_response(call_id, message)
}
#[no_mangle]
pub unsafe extern "C" fn v8_inspector__V8Inspector__Channel__BASE__sendNotification(
this: &mut Channel,
message: UniquePtr<StringBuffer>,
) {
ChannelBase::dispatch_mut(this).send_notification(message)
}
#[no_mangle]
pub unsafe extern "C" fn v8_inspector__V8Inspector__Channel__BASE__flushProtocolNotifications(
this: &mut Channel,
) {
ChannelBase::dispatch_mut(this).flush_protocol_notifications()
}
#[repr(C)]
pub struct Channel {
_cxx_vtable: CxxVTable,
}
impl Channel {
pub fn send_response(
&mut self,
call_id: int,
message: UniquePtr<StringBuffer>,
) {
unsafe {
v8_inspector__V8Inspector__Channel__sendResponse(self, call_id, message)
}
}
pub fn send_notification(&mut self, message: UniquePtr<StringBuffer>) {
unsafe {
v8_inspector__V8Inspector__Channel__sendNotification(self, message)
}
}
pub fn flush_protocol_notifications(&mut self) {
unsafe {
v8_inspector__V8Inspector__Channel__flushProtocolNotifications(self)
}
}
}
pub trait AsChannel {
fn as_channel(&self) -> &Channel;
fn as_channel_mut(&mut self) -> &mut Channel;
}
impl AsChannel for Channel {
fn as_channel(&self) -> &Channel {
self
}
fn as_channel_mut(&mut self) -> &mut Channel {
self
}
}
impl<T> AsChannel for T
where
T: ChannelImpl,
{
fn as_channel(&self) -> &Channel {
&self.base().cxx_base
}
fn as_channel_mut(&mut self) -> &mut Channel {
&mut self.base_mut().cxx_base
}
}
pub trait ChannelImpl: AsChannel {
fn base(&self) -> &ChannelBase;
fn base_mut(&mut self) -> &mut ChannelBase;
fn send_response(
&mut self,
call_id: int,
message: UniquePtr<StringBuffer>,
) -> ();
fn send_notification(&mut self, message: UniquePtr<StringBuffer>) -> ();
fn flush_protocol_notifications(&mut self) -> ();
}
pub struct ChannelBase {
cxx_base: Channel,
offset_within_embedder: FieldOffset<Self>,
rust_vtable: RustVTable<&'static dyn ChannelImpl>,
}
impl ChannelBase {
fn construct_cxx_base() -> Channel {
unsafe {
let mut buf = std::mem::MaybeUninit::<Channel>::uninit();
v8_inspector__V8Inspector__Channel__BASE__CONSTRUCT(&mut buf);
buf.assume_init()
}
}
fn get_cxx_base_offset() -> FieldOffset<Channel> {
let buf = std::mem::MaybeUninit::<Self>::uninit();
FieldOffset::from_ptrs(buf.as_ptr(), unsafe { &(*buf.as_ptr()).cxx_base })
}
fn get_offset_within_embedder<T>() -> FieldOffset<Self>
where
T: ChannelImpl,
{
let buf = std::mem::MaybeUninit::<T>::uninit();
let embedder_ptr: *const T = buf.as_ptr();
let self_ptr: *const Self = unsafe { (*embedder_ptr).base() };
FieldOffset::from_ptrs(embedder_ptr, self_ptr)
}
fn get_rust_vtable<T>() -> RustVTable<&'static dyn ChannelImpl>
where
T: ChannelImpl,
{
let buf = std::mem::MaybeUninit::<T>::uninit();
let embedder_ptr = buf.as_ptr();
let trait_object: *const dyn ChannelImpl = embedder_ptr;
let (data_ptr, vtable): (*const T, RustVTable<_>) =
unsafe { std::mem::transmute(trait_object) };
assert_eq!(data_ptr, embedder_ptr);
vtable
}
pub fn new<T>() -> Self
where
T: ChannelImpl,
{
Self {
cxx_base: Self::construct_cxx_base(),
offset_within_embedder: Self::get_offset_within_embedder::<T>(),
rust_vtable: Self::get_rust_vtable::<T>(),
}
}
pub unsafe fn dispatch(channel: &Channel) -> &dyn ChannelImpl {
let this = Self::get_cxx_base_offset().to_embedder::<Self>(channel);
let embedder = this.offset_within_embedder.to_embedder::<Opaque>(this);
std::mem::transmute((embedder, this.rust_vtable))
}
pub unsafe fn dispatch_mut(channel: &mut Channel) -> &mut dyn ChannelImpl {
let this = Self::get_cxx_base_offset().to_embedder_mut::<Self>(channel);
let vtable = this.rust_vtable;
let embedder = this.offset_within_embedder.to_embedder_mut::<Opaque>(this);
std::mem::transmute((embedder, vtable))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::support::UniquePtr;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
static MESSAGE: &[u8] = b"Hello Pluto!";
static CALL_COUNT: AtomicUsize = AtomicUsize::new(0);
// Using repr(C) to preserve field ordering and test that everything works
// when the ChannelBase field is not the first element of the struct.
#[repr(C)]
pub struct TestChannel {
field1: i32,
base: ChannelBase,
field2: u64,
}
impl ChannelImpl for TestChannel {
fn base(&self) -> &ChannelBase {
&self.base
}
fn base_mut(&mut self) -> &mut ChannelBase {
&mut self.base
}
fn send_response(
&mut self,
call_id: i32,
mut message: UniquePtr<StringBuffer>,
) {
assert_eq!(call_id, 999);
assert_eq!(message.as_mut().unwrap().string().len(), MESSAGE.len());
self.log_call();
}
fn send_notification(&mut self, mut message: UniquePtr<StringBuffer>) {
assert_eq!(message.as_mut().unwrap().string().len(), MESSAGE.len());
self.log_call();
}
fn flush_protocol_notifications(&mut self) {
self.log_call()
}
}
impl TestChannel {
pub fn new() -> Self {
Self {
base: ChannelBase::new::<Self>(),
field1: -42,
field2: 420,
}
}
fn log_call(&self) {
assert_eq!(self.field1, -42);
assert_eq!(self.field2, 420);
CALL_COUNT.fetch_add(1, SeqCst);
}
}
#[test]
fn test_channel() {
let mut channel = TestChannel::new();
let msg_view = StringView::from(MESSAGE);
channel.send_response(999, StringBuffer::create(&msg_view));
assert_eq!(CALL_COUNT.swap(0, SeqCst), 1);
channel.send_notification(StringBuffer::create(&msg_view));
assert_eq!(CALL_COUNT.swap(0, SeqCst), 1);
channel.flush_protocol_notifications();
assert_eq!(CALL_COUNT.swap(0, SeqCst), 1);
}
}

// --- file f9bb51861b797415db2e6b077d1a0195ef58e7d0 ---
// This file is dual licensed under the terms of the Apache License, Version
// 2.0, and the BSD License. See the LICENSE file in the root of this repository
// for complete details.
use crate::asn1::{big_asn1_uint_to_py, AttributeTypeValue, Name, PyAsn1Error};
use chrono::{Datelike, Timelike};
use pyo3::conversion::ToPyObject;
use pyo3::types::IntoPyDict;
use std::collections::hash_map::DefaultHasher;
use std::collections::HashSet;
use std::convert::TryInto;
use std::hash::{Hash, Hasher};
lazy_static::lazy_static! {
static ref TLS_FEATURE_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("1.3.6.1.5.5.7.1.24").unwrap();
static ref PRECERT_POISON_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("1.3.6.1.4.1.11129.2.4.3").unwrap();
static ref OCSP_NO_CHECK_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("1.3.6.1.5.5.7.48.1.5").unwrap();
static ref AUTHORITY_INFORMATION_ACCESS_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("1.3.6.1.5.5.7.1.1").unwrap();
static ref SUBJECT_INFORMATION_ACCESS_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("1.3.6.1.5.5.7.1.11").unwrap();
static ref KEY_USAGE_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.15").unwrap();
static ref POLICY_CONSTRAINTS_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.36").unwrap();
static ref AUTHORITY_KEY_IDENTIFIER_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.35").unwrap();
static ref EXTENDED_KEY_USAGE_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.37").unwrap();
static ref BASIC_CONSTRAINTS_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.19").unwrap();
static ref SUBJECT_KEY_IDENTIFIER_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.14").unwrap();
static ref INHIBIT_ANY_POLICY_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.54").unwrap();
static ref CRL_REASON_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.21").unwrap();
static ref ISSUING_DISTRIBUTION_POINT_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.28").unwrap();
static ref CERTIFICATE_ISSUER_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.29").unwrap();
static ref NAME_CONSTRAINTS_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.30").unwrap();
static ref CRL_DISTRIBUTION_POINTS_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.31").unwrap();
static ref CERTIFICATE_POLICIES_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.32").unwrap();
static ref FRESHEST_CRL_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.46").unwrap();
static ref CRL_NUMBER_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.20").unwrap();
static ref INVALIDITY_DATE_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.24").unwrap();
static ref DELTA_CRL_INDICATOR_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.27").unwrap();
static ref SUBJECT_ALTERNATIVE_NAME_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.17").unwrap();
static ref ISSUER_ALTERNATIVE_NAME_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("2.5.29.18").unwrap();
static ref PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("1.3.6.1.4.1.11129.2.4.2").unwrap();
static ref CP_CPS_URI_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("1.3.6.1.5.5.7.2.1").unwrap();
static ref CP_USER_NOTICE_OID: asn1::ObjectIdentifier<'static> = asn1::ObjectIdentifier::from_string("1.3.6.1.5.5.7.2.2").unwrap();
}
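// Shared driver for the extension parsers below: results are memoized in
// `cached_extensions`, duplicate OIDs raise DuplicateExtension, and any OID
// the `parse_ext` callback does not recognize falls back to
// UnrecognizedExtension.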
pub(crate) fn parse_and_cache_extensions<
'p,
F: Fn(&asn1::ObjectIdentifier<'_>, &[u8]) -> Result<Option<&'p pyo3::PyAny>, PyAsn1Error>,
>(
py: pyo3::Python<'p>,
cached_extensions: &mut Option<pyo3::PyObject>,
raw_exts: &Option<Extensions<'_>>,
parse_ext: F,
) -> pyo3::PyResult<pyo3::PyObject> {
if let Some(cached) = cached_extensions {
return Ok(cached.clone_ref(py));
}
let x509_module = py.import("cryptography.x509")?;
let exts = pyo3::types::PyList::empty(py);
let mut seen_oids = HashSet::new();
if let Some(raw_exts) = raw_exts {
for raw_ext in raw_exts.clone() {
let oid_obj =
x509_module.call_method1("ObjectIdentifier", (raw_ext.extn_id.to_string(),))?;
if seen_oids.contains(&raw_ext.extn_id) {
return Err(pyo3::PyErr::from_instance(x509_module.call_method1(
"DuplicateExtension",
(
format!("Duplicate {} extension found", raw_ext.extn_id),
oid_obj,
),
)?));
}
let extn_value = match parse_ext(&raw_ext.extn_id, raw_ext.extn_value)? {
Some(e) => e,
None => x509_module
.call_method1("UnrecognizedExtension", (oid_obj, raw_ext.extn_value))?,
};
let ext_obj =
x509_module.call_method1("Extension", (oid_obj, raw_ext.critical, extn_value))?;
exts.append(ext_obj)?;
seen_oids.insert(raw_ext.extn_id);
}
}
let extensions = x509_module
.call_method1("Extensions", (exts,))?
.to_object(py);
*cached_extensions = Some(extensions.clone_ref(py));
Ok(extensions)
}
pub(crate) type Extensions<'a> = asn1::SequenceOf<'a, Extension<'a>>;
#[derive(asn1::Asn1Read, asn1::Asn1Write)]
pub(crate) struct AlgorithmIdentifier<'a> {
pub(crate) oid: asn1::ObjectIdentifier<'a>,
pub(crate) _params: Option<asn1::Tlv<'a>>,
}
#[derive(asn1::Asn1Read, asn1::Asn1Write)]
pub(crate) struct Extension<'a> {
pub(crate) extn_id: asn1::ObjectIdentifier<'a>,
#[default(false)]
pub(crate) critical: bool,
pub(crate) extn_value: &'a [u8],
}
#[derive(asn1::Asn1Read)]
struct PolicyInformation<'a> {
policy_identifier: asn1::ObjectIdentifier<'a>,
policy_qualifiers: Option<asn1::SequenceOf<'a, PolicyQualifierInfo<'a>>>,
}
#[derive(asn1::Asn1Read)]
struct PolicyQualifierInfo<'a> {
policy_qualifier_id: asn1::ObjectIdentifier<'a>,
qualifier: Qualifier<'a>,
}
#[derive(asn1::Asn1Read)]
enum Qualifier<'a> {
CpsUri(asn1::IA5String<'a>),
UserNotice(UserNotice<'a>),
}
#[derive(asn1::Asn1Read)]
struct UserNotice<'a> {
notice_ref: Option<NoticeReference<'a>>,
explicit_text: Option<DisplayText<'a>>,
}
#[derive(asn1::Asn1Read)]
struct NoticeReference<'a> {
organization: DisplayText<'a>,
notice_numbers: asn1::SequenceOf<'a, asn1::BigUint<'a>>,
}
// DisplayText also allows BMPString, which we currently do not support.
#[allow(clippy::enum_variant_names)]
#[derive(asn1::Asn1Read)]
enum DisplayText<'a> {
IA5String(asn1::IA5String<'a>),
Utf8String(asn1::Utf8String<'a>),
VisibleString(asn1::VisibleString<'a>),
}
fn parse_display_text(py: pyo3::Python<'_>, text: DisplayText<'_>) -> pyo3::PyObject {
match text {
DisplayText::IA5String(o) => pyo3::types::PyString::new(py, o.as_str()).to_object(py),
DisplayText::Utf8String(o) => pyo3::types::PyString::new(py, o.as_str()).to_object(py),
DisplayText::VisibleString(o) => pyo3::types::PyString::new(py, o.as_str()).to_object(py),
}
}
fn parse_user_notice(
py: pyo3::Python<'_>,
un: UserNotice<'_>,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let x509_module = py.import("cryptography.x509")?;
let et = match un.explicit_text {
Some(data) => parse_display_text(py, data),
None => py.None(),
};
let nr = match un.notice_ref {
Some(data) => {
let org = parse_display_text(py, data.organization);
let numbers = pyo3::types::PyList::empty(py);
for num in data.notice_numbers {
numbers.append(big_asn1_uint_to_py(py, num)?.to_object(py))?;
}
x509_module
.call_method1("NoticeReference", (org, numbers))?
.to_object(py)
}
None => py.None(),
};
Ok(x509_module
.call_method1("UserNotice", (nr, et))?
.to_object(py))
}
fn parse_policy_qualifiers<'a>(
py: pyo3::Python<'_>,
policy_qualifiers: asn1::SequenceOf<'a, PolicyQualifierInfo<'a>>,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let py_pq = pyo3::types::PyList::empty(py);
for pqi in policy_qualifiers {
let qualifier = match pqi.qualifier {
Qualifier::CpsUri(data) => {
if pqi.policy_qualifier_id == *CP_CPS_URI_OID {
pyo3::types::PyString::new(py, data.as_str()).to_object(py)
} else {
return Err(PyAsn1Error::from(pyo3::exceptions::PyValueError::new_err(
"CpsUri ASN.1 structure found but OID did not match",
)));
}
}
Qualifier::UserNotice(un) => {
if pqi.policy_qualifier_id != *CP_USER_NOTICE_OID {
return Err(PyAsn1Error::from(pyo3::exceptions::PyValueError::new_err(
"UserNotice ASN.1 structure found but OID did not match",
)));
}
parse_user_notice(py, un)?
}
};
py_pq.append(qualifier)?;
}
Ok(py_pq.to_object(py))
}
fn parse_cp(py: pyo3::Python<'_>, ext_data: &[u8]) -> Result<pyo3::PyObject, PyAsn1Error> {
let cp = asn1::parse_single::<asn1::SequenceOf<'_, PolicyInformation<'_>>>(ext_data)?;
let x509_module = py.import("cryptography.x509")?;
let certificate_policies = pyo3::types::PyList::empty(py);
for policyinfo in cp {
let pi_oid = x509_module
.call_method1(
"ObjectIdentifier",
(policyinfo.policy_identifier.to_string(),),
)?
.to_object(py);
let py_pqis = match policyinfo.policy_qualifiers {
Some(policy_qualifiers) => parse_policy_qualifiers(py, policy_qualifiers)?,
None => py.None(),
};
let pi = x509_module
.call_method1("PolicyInformation", (pi_oid, py_pqis))?
.to_object(py);
certificate_policies.append(pi)?;
}
Ok(certificate_policies.to_object(py))
}
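// Builds a naive Python datetime.datetime from the UTC components of `dt`,
// dropping sub-second precision and carrying no tzinfo.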
fn chrono_to_py<'p>(
py: pyo3::Python<'p>,
dt: &chrono::DateTime<chrono::Utc>,
) -> pyo3::PyResult<&'p pyo3::PyAny> {
let datetime_module = py.import("datetime")?;
datetime_module.call1(
"datetime",
(
dt.year(),
dt.month(),
dt.day(),
dt.hour(),
dt.minute(),
dt.second(),
),
)
}
struct UnvalidatedIA5String<'a>(&'a str);
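// "Unvalidated" because parse_data accepts any UTF-8 rather than enforcing
// the ASCII-only IA5 character set; 0x16 is the universal ASN.1 tag number
// for IA5String.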
impl<'a> asn1::SimpleAsn1Readable<'a> for UnvalidatedIA5String<'a> {
const TAG: u8 = 0x16;
fn parse_data(data: &'a [u8]) -> asn1::ParseResult<Self> {
Ok(UnvalidatedIA5String(
std::str::from_utf8(data).map_err(|_| asn1::ParseError::InvalidValue)?,
))
}
}
#[derive(asn1::Asn1Read)]
struct NameConstraints<'a> {
#[implicit(0)]
permitted_subtrees: Option<asn1::SequenceOf<'a, GeneralSubtree<'a>>>,
#[implicit(1)]
excluded_subtrees: Option<asn1::SequenceOf<'a, GeneralSubtree<'a>>>,
}
#[derive(asn1::Asn1Read)]
struct GeneralSubtree<'a> {
base: GeneralName<'a>,
#[implicit(0)]
#[default(0u64)]
_minimum: u64,
#[implicit(1)]
_maximum: Option<u64>,
}
fn parse_general_subtrees<'a>(
py: pyo3::Python<'_>,
subtrees: asn1::SequenceOf<'a, GeneralSubtree<'a>>,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let gns = pyo3::types::PyList::empty(py);
for gs in subtrees {
gns.append(parse_general_name(py, gs.base)?)?;
}
Ok(gns.to_object(py))
}
#[derive(asn1::Asn1Read)]
struct IssuingDistributionPoint<'a> {
#[explicit(0)]
distribution_point: Option<DistributionPointName<'a>>,
#[implicit(1)]
#[default(false)]
only_contains_user_certs: bool,
#[implicit(2)]
#[default(false)]
only_contains_ca_certs: bool,
#[implicit(3)]
only_some_reasons: Option<asn1::BitString<'a>>,
#[implicit(4)]
#[default(false)]
indirect_crl: bool,
#[implicit(5)]
#[default(false)]
only_contains_attribute_certs: bool,
}
#[derive(asn1::Asn1Read)]
struct DistributionPoint<'a> {
#[explicit(0)]
distribution_point: Option<DistributionPointName<'a>>,
#[implicit(1)]
reasons: Option<asn1::BitString<'a>>,
#[implicit(2)]
crl_issuer: Option<asn1::SequenceOf<'a, GeneralName<'a>>>,
}
#[derive(asn1::Asn1Read)]
enum DistributionPointName<'a> {
#[implicit(0)]
FullName(asn1::SequenceOf<'a, GeneralName<'a>>),
#[implicit(1)]
NameRelativeToCRLIssuer(asn1::SetOf<'a, AttributeTypeValue<'a>>),
}
#[derive(asn1::Asn1Read)]
struct AuthorityKeyIdentifier<'a> {
#[implicit(0)]
key_identifier: Option<&'a [u8]>,
#[implicit(1)]
authority_cert_issuer: Option<asn1::SequenceOf<'a, GeneralName<'a>>>,
#[implicit(2)]
authority_cert_serial_number: Option<asn1::BigUint<'a>>,
}
fn parse_distribution_point_name(
py: pyo3::Python<'_>,
dp: DistributionPointName<'_>,
) -> Result<(pyo3::PyObject, pyo3::PyObject), PyAsn1Error> {
Ok(match dp {
DistributionPointName::FullName(data) => (parse_general_names(py, data)?, py.None()),
DistributionPointName::NameRelativeToCRLIssuer(data) => (py.None(), parse_rdn(py, data)?),
})
}
fn parse_distribution_point(
py: pyo3::Python<'_>,
dp: DistributionPoint<'_>,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let (full_name, relative_name) = match dp.distribution_point {
Some(data) => parse_distribution_point_name(py, data)?,
None => (py.None(), py.None()),
};
let reasons = parse_distribution_point_reasons(py, dp.reasons)?;
let crl_issuer = match dp.crl_issuer {
Some(aci) => parse_general_names(py, aci)?,
None => py.None(),
};
let x509_module = py.import("cryptography.x509")?;
Ok(x509_module
.call1(
"DistributionPoint",
(full_name, relative_name, reasons, crl_issuer),
)?
.to_object(py))
}
fn parse_distribution_points(
py: pyo3::Python<'_>,
data: &[u8],
) -> Result<pyo3::PyObject, PyAsn1Error> {
let dps = asn1::parse_single::<asn1::SequenceOf<'_, DistributionPoint<'_>>>(data)?;
let py_dps = pyo3::types::PyList::empty(py);
for dp in dps {
let py_dp = parse_distribution_point(py, dp)?;
py_dps.append(py_dp)?;
}
Ok(py_dps.to_object(py))
}
fn parse_distribution_point_reasons(
py: pyo3::Python<'_>,
reasons: Option<asn1::BitString<'_>>,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let reason_bit_mapping = py
.import("cryptography.x509.extensions")?
.getattr("_REASON_BIT_MAPPING")?;
Ok(match reasons {
Some(bs) => {
let mut vec = Vec::new();
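// RFC 5280 defines ReasonFlags with bit 0 as "unused", so the
// meaningful flags occupy bits 1 through 8 of the BIT STRING.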
for i in 1..=8 {
if bs.has_bit_set(i) {
vec.push(reason_bit_mapping.get_item(i)?);
}
}
pyo3::types::PyFrozenSet::new(py, &vec)?.to_object(py)
}
None => py.None(),
})
}
#[derive(asn1::Asn1Read)]
enum GeneralName<'a> {
#[implicit(0)]
OtherName(AttributeTypeValue<'a>),
#[implicit(1)]
RFC822Name(UnvalidatedIA5String<'a>),
#[implicit(2)]
DNSName(UnvalidatedIA5String<'a>),
#[implicit(3)]
// unsupported
X400Address(asn1::Sequence<'a>),
// Name is explicit per RFC 5280 Appendix A.1.
#[explicit(4)]
DirectoryName(Name<'a>),
#[implicit(5)]
// unsupported
EDIPartyName(asn1::Sequence<'a>),
#[implicit(6)]
UniformResourceIdentifier(UnvalidatedIA5String<'a>),
#[implicit(7)]
IPAddress(&'a [u8]),
#[implicit(8)]
RegisteredID(asn1::ObjectIdentifier<'a>),
}
#[derive(asn1::Asn1Read)]
struct BasicConstraints {
#[default(false)]
ca: bool,
path_length: Option<u64>,
}
#[derive(asn1::Asn1Read)]
struct PolicyConstraints {
#[implicit(0)]
require_explicit_policy: Option<u64>,
#[implicit(1)]
inhibit_policy_mapping: Option<u64>,
}
#[derive(asn1::Asn1Read)]
struct AccessDescription<'a> {
access_method: asn1::ObjectIdentifier<'a>,
access_location: GeneralName<'a>,
}
fn parse_authority_key_identifier<'p>(
py: pyo3::Python<'p>,
ext_data: &[u8],
) -> Result<&'p pyo3::PyAny, PyAsn1Error> {
let x509_module = py.import("cryptography.x509")?;
let aki = asn1::parse_single::<AuthorityKeyIdentifier<'_>>(ext_data)?;
let serial = match aki.authority_cert_serial_number {
Some(biguint) => big_asn1_uint_to_py(py, biguint)?.to_object(py),
None => py.None(),
};
let issuer = match aki.authority_cert_issuer {
Some(aci) => parse_general_names(py, aci)?,
None => py.None(),
};
Ok(x509_module.call1(
"AuthorityKeyIdentifier",
(aki.key_identifier, issuer, serial),
)?)
}
fn parse_name_attribute(
py: pyo3::Python<'_>,
attribute: AttributeTypeValue<'_>,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let x509_module = py.import("cryptography.x509")?;
let oid = x509_module
.call_method1("ObjectIdentifier", (attribute.type_id.to_string(),))?
.to_object(py);
let tag_enum = py
.import("cryptography.x509.name")?
.getattr("_ASN1_TYPE_TO_ENUM")?;
let py_tag = tag_enum.get_item(attribute.value.tag().to_object(py))?;
let py_data =
std::str::from_utf8(attribute.value.data()).map_err(|_| asn1::ParseError::InvalidValue)?;
Ok(x509_module
.call_method1("NameAttribute", (oid, py_data, py_tag))?
.to_object(py))
}
fn parse_rdn<'a>(
py: pyo3::Python<'_>,
rdn: asn1::SetOf<'a, AttributeTypeValue<'a>>,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let x509_module = py.import("cryptography.x509")?;
let py_attrs = pyo3::types::PySet::empty(py)?;
for attribute in rdn {
let na = parse_name_attribute(py, attribute)?;
py_attrs.add(na)?;
}
Ok(x509_module
.call_method1("RelativeDistinguishedName", (py_attrs,))?
.to_object(py))
}
fn parse_name<'p>(py: pyo3::Python<'p>, name: &Name<'_>) -> pyo3::PyResult<&'p pyo3::PyAny> {
let x509_module = py.import("cryptography.x509")?;
let py_rdns = pyo3::types::PyList::empty(py);
for rdn in name.clone() {
let py_rdn = parse_rdn(py, rdn)?;
py_rdns.append(py_rdn)?;
}
x509_module.call_method1("Name", (py_rdns,))
}
fn ipv4_netmask(num: u32) -> Result<u32, PyAsn1Error> {
// we invert and check leading zeros because leading_ones wasn't stabilized
// until 1.46.0. When we raise our MSRV we should change this
if (!num).leading_zeros() + num.trailing_zeros() != 32 {
return Err(PyAsn1Error::from(pyo3::exceptions::PyValueError::new_err(
"Invalid netmask",
)));
}
Ok((!num).leading_zeros())
}
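#[cfg(test)]
mod netmask_tests {
    // A minimal check (not from the original file) illustrating the inversion
    // trick above: a /24 mask is 0xFF_FF_FF_00, its inverse 0x00_00_00_FF has
    // exactly 24 leading zeros, and the trailing_zeros test rejects
    // non-contiguous masks such as 0xFF_00_FF_00.
    use super::ipv4_netmask;

    #[test]
    fn netmask_prefix_round_trip() {
        assert!(matches!(ipv4_netmask(0xFF_FF_FF_00), Ok(24)));
        assert!(ipv4_netmask(0xFF_00_FF_00).is_err());
    }
}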
fn ipv6_netmask(num: u128) -> Result<u32, PyAsn1Error> {
// we invert and check leading zeros because leading_ones wasn't stabilized
// until 1.46.0. When we raise our MSRV we should change this
if (!num).leading_zeros() + num.trailing_zeros() != 128 {
return Err(PyAsn1Error::from(pyo3::exceptions::PyValueError::new_err(
"Invalid netmask",
)));
}
Ok((!num).leading_zeros())
}
fn create_ip_network(py: pyo3::Python<'_>, data: &[u8]) -> Result<pyo3::PyObject, PyAsn1Error> {
let ip_module = py.import("ipaddress")?;
let x509_module = py.import("cryptography.x509")?;
let prefix = match data.len() {
8 => {
let num = u32::from_be_bytes(data[4..].try_into().unwrap());
ipv4_netmask(num)
}
32 => {
let num = u128::from_be_bytes(data[16..].try_into().unwrap());
ipv6_netmask(num)
}
_ => Err(PyAsn1Error::from(pyo3::exceptions::PyValueError::new_err(
format!("Invalid IPNetwork, must be 8 bytes for IPv4 and 32 bytes for IPv6. Found length: {}", data.len()),
))),
};
let base = ip_module.call_method1(
"ip_address",
(pyo3::types::PyBytes::new(py, &data[..data.len() / 2]),),
)?;
let net = format!(
"{}/{}",
base.getattr("exploded")?.extract::<&str>()?,
prefix?
);
let addr = ip_module.call_method1("ip_network", (net,))?.to_object(py);
Ok(x509_module
.call_method1("IPAddress", (addr,))?
.to_object(py))
}
fn parse_general_name(
py: pyo3::Python<'_>,
gn: GeneralName<'_>,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let x509_module = py.import("cryptography.x509")?;
let py_gn = match gn {
GeneralName::OtherName(data) => {
let oid = x509_module
.call_method1("ObjectIdentifier", (data.type_id.to_string(),))?
.to_object(py);
x509_module
.call_method1("OtherName", (oid, data.value.data()))?
.to_object(py)
}
GeneralName::RFC822Name(data) => x509_module
.getattr("RFC822Name")?
.call_method1("_init_without_validation", (data.0,))?
.to_object(py),
GeneralName::DNSName(data) => x509_module
.getattr("DNSName")?
.call_method1("_init_without_validation", (data.0,))?
.to_object(py),
GeneralName::DirectoryName(data) => {
let py_name = parse_name(py, &data)?;
x509_module
.call_method1("DirectoryName", (py_name,))?
.to_object(py)
}
GeneralName::UniformResourceIdentifier(data) => x509_module
.getattr("UniformResourceIdentifier")?
.call_method1("_init_without_validation", (data.0,))?
.to_object(py),
GeneralName::IPAddress(data) => {
let ip_module = py.import("ipaddress")?;
if data.len() == 4 || data.len() == 16 {
let addr = ip_module.call_method1("ip_address", (data,))?.to_object(py);
x509_module
.call_method1("IPAddress", (addr,))?
.to_object(py)
} else {
// if it's not an IPv4 or IPv6 we assume it's an IPNetwork and
// verify length in this function.
create_ip_network(py, data)?
}
}
GeneralName::RegisteredID(data) => {
let oid = x509_module
.call_method1("ObjectIdentifier", (data.to_string(),))?
.to_object(py);
x509_module
.call_method1("RegisteredID", (oid,))?
.to_object(py)
}
_ => {
return Err(PyAsn1Error::from(pyo3::PyErr::from_instance(
x509_module.call_method1(
"UnsupportedGeneralNameType",
("x400Address/EDIPartyName are not supported types",),
)?,
)))
}
};
Ok(py_gn)
}
fn parse_general_names<'a>(
py: pyo3::Python<'_>,
gn_seq: asn1::SequenceOf<'a, GeneralName<'a>>,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let gns = pyo3::types::PyList::empty(py);
for gn in gn_seq {
let py_gn = parse_general_name(py, gn)?;
gns.append(py_gn)?;
}
Ok(gns.to_object(py))
}
fn parse_access_descriptions(
py: pyo3::Python<'_>,
ext_data: &[u8],
) -> Result<pyo3::PyObject, PyAsn1Error> {
let x509_module = py.import("cryptography.x509")?;
let ads = pyo3::types::PyList::empty(py);
for access in asn1::parse_single::<asn1::SequenceOf<'_, AccessDescription<'_>>>(ext_data)? {
let py_oid = x509_module
.call_method1("ObjectIdentifier", (access.access_method.to_string(),))?
.to_object(py);
let gn = parse_general_name(py, access.access_location)?;
let ad = x509_module
.call1("AccessDescription", (py_oid, gn))?
.to_object(py);
ads.append(ad)?;
}
Ok(ads.to_object(py))
}
struct TLSReader<'a> {
data: &'a [u8],
}
impl<'a> TLSReader<'a> {
fn new(data: &'a [u8]) -> TLSReader<'a> {
TLSReader { data }
}
fn is_empty(&self) -> bool {
self.data.is_empty()
}
fn read_byte(&mut self) -> Result<u8, PyAsn1Error> {
Ok(self.read_exact(1)?[0])
}
fn read_exact(&mut self, length: usize) -> Result<&'a [u8], PyAsn1Error> {
if length > self.data.len() {
return Err(PyAsn1Error::from(pyo3::exceptions::PyValueError::new_err(
"Invalid SCT length",
)));
}
let (result, data) = self.data.split_at(length);
self.data = data;
Ok(result)
}
fn read_length_prefixed(&mut self) -> Result<TLSReader<'a>, PyAsn1Error> {
let length = u16::from_be_bytes(self.read_exact(2)?.try_into().unwrap());
Ok(TLSReader::new(self.read_exact(length.into())?))
}
}
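// A minimal sketch (not from the original file) of the framing TLSReader
// walks: each level is a big-endian u16 length followed by that many bytes.
//
//   // outer list claims 3 bytes; inside it, a single 1-byte item
//   let mut outer = TLSReader::new(&[0x00, 0x03, 0x00, 0x01, 0xAB])
//       .read_length_prefixed()?;
//   let mut item = outer.read_length_prefixed()?;
//   assert_eq!(item.read_byte()?, 0xAB);
//   assert!(outer.is_empty());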
#[derive(Clone)]
pub(crate) enum LogEntryType {
Certificate,
PreCertificate,
}
#[pyo3::prelude::pyclass]
struct Sct {
log_id: [u8; 32],
timestamp: u64,
entry_type: LogEntryType,
sct_data: Vec<u8>,
}
#[pyo3::prelude::pymethods]
impl Sct {
#[getter]
fn version<'p>(&self, py: pyo3::Python<'p>) -> pyo3::PyResult<&'p pyo3::PyAny> {
py.import("cryptography.x509.certificate_transparency")?
.getattr("Version")?
.getattr("v1")
}
#[getter]
fn log_id(&self) -> &[u8] {
&self.log_id
}
#[getter]
fn timestamp<'p>(&self, py: pyo3::Python<'p>) -> pyo3::PyResult<&'p pyo3::PyAny> {
let datetime_class = py.import("datetime")?.getattr("datetime")?;
datetime_class
.call_method1("utcfromtimestamp", (self.timestamp / 1000,))?
.call_method(
"replace",
(),
Some(vec![("microsecond", self.timestamp % 1000 * 1000)].into_py_dict(py)),
)
}
#[getter]
fn entry_type<'p>(&self, py: pyo3::Python<'p>) -> pyo3::PyResult<&'p pyo3::PyAny> {
let et_class = py
.import("cryptography.x509.certificate_transparency")?
.getattr("LogEntryType")?;
let attr_name = match self.entry_type {
LogEntryType::Certificate => "X509_CERTIFICATE",
LogEntryType::PreCertificate => "PRE_CERTIFICATE",
};
et_class.getattr(attr_name)
}
}
#[pyo3::prelude::pyproto]
impl pyo3::class::basic::PyObjectProtocol for Sct {
fn __richcmp__(
&self,
other: pyo3::pycell::PyRef<Sct>,
op: pyo3::class::basic::CompareOp,
) -> pyo3::PyResult<bool> {
match op {
pyo3::class::basic::CompareOp::Eq => Ok(self.sct_data == other.sct_data),
pyo3::class::basic::CompareOp::Ne => Ok(self.sct_data != other.sct_data),
_ => Err(pyo3::exceptions::PyTypeError::new_err(
"SCTs cannot be ordered",
)),
}
}
fn __hash__(&self) -> u64 {
let mut hasher = DefaultHasher::new();
self.sct_data.hash(&mut hasher);
hasher.finish()
}
}
#[pyo3::prelude::pyfunction]
fn encode_precertificate_signed_certificate_timestamps(
py: pyo3::Python<'_>,
extension: &pyo3::PyAny,
) -> pyo3::PyResult<pyo3::PyObject> {
let mut length = 0;
for sct in extension.iter()? {
let sct = sct?.downcast::<pyo3::pycell::PyCell<Sct>>()?;
length += sct.borrow().sct_data.len() + 2;
}
let mut result = vec![];
result.extend_from_slice(&(length as u16).to_be_bytes());
for sct in extension.iter()? {
let sct = sct?.downcast::<pyo3::pycell::PyCell<Sct>>()?;
result.extend_from_slice(&(sct.borrow().sct_data.len() as u16).to_be_bytes());
result.extend_from_slice(&sct.borrow().sct_data);
}
Ok(pyo3::types::PyBytes::new(py, &asn1::write_single(&result.as_slice())).to_object(py))
}
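// Wire layout produced above, handed to asn1::write_single (which wraps the
// bytes in a DER OCTET STRING):
//
//   u16 total length of the SCT list
//   repeated per SCT: u16 length, then the raw SCT bytes
//
// parse_scts below unwinds exactly this framing via read_length_prefixed.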
pub(crate) fn parse_scts(
py: pyo3::Python<'_>,
data: &[u8],
entry_type: LogEntryType,
) -> Result<pyo3::PyObject, PyAsn1Error> {
let mut reader = TLSReader::new(data).read_length_prefixed()?;
let py_scts = pyo3::types::PyList::empty(py);
while !reader.is_empty() {
let mut sct_data = reader.read_length_prefixed()?;
let raw_sct_data = sct_data.data.to_vec();
let version = sct_data.read_byte()?;
if version != 0 {
return Err(PyAsn1Error::from(pyo3::exceptions::PyValueError::new_err(
"Invalid SCT version",
)));
}
let log_id = sct_data.read_exact(32)?.try_into().unwrap();
let timestamp = u64::from_be_bytes(sct_data.read_exact(8)?.try_into().unwrap());
let _extensions = sct_data.read_length_prefixed()?;
let _sig_alg = sct_data.read_exact(2)?;
let _signature = sct_data.read_length_prefixed()?;
let sct = Sct {
log_id,
timestamp,
entry_type: entry_type.clone(),
sct_data: raw_sct_data,
};
py_scts.append(pyo3::PyCell::new(py, sct)?)?;
}
Ok(py_scts.to_object(py))
}
#[pyo3::prelude::pyfunction]
fn parse_x509_extension(
py: pyo3::Python<'_>,
der_oid: &[u8],
ext_data: &[u8],
) -> Result<pyo3::PyObject, PyAsn1Error> {
let oid = asn1::ObjectIdentifier::from_der(der_oid).unwrap();
let x509_module = py.import("cryptography.x509")?;
if oid == *SUBJECT_ALTERNATIVE_NAME_OID {
let gn_seq = asn1::parse_single::<asn1::SequenceOf<'_, GeneralName<'_>>>(ext_data)?;
let sans = parse_general_names(py, gn_seq)?;
Ok(x509_module
.call1("SubjectAlternativeName", (sans,))?
.to_object(py))
} else if oid == *ISSUER_ALTERNATIVE_NAME_OID {
let gn_seq = asn1::parse_single::<asn1::SequenceOf<'_, GeneralName<'_>>>(ext_data)?;
let ians = parse_general_names(py, gn_seq)?;
Ok(x509_module
.call1("IssuerAlternativeName", (ians,))?
.to_object(py))
} else if oid == *TLS_FEATURE_OID {
let tls_feature_type_to_enum = py
.import("cryptography.x509.extensions")?
.getattr("_TLS_FEATURE_TYPE_TO_ENUM")?;
let features = pyo3::types::PyList::empty(py);
for feature in asn1::parse_single::<asn1::SequenceOf<'_, u64>>(ext_data)? {
let py_feature = tls_feature_type_to_enum.get_item(feature.to_object(py))?;
features.append(py_feature)?;
}
Ok(x509_module.call1("TLSFeature", (features,))?.to_object(py))
} else if oid == *SUBJECT_KEY_IDENTIFIER_OID {
let identifier = asn1::parse_single::<&[u8]>(ext_data)?;
Ok(x509_module
.call1("SubjectKeyIdentifier", (identifier,))?
.to_object(py))
} else if oid == *EXTENDED_KEY_USAGE_OID {
let ekus = pyo3::types::PyList::empty(py);
for oid in asn1::parse_single::<asn1::SequenceOf<'_, asn1::ObjectIdentifier<'_>>>(ext_data)?
{
let oid_obj = x509_module.call_method1("ObjectIdentifier", (oid.to_string(),))?;
ekus.append(oid_obj)?;
}
Ok(x509_module
.call1("ExtendedKeyUsage", (ekus,))?
.to_object(py))
} else if oid == *KEY_USAGE_OID {
let kus = asn1::parse_single::<asn1::BitString<'_>>(ext_data)?;
let digital_signature = kus.has_bit_set(0);
let content_commitment = kus.has_bit_set(1);
let key_encipherment = kus.has_bit_set(2);
let data_encipherment = kus.has_bit_set(3);
let key_agreement = kus.has_bit_set(4);
let key_cert_sign = kus.has_bit_set(5);
let crl_sign = kus.has_bit_set(6);
let encipher_only = kus.has_bit_set(7);
let decipher_only = kus.has_bit_set(8);
Ok(x509_module
.call1(
"KeyUsage",
(
digital_signature,
content_commitment,
key_encipherment,
data_encipherment,
key_agreement,
key_cert_sign,
crl_sign,
encipher_only,
decipher_only,
),
)?
.to_object(py))
} else if oid == *AUTHORITY_INFORMATION_ACCESS_OID {
let ads = parse_access_descriptions(py, ext_data)?;
Ok(x509_module
.call1("AuthorityInformationAccess", (ads,))?
.to_object(py))
} else if oid == *SUBJECT_INFORMATION_ACCESS_OID {
let ads = parse_access_descriptions(py, ext_data)?;
Ok(x509_module
.call1("SubjectInformationAccess", (ads,))?
.to_object(py))
} else if oid == *CERTIFICATE_POLICIES_OID {
let cp = parse_cp(py, ext_data)?;
Ok(x509_module
.call_method1("CertificatePolicies", (cp,))?
.to_object(py))
} else if oid == *POLICY_CONSTRAINTS_OID {
let pc = asn1::parse_single::<PolicyConstraints>(ext_data)?;
Ok(x509_module
.call1(
"PolicyConstraints",
(pc.require_explicit_policy, pc.inhibit_policy_mapping),
)?
.to_object(py))
} else if oid == *PRECERT_POISON_OID {
asn1::parse_single::<()>(ext_data)?;
Ok(x509_module.call0("PrecertPoison")?.to_object(py))
} else if oid == *OCSP_NO_CHECK_OID {
asn1::parse_single::<()>(ext_data)?;
Ok(x509_module.call0("OCSPNoCheck")?.to_object(py))
} else if oid == *INHIBIT_ANY_POLICY_OID {
let bignum = asn1::parse_single::<asn1::BigUint<'_>>(ext_data)?;
let pynum = big_asn1_uint_to_py(py, bignum)?;
Ok(x509_module
.call1("InhibitAnyPolicy", (pynum,))?
.to_object(py))
} else if oid == *BASIC_CONSTRAINTS_OID {
let bc = asn1::parse_single::<BasicConstraints>(ext_data)?;
Ok(x509_module
.call1("BasicConstraints", (bc.ca, bc.path_length))?
.to_object(py))
} else if oid == *AUTHORITY_KEY_IDENTIFIER_OID {
Ok(parse_authority_key_identifier(py, ext_data)?.to_object(py))
} else if oid == *CRL_DISTRIBUTION_POINTS_OID {
let dp = parse_distribution_points(py, ext_data)?;
Ok(x509_module
.call1("CRLDistributionPoints", (dp,))?
.to_object(py))
} else if oid == *FRESHEST_CRL_OID {
Ok(x509_module
.call1("FreshestCRL", (parse_distribution_points(py, ext_data)?,))?
.to_object(py))
} else if oid == *PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS_OID {
let contents = asn1::parse_single::<&[u8]>(ext_data)?;
let scts = parse_scts(py, contents, LogEntryType::PreCertificate)?;
Ok(x509_module
.call1("PrecertificateSignedCertificateTimestamps", (scts,))?
.to_object(py))
} else if oid == *NAME_CONSTRAINTS_OID {
let nc = asn1::parse_single::<NameConstraints<'_>>(ext_data)?;
let permitted_subtrees = match nc.permitted_subtrees {
Some(data) => parse_general_subtrees(py, data)?,
None => py.None(),
};
let excluded_subtrees = match nc.excluded_subtrees {
Some(data) => parse_general_subtrees(py, data)?,
None => py.None(),
};
Ok(x509_module
.call1("NameConstraints", (permitted_subtrees, excluded_subtrees))?
.to_object(py))
} else {
Ok(py.None())
}
}
#[pyo3::prelude::pyfunction]
pub(crate) fn parse_crl_entry_extension(
py: pyo3::Python<'_>,
der_oid: &[u8],
ext_data: &[u8],
) -> Result<pyo3::PyObject, PyAsn1Error> {
let oid = asn1::ObjectIdentifier::from_der(der_oid).unwrap();
let x509_module = py.import("cryptography.x509")?;
if oid == *CRL_REASON_OID {
let flag_name = match asn1::parse_single::<asn1::Enumerated>(ext_data)?.value() {
0 => "unspecified",
1 => "key_compromise",
2 => "ca_compromise",
3 => "affiliation_changed",
4 => "superseded",
5 => "cessation_of_operation",
6 => "certificate_hold",
8 => "remove_from_crl",
9 => "privilege_withdrawn",
10 => "aa_compromise",
value => {
return Err(PyAsn1Error::from(pyo3::exceptions::PyValueError::new_err(
format!("Unsupported reason code: {}", value),
)))
}
};
let flag = x509_module.getattr("ReasonFlags")?.getattr(flag_name)?;
Ok(x509_module.call1("CRLReason", (flag,))?.to_object(py))
} else if oid == *CERTIFICATE_ISSUER_OID {
let gn_seq = asn1::parse_single::<asn1::SequenceOf<'_, GeneralName<'_>>>(ext_data)?;
let gns = parse_general_names(py, gn_seq)?;
Ok(x509_module
.call1("CertificateIssuer", (gns,))?
.to_object(py))
} else if oid == *INVALIDITY_DATE_OID {
let time = asn1::parse_single::<asn1::GeneralizedTime>(ext_data)?;
let py_dt = chrono_to_py(py, time.as_chrono())?;
Ok(x509_module.call1("InvalidityDate", (py_dt,))?.to_object(py))
} else {
Ok(py.None())
}
}
#[pyo3::prelude::pyfunction]
fn parse_crl_extension(
py: pyo3::Python<'_>,
der_oid: &[u8],
ext_data: &[u8],
) -> Result<pyo3::PyObject, PyAsn1Error> {
let oid = asn1::ObjectIdentifier::from_der(der_oid).unwrap();
let x509_module = py.import("cryptography.x509")?;
if oid == *CRL_NUMBER_OID {
let bignum = asn1::parse_single::<asn1::BigUint<'_>>(ext_data)?;
let pynum = big_asn1_uint_to_py(py, bignum)?;
Ok(x509_module.call1("CRLNumber", (pynum,))?.to_object(py))
} else if oid == *DELTA_CRL_INDICATOR_OID {
let bignum = asn1::parse_single::<asn1::BigUint<'_>>(ext_data)?;
let pynum = big_asn1_uint_to_py(py, bignum)?;
Ok(x509_module
.call1("DeltaCRLIndicator", (pynum,))?
.to_object(py))
} else if oid == *ISSUER_ALTERNATIVE_NAME_OID {
let gn_seq = asn1::parse_single::<asn1::SequenceOf<'_, GeneralName<'_>>>(ext_data)?;
let ians = parse_general_names(py, gn_seq)?;
Ok(x509_module
.call1("IssuerAlternativeName", (ians,))?
.to_object(py))
} else if oid == *AUTHORITY_INFORMATION_ACCESS_OID {
let ads = parse_access_descriptions(py, ext_data)?;
Ok(x509_module
.call1("AuthorityInformationAccess", (ads,))?
.to_object(py))
} else if oid == *AUTHORITY_KEY_IDENTIFIER_OID {
Ok(parse_authority_key_identifier(py, ext_data)?.to_object(py))
} else if oid == *ISSUING_DISTRIBUTION_POINT_OID {
let idp = asn1::parse_single::<IssuingDistributionPoint<'_>>(ext_data)?;
let (full_name, relative_name) = match idp.distribution_point {
Some(data) => parse_distribution_point_name(py, data)?,
None => (py.None(), py.None()),
};
let reasons = parse_distribution_point_reasons(py, idp.only_some_reasons)?;
Ok(x509_module
.call1(
"IssuingDistributionPoint",
(
full_name,
relative_name,
idp.only_contains_user_certs,
idp.only_contains_ca_certs,
reasons,
idp.indirect_crl,
idp.only_contains_attribute_certs,
),
)?
.to_object(py))
} else if oid == *FRESHEST_CRL_OID {
Ok(x509_module
.call1("FreshestCRL", (parse_distribution_points(py, ext_data)?,))?
.to_object(py))
} else {
Ok(py.None())
}
}
pub(crate) fn create_submodule(py: pyo3::Python<'_>) -> pyo3::PyResult<&pyo3::prelude::PyModule> {
let submod = pyo3::prelude::PyModule::new(py, "x509")?;
submod.add_wrapped(pyo3::wrap_pyfunction!(parse_x509_extension))?;
submod.add_wrapped(pyo3::wrap_pyfunction!(parse_crl_entry_extension))?;
submod.add_wrapped(pyo3::wrap_pyfunction!(parse_crl_extension))?;
submod.add_wrapped(pyo3::wrap_pyfunction!(
encode_precertificate_signed_certificate_timestamps
))?;
submod.add_class::<Sct>()?;
Ok(submod)
}

// --- file 115c17767599fafbf68cc851cd431469aca27c54 ---
crate::util_macros::testcase!(
(|mut glue: multisql::Glue| {
crate::util_macros::execute!(glue, "CREATE TABLE Test (id INTEGER AUTO_INCREMENT NOT NULL, name TEXT)");
crate::util_macros::execute!(glue, "INSERT INTO Test (name) VALUES ('test1')");
crate::util_macros::assert_select!(glue, r#"
SELECT
*
FROM
Test
"# => id = I64, name = Str:
(1, String::from("test1"))
);
crate::util_macros::execute!(glue, "INSERT INTO Test (name) VALUES ('test2'), ('test3')");
crate::util_macros::assert_select!(glue, r#"
SELECT
*
FROM
Test
"# => id = I64, name = Str:
(1, String::from("test1")),
(2, String::from("test2")),
(3, String::from("test3"))
);
crate::util_macros::execute!(glue, "INSERT INTO Test (name, id) VALUES ('test4', NULL)");
crate::util_macros::assert_select!(glue, r#"
SELECT
*
FROM
Test
"# => id = I64, name = Str:
(1, String::from("test1")),
(2, String::from("test2")),
(3, String::from("test3")),
(4, String::from("test4"))
);
crate::util_macros::execute!(glue, "INSERT INTO Test (name, id) VALUES ('test5', 6)");
crate::util_macros::assert_select!(glue, r#"
SELECT
*
FROM
Test
"# => id = I64, name = Str:
(1, String::from("test1")),
(2, String::from("test2")),
(3, String::from("test3")),
(4, String::from("test4")),
(6, String::from("test5"))
);
crate::util_macros::execute!(glue, "INSERT INTO Test (name) VALUES ('test6')");
crate::util_macros::assert_select!(glue, r#"
SELECT
*
FROM
Test
"# => id = I64, name = Str:
(1, String::from("test1")),
(2, String::from("test2")),
(3, String::from("test3")),
(4, String::from("test4")),
(6, String::from("test5")),
(5, String::from("test6"))
);
crate::util_macros::execute!(glue, "INSERT INTO Test (name) VALUES ('test7')");
crate::util_macros::assert_select!(glue, r#"
SELECT
*
FROM
Test
"# => id = I64, name = Str:
(1, String::from("test1")),
(2, String::from("test2")),
(3, String::from("test3")),
(4, String::from("test4")),
(6, String::from("test5")),
(5, String::from("test6")),
(6, String::from("test7"))
);
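// Note the duplicate id 6 above: the AUTO_INCREMENT counter advances
// independently of explicitly supplied ids, so after 'test5' was inserted
// with an explicit id of 6, the generated sequence (5, then 6) catches up
// and collides with it.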
crate::util_macros::execute!(glue, "CREATE TABLE TestUnique (id INTEGER AUTO_INCREMENT NOT NULL UNIQUE, name TEXT)");
crate::util_macros::execute!(glue, "INSERT INTO TestUnique (name, id) VALUES ('test1', NULL), ('test2', 3)");
crate::util_macros::assert_select!(glue, r#"
SELECT
*
FROM
TestUnique
"# => id = I64, name = Str:
(1, String::from("test1")),
(3, String::from("test2"))
);
{
// Expected failure: multisql::ValidateError::DuplicateEntryOnUniqueField.
// The original `assert!(matches!(expr, _result))` used a bare identifier
// pattern, which binds anything and always matches; assert the error
// explicitly instead.
assert!(glue.execute("INSERT INTO TestUnique (name) VALUES ('test3'), ('test4')").is_err());
}
crate::util_macros::assert_select!(glue, r#"
SELECT
*
FROM
TestUnique
"# => id = I64, name = Str:
(1, String::from("test1")),
(3, String::from("test2"))
);
crate::util_macros::execute!(glue, "CREATE TABLE TestUniqueSecond (id INTEGER AUTO_INCREMENT NOT NULL UNIQUE, name TEXT)");
{
// Expected failure: multisql::ValidateError::DuplicateEntryOnUniqueField
// (same always-true `matches!` pattern fixed as above).
assert!(glue.execute("INSERT INTO TestUniqueSecond (name, id) VALUES ('test1', NULL), ('test2', 3), ('test3', NULL), ('test4', NULL)").is_err());
}
crate::util_macros::execute!(glue, "CREATE TABLE TestInsertSelect (id INTEGER AUTO_INCREMENT NOT NULL, name TEXT)");
crate::util_macros::execute!(glue, r#"INSERT INTO TestInsertSelect (name) SELECT name FROM Test"#);
{
// Expected failure: multisql::AlterError::UnsupportedDataTypeForAutoIncrementColumn("id", "TEXT")
// (same always-true `matches!` pattern fixed as above).
assert!(glue.execute("CREATE TABLE TestText (id TEXT AUTO_INCREMENT NOT NULL UNIQUE, name TEXT)").is_err());
}
/*crate::util_macros::assert_select!(glue, r#"
SELECT
*
FROM
TestInsertSelect
"# => id = I64, name = Str:
(1, String::from("test1")),
(2, String::from("test2")),
(3, String::from("test3")),
(4, String::from("test4")),
(5, String::from("test5")),
(6, String::from("test6")),
(7, String::from("test7"))
); Temperamental (flaky), hence left disabled.
*/
})
);

// --- file d7f31c1d971b6e5a2ceb96d64493a8228ae948a1 ---
// Issue #5886: a complex instance of issue #2687.
trait Iterator<A> {
fn next(&mut self) -> Option<A>;
}
trait IteratorUtil<A>: Sized
{
fn zip<B, U: Iterator<U>>(self, other: U) -> ZipIterator<Self, U>;
}
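// Note: the trait method above requires `U: Iterator<U>` while the impl
// below requires `U: Iterator<B>`. The impl therefore imposes a requirement
// the trait definition does not, which is what error E0276 reports.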
impl<A, T: Iterator<A>> IteratorUtil<A> for T {
fn zip<B, U: Iterator<B>>(self, other: U) -> ZipIterator<T, U> {
//~^ ERROR E0276
ZipIterator{a: self, b: other}
}
}
struct ZipIterator<T, U> {
a: T, b: U
}
fn main() {}

// --- file 7a4b4e77f31b4b542c62b262f5975c20b6101717 ---
use lazy_static::lazy_static;
use casper_engine_test_support::{
internal::{
utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT,
DEFAULT_RUN_GENESIS_REQUEST,
},
DEFAULT_ACCOUNT_ADDR,
};
use casper_execution_engine::{core::engine_state::CONV_RATE, shared::motes::Motes};
use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512};
const CONTRACT_EE_599_REGRESSION: &str = "ee_599_regression.wasm";
const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm";
const DONATION_PURSE_COPY_KEY: &str = "donation_purse_copy";
const EXPECTED_ERROR: &str = "InvalidContext";
const TRANSFER_FUNDS_KEY: &str = "transfer_funds";
const VICTIM_ADDR: AccountHash = AccountHash::new([42; 32]);
lazy_static! {
static ref VICTIM_INITIAL_FUNDS: U512 = *DEFAULT_PAYMENT * 10;
}
fn setup() -> InMemoryWasmTestBuilder {
// Creates victim account
let exec_request_1 = {
let args = runtime_args! {
"target" => VICTIM_ADDR,
"amount" => *VICTIM_INITIAL_FUNDS,
};
ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, args)
.build()
};
// Deploy contract
let exec_request_2 = {
let args = runtime_args! {
"method" => "install".to_string(),
};
ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_EE_599_REGRESSION, args)
.build()
};
let result = InMemoryWasmTestBuilder::default()
.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST)
.exec(exec_request_1)
.expect_success()
.commit()
.exec(exec_request_2)
.expect_success()
.commit()
.finish();
InMemoryWasmTestBuilder::from_result(result)
}
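// Each regression test below follows the same shape: the victim account
// forwards a call into the owner's stored contract, the transfer variant is
// expected to fail with `InvalidContext`, and the assertions verify that the
// victim lost only gas while the donation purse copy stayed empty.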
#[ignore]
#[test]
fn should_not_be_able_to_transfer_funds_with_transfer_purse_to_purse() {
let mut builder = setup();
let victim_account = builder
.get_account(VICTIM_ADDR)
.expect("should have victim account");
let default_account = builder
.get_account(*DEFAULT_ACCOUNT_ADDR)
.expect("should have default account");
let transfer_funds = default_account
.named_keys()
.get(TRANSFER_FUNDS_KEY)
.cloned()
.unwrap_or_else(|| panic!("should have {}", TRANSFER_FUNDS_KEY));
let donation_purse_copy_key = default_account
.named_keys()
.get(DONATION_PURSE_COPY_KEY)
.cloned()
.unwrap_or_else(|| panic!("should have {}", DONATION_PURSE_COPY_KEY));
let donation_purse_copy = donation_purse_copy_key.into_uref().expect("should be uref");
let exec_request_3 = {
let args = runtime_args! {
"method" => "call",
"contract_key" => transfer_funds.into_hash().expect("should be hash"),
"sub_contract_method_fwd" => "transfer_from_purse_to_purse_ext",
};
ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build()
};
let result_2 = builder.exec(exec_request_3).commit().finish();
let exec_3_response = result_2
.builder()
.get_exec_response(0)
.expect("should have response");
let gas_cost = Motes::from_gas(utils::get_exec_costs(exec_3_response)[0], CONV_RATE)
.expect("should convert");
let error_msg = result_2
.builder()
.exec_error_message(0)
.expect("should have error");
assert!(
error_msg.contains(EXPECTED_ERROR),
"Got error: {}",
error_msg
);
let victim_balance_after = result_2
.builder()
.get_purse_balance(victim_account.main_purse());
assert_eq!(
*VICTIM_INITIAL_FUNDS - gas_cost.value(),
victim_balance_after
);
assert_eq!(
result_2.builder().get_purse_balance(donation_purse_copy),
U512::zero(),
);
}
#[ignore]
#[test]
fn should_not_be_able_to_transfer_funds_with_transfer_from_purse_to_account() {
let mut builder = setup();
let victim_account = builder
.get_account(VICTIM_ADDR)
.expect("should have victim account");
let default_account = builder
.get_account(*DEFAULT_ACCOUNT_ADDR)
.expect("should have default account");
let default_account_balance = builder.get_purse_balance(default_account.main_purse());
let transfer_funds = default_account
.named_keys()
.get(TRANSFER_FUNDS_KEY)
.cloned()
.unwrap_or_else(|| panic!("should have {}", TRANSFER_FUNDS_KEY));
let donation_purse_copy_key = default_account
.named_keys()
.get(DONATION_PURSE_COPY_KEY)
.cloned()
.unwrap_or_else(|| panic!("should have {}", DONATION_PURSE_COPY_KEY));
let donation_purse_copy = donation_purse_copy_key.into_uref().expect("should be uref");
let exec_request_3 = {
let args = runtime_args! {
"method" => "call".to_string(),
"contract_key" => transfer_funds.into_hash().expect("should get key"),
"sub_contract_method_fwd" => "transfer_from_purse_to_account_ext",
};
ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build()
};
let result_2 = builder.exec(exec_request_3).commit().finish();
let exec_3_response = result_2
.builder()
.get_exec_response(0)
.expect("should have response");
let gas_cost = Motes::from_gas(utils::get_exec_costs(exec_3_response)[0], CONV_RATE)
.expect("should convert");
let error_msg = result_2
.builder()
.exec_error_message(0)
.expect("should have error");
assert!(
error_msg.contains(EXPECTED_ERROR),
"Got error: {}",
error_msg
);
let victim_balance_after = result_2
.builder()
.get_purse_balance(victim_account.main_purse());
assert_eq!(
*VICTIM_INITIAL_FUNDS - gas_cost.value(),
victim_balance_after
);
// In this variant of test `donation_purse` is left unchanged i.e. zero balance
assert_eq!(
result_2.builder().get_purse_balance(donation_purse_copy),
U512::zero(),
);
// Main purse of the contract owner is unchanged
let updated_default_account_balance = result_2
.builder()
.get_purse_balance(default_account.main_purse());
assert_eq!(
updated_default_account_balance - default_account_balance,
U512::zero(),
)
}
#[ignore]
#[test]
fn should_not_be_able_to_transfer_funds_with_transfer_to_account() {
let mut builder = setup();
let victim_account = builder
.get_account(VICTIM_ADDR)
.expect("should have victim account");
let default_account = builder
.get_account(*DEFAULT_ACCOUNT_ADDR)
.expect("should have default account");
let default_account_balance = builder.get_purse_balance(default_account.main_purse());
let transfer_funds = default_account
.named_keys()
.get(TRANSFER_FUNDS_KEY)
.cloned()
.unwrap_or_else(|| panic!("should have {}", TRANSFER_FUNDS_KEY));
let donation_purse_copy_key = default_account
.named_keys()
.get(DONATION_PURSE_COPY_KEY)
.cloned()
.unwrap_or_else(|| panic!("should have {}", DONATION_PURSE_COPY_KEY));
let donation_purse_copy = donation_purse_copy_key.into_uref().expect("should be uref");
let exec_request_3 = {
let args = runtime_args! {
"method" => "call",
"contract_key" => transfer_funds.into_hash().expect("should be hash"),
"sub_contract_method_fwd" => "transfer_to_account_ext",
};
ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build()
};
let result_2 = builder.exec(exec_request_3).commit().finish();
let exec_3_response = result_2
.builder()
.get_exec_response(0)
.expect("should have response");
let gas_cost = Motes::from_gas(utils::get_exec_costs(exec_3_response)[0], CONV_RATE)
.expect("should convert");
let error_msg = result_2
.builder()
.exec_error_message(0)
.expect("should have error");
assert!(
error_msg.contains(EXPECTED_ERROR),
"Got error: {}",
error_msg
);
let victim_balance_after = result_2
.builder()
.get_purse_balance(victim_account.main_purse());
assert_eq!(
*VICTIM_INITIAL_FUNDS - gas_cost.value(),
victim_balance_after
);
// In this variant of test `donation_purse` is left unchanged i.e. zero balance
assert_eq!(
result_2.builder().get_purse_balance(donation_purse_copy),
U512::zero(),
);
// Verify that default account's balance didn't change
let updated_default_account_balance = result_2
.builder()
.get_purse_balance(default_account.main_purse());
assert_eq!(
updated_default_account_balance - default_account_balance,
U512::zero(),
)
}
#[ignore]
#[test]
fn should_not_be_able_to_get_main_purse_in_invalid_context() {
let mut builder = setup();
let victim_account = builder
.get_account(VICTIM_ADDR)
.expect("should have victim account");
let default_account = builder
.get_account(*DEFAULT_ACCOUNT_ADDR)
.expect("should have default account");
let transfer_funds = default_account
.named_keys()
.get(TRANSFER_FUNDS_KEY)
.cloned()
.unwrap_or_else(|| panic!("should have {}", TRANSFER_FUNDS_KEY));
let exec_request_3 = {
let args = runtime_args! {
"method" => "call".to_string(),
"contract_key" => transfer_funds.into_hash().expect("should be hash"),
"sub_contract_method_fwd" => "transfer_to_account_ext",
};
ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build()
};
let victim_balance_before = builder.get_purse_balance(victim_account.main_purse());
let result_2 = builder.exec(exec_request_3).commit().finish();
let exec_3_response = result_2
.builder()
.get_exec_response(0)
.expect("should have response");
let gas_cost = Motes::from_gas(utils::get_exec_costs(exec_3_response)[0], CONV_RATE)
.expect("should convert");
let error_msg = result_2
.builder()
.exec_error_message(0)
.expect("should have error");
assert!(
error_msg.contains(EXPECTED_ERROR),
"Got error: {}",
error_msg
);
let victim_balance_after = result_2
.builder()
.get_purse_balance(victim_account.main_purse());
assert_eq!(
victim_balance_before - gas_cost.value(),
victim_balance_after
);
}
| 30.982808 | 98 | 0.655045 |
9ca13e85b533304f2f2c5c7bc5657da138556343 | 10,980 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Gamma and derived distributions.
use self::GammaRepr::*;
use self::ChiSquaredRepr::*;
#[cfg(not(test))] // only necessary for no_std
use FloatMath;
use {Open01, Rng};
use super::normal::StandardNormal;
use super::{Exp, IndependentSample, Sample};
/// The Gamma distribution `Gamma(shape, scale)`.
///
/// The density function of this distribution is
///
/// ```text
/// f(x) = x^(k - 1) * exp(-x / θ) / (Γ(k) * θ^k)
/// ```
///
/// where `Γ` is the Gamma function, `k` is the shape and `θ` is the
/// scale and both `k` and `θ` are strictly positive.
///
/// The algorithm used is that described by Marsaglia & Tsang 2000[1],
/// falling back to directly sampling from an Exponential for `shape
/// == 1`, and using the boosting technique described in [1] for
/// `shape < 1`.
///
/// [1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method
/// for Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3
/// (September 2000),
/// 363-372. DOI:[10.1145/358407.358414](http://doi.acm.org/10.1145/358407.358414)
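///
/// # Example
///
/// A minimal sampling sketch; the `rand::distributions` import path and the
/// `task_rng` constructor are assumptions about the surrounding crate and may
/// differ between versions, so the doctest is ignored.
///
/// ```rust,ignore
/// use rand::distributions::{Gamma, IndependentSample};
///
/// let gamma = Gamma::new(2.0, 5.0);
/// // `task_rng` is an assumed RNG constructor; substitute any `Rng`.
/// let v = gamma.ind_sample(&mut rand::task_rng());
/// println!("{} is from a Gamma(2, 5) distribution", v);
/// ```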
pub struct Gamma {
repr: GammaRepr,
}
enum GammaRepr {
Large(GammaLargeShape),
One(Exp),
Small(GammaSmallShape),
}
// These two helpers could be made public, but saving the
// match-on-Gamma-enum branch from using them directly (e.g. if one
// knows that the shape is always > 1) doesn't appear to be much
// faster.
/// Gamma distribution where the shape parameter is less than 1.
///
/// Note, samples from this require a compulsory floating-point `pow`
/// call, which makes it significantly slower than sampling from a
/// gamma distribution where the shape parameter is greater than or
/// equal to 1.
///
/// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
struct GammaSmallShape {
inv_shape: f64,
large_shape: GammaLargeShape,
}
/// Gamma distribution where the shape parameter is larger than 1.
///
/// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
struct GammaLargeShape {
scale: f64,
c: f64,
d: f64,
}
impl Gamma {
/// Construct an object representing the `Gamma(shape, scale)`
/// distribution.
///
/// Panics if `shape <= 0` or `scale <= 0`.
pub fn new(shape: f64, scale: f64) -> Gamma {
assert!(shape > 0.0, "Gamma::new called with shape <= 0");
assert!(scale > 0.0, "Gamma::new called with scale <= 0");
let repr = match shape {
1.0 => One(Exp::new(1.0 / scale)),
0.0...1.0 => Small(GammaSmallShape::new_raw(shape, scale)),
_ => Large(GammaLargeShape::new_raw(shape, scale)),
};
Gamma { repr: repr }
}
}
impl GammaSmallShape {
fn new_raw(shape: f64, scale: f64) -> GammaSmallShape {
GammaSmallShape {
inv_shape: 1. / shape,
large_shape: GammaLargeShape::new_raw(shape + 1.0, scale),
}
}
}
impl GammaLargeShape {
fn new_raw(shape: f64, scale: f64) -> GammaLargeShape {
let d = shape - 1. / 3.;
GammaLargeShape {
scale: scale,
c: 1. / (9. * d).sqrt(),
d: d,
}
}
}
impl Sample<f64> for Gamma {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 {
self.ind_sample(rng)
}
}
impl Sample<f64> for GammaSmallShape {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 {
self.ind_sample(rng)
}
}
impl Sample<f64> for GammaLargeShape {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 {
self.ind_sample(rng)
}
}
impl IndependentSample<f64> for Gamma {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
match self.repr {
Small(ref g) => g.ind_sample(rng),
One(ref g) => g.ind_sample(rng),
Large(ref g) => g.ind_sample(rng),
}
}
}
impl IndependentSample<f64> for GammaSmallShape {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
let Open01(u) = rng.gen::<Open01<f64>>();
self.large_shape.ind_sample(rng) * u.powf(self.inv_shape)
}
}
impl IndependentSample<f64> for GammaLargeShape {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
loop {
let StandardNormal(x) = rng.gen::<StandardNormal>();
let v_cbrt = 1.0 + self.c * x;
if v_cbrt <= 0.0 {
// a^3 <= 0 iff a <= 0
continue;
}
let v = v_cbrt * v_cbrt * v_cbrt;
let Open01(u) = rng.gen::<Open01<f64>>();
let x_sqr = x * x;
if u < 1.0 - 0.0331 * x_sqr * x_sqr ||
u.ln() < 0.5 * x_sqr + self.d * (1.0 - v + v.ln()) {
return self.d * v * self.scale;
}
}
}
}
/// The chi-squared distribution `χ²(k)`, where `k` is the degrees of
/// freedom.
///
/// For `k > 0` integral, this distribution is the sum of the squares
/// of `k` independent standard normal random variables. For other
/// `k`, this uses the equivalent characterization `χ²(k) = Gamma(k/2,
/// 2)`.
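///
/// # Example
///
/// A sampling sketch under the same assumptions as the `Gamma` example
/// (import path and RNG constructor may differ between versions):
///
/// ```rust,ignore
/// use rand::distributions::{ChiSquared, IndependentSample};
///
/// let chi = ChiSquared::new(11.0);
/// // `task_rng` is an assumed RNG constructor.
/// let v = chi.ind_sample(&mut rand::task_rng());
/// println!("{} is from a chi-squared(11) distribution", v);
/// ```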
pub struct ChiSquared {
repr: ChiSquaredRepr,
}
enum ChiSquaredRepr {
// k == 1, Gamma(alpha, ..) is particularly slow for alpha < 1,
// e.g. when alpha = 1/2 as it would be for this case, so special-
// casing and using the definition of N(0,1)^2 is faster.
DoFExactlyOne,
DoFAnythingElse(Gamma),
}
impl ChiSquared {
/// Create a new chi-squared distribution with degrees-of-freedom
    /// `k`. Panics if `k <= 0`.
pub fn new(k: f64) -> ChiSquared {
let repr = if k == 1.0 {
DoFExactlyOne
} else {
            assert!(k > 0.0, "ChiSquared::new called with `k` <= 0");
DoFAnythingElse(Gamma::new(0.5 * k, 2.0))
};
ChiSquared { repr: repr }
}
}
impl Sample<f64> for ChiSquared {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 {
self.ind_sample(rng)
}
}
impl IndependentSample<f64> for ChiSquared {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
match self.repr {
DoFExactlyOne => {
// k == 1 => N(0,1)^2
let StandardNormal(norm) = rng.gen::<StandardNormal>();
norm * norm
}
DoFAnythingElse(ref g) => g.ind_sample(rng),
}
}
}
/// The Fisher F distribution `F(m, n)`.
///
/// This distribution is equivalent to the ratio of two normalised
/// chi-squared distributions, that is, `F(m,n) = (χ²(m)/m) /
/// (χ²(n)/n)`.
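///
/// # Example
///
/// A sampling sketch under the same assumptions as the `Gamma` example:
///
/// ```rust,ignore
/// use rand::distributions::{FisherF, IndependentSample};
///
/// let f = FisherF::new(2.0, 32.0);
/// // `task_rng` is an assumed RNG constructor.
/// let v = f.ind_sample(&mut rand::task_rng());
/// println!("{} is from an F(2, 32) distribution", v);
/// ```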
pub struct FisherF {
numer: ChiSquared,
denom: ChiSquared,
// denom_dof / numer_dof so that this can just be a straight
// multiplication, rather than a division.
dof_ratio: f64,
}
impl FisherF {
    /// Create a new `FisherF` distribution with the given
    /// parameters. Panics if either `m` or `n` is not positive.
pub fn new(m: f64, n: f64) -> FisherF {
        assert!(m > 0.0, "FisherF::new called with `m <= 0`");
        assert!(n > 0.0, "FisherF::new called with `n <= 0`");
FisherF {
numer: ChiSquared::new(m),
denom: ChiSquared::new(n),
dof_ratio: n / m,
}
}
}
impl Sample<f64> for FisherF {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 {
self.ind_sample(rng)
}
}
impl IndependentSample<f64> for FisherF {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
self.numer.ind_sample(rng) / self.denom.ind_sample(rng) * self.dof_ratio
}
}
/// The Student t distribution, `t(nu)`, where `nu` is the degrees of
/// freedom.
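///
/// # Example
///
/// A sampling sketch under the same assumptions as the `Gamma` example:
///
/// ```rust,ignore
/// use rand::distributions::{IndependentSample, StudentT};
///
/// let t = StudentT::new(11.0);
/// // `task_rng` is an assumed RNG constructor.
/// let v = t.ind_sample(&mut rand::task_rng());
/// println!("{} is from a t(11) distribution", v);
/// ```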
pub struct StudentT {
chi: ChiSquared,
dof: f64,
}
impl StudentT {
/// Create a new Student t distribution with `n` degrees of
/// freedom. Panics if `n <= 0`.
pub fn new(n: f64) -> StudentT {
assert!(n > 0.0, "StudentT::new called with `n <= 0`");
StudentT {
chi: ChiSquared::new(n),
dof: n,
}
}
}
impl Sample<f64> for StudentT {
fn sample<R: Rng>(&mut self, rng: &mut R) -> f64 {
self.ind_sample(rng)
}
}
impl IndependentSample<f64> for StudentT {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> f64 {
let StandardNormal(norm) = rng.gen::<StandardNormal>();
norm * (self.dof / self.chi.ind_sample(rng)).sqrt()
}
}
#[cfg(test)]
mod tests {
use distributions::{IndependentSample, Sample};
use super::{ChiSquared, FisherF, StudentT};
#[test]
fn test_chi_squared_one() {
let mut chi = ChiSquared::new(1.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
chi.sample(&mut rng);
chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_small() {
let mut chi = ChiSquared::new(0.5);
let mut rng = ::test::rng();
for _ in 0..1000 {
chi.sample(&mut rng);
chi.ind_sample(&mut rng);
}
}
#[test]
fn test_chi_squared_large() {
let mut chi = ChiSquared::new(30.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
chi.sample(&mut rng);
chi.ind_sample(&mut rng);
}
}
#[test]
#[should_panic]
fn test_chi_squared_invalid_dof() {
ChiSquared::new(-1.0);
}
#[test]
fn test_f() {
let mut f = FisherF::new(2.0, 32.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
f.sample(&mut rng);
f.ind_sample(&mut rng);
}
}
#[test]
fn test_t() {
let mut t = StudentT::new(11.0);
let mut rng = ::test::rng();
for _ in 0..1000 {
t.sample(&mut rng);
t.ind_sample(&mut rng);
}
}
}
#[cfg(test)]
mod bench {
extern crate test;
use self::test::Bencher;
use std::mem::size_of;
use distributions::IndependentSample;
use super::Gamma;
#[bench]
fn bench_gamma_large_shape(b: &mut Bencher) {
let gamma = Gamma::new(10., 1.0);
let mut rng = ::test::weak_rng();
b.iter(|| {
for _ in 0..::RAND_BENCH_N {
gamma.ind_sample(&mut rng);
}
});
b.bytes = size_of::<f64>() as u64 * ::RAND_BENCH_N;
}
#[bench]
fn bench_gamma_small_shape(b: &mut Bencher) {
let gamma = Gamma::new(0.1, 1.0);
let mut rng = ::test::weak_rng();
b.iter(|| {
for _ in 0..::RAND_BENCH_N {
gamma.ind_sample(&mut rng);
}
});
b.bytes = size_of::<f64>() as u64 * ::RAND_BENCH_N;
}
}
| 28.445596 | 82 | 0.566302 |
6236fd0e0e56345882a71eeebeb989586d75e645 | 25,638 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Low-level bindings to the libuv library.
*
* This module contains a set of direct, 'bare-metal' wrappers around
* the libuv C-API.
*
* We're not bothering yet to redefine uv's structs as Rust structs
* because they are quite large and change often between versions.
 * The maintenance burden is just too high. Instead we use uv's
 * `uv_handle_size` and `uv_req_size` to find the correct size of the
 * structs and allocate them on the heap. This can be revisited later.
 *
 * There is also a collection of helper functions to ease interacting
 * with the low-level API.
 *
 * As new functionality existing in uv.h is added to the Rust stdlib,
 * the mappings should be added to this module.
*/
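// A sketch of the allocation-and-run pattern these bindings imply. The
// `timer_cb` callback is assumed to be an `extern "C" fn` defined by the
// caller, and error handling is omitted:
//
// unsafe {
//     let uv_loop = loop_new();
//     let timer = malloc_handle(UV_TIMER);
//     uv_timer_init(uv_loop, timer);
//     uv_timer_start(timer, timer_cb, 1000, 0);
//     uv_run(uv_loop, RUN_DEFAULT);
//     free_handle(timer);
//     uv_loop_delete(uv_loop);
// }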
#![allow(non_camel_case_types)] // C types
use libc::{size_t, c_int, c_uint, c_void, c_char, c_double};
use libc::{ssize_t, sockaddr, free, addrinfo};
use libc;
use std::rt::libc_heap::malloc_raw;
#[cfg(test)]
use libc::uintptr_t;
pub use self::errors::{EACCES, ECONNREFUSED, ECONNRESET, EPIPE, ECONNABORTED,
ECANCELED, EBADF, ENOTCONN, ENOENT, EADDRNOTAVAIL};
pub static OK: c_int = 0;
pub static EOF: c_int = -4095;
pub static UNKNOWN: c_int = -4094;
// uv-errno.h redefines error codes for windows, but not for unix...
// https://github.com/joyent/libuv/blob/master/include/uv-errno.h
#[cfg(windows)]
pub mod errors {
use libc::c_int;
pub static EACCES: c_int = -4092;
pub static ECONNREFUSED: c_int = -4078;
pub static ECONNRESET: c_int = -4077;
pub static ENOENT: c_int = -4058;
pub static ENOTCONN: c_int = -4053;
pub static EPIPE: c_int = -4047;
pub static ECONNABORTED: c_int = -4079;
pub static ECANCELED: c_int = -4081;
pub static EBADF: c_int = -4083;
pub static EADDRNOTAVAIL: c_int = -4090;
}
#[cfg(not(windows))]
pub mod errors {
use libc;
use libc::c_int;
pub static EACCES: c_int = -libc::EACCES;
pub static ECONNREFUSED: c_int = -libc::ECONNREFUSED;
pub static ECONNRESET: c_int = -libc::ECONNRESET;
pub static ENOENT: c_int = -libc::ENOENT;
pub static ENOTCONN: c_int = -libc::ENOTCONN;
pub static EPIPE: c_int = -libc::EPIPE;
pub static ECONNABORTED: c_int = -libc::ECONNABORTED;
pub static ECANCELED : c_int = -libc::ECANCELED;
pub static EBADF : c_int = -libc::EBADF;
pub static EADDRNOTAVAIL : c_int = -libc::EADDRNOTAVAIL;
}
pub static PROCESS_SETUID: c_int = 1 << 0;
pub static PROCESS_SETGID: c_int = 1 << 1;
pub static PROCESS_WINDOWS_VERBATIM_ARGUMENTS: c_int = 1 << 2;
pub static PROCESS_DETACHED: c_int = 1 << 3;
pub static PROCESS_WINDOWS_HIDE: c_int = 1 << 4;
pub static STDIO_IGNORE: c_int = 0x00;
pub static STDIO_CREATE_PIPE: c_int = 0x01;
pub static STDIO_INHERIT_FD: c_int = 0x02;
pub static STDIO_INHERIT_STREAM: c_int = 0x04;
pub static STDIO_READABLE_PIPE: c_int = 0x10;
pub static STDIO_WRITABLE_PIPE: c_int = 0x20;
#[cfg(unix)]
pub type uv_buf_len_t = libc::size_t;
#[cfg(windows)]
pub type uv_buf_len_t = libc::c_ulong;
// see libuv/include/uv-unix.h
#[cfg(unix)]
pub struct uv_buf_t {
pub base: *u8,
pub len: uv_buf_len_t,
}
// see libuv/include/uv-win.h
#[cfg(windows)]
pub struct uv_buf_t {
pub len: uv_buf_len_t,
pub base: *u8,
}
#[repr(C)]
pub enum uv_run_mode {
RUN_DEFAULT = 0,
RUN_ONCE,
RUN_NOWAIT,
}
pub struct uv_process_options_t {
pub exit_cb: uv_exit_cb,
pub file: *libc::c_char,
pub args: **libc::c_char,
pub env: **libc::c_char,
pub cwd: *libc::c_char,
pub flags: libc::c_uint,
pub stdio_count: libc::c_int,
pub stdio: *uv_stdio_container_t,
pub uid: uv_uid_t,
pub gid: uv_gid_t,
}
// These fields are private because they must be interfaced with through the
// functions below.
pub struct uv_stdio_container_t {
flags: libc::c_int,
stream: *uv_stream_t,
}
pub type uv_handle_t = c_void;
pub type uv_req_t = c_void;
pub type uv_loop_t = c_void;
pub type uv_idle_t = c_void;
pub type uv_tcp_t = c_void;
pub type uv_udp_t = c_void;
pub type uv_connect_t = c_void;
pub type uv_connection_t = c_void;
pub type uv_write_t = c_void;
pub type uv_async_t = c_void;
pub type uv_timer_t = c_void;
pub type uv_stream_t = c_void;
pub type uv_fs_t = c_void;
pub type uv_udp_send_t = c_void;
pub type uv_getaddrinfo_t = c_void;
pub type uv_process_t = c_void;
pub type uv_pipe_t = c_void;
pub type uv_tty_t = c_void;
pub type uv_signal_t = c_void;
pub type uv_shutdown_t = c_void;
pub struct uv_timespec_t {
pub tv_sec: libc::c_long,
pub tv_nsec: libc::c_long
}
pub struct uv_stat_t {
pub st_dev: libc::uint64_t,
pub st_mode: libc::uint64_t,
pub st_nlink: libc::uint64_t,
pub st_uid: libc::uint64_t,
pub st_gid: libc::uint64_t,
pub st_rdev: libc::uint64_t,
pub st_ino: libc::uint64_t,
pub st_size: libc::uint64_t,
pub st_blksize: libc::uint64_t,
pub st_blocks: libc::uint64_t,
pub st_flags: libc::uint64_t,
pub st_gen: libc::uint64_t,
pub st_atim: uv_timespec_t,
pub st_mtim: uv_timespec_t,
pub st_ctim: uv_timespec_t,
pub st_birthtim: uv_timespec_t
}
impl uv_stat_t {
pub fn new() -> uv_stat_t {
uv_stat_t {
st_dev: 0,
st_mode: 0,
st_nlink: 0,
st_uid: 0,
st_gid: 0,
st_rdev: 0,
st_ino: 0,
st_size: 0,
st_blksize: 0,
st_blocks: 0,
st_flags: 0,
st_gen: 0,
st_atim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 },
st_mtim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 },
st_ctim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 },
st_birthtim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 }
}
}
pub fn is_file(&self) -> bool {
((self.st_mode) & libc::S_IFMT as libc::uint64_t) == libc::S_IFREG as libc::uint64_t
}
pub fn is_dir(&self) -> bool {
((self.st_mode) & libc::S_IFMT as libc::uint64_t) == libc::S_IFDIR as libc::uint64_t
}
}
pub type uv_idle_cb = extern "C" fn(handle: *uv_idle_t);
pub type uv_alloc_cb = extern "C" fn(stream: *uv_stream_t,
suggested_size: size_t,
buf: *mut uv_buf_t);
pub type uv_read_cb = extern "C" fn(stream: *uv_stream_t,
nread: ssize_t,
buf: *uv_buf_t);
pub type uv_udp_send_cb = extern "C" fn(req: *uv_udp_send_t,
status: c_int);
pub type uv_udp_recv_cb = extern "C" fn(handle: *uv_udp_t,
nread: ssize_t,
buf: *uv_buf_t,
addr: *sockaddr,
flags: c_uint);
pub type uv_close_cb = extern "C" fn(handle: *uv_handle_t);
pub type uv_walk_cb = extern "C" fn(handle: *uv_handle_t,
arg: *c_void);
pub type uv_async_cb = extern "C" fn(handle: *uv_async_t);
pub type uv_connect_cb = extern "C" fn(handle: *uv_connect_t,
status: c_int);
pub type uv_connection_cb = extern "C" fn(handle: *uv_connection_t,
status: c_int);
pub type uv_timer_cb = extern "C" fn(handle: *uv_timer_t);
pub type uv_write_cb = extern "C" fn(handle: *uv_write_t,
status: c_int);
pub type uv_getaddrinfo_cb = extern "C" fn(req: *uv_getaddrinfo_t,
status: c_int,
res: *addrinfo);
pub type uv_exit_cb = extern "C" fn(handle: *uv_process_t,
exit_status: i64,
term_signal: c_int);
pub type uv_signal_cb = extern "C" fn(handle: *uv_signal_t,
signum: c_int);
pub type uv_fs_cb = extern "C" fn(req: *uv_fs_t);
pub type uv_shutdown_cb = extern "C" fn(req: *uv_shutdown_t, status: c_int);
#[cfg(unix)] pub type uv_uid_t = libc::types::os::arch::posix88::uid_t;
#[cfg(unix)] pub type uv_gid_t = libc::types::os::arch::posix88::gid_t;
#[cfg(windows)] pub type uv_uid_t = libc::c_uchar;
#[cfg(windows)] pub type uv_gid_t = libc::c_uchar;
#[repr(C)]
#[deriving(Eq)]
pub enum uv_handle_type {
UV_UNKNOWN_HANDLE,
UV_ASYNC,
UV_CHECK,
UV_FS_EVENT,
UV_FS_POLL,
UV_HANDLE,
UV_IDLE,
UV_NAMED_PIPE,
UV_POLL,
UV_PREPARE,
UV_PROCESS,
UV_STREAM,
UV_TCP,
UV_TIMER,
UV_TTY,
UV_UDP,
UV_SIGNAL,
UV_FILE,
UV_HANDLE_TYPE_MAX
}
#[repr(C)]
#[cfg(unix)]
#[deriving(Eq)]
pub enum uv_req_type {
UV_UNKNOWN_REQ,
UV_REQ,
UV_CONNECT,
UV_WRITE,
UV_SHUTDOWN,
UV_UDP_SEND,
UV_FS,
UV_WORK,
UV_GETADDRINFO,
UV_REQ_TYPE_MAX
}
// uv_req_type may have additional fields defined by UV_REQ_TYPE_PRIVATE.
// See UV_REQ_TYPE_PRIVATE at libuv/include/uv-win.h
#[repr(C)]
#[cfg(windows)]
#[deriving(Eq)]
pub enum uv_req_type {
UV_UNKNOWN_REQ,
UV_REQ,
UV_CONNECT,
UV_WRITE,
UV_SHUTDOWN,
UV_UDP_SEND,
UV_FS,
UV_WORK,
UV_GETADDRINFO,
UV_ACCEPT,
UV_FS_EVENT_REQ,
UV_POLL_REQ,
UV_PROCESS_EXIT,
UV_READ,
UV_UDP_RECV,
UV_WAKEUP,
UV_SIGNAL_REQ,
UV_REQ_TYPE_MAX
}
#[repr(C)]
#[deriving(Eq)]
pub enum uv_membership {
UV_LEAVE_GROUP,
UV_JOIN_GROUP
}
pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void {
assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX);
let size = uv_handle_size(handle);
malloc_raw(size as uint) as *c_void
}
pub unsafe fn free_handle(v: *c_void) {
free(v as *mut c_void)
}
pub unsafe fn malloc_req(req: uv_req_type) -> *c_void {
assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX);
let size = uv_req_size(req);
malloc_raw(size as uint) as *c_void
}
pub unsafe fn free_req(v: *c_void) {
free(v as *mut c_void)
}
#[test]
fn handle_sanity_check() {
unsafe {
assert_eq!(UV_HANDLE_TYPE_MAX as uint, rust_uv_handle_type_max());
}
}
#[test]
fn request_sanity_check() {
unsafe {
assert_eq!(UV_REQ_TYPE_MAX as uint, rust_uv_req_type_max());
}
}
// FIXME Event loops ignore SIGPIPE by default.
pub unsafe fn loop_new() -> *c_void {
return rust_uv_loop_new();
}
pub unsafe fn uv_write(req: *uv_write_t,
stream: *uv_stream_t,
buf_in: &[uv_buf_t],
cb: uv_write_cb) -> c_int {
extern {
fn uv_write(req: *uv_write_t, stream: *uv_stream_t,
buf_in: *uv_buf_t, buf_cnt: c_int,
cb: uv_write_cb) -> c_int;
}
let buf_ptr = buf_in.as_ptr();
let buf_cnt = buf_in.len() as i32;
return uv_write(req, stream, buf_ptr, buf_cnt, cb);
}
pub unsafe fn uv_udp_send(req: *uv_udp_send_t,
handle: *uv_udp_t,
buf_in: &[uv_buf_t],
addr: *sockaddr,
cb: uv_udp_send_cb) -> c_int {
extern {
fn uv_udp_send(req: *uv_write_t, stream: *uv_stream_t,
buf_in: *uv_buf_t, buf_cnt: c_int, addr: *sockaddr,
cb: uv_udp_send_cb) -> c_int;
}
let buf_ptr = buf_in.as_ptr();
let buf_cnt = buf_in.len() as i32;
return uv_udp_send(req, handle, buf_ptr, buf_cnt, addr, cb);
}
pub unsafe fn get_udp_handle_from_send_req(send_req: *uv_udp_send_t) -> *uv_udp_t {
return rust_uv_get_udp_handle_from_send_req(send_req);
}
pub unsafe fn process_pid(p: *uv_process_t) -> c_int {
return rust_uv_process_pid(p);
}
pub unsafe fn set_stdio_container_flags(c: *uv_stdio_container_t,
flags: libc::c_int) {
rust_set_stdio_container_flags(c, flags);
}
pub unsafe fn set_stdio_container_fd(c: *uv_stdio_container_t,
fd: libc::c_int) {
rust_set_stdio_container_fd(c, fd);
}
pub unsafe fn set_stdio_container_stream(c: *uv_stdio_container_t,
stream: *uv_stream_t) {
rust_set_stdio_container_stream(c, stream);
}
// data access helpers
pub unsafe fn get_result_from_fs_req(req: *uv_fs_t) -> ssize_t {
rust_uv_get_result_from_fs_req(req)
}
pub unsafe fn get_ptr_from_fs_req(req: *uv_fs_t) -> *libc::c_void {
rust_uv_get_ptr_from_fs_req(req)
}
pub unsafe fn get_path_from_fs_req(req: *uv_fs_t) -> *c_char {
rust_uv_get_path_from_fs_req(req)
}
pub unsafe fn get_loop_from_fs_req(req: *uv_fs_t) -> *uv_loop_t {
rust_uv_get_loop_from_fs_req(req)
}
pub unsafe fn get_loop_from_getaddrinfo_req(req: *uv_getaddrinfo_t) -> *uv_loop_t {
rust_uv_get_loop_from_getaddrinfo_req(req)
}
pub unsafe fn get_loop_for_uv_handle<T>(handle: *T) -> *c_void {
return rust_uv_get_loop_for_uv_handle(handle as *c_void);
}
pub unsafe fn get_stream_handle_from_connect_req(connect: *uv_connect_t) -> *uv_stream_t {
return rust_uv_get_stream_handle_from_connect_req(connect);
}
pub unsafe fn get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t {
return rust_uv_get_stream_handle_from_write_req(write_req);
}
pub unsafe fn get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void {
rust_uv_get_data_for_uv_loop(loop_ptr)
}
pub unsafe fn set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void) {
rust_uv_set_data_for_uv_loop(loop_ptr, data);
}
pub unsafe fn get_data_for_uv_handle<T>(handle: *T) -> *c_void {
return rust_uv_get_data_for_uv_handle(handle as *c_void);
}
pub unsafe fn set_data_for_uv_handle<T, U>(handle: *T, data: *U) {
rust_uv_set_data_for_uv_handle(handle as *c_void, data as *c_void);
}
pub unsafe fn get_data_for_req<T>(req: *T) -> *c_void {
return rust_uv_get_data_for_req(req as *c_void);
}
pub unsafe fn set_data_for_req<T, U>(req: *T, data: *U) {
rust_uv_set_data_for_req(req as *c_void, data as *c_void);
}
pub unsafe fn populate_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t) {
rust_uv_populate_uv_stat(req_in, stat_out)
}
pub unsafe fn guess_handle(handle: c_int) -> c_int {
rust_uv_guess_handle(handle)
}
// uv_support is the result of compiling rust_uv.cpp
//
// Note that this is in a cfg'd block so it doesn't get linked during testing.
// There's a bit of a conundrum when testing in that we're actually assuming
// that the tests are running in a uv loop, but they were created from the
// statically linked uv to the original rustuv crate. When we create the test
// executable, on some platforms if we re-link against uv, it actually creates
// second copies of everything. We obviously don't want this, so instead of
// dying horribly during testing, we allow all of the test rustuv's references
// to get resolved to the original rustuv crate.
#[cfg(not(test))]
#[link(name = "uv_support", kind = "static")]
#[link(name = "uv", kind = "static")]
extern {}
extern {
fn rust_uv_loop_new() -> *c_void;
#[cfg(test)]
fn rust_uv_handle_type_max() -> uintptr_t;
#[cfg(test)]
fn rust_uv_req_type_max() -> uintptr_t;
fn rust_uv_get_udp_handle_from_send_req(req: *uv_udp_send_t) -> *uv_udp_t;
fn rust_uv_populate_uv_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t);
fn rust_uv_get_result_from_fs_req(req: *uv_fs_t) -> ssize_t;
fn rust_uv_get_ptr_from_fs_req(req: *uv_fs_t) -> *libc::c_void;
fn rust_uv_get_path_from_fs_req(req: *uv_fs_t) -> *c_char;
fn rust_uv_get_loop_from_fs_req(req: *uv_fs_t) -> *uv_loop_t;
fn rust_uv_get_loop_from_getaddrinfo_req(req: *uv_fs_t) -> *uv_loop_t;
fn rust_uv_get_stream_handle_from_connect_req(req: *uv_connect_t) -> *uv_stream_t;
fn rust_uv_get_stream_handle_from_write_req(req: *uv_write_t) -> *uv_stream_t;
fn rust_uv_get_loop_for_uv_handle(handle: *c_void) -> *c_void;
fn rust_uv_get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void;
fn rust_uv_set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void);
fn rust_uv_get_data_for_uv_handle(handle: *c_void) -> *c_void;
fn rust_uv_set_data_for_uv_handle(handle: *c_void, data: *c_void);
fn rust_uv_get_data_for_req(req: *c_void) -> *c_void;
fn rust_uv_set_data_for_req(req: *c_void, data: *c_void);
fn rust_set_stdio_container_flags(c: *uv_stdio_container_t, flags: c_int);
fn rust_set_stdio_container_fd(c: *uv_stdio_container_t, fd: c_int);
fn rust_set_stdio_container_stream(c: *uv_stdio_container_t,
stream: *uv_stream_t);
fn rust_uv_process_pid(p: *uv_process_t) -> c_int;
fn rust_uv_guess_handle(fd: c_int) -> c_int;
// generic uv functions
pub fn uv_loop_delete(l: *uv_loop_t);
pub fn uv_ref(t: *uv_handle_t);
pub fn uv_unref(t: *uv_handle_t);
pub fn uv_handle_size(ty: uv_handle_type) -> size_t;
pub fn uv_req_size(ty: uv_req_type) -> size_t;
pub fn uv_run(l: *uv_loop_t, mode: uv_run_mode) -> c_int;
pub fn uv_close(h: *uv_handle_t, cb: uv_close_cb);
pub fn uv_walk(l: *uv_loop_t, cb: uv_walk_cb, arg: *c_void);
pub fn uv_buf_init(base: *c_char, len: c_uint) -> uv_buf_t;
pub fn uv_strerror(err: c_int) -> *c_char;
pub fn uv_err_name(err: c_int) -> *c_char;
pub fn uv_listen(s: *uv_stream_t, backlog: c_int,
cb: uv_connection_cb) -> c_int;
pub fn uv_accept(server: *uv_stream_t, client: *uv_stream_t) -> c_int;
pub fn uv_read_start(stream: *uv_stream_t,
on_alloc: uv_alloc_cb,
on_read: uv_read_cb) -> c_int;
pub fn uv_read_stop(stream: *uv_stream_t) -> c_int;
pub fn uv_shutdown(req: *uv_shutdown_t, handle: *uv_stream_t,
cb: uv_shutdown_cb) -> c_int;
// idle bindings
pub fn uv_idle_init(l: *uv_loop_t, i: *uv_idle_t) -> c_int;
pub fn uv_idle_start(i: *uv_idle_t, cb: uv_idle_cb) -> c_int;
pub fn uv_idle_stop(i: *uv_idle_t) -> c_int;
// async bindings
pub fn uv_async_init(l: *uv_loop_t, a: *uv_async_t,
cb: uv_async_cb) -> c_int;
pub fn uv_async_send(a: *uv_async_t);
// tcp bindings
pub fn uv_tcp_init(l: *uv_loop_t, h: *uv_tcp_t) -> c_int;
pub fn uv_tcp_connect(c: *uv_connect_t, h: *uv_tcp_t,
addr: *sockaddr, cb: uv_connect_cb) -> c_int;
pub fn uv_tcp_bind(t: *uv_tcp_t, addr: *sockaddr) -> c_int;
pub fn uv_tcp_nodelay(h: *uv_tcp_t, enable: c_int) -> c_int;
pub fn uv_tcp_keepalive(h: *uv_tcp_t, enable: c_int,
delay: c_uint) -> c_int;
pub fn uv_tcp_simultaneous_accepts(h: *uv_tcp_t, enable: c_int) -> c_int;
pub fn uv_tcp_getsockname(h: *uv_tcp_t, name: *mut sockaddr,
len: *mut c_int) -> c_int;
pub fn uv_tcp_getpeername(h: *uv_tcp_t, name: *mut sockaddr,
len: *mut c_int) -> c_int;
// udp bindings
pub fn uv_udp_init(l: *uv_loop_t, h: *uv_udp_t) -> c_int;
pub fn uv_udp_bind(h: *uv_udp_t, addr: *sockaddr, flags: c_uint) -> c_int;
pub fn uv_udp_recv_start(server: *uv_udp_t,
on_alloc: uv_alloc_cb,
on_recv: uv_udp_recv_cb) -> c_int;
pub fn uv_udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char,
interface_addr: *c_char,
membership: uv_membership) -> c_int;
pub fn uv_udp_recv_stop(server: *uv_udp_t) -> c_int;
pub fn uv_udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int;
pub fn uv_udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int;
pub fn uv_udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int;
pub fn uv_udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int;
pub fn uv_udp_getsockname(h: *uv_udp_t, name: *mut sockaddr,
len: *mut c_int) -> c_int;
// timer bindings
pub fn uv_timer_init(l: *uv_loop_t, t: *uv_timer_t) -> c_int;
pub fn uv_timer_start(t: *uv_timer_t, cb: uv_timer_cb,
timeout: libc::uint64_t,
repeat: libc::uint64_t) -> c_int;
pub fn uv_timer_stop(handle: *uv_timer_t) -> c_int;
// fs operations
pub fn uv_fs_open(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char,
flags: c_int, mode: c_int, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_unlink(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_write(l: *uv_loop_t, req: *uv_fs_t, fd: c_int,
bufs: *uv_buf_t, nbufs: c_uint,
offset: i64, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_read(l: *uv_loop_t, req: *uv_fs_t, fd: c_int,
bufs: *uv_buf_t, nbufs: c_uint,
offset: i64, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_close(l: *uv_loop_t, req: *uv_fs_t, fd: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_stat(l: *uv_loop_t, req: *uv_fs_t, path: *c_char,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_fstat(l: *uv_loop_t, req: *uv_fs_t, fd: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_mkdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char,
mode: c_int, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_rmdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_readdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char,
flags: c_int, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_req_cleanup(req: *uv_fs_t);
pub fn uv_fs_fsync(handle: *uv_loop_t, req: *uv_fs_t, file: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_fdatasync(handle: *uv_loop_t, req: *uv_fs_t, file: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_ftruncate(handle: *uv_loop_t, req: *uv_fs_t, file: c_int,
offset: i64, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_readlink(handle: *uv_loop_t, req: *uv_fs_t, file: *c_char,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_symlink(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char,
dst: *c_char, flags: c_int, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_rename(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char,
dst: *c_char, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_utime(handle: *uv_loop_t, req: *uv_fs_t, path: *c_char,
atime: c_double, mtime: c_double,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_link(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char,
dst: *c_char, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_chown(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char,
uid: uv_uid_t, gid: uv_gid_t, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_chmod(handle: *uv_loop_t, req: *uv_fs_t, path: *c_char,
mode: c_int, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_lstat(handle: *uv_loop_t, req: *uv_fs_t, file: *c_char,
cb: uv_fs_cb) -> c_int;
// getaddrinfo
pub fn uv_getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t,
getaddrinfo_cb: uv_getaddrinfo_cb,
node: *c_char, service: *c_char,
hints: *addrinfo) -> c_int;
pub fn uv_freeaddrinfo(ai: *addrinfo);
// process spawning
pub fn uv_spawn(loop_ptr: *uv_loop_t, outptr: *uv_process_t,
options: *uv_process_options_t) -> c_int;
pub fn uv_process_kill(p: *uv_process_t, signum: c_int) -> c_int;
pub fn uv_kill(pid: c_int, signum: c_int) -> c_int;
// pipes
pub fn uv_pipe_init(l: *uv_loop_t, p: *uv_pipe_t, ipc: c_int) -> c_int;
pub fn uv_pipe_open(pipe: *uv_pipe_t, file: c_int) -> c_int;
pub fn uv_pipe_bind(pipe: *uv_pipe_t, name: *c_char) -> c_int;
pub fn uv_pipe_connect(req: *uv_connect_t, handle: *uv_pipe_t,
name: *c_char, cb: uv_connect_cb);
// tty
pub fn uv_tty_init(l: *uv_loop_t, tty: *uv_tty_t, fd: c_int,
readable: c_int) -> c_int;
pub fn uv_tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int;
pub fn uv_tty_get_winsize(tty: *uv_tty_t, width: *c_int,
height: *c_int) -> c_int;
// signals
pub fn uv_signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int;
pub fn uv_signal_start(h: *uv_signal_t, cb: uv_signal_cb,
signum: c_int) -> c_int;
pub fn uv_signal_stop(handle: *uv_signal_t) -> c_int;
}
// libuv requires other native libraries on various platforms. These are all
// listed here (for each platform)
// libuv doesn't use pthread on windows
// android libc (bionic) provides pthread, so no additional link is required
#[cfg(not(windows), not(target_os = "android"))]
#[link(name = "pthread")]
extern {}
#[cfg(target_os = "linux")]
#[link(name = "rt")]
extern {}
#[cfg(target_os = "win32")]
#[link(name = "ws2_32")]
#[link(name = "psapi")]
#[link(name = "iphlpapi")]
extern {}
#[cfg(target_os = "freebsd")]
#[link(name = "kvm")]
extern {}
| 36.836207 | 92 | 0.634293 |
561b5dde1ee4742663396f4f5dbbf49a2a32b3ec | 10,377 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::CFG {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
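// A usage sketch through the owning peripheral. The `lcdca` handle name is an
// assumption about the generated crate layout; the field proxies are defined
// below:
//
// lcdca.cfg.modify(|_, w| w.wmod().set_bit().blank().clear_bit());
// let duty = lcdca.cfg.read().duty().bits();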
#[doc = r" Value of the field"]
pub struct XBIASR {
bits: bool,
}
impl XBIASR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct WMODR {
bits: bool,
}
impl WMODR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct BLANKR {
bits: bool,
}
impl BLANKR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct LOCKR {
bits: bool,
}
impl LOCKR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct DUTYR {
bits: u8,
}
impl DUTYR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct FCSTR {
bits: u8,
}
impl FCSTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct NSUR {
bits: u8,
}
impl NSUR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _XBIASW<'a> {
w: &'a mut W,
}
impl<'a> _XBIASW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _WMODW<'a> {
w: &'a mut W,
}
impl<'a> _WMODW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _BLANKW<'a> {
w: &'a mut W,
}
impl<'a> _BLANKW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _LOCKW<'a> {
w: &'a mut W,
}
impl<'a> _LOCKW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _DUTYW<'a> {
w: &'a mut W,
}
impl<'a> _DUTYW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _FCSTW<'a> {
w: &'a mut W,
}
impl<'a> _FCSTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 63;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _NSUW<'a> {
w: &'a mut W,
}
impl<'a> _NSUW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 63;
const OFFSET: u8 = 24;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 0 - External Bias Generation"]
#[inline]
pub fn xbias(&self) -> XBIASR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
};
XBIASR { bits }
}
#[doc = "Bit 1 - Waveform Mode"]
#[inline]
pub fn wmod(&self) -> WMODR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
};
WMODR { bits }
}
#[doc = "Bit 2 - Blank LCD"]
#[inline]
pub fn blank(&self) -> BLANKR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
};
BLANKR { bits }
}
#[doc = "Bit 3 - Lock"]
#[inline]
pub fn lock(&self) -> LOCKR {
let bits = {
const MASK: bool = true;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) != 0
};
LOCKR { bits }
}
#[doc = "Bits 8:9 - Duty Select"]
#[inline]
pub fn duty(&self) -> DUTYR {
let bits = {
const MASK: u8 = 3;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) as u8
};
DUTYR { bits }
}
#[doc = "Bits 16:21 - Fine Contrast"]
#[inline]
pub fn fcst(&self) -> FCSTR {
let bits = {
const MASK: u8 = 63;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) as u8
};
FCSTR { bits }
}
#[doc = "Bits 24:29 - Number of Segment Terminals in Use"]
#[inline]
pub fn nsu(&self) -> NSUR {
let bits = {
const MASK: u8 = 63;
const OFFSET: u8 = 24;
((self.bits >> OFFSET) & MASK as u32) as u8
};
NSUR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0 - External Bias Generation"]
#[inline]
pub fn xbias(&mut self) -> _XBIASW {
_XBIASW { w: self }
}
#[doc = "Bit 1 - Waveform Mode"]
#[inline]
pub fn wmod(&mut self) -> _WMODW {
_WMODW { w: self }
}
#[doc = "Bit 2 - Blank LCD"]
#[inline]
pub fn blank(&mut self) -> _BLANKW {
_BLANKW { w: self }
}
#[doc = "Bit 3 - Lock"]
#[inline]
pub fn lock(&mut self) -> _LOCKW {
_LOCKW { w: self }
}
#[doc = "Bits 8:9 - Duty Select"]
#[inline]
pub fn duty(&mut self) -> _DUTYW {
_DUTYW { w: self }
}
#[doc = "Bits 16:21 - Fine Contrast"]
#[inline]
pub fn fcst(&mut self) -> _FCSTW {
_FCSTW { w: self }
}
#[doc = "Bits 24:29 - Number of Segment Terminals in Use"]
#[inline]
pub fn nsu(&mut self) -> _NSUW {
_NSUW { w: self }
}
}
| 24.474057 | 62 | 0.485882 |
6458045a168243b7a3704255d2553a79eb8c3c7c | 8,752 | #[doc = "Reader of register DORMANT_WAKE_INTS1"]
pub type R = crate::R<u32, super::DORMANT_WAKE_INTS1>;
#[doc = "Reader of field `GPIO15_EDGE_HIGH`"]
pub type GPIO15_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO15_EDGE_LOW`"]
pub type GPIO15_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO15_LEVEL_HIGH`"]
pub type GPIO15_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO15_LEVEL_LOW`"]
pub type GPIO15_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO14_EDGE_HIGH`"]
pub type GPIO14_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO14_EDGE_LOW`"]
pub type GPIO14_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO14_LEVEL_HIGH`"]
pub type GPIO14_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO14_LEVEL_LOW`"]
pub type GPIO14_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO13_EDGE_HIGH`"]
pub type GPIO13_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO13_EDGE_LOW`"]
pub type GPIO13_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO13_LEVEL_HIGH`"]
pub type GPIO13_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO13_LEVEL_LOW`"]
pub type GPIO13_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO12_EDGE_HIGH`"]
pub type GPIO12_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO12_EDGE_LOW`"]
pub type GPIO12_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO12_LEVEL_HIGH`"]
pub type GPIO12_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO12_LEVEL_LOW`"]
pub type GPIO12_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO11_EDGE_HIGH`"]
pub type GPIO11_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO11_EDGE_LOW`"]
pub type GPIO11_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO11_LEVEL_HIGH`"]
pub type GPIO11_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO11_LEVEL_LOW`"]
pub type GPIO11_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO10_EDGE_HIGH`"]
pub type GPIO10_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO10_EDGE_LOW`"]
pub type GPIO10_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO10_LEVEL_HIGH`"]
pub type GPIO10_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO10_LEVEL_LOW`"]
pub type GPIO10_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO9_EDGE_HIGH`"]
pub type GPIO9_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO9_EDGE_LOW`"]
pub type GPIO9_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO9_LEVEL_HIGH`"]
pub type GPIO9_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO9_LEVEL_LOW`"]
pub type GPIO9_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO8_EDGE_HIGH`"]
pub type GPIO8_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO8_EDGE_LOW`"]
pub type GPIO8_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO8_LEVEL_HIGH`"]
pub type GPIO8_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO8_LEVEL_LOW`"]
pub type GPIO8_LEVEL_LOW_R = crate::R<bool, bool>;
impl R {
#[doc = "Bit 31"]
#[inline(always)]
pub fn gpio15_edge_high(&self) -> GPIO15_EDGE_HIGH_R {
GPIO15_EDGE_HIGH_R::new(((self.bits >> 31) & 0x01) != 0)
}
#[doc = "Bit 30"]
#[inline(always)]
pub fn gpio15_edge_low(&self) -> GPIO15_EDGE_LOW_R {
GPIO15_EDGE_LOW_R::new(((self.bits >> 30) & 0x01) != 0)
}
#[doc = "Bit 29"]
#[inline(always)]
pub fn gpio15_level_high(&self) -> GPIO15_LEVEL_HIGH_R {
GPIO15_LEVEL_HIGH_R::new(((self.bits >> 29) & 0x01) != 0)
}
#[doc = "Bit 28"]
#[inline(always)]
pub fn gpio15_level_low(&self) -> GPIO15_LEVEL_LOW_R {
GPIO15_LEVEL_LOW_R::new(((self.bits >> 28) & 0x01) != 0)
}
#[doc = "Bit 27"]
#[inline(always)]
pub fn gpio14_edge_high(&self) -> GPIO14_EDGE_HIGH_R {
GPIO14_EDGE_HIGH_R::new(((self.bits >> 27) & 0x01) != 0)
}
#[doc = "Bit 26"]
#[inline(always)]
pub fn gpio14_edge_low(&self) -> GPIO14_EDGE_LOW_R {
GPIO14_EDGE_LOW_R::new(((self.bits >> 26) & 0x01) != 0)
}
#[doc = "Bit 25"]
#[inline(always)]
pub fn gpio14_level_high(&self) -> GPIO14_LEVEL_HIGH_R {
GPIO14_LEVEL_HIGH_R::new(((self.bits >> 25) & 0x01) != 0)
}
#[doc = "Bit 24"]
#[inline(always)]
pub fn gpio14_level_low(&self) -> GPIO14_LEVEL_LOW_R {
GPIO14_LEVEL_LOW_R::new(((self.bits >> 24) & 0x01) != 0)
}
#[doc = "Bit 23"]
#[inline(always)]
pub fn gpio13_edge_high(&self) -> GPIO13_EDGE_HIGH_R {
GPIO13_EDGE_HIGH_R::new(((self.bits >> 23) & 0x01) != 0)
}
#[doc = "Bit 22"]
#[inline(always)]
pub fn gpio13_edge_low(&self) -> GPIO13_EDGE_LOW_R {
GPIO13_EDGE_LOW_R::new(((self.bits >> 22) & 0x01) != 0)
}
#[doc = "Bit 21"]
#[inline(always)]
pub fn gpio13_level_high(&self) -> GPIO13_LEVEL_HIGH_R {
GPIO13_LEVEL_HIGH_R::new(((self.bits >> 21) & 0x01) != 0)
}
#[doc = "Bit 20"]
#[inline(always)]
pub fn gpio13_level_low(&self) -> GPIO13_LEVEL_LOW_R {
GPIO13_LEVEL_LOW_R::new(((self.bits >> 20) & 0x01) != 0)
}
#[doc = "Bit 19"]
#[inline(always)]
pub fn gpio12_edge_high(&self) -> GPIO12_EDGE_HIGH_R {
GPIO12_EDGE_HIGH_R::new(((self.bits >> 19) & 0x01) != 0)
}
#[doc = "Bit 18"]
#[inline(always)]
pub fn gpio12_edge_low(&self) -> GPIO12_EDGE_LOW_R {
GPIO12_EDGE_LOW_R::new(((self.bits >> 18) & 0x01) != 0)
}
#[doc = "Bit 17"]
#[inline(always)]
pub fn gpio12_level_high(&self) -> GPIO12_LEVEL_HIGH_R {
GPIO12_LEVEL_HIGH_R::new(((self.bits >> 17) & 0x01) != 0)
}
#[doc = "Bit 16"]
#[inline(always)]
pub fn gpio12_level_low(&self) -> GPIO12_LEVEL_LOW_R {
GPIO12_LEVEL_LOW_R::new(((self.bits >> 16) & 0x01) != 0)
}
#[doc = "Bit 15"]
#[inline(always)]
pub fn gpio11_edge_high(&self) -> GPIO11_EDGE_HIGH_R {
GPIO11_EDGE_HIGH_R::new(((self.bits >> 15) & 0x01) != 0)
}
#[doc = "Bit 14"]
#[inline(always)]
pub fn gpio11_edge_low(&self) -> GPIO11_EDGE_LOW_R {
GPIO11_EDGE_LOW_R::new(((self.bits >> 14) & 0x01) != 0)
}
#[doc = "Bit 13"]
#[inline(always)]
pub fn gpio11_level_high(&self) -> GPIO11_LEVEL_HIGH_R {
GPIO11_LEVEL_HIGH_R::new(((self.bits >> 13) & 0x01) != 0)
}
#[doc = "Bit 12"]
#[inline(always)]
pub fn gpio11_level_low(&self) -> GPIO11_LEVEL_LOW_R {
GPIO11_LEVEL_LOW_R::new(((self.bits >> 12) & 0x01) != 0)
}
#[doc = "Bit 11"]
#[inline(always)]
pub fn gpio10_edge_high(&self) -> GPIO10_EDGE_HIGH_R {
GPIO10_EDGE_HIGH_R::new(((self.bits >> 11) & 0x01) != 0)
}
#[doc = "Bit 10"]
#[inline(always)]
pub fn gpio10_edge_low(&self) -> GPIO10_EDGE_LOW_R {
GPIO10_EDGE_LOW_R::new(((self.bits >> 10) & 0x01) != 0)
}
#[doc = "Bit 9"]
#[inline(always)]
pub fn gpio10_level_high(&self) -> GPIO10_LEVEL_HIGH_R {
GPIO10_LEVEL_HIGH_R::new(((self.bits >> 9) & 0x01) != 0)
}
#[doc = "Bit 8"]
#[inline(always)]
pub fn gpio10_level_low(&self) -> GPIO10_LEVEL_LOW_R {
GPIO10_LEVEL_LOW_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bit 7"]
#[inline(always)]
pub fn gpio9_edge_high(&self) -> GPIO9_EDGE_HIGH_R {
GPIO9_EDGE_HIGH_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bit 6"]
#[inline(always)]
pub fn gpio9_edge_low(&self) -> GPIO9_EDGE_LOW_R {
GPIO9_EDGE_LOW_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 5"]
#[inline(always)]
pub fn gpio9_level_high(&self) -> GPIO9_LEVEL_HIGH_R {
GPIO9_LEVEL_HIGH_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 4"]
#[inline(always)]
pub fn gpio9_level_low(&self) -> GPIO9_LEVEL_LOW_R {
GPIO9_LEVEL_LOW_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 3"]
#[inline(always)]
pub fn gpio8_edge_high(&self) -> GPIO8_EDGE_HIGH_R {
GPIO8_EDGE_HIGH_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 2"]
#[inline(always)]
pub fn gpio8_edge_low(&self) -> GPIO8_EDGE_LOW_R {
GPIO8_EDGE_LOW_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 1"]
#[inline(always)]
pub fn gpio8_level_high(&self) -> GPIO8_LEVEL_HIGH_R {
GPIO8_LEVEL_HIGH_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 0"]
#[inline(always)]
pub fn gpio8_level_low(&self) -> GPIO8_LEVEL_LOW_R {
GPIO8_LEVEL_LOW_R::new((self.bits & 0x01) != 0)
}
}
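// A read sketch. The `pac.IO_BANK0.dormant_wake_ints1` path is an assumption
// about the generated crate layout; the field readers are defined above:
//
// let ints = pac.IO_BANK0.dormant_wake_ints1.read();
// if ints.gpio8_edge_high().bit_is_set() {
//     // GPIO8 saw a rising edge while the chip was dormant.
// }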
| 38.218341 | 65 | 0.623972 |
281b537fd0c57e7a233b4d442798a9b72e8b3086 | 39,988 | //! duckdb-rs is an ergonomic wrapper for using DuckDB from Rust. It attempts to
//! expose an interface similar to [rusqlite](https://github.com/rusqlite/rusqlite).
//!
//! ```rust
//! use duckdb::{params, Connection, Result};
//! use arrow::record_batch::RecordBatch;
//! use arrow::util::pretty::print_batches;
//!
//! #[derive(Debug)]
//! struct Person {
//! id: i32,
//! name: String,
//! data: Option<Vec<u8>>,
//! }
//!
//! fn main() -> Result<()> {
//! let conn = Connection::open_in_memory()?;
//!
//! conn.execute_batch(
//! r"CREATE SEQUENCE seq;
//! CREATE TABLE person (
//! id INTEGER PRIMARY KEY DEFAULT NEXTVAL('seq'),
//! name TEXT NOT NULL,
//! data BLOB
//! );
//! ")?;
//! let me = Person {
//! id: 0,
//! name: "Steven".to_string(),
//! data: None,
//! };
//! conn.execute(
//! "INSERT INTO person (name, data) VALUES (?, ?)",
//! params![me.name, me.data],
//! )?;
//!
//! let mut stmt = conn.prepare("SELECT id, name, data FROM person")?;
//! let person_iter = stmt.query_map([], |row| {
//! Ok(Person {
//! id: row.get(0)?,
//! name: row.get(1)?,
//! data: row.get(2)?,
//! })
//! })?;
//!
//! for person in person_iter {
//! println!("Found person {:?}", person.unwrap());
//! }
//!
//!     // query the table as Arrow record batches
//!     let rbs: Vec<RecordBatch> = stmt.query_arrow([])?.collect();
//!     print_batches(&rbs).unwrap();
//! Ok(())
//! }
//! ```
#![warn(missing_docs)]
pub use libduckdb_sys as ffi;
use std::cell::RefCell;
use std::convert;
use std::default::Default;
use std::ffi::CString;
use std::fmt;
use std::path::{Path, PathBuf};
use std::result;
use std::str;
use crate::inner_connection::InnerConnection;
use crate::raw_statement::RawStatement;
use crate::types::ValueRef;
pub use crate::appender::Appender;
pub use crate::appender_params::{appender_params_from_iter, AppenderParams, AppenderParamsFromIter};
pub use crate::arrow_batch::Arrow;
pub use crate::column::Column;
pub use crate::config::{AccessMode, Config, DefaultNullOrder, DefaultOrder};
pub use crate::error::Error;
pub use crate::ffi::ErrorCode;
pub use crate::params::{params_from_iter, Params, ParamsFromIter};
pub use crate::row::{AndThenRows, Map, MappedRows, Row, RowIndex, Rows};
pub use crate::statement::Statement;
pub use crate::transaction::{DropBehavior, Savepoint, Transaction, TransactionBehavior};
pub use crate::types::ToSql;
#[macro_use]
mod error;
mod appender;
mod appender_params;
mod arrow_batch;
mod column;
mod config;
mod inner_connection;
mod params;
mod pragma;
mod raw_statement;
mod row;
mod statement;
mod transaction;
pub mod types;
pub(crate) mod util;
/// A macro making it more convenient to pass heterogeneous or long lists of
/// parameters as a `&[&dyn ToSql]`.
///
/// # Example
///
/// ```rust,no_run
/// # use duckdb::{Result, Connection, params};
///
/// struct Person {
/// name: String,
/// age_in_years: u8,
/// data: Option<Vec<u8>>,
/// }
///
/// fn add_person(conn: &Connection, person: &Person) -> Result<()> {
/// conn.execute("INSERT INTO person (name, age_in_years, data)
/// VALUES (?1, ?2, ?3)",
/// params![person.name, person.age_in_years, person.data])?;
/// Ok(())
/// }
/// ```
#[macro_export]
macro_rules! params {
() => {
&[] as &[&dyn $crate::ToSql]
};
($($param:expr),+ $(,)?) => {
&[$(&$param as &dyn $crate::ToSql),+] as &[&dyn $crate::ToSql]
};
}
/// A typedef of the result returned by many methods.
pub type Result<T, E = Error> = result::Result<T, E>;
/// See the [method documentation](#tymethod.optional).
pub trait OptionalExt<T> {
/// Converts a `Result<T>` into a `Result<Option<T>>`.
///
/// By default, duckdb-rs treats 0 rows being returned from a query that is
/// expected to return 1 row as an error. This method will
/// handle that error, and give you back an `Option<T>` instead.
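    ///
    /// ## Example
    ///
    /// A sketch of the intended use (the `preferences` table is illustrative):
    ///
    /// ```rust,no_run
    /// # use duckdb::{Connection, OptionalExt, Result};
    /// fn preferred_locale(conn: &Connection) -> Result<Option<String>> {
    ///     conn.query_row(
    ///         "SELECT value FROM preferences WHERE name='locale'",
    ///         [],
    ///         |row| row.get(0),
    ///     )
    ///     .optional()
    /// }
    /// ```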
fn optional(self) -> Result<Option<T>>;
}
impl<T> OptionalExt<T> for Result<T> {
fn optional(self) -> Result<Option<T>> {
match self {
Ok(value) => Ok(Some(value)),
Err(Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(e),
}
}
}
/// Name for a database within a DuckDB connection.
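///
/// The `Display` impl below renders `Main` as `main`, `Temp` as `temp`, and an
/// attached database as its given name:
///
/// ```rust
/// # use duckdb::DatabaseName;
/// assert_eq!(DatabaseName::Main.to_string(), "main");
/// assert_eq!(DatabaseName::Attached("other").to_string(), "other");
/// ```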
#[derive(Copy, Clone, Debug)]
pub enum DatabaseName<'a> {
/// The main database.
Main,
/// The temporary database (e.g., any "CREATE TEMPORARY TABLE" tables).
Temp,
/// A database that has been attached via "ATTACH DATABASE ...".
Attached(&'a str),
}
impl<'a> fmt::Display for DatabaseName<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
DatabaseName::Main => write!(f, "main"),
DatabaseName::Temp => write!(f, "temp"),
DatabaseName::Attached(s) => write!(f, "{}", s),
}
}
}
/// Shorthand for [`DatabaseName::Main`].
pub const MAIN_DB: DatabaseName<'static> = DatabaseName::Main;
/// Shorthand for [`DatabaseName::Temp`].
pub const TEMP_DB: DatabaseName<'static> = DatabaseName::Temp;
/// A connection to a DuckDB database.
pub struct Connection {
db: RefCell<InnerConnection>,
path: Option<PathBuf>,
}
unsafe impl Send for Connection {}
impl Clone for Connection {
    /// Open a new connection to the same database.
fn clone(&self) -> Self {
Connection {
db: RefCell::new(self.db.borrow().clone()),
path: self.path.clone(),
}
}
}
impl Connection {
/// Open a new connection to a DuckDB database.
///
/// `Connection::open(path)` is equivalent to
/// `Connection::open_with_flags(path,
/// Config::default())`.
///
/// ```rust,no_run
/// # use duckdb::{Connection, Result};
/// fn open_my_db() -> Result<()> {
/// let path = "./my_db.db3";
/// let db = Connection::open(&path)?;
/// println!("{}", db.is_autocommit());
/// Ok(())
/// }
/// ```
///
/// # Failure
///
/// Will return `Err` if `path` cannot be converted to a C-compatible
/// string or if the underlying DuckDB open call fails.
#[inline]
pub fn open<P: AsRef<Path>>(path: P) -> Result<Connection> {
Connection::open_with_flags(path, Config::default())
}
/// Open a new connection to an in-memory DuckDB database.
///
/// # Failure
///
/// Will return `Err` if the underlying DuckDB open call fails.
#[inline]
pub fn open_in_memory() -> Result<Connection> {
Connection::open_in_memory_with_flags(Config::default())
}
/// Open a new connection to a DuckDB database.
///
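    /// ## Example
    ///
    /// A sketch using a read-only configuration; the `access_mode` builder is
    /// assumed to exist on this crate's `Config`:
    ///
    /// ```rust,no_run
    /// # use duckdb::{AccessMode, Config, Connection, Result};
    /// fn open_read_only() -> Result<Connection> {
    ///     let config = Config::default().access_mode(AccessMode::ReadOnly)?;
    ///     Connection::open_with_flags("./my_db.db3", config)
    /// }
    /// ```
    ///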
/// # Failure
///
/// Will return `Err` if `path` cannot be converted to a C-compatible
/// string or if the underlying DuckDB open call fails.
#[inline]
pub fn open_with_flags<P: AsRef<Path>>(path: P, config: Config) -> Result<Connection> {
#[cfg(unix)]
fn path_to_cstring(p: &Path) -> Result<CString> {
use std::os::unix::ffi::OsStrExt;
Ok(CString::new(p.as_os_str().as_bytes())?)
}
#[cfg(not(unix))]
fn path_to_cstring(p: &Path) -> Result<CString> {
let s = p.to_str().ok_or_else(|| Error::InvalidPath(p.to_owned()))?;
Ok(CString::new(s)?)
}
let c_path = path_to_cstring(path.as_ref())?;
InnerConnection::open_with_flags(&c_path, config).map(|db| Connection {
db: RefCell::new(db),
path: Some(path.as_ref().to_path_buf()),
})
}
/// Open a new connection to an in-memory DuckDB database.
///
/// # Failure
///
/// Will return `Err` if the underlying DuckDB open call fails.
#[inline]
pub fn open_in_memory_with_flags(config: Config) -> Result<Connection> {
Connection::open_with_flags(":memory:", config)
}
/// Convenience method to run multiple SQL statements (that cannot take any
/// parameters).
///
/// ## Example
///
/// ```rust,no_run
/// # use duckdb::{Connection, Result};
/// fn create_tables(conn: &Connection) -> Result<()> {
/// conn.execute_batch("BEGIN;
/// CREATE TABLE foo(x INTEGER);
/// CREATE TABLE bar(y TEXT);
/// COMMIT;",
/// )
/// }
/// ```
///
/// # Failure
///
/// Will return `Err` if `sql` cannot be converted to a C-compatible string
/// or if the underlying DuckDB call fails.
pub fn execute_batch(&self, sql: &str) -> Result<()> {
self.db.borrow_mut().execute(sql)
}
/// Convenience method to prepare and execute a single SQL statement.
///
/// On success, returns the number of rows that were changed or inserted or
/// deleted.
///
/// ## Example
///
/// ### With params
///
/// ```rust,no_run
/// # use duckdb::{Connection};
/// fn update_rows(conn: &Connection) {
/// match conn.execute("UPDATE foo SET bar = 'baz' WHERE qux = ?", [1i32]) {
/// Ok(updated) => println!("{} rows were updated", updated),
/// Err(err) => println!("update failed: {}", err),
/// }
/// }
/// ```
///
/// ### With params of varying types
///
/// ```rust,no_run
/// # use duckdb::{Connection, params};
/// fn update_rows(conn: &Connection) {
/// match conn.execute("UPDATE foo SET bar = ? WHERE qux = ?", params![&"baz", 1i32]) {
/// Ok(updated) => println!("{} rows were updated", updated),
/// Err(err) => println!("update failed: {}", err),
/// }
/// }
/// ```
///
/// # Failure
///
/// Will return `Err` if `sql` cannot be converted to a C-compatible string
/// or if the underlying DuckDB call fails.
#[inline]
pub fn execute<P: Params>(&self, sql: &str, params: P) -> Result<usize> {
self.prepare(sql).and_then(|mut stmt| stmt.execute(params))
}
/// Returns the path to the database file, if one exists and is known.
#[inline]
pub fn path(&self) -> Option<&Path> {
self.path.as_deref()
}
/// Convenience method to execute a query that is expected to return a
/// single row.
///
/// ## Example
///
/// ```rust,no_run
/// # use duckdb::{Result, Connection};
/// fn preferred_locale(conn: &Connection) -> Result<String> {
/// conn.query_row(
/// "SELECT value FROM preferences WHERE name='locale'",
/// [],
/// |row| row.get(0),
/// )
/// }
/// ```
///
/// If the query returns more than one row, all rows except the first are
/// ignored.
///
/// Returns `Err(QueryReturnedNoRows)` if no results are returned. If the
/// query truly is optional, you can call `.optional()` on the result of
/// this to get a `Result<Option<T>>`.
///
/// # Failure
///
/// Will return `Err` if `sql` cannot be converted to a C-compatible string
/// or if the underlying DuckDB call fails.
#[inline]
pub fn query_row<T, P, F>(&self, sql: &str, params: P, f: F) -> Result<T>
where
P: Params,
F: FnOnce(&Row<'_>) -> Result<T>,
{
self.prepare(sql)?.query_row(params, f)
}
/// Convenience method to execute a query that is expected to return a
/// single row, and execute a mapping via `f` on that returned row with
    /// the possibility of failure. The error type returned by `f` must
    /// implement `std::convert::From<Error>`.
///
/// ## Example
///
/// ```rust,no_run
/// # use duckdb::{Result, Connection};
/// fn preferred_locale(conn: &Connection) -> Result<String> {
/// conn.query_row_and_then(
/// "SELECT value FROM preferences WHERE name='locale'",
/// [],
/// |row| row.get(0),
/// )
/// }
/// ```
///
/// If the query returns more than one row, all rows except the first are
/// ignored.
///
/// # Failure
///
/// Will return `Err` if `sql` cannot be converted to a C-compatible string
/// or if the underlying DuckDB call fails.
#[inline]
pub fn query_row_and_then<T, E, P, F>(&self, sql: &str, params: P, f: F) -> Result<T, E>
where
P: Params,
F: FnOnce(&Row<'_>) -> Result<T, E>,
E: convert::From<Error>,
{
self.prepare(sql)?
.query(params)?
.get_expected_row()
.map_err(E::from)
.and_then(f)
}
/// Prepare a SQL statement for execution.
///
/// ## Example
///
/// ```rust,no_run
/// # use duckdb::{Connection, Result};
/// fn insert_new_people(conn: &Connection) -> Result<()> {
/// let mut stmt = conn.prepare("INSERT INTO People (name) VALUES (?)")?;
/// stmt.execute(["Joe Smith"])?;
/// stmt.execute(["Bob Jones"])?;
/// Ok(())
/// }
/// ```
///
/// # Failure
///
/// Will return `Err` if `sql` cannot be converted to a C-compatible string
/// or if the underlying DuckDB call fails.
#[inline]
pub fn prepare(&self, sql: &str) -> Result<Statement<'_>> {
self.db.borrow_mut().prepare(self, sql)
}
    /// Create an Appender for fast bulk data import.
    /// Defaults to [`DatabaseName::Main`].
///
/// ## Example
///
/// ```rust,no_run
/// # use duckdb::{Connection, Result, params};
/// fn insert_rows(conn: &Connection) -> Result<()> {
/// let mut app = conn.appender("foo")?;
/// app.append_rows([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])?;
/// Ok(())
/// }
/// ```
///
/// # Failure
///
    /// Will return `Err` if `table` does not exist.
pub fn appender(&self, table: &str) -> Result<Appender<'_>> {
self.appender_to_db(table, &DatabaseName::Main.to_string())
}
    /// Create an Appender for fast bulk data import into the given database.
///
/// ## Example
///
/// ```rust,no_run
/// # use duckdb::{Connection, Result, params, DatabaseName};
/// fn insert_rows(conn: &Connection) -> Result<()> {
/// let mut app = conn.appender_to_db("foo", &DatabaseName::Main.to_string())?;
/// app.append_rows([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])?;
/// Ok(())
/// }
/// ```
///
/// # Failure
///
    /// Will return `Err` if `table` does not exist.
pub fn appender_to_db(&self, table: &str, schema: &str) -> Result<Appender<'_>> {
self.db.borrow_mut().appender(self, table, schema)
}
/// Close the DuckDB connection.
///
/// This is functionally equivalent to the `Drop` implementation for
/// `Connection` except that on failure, it returns an error and the
/// connection itself (presumably so closing can be attempted again).
///
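    /// ## Example
    ///
    /// A sketch of handling a failed close; the connection is handed back so
    /// closing can be retried:
    ///
    /// ```rust,no_run
    /// # use duckdb::Connection;
    /// fn shutdown(conn: Connection) {
    ///     if let Err((conn, err)) = conn.close() {
    ///         eprintln!("close failed: {}", err);
    ///         drop(conn); // or retry `conn.close()`
    ///     }
    /// }
    /// ```
    ///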
/// # Failure
///
/// Will return `Err` if the underlying DuckDB call fails.
#[inline]
pub fn close(self) -> Result<(), (Connection, Error)> {
let r = self.db.borrow_mut().close();
r.map_err(move |err| (self, err))
}
/// Test for auto-commit mode.
/// Autocommit mode is on by default.
#[inline]
pub fn is_autocommit(&self) -> bool {
self.db.borrow().is_autocommit()
}
}
impl fmt::Debug for Connection {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").field("path", &self.path).finish()
}
}
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
#[cfg(test)]
mod test {
use super::*;
use std::error::Error as StdError;
use std::fmt;
use arrow::array::Int32Array;
use arrow::record_batch::RecordBatch;
use fallible_iterator::FallibleIterator;
// this function is never called, but is still type checked; in
// particular, calls with specific instantiations will require
// that those types are `Send`.
#[allow(dead_code, unconditional_recursion)]
fn ensure_send<T: Send>() {
ensure_send::<Connection>();
}
pub fn checked_memory_handle() -> Connection {
Connection::open_in_memory().unwrap()
}
#[test]
fn test_params_of_vary_types() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(bar TEXT, qux INTEGER);
INSERT INTO foo VALUES ('baz', 1), ('baz', 2), ('baz', 3);
END;";
db.execute_batch(sql)?;
let changed = db.execute("UPDATE foo SET qux = ? WHERE bar = ?", params![1i32, &"baz"])?;
assert_eq!(changed, 3);
Ok(())
}
#[test]
fn test_concurrent_transactions_busy_commit() -> Result<()> {
let tmp = tempfile::tempdir().unwrap();
let path = tmp.path().join("transactions.db3");
Connection::open(&path)?.execute_batch(
"
BEGIN;
CREATE TABLE foo(x INTEGER);
INSERT INTO foo VALUES(42);
END;",
)?;
let mut db1 =
Connection::open_with_flags(&path, Config::default().access_mode(config::AccessMode::ReadWrite)?)?;
let mut db2 =
Connection::open_with_flags(&path, Config::default().access_mode(config::AccessMode::ReadWrite)?)?;
{
let tx1 = db1.transaction()?;
let tx2 = db2.transaction()?;
            // SELECT first so that each connection acquires a shared (read) lock
tx1.query_row("SELECT x FROM foo LIMIT 1", [], |_| Ok(()))?;
tx2.query_row("SELECT x FROM foo LIMIT 1", [], |_| Ok(()))?;
tx1.execute("INSERT INTO foo VALUES(?1)", [1])?;
let _ = tx2.execute("INSERT INTO foo VALUES(?1)", [2]);
let _ = tx1.commit();
let _ = tx2.commit();
}
let _ = db1.transaction().expect("commit should have closed transaction");
let _ = db2.transaction().expect("commit should have closed transaction");
Ok(())
}
#[test]
fn test_persistence() -> Result<()> {
let temp_dir = tempfile::tempdir().unwrap();
let path = temp_dir.path().join("test.db3");
{
let db = Connection::open(&path)?;
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER);
INSERT INTO foo VALUES(42);
END;";
db.execute_batch(sql)?;
}
let path_string = path.to_str().unwrap();
let db = Connection::open(&path_string)?;
let the_answer: Result<i64> = db.query_row("SELECT x FROM foo", [], |r| r.get(0));
assert_eq!(42i64, the_answer?);
Ok(())
}
#[test]
fn test_open() {
let con = Connection::open_in_memory();
if con.is_err() {
panic!("open error {}", con.unwrap_err());
}
assert!(Connection::open_in_memory().is_ok());
let db = checked_memory_handle();
assert!(db.close().is_ok());
let _ = checked_memory_handle();
let _ = checked_memory_handle();
}
#[test]
fn test_open_failure() -> Result<()> {
let filename = "no_such_file.db";
let result =
Connection::open_with_flags(filename, Config::default().access_mode(config::AccessMode::ReadOnly)?);
        assert!(result.is_err());
let err = result.err().unwrap();
if let Error::DuckDBFailure(_e, Some(msg)) = err {
// TODO: update error code
// assert_eq!(ErrorCode::CannotOpen, e.code);
assert!(
msg.contains(filename),
"error message '{}' does not contain '{}'",
msg,
filename
);
} else {
panic!("DuckDBFailure expected");
}
Ok(())
}
#[cfg(unix)]
#[test]
fn test_invalid_unicode_file_names() -> Result<()> {
use std::ffi::OsStr;
use std::fs::File;
use std::os::unix::ffi::OsStrExt;
let temp_dir = tempfile::tempdir().unwrap();
let path = temp_dir.path();
if File::create(path.join(OsStr::from_bytes(&[0xFE]))).is_err() {
// Skip test, filesystem doesn't support invalid Unicode
return Ok(());
}
let db_path = path.join(OsStr::from_bytes(&[0xFF]));
{
let db = Connection::open(&db_path)?;
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER);
INSERT INTO foo VALUES(42);
END;";
db.execute_batch(sql)?;
}
let db = Connection::open(&db_path)?;
let the_answer: Result<i64> = db.query_row("SELECT x FROM foo", [], |r| r.get(0));
assert_eq!(42i64, the_answer?);
Ok(())
}
#[test]
fn test_close_always_ok() -> Result<()> {
let db = checked_memory_handle();
        // TODO: prepare a query but don't execute it
db.close().unwrap();
Ok(())
}
#[test]
fn test_execute_batch() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER);
INSERT INTO foo VALUES(1);
INSERT INTO foo VALUES(2);
INSERT INTO foo VALUES(3);
INSERT INTO foo VALUES(4);
END;";
db.execute_batch(sql)?;
db.execute_batch("UPDATE foo SET x = 3 WHERE x < 3")?;
assert!(db.execute_batch("INVALID SQL").is_err());
Ok(())
}
#[test]
fn test_execute_single() -> Result<()> {
let db = checked_memory_handle();
db.execute_batch("CREATE TABLE foo(x INTEGER)")?;
assert_eq!(
3,
db.execute("INSERT INTO foo(x) VALUES (?), (?), (?)", [1i32, 2i32, 3i32])?
);
assert_eq!(1, db.execute("INSERT INTO foo(x) VALUES (?)", [4i32])?);
assert_eq!(
10i32,
db.query_row::<i32, _, _>("SELECT SUM(x) FROM foo", [], |r| r.get(0))?
);
Ok(())
}
#[test]
fn test_prepare_column_names() -> Result<()> {
let db = checked_memory_handle();
db.execute_batch("CREATE TABLE foo(x INTEGER);")?;
let mut stmt = db.prepare("SELECT * FROM foo")?;
stmt.execute([])?;
assert_eq!(stmt.column_count(), 1);
assert_eq!(stmt.column_names(), vec!["x"]);
let mut stmt = db.prepare("SELECT x AS a, x AS b FROM foo")?;
stmt.execute([])?;
assert_eq!(stmt.column_count(), 2);
assert_eq!(stmt.column_names(), vec!["a", "b"]);
Ok(())
}
#[test]
fn test_prepare_execute() -> Result<()> {
let db = checked_memory_handle();
db.execute_batch("CREATE TABLE foo(x INTEGER);")?;
let mut insert_stmt = db.prepare("INSERT INTO foo(x) VALUES(?)")?;
assert_eq!(insert_stmt.execute([1i32])?, 1);
assert_eq!(insert_stmt.execute([2i32])?, 1);
assert_eq!(insert_stmt.execute([3i32])?, 1);
assert!(insert_stmt.execute(["hello"]).is_err());
// NOTE: can't execute on errored stmt
// assert!(insert_stmt.execute(["goodbye"]).is_err());
// assert!(insert_stmt.execute([types::Null]).is_err());
let mut update_stmt = db.prepare("UPDATE foo SET x=? WHERE x<?")?;
assert_eq!(update_stmt.execute([3i32, 3i32])?, 2);
assert_eq!(update_stmt.execute([3i32, 3i32])?, 0);
assert_eq!(update_stmt.execute([8i32, 8i32])?, 3);
Ok(())
}
#[test]
fn test_prepare_query() -> Result<()> {
let db = checked_memory_handle();
db.execute_batch("CREATE TABLE foo(x INTEGER);")?;
let mut insert_stmt = db.prepare("INSERT INTO foo(x) VALUES(?)")?;
assert_eq!(insert_stmt.execute([1i32])?, 1);
assert_eq!(insert_stmt.execute([2i32])?, 1);
assert_eq!(insert_stmt.execute([3i32])?, 1);
let mut query = db.prepare("SELECT x FROM foo WHERE x < ? ORDER BY x DESC")?;
{
let mut rows = query.query([4i32])?;
let mut v = Vec::<i32>::new();
while let Some(row) = rows.next()? {
v.push(row.get(0)?);
}
assert_eq!(v, [3i32, 2, 1]);
}
{
let mut rows = query.query([3i32])?;
let mut v = Vec::<i32>::new();
while let Some(row) = rows.next()? {
v.push(row.get(0)?);
}
assert_eq!(v, [2i32, 1]);
}
Ok(())
}
#[test]
fn test_query_map() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y TEXT);
INSERT INTO foo VALUES(4, 'hello');
INSERT INTO foo VALUES(3, ', ');
INSERT INTO foo VALUES(2, 'world');
INSERT INTO foo VALUES(1, '!');
END;";
db.execute_batch(sql)?;
let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC")?;
let results: Result<Vec<String>> = query.query([])?.map(|row| row.get(1)).collect();
assert_eq!(results?.concat(), "hello, world!");
Ok(())
}
#[test]
fn test_query_row() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER);
INSERT INTO foo VALUES(1);
INSERT INTO foo VALUES(2);
INSERT INTO foo VALUES(3);
INSERT INTO foo VALUES(4);
END;";
db.execute_batch(sql)?;
assert_eq!(
10i64,
db.query_row::<i64, _, _>("SELECT SUM(x) FROM foo", [], |r| r.get(0))?
);
let result: Result<i64> = db.query_row("SELECT x FROM foo WHERE x > 5", [], |r| r.get(0));
match result.unwrap_err() {
Error::QueryReturnedNoRows => (),
err => panic!("Unexpected error {}", err),
}
let bad_query_result = db.query_row("NOT A PROPER QUERY; test123", [], |_| Ok(()));
assert!(bad_query_result.is_err());
Ok(())
}
#[test]
fn test_optional() -> Result<()> {
let db = checked_memory_handle();
let result: Result<i64> = db.query_row("SELECT 1 WHERE 0 <> 0", [], |r| r.get(0));
let result = result.optional();
match result? {
None => (),
_ => panic!("Unexpected result"),
}
let result: Result<i64> = db.query_row("SELECT 1 WHERE 0 == 0", [], |r| r.get(0));
let result = result.optional();
match result? {
Some(1) => (),
_ => panic!("Unexpected result"),
}
let bad_query_result: Result<i64> = db.query_row("NOT A PROPER QUERY", [], |r| r.get(0));
let bad_query_result = bad_query_result.optional();
assert!(bad_query_result.is_err());
Ok(())
}
#[test]
fn test_prepare_failures() -> Result<()> {
let db = checked_memory_handle();
db.execute_batch("CREATE TABLE foo(x INTEGER);")?;
let _ = db.prepare("SELECT * FROM does_not_exist").unwrap_err();
// assert!(format!("{}", err).contains("does_not_exist"));
Ok(())
}
#[test]
fn test_is_autocommit() {
let db = checked_memory_handle();
assert!(db.is_autocommit(), "autocommit expected to be active by default");
}
#[test]
#[ignore = "not supported"]
fn test_statement_debugging() -> Result<()> {
let db = checked_memory_handle();
let query = "SELECT 12345";
let stmt = db.prepare(query)?;
assert!(format!("{:?}", stmt).contains(query));
Ok(())
}
#[test]
fn test_notnull_constraint_error() -> Result<()> {
let db = checked_memory_handle();
db.execute_batch("CREATE TABLE foo(x TEXT NOT NULL)")?;
let result = db.execute("INSERT INTO foo (x) VALUES (NULL)", []);
assert!(result.is_err());
match result.unwrap_err() {
Error::DuckDBFailure(err, _) => {
                // TODO(wangfenjin): Update error code
assert_eq!(err.code, ErrorCode::Unknown);
}
err => panic!("Unexpected error {}", err),
}
Ok(())
}
#[test]
fn test_clone() -> Result<()> {
let owned_con = checked_memory_handle();
{
let cloned_con = owned_con.clone();
cloned_con.execute_batch("PRAGMA VERSION")?;
}
owned_con.close().unwrap();
Ok(())
}
mod query_and_then_tests {
use super::*;
#[derive(Debug)]
enum CustomError {
SomeError,
Sqlite(Error),
}
impl fmt::Display for CustomError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match *self {
CustomError::SomeError => write!(f, "my custom error"),
CustomError::Sqlite(ref se) => write!(f, "my custom error: {}", se),
}
}
}
impl StdError for CustomError {
fn description(&self) -> &str {
"my custom error"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
CustomError::SomeError => None,
CustomError::Sqlite(ref se) => Some(se),
}
}
}
impl From<Error> for CustomError {
fn from(se: Error) -> CustomError {
CustomError::Sqlite(se)
}
}
type CustomResult<T> = Result<T, CustomError>;
#[test]
fn test_query_and_then() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y TEXT);
INSERT INTO foo VALUES(4, 'hello');
INSERT INTO foo VALUES(3, ', ');
INSERT INTO foo VALUES(2, 'world');
INSERT INTO foo VALUES(1, '!');
END;";
db.execute_batch(sql)?;
let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC")?;
let results: Result<Vec<String>> = query.query_and_then([], |row| row.get(1))?.collect();
assert_eq!(results?.concat(), "hello, world!");
Ok(())
}
#[test]
fn test_query_and_then_fails() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y TEXT);
INSERT INTO foo VALUES(4, 'hello');
INSERT INTO foo VALUES(3, ', ');
INSERT INTO foo VALUES(2, 'world');
INSERT INTO foo VALUES(1, '!');
END;";
db.execute_batch(sql)?;
let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC")?;
let bad_type: Result<Vec<f64>> = query.query_and_then([], |row| row.get(1))?.collect();
match bad_type.unwrap_err() {
Error::InvalidColumnType(..) => (),
err => panic!("Unexpected error {}", err),
}
let bad_idx: Result<Vec<String>> = query.query_and_then([], |row| row.get(3))?.collect();
match bad_idx.unwrap_err() {
Error::InvalidColumnIndex(_) => (),
err => panic!("Unexpected error {}", err),
}
Ok(())
}
#[test]
fn test_query_and_then_custom_error() -> CustomResult<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y TEXT);
INSERT INTO foo VALUES(4, 'hello');
INSERT INTO foo VALUES(3, ', ');
INSERT INTO foo VALUES(2, 'world');
INSERT INTO foo VALUES(1, '!');
END;";
db.execute_batch(sql)?;
let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC")?;
let results: CustomResult<Vec<String>> = query
.query_and_then([], |row| row.get(1).map_err(CustomError::Sqlite))?
.collect();
assert_eq!(results?.concat(), "hello, world!");
Ok(())
}
#[test]
fn test_query_and_then_custom_error_fails() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y TEXT);
INSERT INTO foo VALUES(4, 'hello');
INSERT INTO foo VALUES(3, ', ');
INSERT INTO foo VALUES(2, 'world');
INSERT INTO foo VALUES(1, '!');
END;";
db.execute_batch(sql)?;
let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC")?;
let bad_type: CustomResult<Vec<f64>> = query
.query_and_then([], |row| row.get(1).map_err(CustomError::Sqlite))?
.collect();
match bad_type.unwrap_err() {
CustomError::Sqlite(Error::InvalidColumnType(..)) => (),
err => panic!("Unexpected error {}", err),
}
let bad_idx: CustomResult<Vec<String>> = query
.query_and_then([], |row| row.get(3).map_err(CustomError::Sqlite))?
.collect();
match bad_idx.unwrap_err() {
CustomError::Sqlite(Error::InvalidColumnIndex(_)) => (),
err => panic!("Unexpected error {}", err),
}
let non_sqlite_err: CustomResult<Vec<String>> =
query.query_and_then([], |_| Err(CustomError::SomeError))?.collect();
match non_sqlite_err.unwrap_err() {
CustomError::SomeError => (),
err => panic!("Unexpected error {}", err),
}
Ok(())
}
#[test]
fn test_query_row_and_then_custom_error() -> CustomResult<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y TEXT);
INSERT INTO foo VALUES(4, 'hello');
END;";
db.execute_batch(sql)?;
let query = "SELECT x, y FROM foo ORDER BY x DESC";
let results: CustomResult<String> =
db.query_row_and_then(query, [], |row| row.get(1).map_err(CustomError::Sqlite));
assert_eq!(results?, "hello");
Ok(())
}
#[test]
fn test_query_row_and_then_custom_error_fails() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y TEXT);
INSERT INTO foo VALUES(4, 'hello');
END;";
db.execute_batch(sql)?;
let query = "SELECT x, y FROM foo ORDER BY x DESC";
let bad_type: CustomResult<f64> =
db.query_row_and_then(query, [], |row| row.get(1).map_err(CustomError::Sqlite));
match bad_type.unwrap_err() {
CustomError::Sqlite(Error::InvalidColumnType(..)) => (),
err => panic!("Unexpected error {}", err),
}
let bad_idx: CustomResult<String> =
db.query_row_and_then(query, [], |row| row.get(3).map_err(CustomError::Sqlite));
match bad_idx.unwrap_err() {
CustomError::Sqlite(Error::InvalidColumnIndex(_)) => (),
err => panic!("Unexpected error {}", err),
}
let non_sqlite_err: CustomResult<String> =
db.query_row_and_then(query, [], |_| Err(CustomError::SomeError));
match non_sqlite_err.unwrap_err() {
CustomError::SomeError => (),
err => panic!("Unexpected error {}", err),
}
Ok(())
}
}
#[test]
fn test_dynamic() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER, y TEXT);
INSERT INTO foo VALUES(4, 'hello');
END;";
db.execute_batch(sql)?;
db.query_row("SELECT * FROM foo", [], |r| {
assert_eq!(2, r.as_ref().column_count());
Ok(())
})
}
#[test]
fn test_dyn_box() -> Result<()> {
let db = checked_memory_handle();
db.execute_batch("CREATE TABLE foo(x INTEGER);")?;
let b: Box<dyn ToSql> = Box::new(5);
db.execute("INSERT INTO foo VALUES(?)", [b])?;
db.query_row("SELECT x FROM foo", [], |r| {
assert_eq!(5, r.get_unwrap::<_, i32>(0));
Ok(())
})
}
#[test]
fn test_alter_table() -> Result<()> {
let db = checked_memory_handle();
db.execute_batch("CREATE TABLE x(t INTEGER);")?;
// `execute_batch` should be used but `execute` should also work
db.execute("ALTER TABLE x RENAME TO y;", [])?;
Ok(())
}
#[test]
fn test_query_arrow_record_batch_small() -> Result<()> {
let db = checked_memory_handle();
let sql = "BEGIN TRANSACTION;
CREATE TABLE test(t INTEGER);
INSERT INTO test VALUES (1); INSERT INTO test VALUES (2); INSERT INTO test VALUES (3); INSERT INTO test VALUES (4); INSERT INTO test VALUES (5);
END TRANSACTION;";
db.execute_batch(sql)?;
let mut stmt = db.prepare("select t from test order by t desc")?;
let mut arr = stmt.query_arrow([])?;
let rb = arr.next().unwrap();
let column = rb.column(0).as_any().downcast_ref::<Int32Array>().unwrap();
assert_eq!(column.len(), 5);
assert_eq!(column.value(0), 5);
assert_eq!(column.value(1), 4);
assert_eq!(column.value(2), 3);
assert_eq!(column.value(3), 2);
assert_eq!(column.value(4), 1);
assert!(arr.next().is_none());
Ok(())
}
#[test]
fn test_query_arrow_record_batch_large() -> Result<()> {
let db = checked_memory_handle();
db.execute_batch("BEGIN TRANSACTION")?;
db.execute_batch("CREATE TABLE test(t INTEGER);")?;
for _ in 0..300 {
db.execute_batch("INSERT INTO test VALUES (1); INSERT INTO test VALUES (2); INSERT INTO test VALUES (3); INSERT INTO test VALUES (4); INSERT INTO test VALUES (5);")?;
}
db.execute_batch("END TRANSACTION")?;
let rbs: Vec<RecordBatch> = db.prepare("select t from test order by t")?.query_arrow([])?.collect();
assert_eq!(rbs.len(), 2);
assert_eq!(rbs.iter().map(|rb| rb.num_rows()).sum::<usize>(), 1500);
assert_eq!(
rbs.iter()
.map(|rb| rb
.column(0)
.as_any()
.downcast_ref::<Int32Array>()
.unwrap()
.iter()
.map(|i| i.unwrap())
.sum::<i32>())
.sum::<i32>(),
4500
);
Ok(())
}
#[test]
fn test_database_name_to_string() -> Result<()> {
assert_eq!(DatabaseName::Main.to_string(), "main");
assert_eq!(DatabaseName::Temp.to_string(), "temp");
assert_eq!(DatabaseName::Attached("abc").to_string(), "abc");
Ok(())
}
}
| 32.563518 | 178 | 0.520756 |
f56d4a31820cc13aa1fa8ed04faa199bb6694f0e | 1,912 | use super::routing;
use super::server;
use super::{Request, Response};
use hyper::http::Extensions;
use hyper::Body;
use std::future::Future;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
pub struct ResponderFactory {
router: Arc<routing::Router<Request, Response>>,
}
impl ResponderFactory {
pub fn with_router(router: routing::Router<Request, Response>) -> Self {
Self {
router: Arc::new(router),
}
}
pub fn and_extensions(self, extensions: Extensions) -> ResponderFactorySecondPart {
ResponderFactorySecondPart {
router: self.router,
extensions: Arc::new(extensions),
}
}
}
pub struct ResponderFactorySecondPart {
router: Arc<routing::Router<Request, Response>>,
extensions: Arc<Extensions>,
}
impl server::ResponderFactory for ResponderFactorySecondPart {
type Responder = Responder;
fn make_responder(&self, remote_addr: SocketAddr) -> Self::Responder {
Responder {
remote_addr,
router: self.router.clone(),
extensions: self.extensions.clone(),
}
}
}
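// A hedged usage sketch (assumes an existing `routing::Router` value named
// `router`; not part of the original file):
//
// let factory = ResponderFactory::with_router(router)
//     .and_extensions(Extensions::new());
//
// The two-step builder pins the router first and then freezes the shared
// `Extensions`; both live behind an `Arc`, so `make_responder` only clones
// cheap handles for each accepted connection.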
pub struct Responder {
remote_addr: SocketAddr,
router: Arc<routing::Router<Request, Response>>,
extensions: Arc<Extensions>,
}
impl server::Responder for Responder {
type ResponseFuture = Pin<Box<dyn Future<Output = hyper::Response<Body>> + Send>>;
fn response(&mut self, http_request: hyper::Request<Body>) -> Self::ResponseFuture {
let remote_addr = self.remote_addr;
let router = self.router.clone();
let extensions = self.extensions.clone();
Box::pin(async move {
let request = Request {
remote_addr,
extensions,
http: http_request,
};
let response = router.process(request).await;
response.http
})
}
}
| 26.929577 | 88 | 0.624477 |
8ae94334ea0290fb49983d8aa93a6ff72c84bf9a | 6,958 | use std::fmt::{Display, Error, Formatter};
use std::slice::Iter;
use std::str::FromStr;
use std::{ops, write};
#[derive(Clone)]
enum Expr {
Var,
Num(f64),
Add(Box<Expr>, Box<Expr>),
Sub(Box<Expr>, Box<Expr>),
Mul(Box<Expr>, Box<Expr>),
Div(Box<Expr>, Box<Expr>),
Pow(Box<Expr>, Box<Expr>),
Cos(Box<Expr>),
Sin(Box<Expr>),
Tan(Box<Expr>),
Exp(Box<Expr>),
Ln(Box<Expr>),
}
impl FromStr for Expr {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
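        // Recursive-descent parser over prefix notation such as "(+ x 3)".
        // Parentheses and whitespace fall through to the catch-all arm; after
        // reading a number, one delimiter byte is consumed as lookahead, which
        // is harmless because tokens are always separated by a non-digit.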
fn parse(iter: &mut Iter<u8>) -> Expr {
while let Some(c) = iter.next() {
match c {
b'x' => return Expr::Var,
b'0'..=b'9' => {
let mut num = (c - b'0') as i64;
while let Some(c @ b'0'..=b'9') = iter.next() {
num = num * 10 + (c - b'0') as i64;
}
return Expr::Num(num as f64);
}
b'+' => return parse(iter) + parse(iter),
b'-' => {
if let Some(c @ b'0'..=b'9') = iter.next() {
let mut num = (c - b'0') as i64;
while let Some(c @ b'0'..=b'9') = iter.next() {
num = num * 10 + (c - b'0') as i64;
}
return Expr::Num(-num as f64);
} else {
return parse(iter) - parse(iter);
}
}
b'*' => return parse(iter) * parse(iter),
b'/' => return parse(iter) / parse(iter),
b'^' => return parse(iter).pow(parse(iter)),
b'c' => {
iter.next();
iter.next();
return parse(iter).cos();
}
b's' => {
iter.next();
iter.next();
return parse(iter).sin();
}
b't' => {
iter.next();
iter.next();
return parse(iter).tan();
}
b'e' => {
iter.next();
iter.next();
return parse(iter).exp();
}
b'l' => {
iter.next();
return parse(iter).ln();
}
_ => continue,
}
}
unreachable!()
}
        Ok(parse(&mut s.as_bytes().iter()))
}
}
impl Display for Expr {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
Expr::Var => write!(f, "x"),
Expr::Num(n) => write!(f, "{}", n),
Expr::Add(a, b) => write!(f, "(+ {} {})", a, b),
Expr::Sub(a, b) => write!(f, "(- {} {})", a, b),
Expr::Mul(a, b) => write!(f, "(* {} {})", a, b),
Expr::Div(a, b) => write!(f, "(/ {} {})", a, b),
Expr::Pow(a, b) => write!(f, "(^ {} {})", a, b),
Expr::Cos(a) => write!(f, "(cos {})", a),
Expr::Sin(a) => write!(f, "(sin {})", a),
Expr::Tan(a) => write!(f, "(tan {})", a),
Expr::Exp(a) => write!(f, "(exp {})", a),
Expr::Ln(a) => write!(f, "(ln {})", a),
}
}
}
impl ops::Add for Expr {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Expr::Num(a), Expr::Num(b)) => Expr::Num(a + b),
(a, b) if a.eq_f64(0.) => b,
(a, b) if b.eq_f64(0.) => a,
(a, b) => Expr::Add(Box::new(a), Box::new(b)),
}
}
}
impl ops::Sub for Expr {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Expr::Num(a), Expr::Num(b)) => Expr::Num(a - b),
(a, b) if b.eq_f64(0.) => a,
(a, b) => Expr::Sub(Box::new(a), Box::new(b)),
}
}
}
impl ops::Mul for Expr {
type Output = Self;
fn mul(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Expr::Num(a), Expr::Num(b)) => Expr::Num(a * b),
(a, b) if a.eq_f64(0.) || b.eq_f64(1.) => a,
(a, b) if a.eq_f64(1.) || b.eq_f64(0.) => b,
(a, b) => Expr::Mul(Box::new(a), Box::new(b)),
}
}
}
impl ops::Div for Expr {
type Output = Self;
fn div(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Expr::Num(a), Expr::Num(b)) => Expr::Num(a / b),
(a, b) if a.eq_f64(0.) || b.eq_f64(1.) => a,
(a, b) => Expr::Div(Box::new(a), Box::new(b)),
}
}
}
impl ops::Neg for Expr {
type Output = Self;
fn neg(self) -> Self::Output {
Expr::Mul(Box::new(Expr::Num(-1.)), Box::new(self))
}
}
impl Expr {
fn pow(self, rhs: Self) -> Self {
match (self, rhs) {
(Expr::Num(a), Expr::Num(b)) => Expr::Num(a.powf(b)),
(_, b) if b.eq_f64(0.) => Expr::Num(1.),
(a, b) if a.eq_f64(1.) || b.eq_f64(1.) => a,
(a, b) => Expr::Pow(Box::new(a), Box::new(b)),
}
}
fn powf(self, rhs: f64) -> Self {
self.pow(Expr::Num(rhs))
}
fn cos(self) -> Self {
Expr::Cos(Box::new(self))
}
fn sin(self) -> Self {
Expr::Sin(Box::new(self))
}
fn tan(self) -> Self {
Expr::Tan(Box::new(self))
}
fn exp(self) -> Self {
Expr::Exp(Box::new(self))
}
fn ln(self) -> Self {
Expr::Ln(Box::new(self))
}
fn eq_f64(&self, rhs: f64) -> bool {
match self {
Expr::Num(n) => (n - rhs).abs() < f64::EPSILON,
_ => false,
}
}
fn diff(&self) -> Self {
match self {
Expr::Var => Expr::Num(1.),
Expr::Num(_) => Expr::Num(0.),
Expr::Add(a, b) => a.diff() + b.diff(),
Expr::Sub(a, b) => a.diff() - b.diff(),
Expr::Mul(a, b) => a.diff() * *b.clone() + *a.clone() * b.diff(),
Expr::Div(a, b) => (a.diff() * *b.clone() - *a.clone() * b.diff()) / b.clone().powf(2.),
Expr::Pow(a, b) => a.diff() * (*b.clone() * a.clone().pow(*b.clone() - Expr::Num(1.))),
Expr::Cos(a) => a.diff() * -a.clone().sin(),
Expr::Sin(a) => a.diff() * a.clone().cos(),
Expr::Tan(a) => a.diff() * (Expr::Num(1.) + a.clone().tan().powf(2.)),
Expr::Exp(a) => a.diff() * a.clone().exp(),
Expr::Ln(a) => a.diff() * (Expr::Num(1.) / *a.clone()),
}
}
}
fn diff(expr_str: &str) -> String {
Expr::from_str(expr_str).unwrap().diff().to_string()
}
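// Sanity checks for the parser, simplifier and differentiator above. The
// expected strings are assumptions derived from the simplification rules
// implemented in this file (prefix syntax, e.g. "(+ x 3)").
#[cfg(test)]
mod tests {
    use super::diff;

    #[test]
    fn differentiates_and_simplifies() {
        // d/dx (x + 3) = 1
        assert_eq!(diff("(+ x 3)"), "1");
        // d/dx (x * x) = 1*x + x*1, which simplifies to x + x
        assert_eq!(diff("(* x x)"), "(+ x x)");
    }
}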
| 30.924444 | 100 | 0.371371 |
bb61590dd8955e98fb053a98b20671bcb32a6dd1 | 5,268 | //! Components for SPI.
//!
//! This provides three components.
//!
//! 1. `SpiMuxComponent` provides a virtualization layer for a SPI bus.
//!
//! 2. `SpiSyscallComponent` provides a system call interface to SPI.
//!
//! 3. `SpiComponent` provides a virtualized client to the SPI bus.
//!
//! `SpiSyscallComponent` is used for processes, while `SpiComponent` is used
//! for kernel capsules that need access to the SPI bus.
//!
//! Usage
//! -----
//! ```rust
//! let mux_spi = components::spi::SpiMuxComponent::new(&sam4l::spi::SPI).finalize(
//! components::spi_mux_component_helper!(sam4l::spi::SpiHw));
//! let spi_syscalls = SpiSyscallComponent::new(mux_spi, 3).finalize(
//! components::spi_syscalls_component_helper!(sam4l::spi::SpiHw));
//! let rf233_spi = SpiComponent::new(mux_spi, 3).finalize(
//! components::spi_component_helper!(sam4l::spi::SpiHw));
//! ```
// Author: Philip Levis <[email protected]>
// Last modified: 6/20/2018
use core::mem::MaybeUninit;
use capsules::spi::Spi;
use capsules::virtual_spi::{MuxSpiMaster, VirtualSpiMasterDevice};
use kernel::component::Component;
use kernel::hil::spi;
use kernel::static_init_half;
// Setup static space for the objects.
#[macro_export]
macro_rules! spi_mux_component_helper {
($S:ty) => {{
use core::mem::MaybeUninit;
static mut BUF: MaybeUninit<MuxSpiMaster<'static, $S>> = MaybeUninit::uninit();
&mut BUF
};};
}
#[macro_export]
macro_rules! spi_syscall_component_helper {
($S:ty) => {{
use capsules::spi::Spi;
use core::mem::MaybeUninit;
static mut BUF1: MaybeUninit<VirtualSpiMasterDevice<'static, $S>> = MaybeUninit::uninit();
static mut BUF2: MaybeUninit<Spi<'static, VirtualSpiMasterDevice<'static, $S>>> =
MaybeUninit::uninit();
(&mut BUF1, &mut BUF2)
};};
}
#[macro_export]
macro_rules! spi_component_helper {
($S:ty) => {{
use core::mem::MaybeUninit;
static mut BUF: MaybeUninit<VirtualSpiMasterDevice<'static, $S>> = MaybeUninit::uninit();
&mut BUF
};};
}
pub struct SpiMuxComponent<S: 'static + spi::SpiMaster> {
spi: &'static S,
}
pub struct SpiSyscallComponent<S: 'static + spi::SpiMaster> {
spi_mux: &'static MuxSpiMaster<'static, S>,
chip_select: S::ChipSelect,
}
pub struct SpiComponent<S: 'static + spi::SpiMaster> {
spi_mux: &'static MuxSpiMaster<'static, S>,
chip_select: S::ChipSelect,
}
impl<S: 'static + spi::SpiMaster> SpiMuxComponent<S> {
pub fn new(spi: &'static S) -> Self {
        SpiMuxComponent { spi }
}
}
impl<S: 'static + spi::SpiMaster> Component for SpiMuxComponent<S> {
type StaticInput = &'static mut MaybeUninit<MuxSpiMaster<'static, S>>;
type Output = &'static MuxSpiMaster<'static, S>;
unsafe fn finalize(&mut self, static_buffer: Self::StaticInput) -> Self::Output {
let mux_spi = static_init_half!(
static_buffer,
MuxSpiMaster<'static, S>,
MuxSpiMaster::new(self.spi)
);
self.spi.set_client(mux_spi);
self.spi.init();
mux_spi
}
}
impl<S: 'static + spi::SpiMaster> SpiSyscallComponent<S> {
pub fn new(mux: &'static MuxSpiMaster<'static, S>, chip_select: S::ChipSelect) -> Self {
SpiSyscallComponent {
spi_mux: mux,
            chip_select,
}
}
}
impl<S: 'static + spi::SpiMaster> Component for SpiSyscallComponent<S> {
type StaticInput = (
&'static mut MaybeUninit<VirtualSpiMasterDevice<'static, S>>,
&'static mut MaybeUninit<Spi<'static, VirtualSpiMasterDevice<'static, S>>>,
);
type Output = &'static Spi<'static, VirtualSpiMasterDevice<'static, S>>;
unsafe fn finalize(&mut self, static_buffer: Self::StaticInput) -> Self::Output {
let syscall_spi_device = static_init_half!(
static_buffer.0,
VirtualSpiMasterDevice<'static, S>,
VirtualSpiMasterDevice::new(self.spi_mux, self.chip_select)
);
let spi_syscalls = static_init_half!(
static_buffer.1,
Spi<'static, VirtualSpiMasterDevice<'static, S>>,
Spi::new(syscall_spi_device)
);
static mut SPI_READ_BUF: [u8; 1024] = [0; 1024];
static mut SPI_WRITE_BUF: [u8; 1024] = [0; 1024];
spi_syscalls.config_buffers(&mut SPI_READ_BUF, &mut SPI_WRITE_BUF);
syscall_spi_device.set_client(spi_syscalls);
spi_syscalls
}
}
impl<S: 'static + spi::SpiMaster> SpiComponent<S> {
pub fn new(mux: &'static MuxSpiMaster<'static, S>, chip_select: S::ChipSelect) -> Self {
SpiComponent {
spi_mux: mux,
            chip_select,
}
}
}
impl<S: 'static + spi::SpiMaster> Component for SpiComponent<S> {
type StaticInput = &'static mut MaybeUninit<VirtualSpiMasterDevice<'static, S>>;
type Output = &'static VirtualSpiMasterDevice<'static, S>;
unsafe fn finalize(&mut self, static_buffer: Self::StaticInput) -> Self::Output {
let spi_device = static_init_half!(
static_buffer,
VirtualSpiMasterDevice<'static, S>,
VirtualSpiMasterDevice::new(self.spi_mux, self.chip_select)
);
spi_device
}
}
| 31.54491 | 98 | 0.643318 |
e4239a1a84623f750fb24fb9e6b0a56771b12ea5 | 1,984 | use ntex_mqtt::{v3, v5, MqttServer};
#[derive(Clone)]
struct Session;
#[derive(Debug)]
struct ServerError;
impl From<()> for ServerError {
fn from(_: ()) -> Self {
ServerError
}
}
impl std::convert::TryFrom<ServerError> for v5::PublishAck {
type Error = ServerError;
fn try_from(err: ServerError) -> Result<Self, Self::Error> {
Err(err)
}
}
async fn handshake_v3<Io>(
handshake: v3::Handshake<Io>,
) -> Result<v3::HandshakeAck<Io, Session>, ServerError> {
log::info!("new connection: {:?}", handshake);
Ok(handshake.ack(Session, false))
}
async fn publish_v3(publish: v3::Publish) -> Result<(), ServerError> {
log::info!("incoming publish: {:?} -> {:?}", publish.id(), publish.topic());
Ok(())
}
async fn handshake_v5<Io>(
handshake: v5::Handshake<Io>,
) -> Result<v5::HandshakeAck<Io, Session>, ServerError> {
log::info!("new connection: {:?}", handshake);
Ok(handshake.ack(Session))
}
async fn publish_v5(publish: v5::Publish) -> Result<v5::PublishAck, ServerError> {
log::info!("incoming publish: {:?} -> {:?}", publish.id(), publish.topic());
Ok(publish.ack())
}
#[ntex::main]
async fn main() -> std::io::Result<()> {
println!("{}", std::mem::size_of::<v5::codec::Publish>());
println!("{}", std::mem::size_of::<v5::codec::PublishProperties>());
println!("{}", std::mem::size_of::<v5::codec::Packet>());
println!("{}", std::mem::size_of::<v5::Handshake<ntex::rt::net::TcpStream>>());
println!("{}", std::mem::size_of::<v5::error::MqttError<()>>());
std::env::set_var("RUST_LOG", "ntex=trace,ntex_mqtt=trace,basic=trace");
env_logger::init();
ntex::server::Server::build()
.bind("mqtt", "127.0.0.1:1883", || {
MqttServer::new()
.v3(v3::MqttServer::new(handshake_v3).publish(publish_v3))
.v5(v5::MqttServer::new(handshake_v5).publish(publish_v5))
})?
.workers(1)
.run()
.await
}
| 29.61194 | 83 | 0.592238 |
29cadc52f4ef0849b95faec37caaf9ca80fb45c2 | 3,835 | // Copyright (c) 2019 Emmanuel Gil Peyrot <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
generate_element!(
/// Source element for the ssrc SDP attribute.
Source, "source", JINGLE_SSMA,
attributes: [
/// Maps to the ssrc-id parameter.
id: Required<String> = "ssrc",
],
children: [
/// List of attributes for this source.
parameters: Vec<Parameter> = ("parameter", JINGLE_SSMA) => Parameter,
/// ssrc-info for this source.
info: Option<SsrcInfo> = ("ssrc-info", JITSI_MEET) => SsrcInfo
]
);
impl Source {
/// Create a new SSMA Source element.
pub fn new(id: String) -> Source {
Source {
id,
parameters: Vec::new(),
info: None,
}
}
}
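// A hedged construction sketch (values are illustrative; the fields are the
// ones generated by `generate_element!` below):
//
// let mut source = Source::new("1656081975".to_owned());
// source.parameters.push(Parameter {
//     name: "cname".to_owned(),
//     value: Some("Yv/wvbCdsDW2Prgd".to_owned()),
// });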
generate_element!(
/// Parameter associated with a ssrc.
Parameter, "parameter", JINGLE_SSMA,
attributes: [
/// The name of the parameter.
name: Required<String> = "name",
/// The optional value of the parameter.
value: Option<String> = "value",
]
);
generate_element!(
/// ssrc-info associated with a ssrc.
SsrcInfo, "ssrc-info", JITSI_MEET,
attributes: [
/// The owner of the ssrc.
owner: Required<String> = "owner"
]
);
generate_element!(
/// Element grouping multiple ssrc.
Group, "ssrc-group", JINGLE_SSMA,
attributes: [
/// The semantics of this group.
semantics: Required<String> = "semantics",
],
children: [
/// The various ssrc concerned by this group.
sources: Vec<Source> = ("source", JINGLE_SSMA) => Source
]
);
#[cfg(test)]
mod tests {
use super::*;
use crate::Element;
use std::convert::TryFrom;
#[cfg(target_pointer_width = "32")]
#[test]
fn test_size() {
assert_size!(Source, 24);
assert_size!(Parameter, 24);
assert_size!(Group, 24);
}
#[cfg(target_pointer_width = "64")]
#[test]
fn test_size() {
assert_size!(Source, 48);
assert_size!(Parameter, 48);
assert_size!(Group, 48);
}
#[test]
fn parse_source() {
let elem: Element = "
<source ssrc='1656081975' xmlns='urn:xmpp:jingle:apps:rtp:ssma:0'>
<parameter name='cname' value='Yv/wvbCdsDW2Prgd'/>
<parameter name='msid' value='MLTJKIHilGn71fNQoszkQ4jlPTuS5vJyKVIv MLTJKIHilGn71fNQoszkQ4jlPTuS5vJyKVIva0'/>
</source>"
.parse()
.unwrap();
let mut ssrc = Source::try_from(elem).unwrap();
assert_eq!(ssrc.id, "1656081975");
assert_eq!(ssrc.parameters.len(), 2);
let parameter = ssrc.parameters.pop().unwrap();
assert_eq!(parameter.name, "msid");
assert_eq!(
parameter.value.unwrap(),
"MLTJKIHilGn71fNQoszkQ4jlPTuS5vJyKVIv MLTJKIHilGn71fNQoszkQ4jlPTuS5vJyKVIva0"
);
let parameter = ssrc.parameters.pop().unwrap();
assert_eq!(parameter.name, "cname");
assert_eq!(parameter.value.unwrap(), "Yv/wvbCdsDW2Prgd");
}
#[test]
fn parse_source_group() {
let elem: Element = "
<ssrc-group semantics='FID' xmlns='urn:xmpp:jingle:apps:rtp:ssma:0'>
<source ssrc='2301230316'/>
<source ssrc='386328120'/>
</ssrc-group>"
.parse()
.unwrap();
let mut group = Group::try_from(elem).unwrap();
assert_eq!(group.semantics, "FID");
assert_eq!(group.sources.len(), 2);
let source = group.sources.pop().unwrap();
assert_eq!(source.id, "386328120");
let source = group.sources.pop().unwrap();
assert_eq!(source.id, "2301230316");
}
}
| 29.274809 | 112 | 0.592177 |
872b4ec1a8f450750ba2f996fdb25e8399e15a73 | 11,561 | // Copyright 2020 - Nym Technologies SA <[email protected]>
// SPDX-License-Identifier: Apache-2.0
use crate::node::{
client_handling::websocket::message_receiver::MixMessageSender,
storage::{inboxes::ClientStorage, ClientLedger},
};
use futures::{
channel::{mpsc, oneshot},
StreamExt,
};
use gateway_requests::authentication::encrypted_address::EncryptedAddressBytes;
use gateway_requests::authentication::iv::AuthenticationIV;
use gateway_requests::registration::handshake::SharedKeys;
use log::*;
use nymsphinx::DestinationAddressBytes;
use std::collections::HashMap;
use tokio::task::JoinHandle;
pub(crate) type ClientsHandlerRequestSender = mpsc::UnboundedSender<ClientsHandlerRequest>;
pub(crate) type ClientsHandlerRequestReceiver = mpsc::UnboundedReceiver<ClientsHandlerRequest>;
pub(crate) type ClientsHandlerResponseSender = oneshot::Sender<ClientsHandlerResponse>;
// #[derive(Debug)]
pub(crate) enum ClientsHandlerRequest {
// client
Register(
DestinationAddressBytes,
SharedKeys,
MixMessageSender,
ClientsHandlerResponseSender,
),
Authenticate(
DestinationAddressBytes,
EncryptedAddressBytes,
AuthenticationIV,
MixMessageSender,
ClientsHandlerResponseSender,
),
Disconnect(DestinationAddressBytes),
// mix
IsOnline(DestinationAddressBytes, ClientsHandlerResponseSender),
}
#[derive(Debug)]
pub(crate) enum ClientsHandlerResponse {
Register(bool),
Authenticate(Option<SharedKeys>),
IsOnline(Option<MixMessageSender>),
Error(Box<dyn std::error::Error + Send + Sync>),
}
pub(crate) struct ClientsHandler {
open_connections: HashMap<DestinationAddressBytes, MixMessageSender>,
clients_ledger: ClientLedger,
clients_inbox_storage: ClientStorage,
}
impl ClientsHandler {
pub(crate) fn new(clients_ledger: ClientLedger, clients_inbox_storage: ClientStorage) -> Self {
ClientsHandler {
open_connections: HashMap::new(),
clients_ledger,
clients_inbox_storage,
}
}
fn make_error_response<E>(&self, err: E) -> ClientsHandlerResponse
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
ClientsHandlerResponse::Error(err.into())
}
// best effort sending error responses
fn send_error_response<E>(&self, err: E, res_channel: ClientsHandlerResponseSender)
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
if res_channel.send(self.make_error_response(err)).is_err() {
error!("Somehow we failed to send response back to websocket handler - there seem to be a weird bug present!");
}
}
async fn push_stored_messages_to_client_and_save_channel(
&mut self,
client_address: DestinationAddressBytes,
comm_channel: MixMessageSender,
) {
        // TODO: it is possible that during a small window some of the client's messages will be
        // "lost", i.e. stored on disk rather than pushed to the client. The reason is as follows:
        // here we push all stored messages from the client's inbox to its websocket connection;
        // however, if a new message for the client arrives at the same time, it gets stored on disk!
        // Only after this method exits do the mix receivers become aware of the client
        // connection being online and able to forward traffic there.
        //
        // possible solution: spawn a future to empty the inbox in X seconds rather than immediately
        // JS: I will most likely do that (including adding entries to the config, etc.) once the
        // basic version is up and running, so as not to waste time on it now
// NOTE: THIS IGNORES MESSAGE RETRIEVAL LIMIT AND TAKES EVERYTHING!
let all_stored_messages = match self
.clients_inbox_storage
.retrieve_all_client_messages(client_address)
.await
{
Ok(msgs) => msgs,
Err(e) => {
error!(
"failed to retrieve client messages. {:?} inbox might be corrupted now - {:?}",
client_address.to_base58_string(),
e
);
return;
}
};
let (messages, paths): (Vec<_>, Vec<_>) = all_stored_messages
.into_iter()
.map(|c| c.into_tuple())
.unzip();
if comm_channel.unbounded_send(messages).is_err() {
error!("Somehow we failed to stored messages to a fresh client channel - there seem to be a weird bug present!");
} else {
// but if all went well, we can now delete it
if let Err(e) = self.clients_inbox_storage.delete_files(paths).await {
error!(
"Failed to remove client ({:?}) files - {:?}",
client_address.to_base58_string(),
e
);
} else {
// finally, everything was fine - we retrieved everything, we deleted everything,
// we assume we can now safely delegate client message pushing
self.open_connections.insert(client_address, comm_channel);
}
}
}
async fn handle_register_request(
&mut self,
address: DestinationAddressBytes,
derived_shared_key: SharedKeys,
comm_channel: MixMessageSender,
res_channel: ClientsHandlerResponseSender,
) {
debug!(
"Processing register new client request: {:?}",
address.to_base58_string()
);
if self.open_connections.get(&address).is_some() {
warn!(
"Tried to process register request for a client with an already opened connection!"
);
self.send_error_response("duplicate connection detected", res_channel);
return;
}
if self
.clients_ledger
.insert_shared_key(derived_shared_key, address)
.unwrap()
.is_some()
{
info!(
"Client {:?} was already registered before!",
address.to_base58_string()
)
} else if let Err(e) = self.clients_inbox_storage.create_storage_dir(address).await {
error!("We failed to create inbox directory for the client -{:?}\nReverting stored shared key...", e);
// we must revert our changes if this operation failed
self.clients_ledger.remove_shared_key(&address).unwrap();
self.send_error_response("failed to complete issuing shared key", res_channel);
return;
}
self.push_stored_messages_to_client_and_save_channel(address, comm_channel)
.await;
if res_channel
.send(ClientsHandlerResponse::Register(true))
.is_err()
{
error!("Somehow we failed to send response back to websocket handler - there seem to be a weird bug present!");
}
}
async fn handle_authenticate_request(
&mut self,
address: DestinationAddressBytes,
encrypted_address: EncryptedAddressBytes,
iv: AuthenticationIV,
comm_channel: MixMessageSender,
res_channel: ClientsHandlerResponseSender,
) {
debug!(
"Processing authenticate client request: {:?}",
address.to_base58_string()
);
if self.open_connections.get(&address).is_some() {
warn!("Tried to process authenticate request for a client with an already opened connection!");
self.send_error_response("duplicate connection detected", res_channel);
return;
}
if self
.clients_ledger
.verify_shared_key(&address, &encrypted_address, &iv)
.unwrap()
{
            // The first unwrap is due to possible db read errors, but I'm not entirely sure when
            // the second one could happen.
let shared_key = self
.clients_ledger
.get_shared_key(&address)
.unwrap()
.unwrap();
self.push_stored_messages_to_client_and_save_channel(address, comm_channel)
.await;
if res_channel
.send(ClientsHandlerResponse::Authenticate(Some(shared_key)))
.is_err()
{
error!("Somehow we failed to send response back to websocket handler - there seem to be a weird bug present!");
}
} else if res_channel
.send(ClientsHandlerResponse::Authenticate(None))
.is_err()
{
error!("Somehow we failed to send response back to websocket handler - there seem to be a weird bug present!");
}
}
fn handle_disconnect(&mut self, address: DestinationAddressBytes) {
debug!(
"Processing disconnect client request: {:?}",
address.to_base58_string()
);
self.open_connections.remove(&address);
}
fn handle_is_online_request(
&self,
address: DestinationAddressBytes,
res_channel: ClientsHandlerResponseSender,
) {
debug!(
"Processing is online request for: {:?}",
address.to_base58_string()
);
let response_value = self.open_connections.get(&address).cloned();
// if this fails, it's a critical failure, because mix handlers should ALWAYS be online
res_channel
.send(ClientsHandlerResponse::IsOnline(response_value))
.unwrap();
}
pub(crate) async fn run(
&mut self,
mut request_receiver_channel: ClientsHandlerRequestReceiver,
) {
while let Some(request) = request_receiver_channel.next().await {
match request {
ClientsHandlerRequest::Register(
address,
derived_shared_key,
comm_channel,
res_channel,
) => {
self.handle_register_request(
address,
derived_shared_key,
comm_channel,
res_channel,
)
.await
}
ClientsHandlerRequest::Authenticate(
address,
encrypted_address,
iv,
comm_channel,
res_channel,
) => {
self.handle_authenticate_request(
address,
encrypted_address,
iv,
comm_channel,
res_channel,
)
.await
}
ClientsHandlerRequest::Disconnect(address) => self.handle_disconnect(address),
ClientsHandlerRequest::IsOnline(address, res_channel) => {
self.handle_is_online_request(address, res_channel)
}
};
}
error!("Something bad has happened and we stopped listening for requests!");
}
pub(crate) fn start(mut self) -> (JoinHandle<()>, ClientsHandlerRequestSender) {
let (sender, receiver) = mpsc::unbounded();
(
tokio::spawn(async move { self.run(receiver).await }),
sender,
)
}
}
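// A hedged wiring sketch (ledger and inbox-storage construction elided; not
// part of the original file):
//
// let handler = ClientsHandler::new(clients_ledger, client_storage);
// let (join_handle, request_sender) = handler.start();
// // `request_sender` is then cloned into the websocket and mix handlers,
// // which talk to this task through `ClientsHandlerRequest` messages paired
// // with oneshot response channels.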
| 36.470032 | 127 | 0.590087 |
f773ee0d3af0fd5b096f5d89314d67e4a2386285 | 135 | pub mod attachments;
pub mod autumn;
pub mod channel;
pub mod events;
pub mod id;
pub mod instance_data;
pub mod server;
pub mod user;
| 15 | 22 | 0.762963 |
215659418c50c32746dca8aa88fb8b810faf2efe | 3,743 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use std::io::SeekFrom;
use common_base::tokio;
use common_exception::ErrorCode;
use futures::AsyncReadExt;
use futures::AsyncSeekExt;
use rusoto_core::ByteStream;
use rusoto_core::Region;
use rusoto_s3::PutObjectRequest;
use rusoto_s3::S3Client;
use rusoto_s3::S3 as RusotoS3;
use crate::DataAccessor;
use crate::S3;
struct TestFixture {
region_name: String,
endpoint_url: String,
bucket_name: String,
test_key: String,
content: Vec<u8>,
}
impl TestFixture {
fn new(size: usize, key: String) -> Self {
let random_bytes: Vec<u8> = (0..size).map(|_| rand::random::<u8>()).collect();
Self {
region_name: "us-east-1".to_string(),
endpoint_url: "http://localhost:9000".to_string(),
bucket_name: "test-bucket".to_string(),
test_key: key,
content: random_bytes,
}
}
fn region(&self) -> Region {
Region::Custom {
name: self.region_name.clone(),
endpoint: self.endpoint_url.clone(),
}
}
fn data_accessor(&self) -> common_exception::Result<S3> {
S3::try_create(
self.region_name.as_str(),
self.endpoint_url.as_str(),
self.bucket_name.as_str(),
"",
"",
)
}
}
impl TestFixture {
async fn gen_test_obj(&self) -> common_exception::Result<()> {
let rusoto_client = S3Client::new(self.region());
let put_req = PutObjectRequest {
bucket: self.bucket_name.clone(),
key: self.test_key.clone(),
body: Some(ByteStream::from(self.content.clone())),
..Default::default()
};
rusoto_client
.put_object(put_req)
.await
.map(|_| ())
.map_err(|e| ErrorCode::DALTransportError(e.to_string()))
}
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[ignore]
async fn test_s3_input_stream_api() -> common_exception::Result<()> {
let test_key = "test_s3_input_stream".to_string();
let fixture = TestFixture::new(1024 * 10, test_key.clone());
fixture.gen_test_obj().await?;
let s3 = fixture.data_accessor()?;
let mut input = s3.get_input_stream(&test_key, None)?;
let mut buffer = vec![];
input.read_to_end(&mut buffer).await?;
assert_eq!(fixture.content, buffer);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[ignore]
async fn test_s3_input_stream_seek_api() -> common_exception::Result<()> {
let test_key = "test_s3_seek_stream_seek".to_string();
let fixture = TestFixture::new(1024 * 10, test_key.clone());
fixture.gen_test_obj().await?;
let s3 = fixture.data_accessor()?;
let mut input = s3.get_input_stream(&test_key, None)?;
let mut buffer = vec![];
input.seek(SeekFrom::Current(1)).await?;
input.read_to_end(&mut buffer).await?;
assert_eq!(fixture.content.len() - 1, buffer.len());
let r = input.seek(SeekFrom::End(0)).await?;
assert_eq!(fixture.content.len() as u64, r);
let r = input.seek(SeekFrom::End(1)).await;
assert!(r.is_err());
Ok(())
}
| 31.191667 | 86 | 0.631846 |
507e9b59bb355deaa2a1ec40c54a9c052413a0c2 | 20,913 | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use std::fmt::{self, Debug, Display, Formatter};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use engine_rocks::raw::ColumnFamilyOptions;
use engine_rocks::raw_util::CFOptions;
use engine_rocks::{RocksEngine as BaseRocksEngine, RocksEngineIterator};
use engine_traits::{CfName, CF_DEFAULT, CF_LOCK, CF_RAFT, CF_WRITE};
use engine_traits::{
Engines, IterOptions, Iterable, Iterator, KvEngine, Mutable, Peekable, ReadOptions, SeekKey,
WriteBatch, WriteBatchExt,
};
use kvproto::kvrpcpb::Context;
use tempfile::{Builder, TempDir};
use txn_types::{Key, Value};
use crate::storage::config::BlockCacheConfig;
use tikv_util::escape;
use tikv_util::worker::{Runnable, Scheduler, Worker};
use super::{
Callback, CbContext, Cursor, Engine, Error, ErrorInner, ExtCallback,
Iterator as EngineIterator, Modify, Result, ScanMode, SnapContext, Snapshot, WriteData,
};
pub use engine_rocks::RocksSnapshot;
const TEMP_DIR: &str = "";
enum Task {
Write(Vec<Modify>, Callback<()>),
Snapshot(Callback<Arc<RocksSnapshot>>),
Pause(Duration),
}
impl Display for Task {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match *self {
Task::Write(..) => write!(f, "write task"),
Task::Snapshot(_) => write!(f, "snapshot task"),
Task::Pause(_) => write!(f, "pause"),
}
}
}
struct Runner(Engines<BaseRocksEngine, BaseRocksEngine>);
impl Runnable for Runner {
type Task = Task;
fn run(&mut self, t: Task) {
match t {
Task::Write(modifies, cb) => {
cb((CbContext::new(), write_modifies(&self.0.kv, modifies)))
}
Task::Snapshot(cb) => cb((CbContext::new(), Ok(Arc::new(self.0.kv.snapshot())))),
Task::Pause(dur) => std::thread::sleep(dur),
}
}
}
struct RocksEngineCore {
    // only used for memory mode
temp_dir: Option<TempDir>,
worker: Worker,
}
impl Drop for RocksEngineCore {
fn drop(&mut self) {
self.worker.stop();
}
}
/// The RocksEngine is based on `RocksDB`.
///
/// This is intended for **testing use only**.
#[derive(Clone)]
pub struct RocksEngine {
core: Arc<Mutex<RocksEngineCore>>,
sched: Scheduler<Task>,
engines: Engines<BaseRocksEngine, BaseRocksEngine>,
not_leader: Arc<AtomicBool>,
}
impl RocksEngine {
pub fn new(
path: &str,
cfs: &[CfName],
cfs_opts: Option<Vec<CFOptions<'_>>>,
shared_block_cache: bool,
) -> Result<RocksEngine> {
info!("RocksEngine: creating for path"; "path" => path);
let (path, temp_dir) = match path {
TEMP_DIR => {
let td = Builder::new().prefix("temp-rocksdb").tempdir().unwrap();
(td.path().to_str().unwrap().to_owned(), Some(td))
}
_ => (path.to_owned(), None),
};
let worker = Worker::new("engine-rocksdb");
let db = Arc::new(engine_rocks::raw_util::new_engine(
&path, None, cfs, cfs_opts,
)?);
// It does not use the raft_engine, so it is ok to fill with the same
// rocksdb.
let mut kv_engine = BaseRocksEngine::from_db(db.clone());
let mut raft_engine = BaseRocksEngine::from_db(db);
kv_engine.set_shared_block_cache(shared_block_cache);
raft_engine.set_shared_block_cache(shared_block_cache);
let engines = Engines::new(kv_engine, raft_engine);
let sched = worker.start("engine-rocksdb", Runner(engines.clone()));
Ok(RocksEngine {
sched,
core: Arc::new(Mutex::new(RocksEngineCore { temp_dir, worker })),
not_leader: Arc::new(AtomicBool::new(false)),
engines,
})
}
pub fn trigger_not_leader(&self) {
self.not_leader.store(true, Ordering::SeqCst);
}
pub fn pause(&self, dur: Duration) {
self.sched.schedule(Task::Pause(dur)).unwrap();
}
pub fn engines(&self) -> Engines<BaseRocksEngine, BaseRocksEngine> {
self.engines.clone()
}
pub fn get_rocksdb(&self) -> BaseRocksEngine {
self.engines.kv.clone()
}
pub fn stop(&self) {
let core = self.core.lock().unwrap();
core.worker.stop();
}
}
impl Display for RocksEngine {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "RocksDB")
}
}
impl Debug for RocksEngine {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(
f,
"RocksDB [is_temp: {}]",
self.core.lock().unwrap().temp_dir.is_some()
)
}
}
/// A builder to build a temporary `RocksEngine`.
///
/// Only used for testing purposes.
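///
/// # Example
///
/// A minimal sketch (marked `ignore` since it needs the crate's test
/// context; the CF list below is illustrative):
///
/// ```ignore
/// let engine = TestEngineBuilder::new()
///     .cfs(&["cf"])
///     .build()
///     .unwrap();
/// ```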
#[must_use]
pub struct TestEngineBuilder {
path: Option<PathBuf>,
cfs: Option<Vec<CfName>>,
}
impl TestEngineBuilder {
pub fn new() -> Self {
Self {
path: None,
cfs: None,
}
}
/// Customize the data directory of the temporary engine.
///
/// By default, TEMP_DIR will be used.
pub fn path(mut self, path: impl AsRef<Path>) -> Self {
self.path = Some(path.as_ref().to_path_buf());
self
}
    /// Customize the CFs that the engine will have.
    ///
    /// By default, the engine will have all CFs.
pub fn cfs(mut self, cfs: impl AsRef<[CfName]>) -> Self {
self.cfs = Some(cfs.as_ref().to_vec());
self
}
/// Build a `RocksEngine`.
pub fn build(self) -> Result<RocksEngine> {
let cfg_rocksdb = crate::config::DbConfig::default();
self.build_with_cfg(&cfg_rocksdb)
}
pub fn build_with_cfg(self, cfg_rocksdb: &crate::config::DbConfig) -> Result<RocksEngine> {
let path = match self.path {
None => TEMP_DIR.to_owned(),
Some(p) => p.to_str().unwrap().to_owned(),
};
let cfs = self.cfs.unwrap_or_else(|| crate::storage::ALL_CFS.to_vec());
let cache = BlockCacheConfig::default().build_shared_cache();
let cfs_opts = cfs
.iter()
.map(|cf| match *cf {
CF_DEFAULT => {
CFOptions::new(CF_DEFAULT, cfg_rocksdb.defaultcf.build_opt(&cache, None))
}
CF_LOCK => CFOptions::new(CF_LOCK, cfg_rocksdb.lockcf.build_opt(&cache)),
CF_WRITE => CFOptions::new(CF_WRITE, cfg_rocksdb.writecf.build_opt(&cache, None)),
CF_RAFT => CFOptions::new(CF_RAFT, cfg_rocksdb.raftcf.build_opt(&cache)),
_ => CFOptions::new(*cf, ColumnFamilyOptions::new()),
})
.collect();
RocksEngine::new(&path, &cfs, Some(cfs_opts), cache.is_some())
}
}
/// Write modifications into a `BaseRocksEngine` instance.
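///
/// For illustration (marked `ignore`; the key and value are made up):
///
/// ```ignore
/// let m = Modify::Put(CF_DEFAULT, Key::from_raw(b"k"), b"v".to_vec());
/// write_modifies(&engine.get_rocksdb(), vec![m]).unwrap();
/// ```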
pub fn write_modifies(kv_engine: &BaseRocksEngine, modifies: Vec<Modify>) -> Result<()> {
fail_point!("rockskv_write_modifies", |_| Err(box_err!("write failed")));
let mut wb = kv_engine.write_batch();
for rev in modifies {
let res = match rev {
Modify::Delete(cf, k) => {
if cf == CF_DEFAULT {
trace!("RocksEngine: delete"; "key" => %k);
wb.delete(k.as_encoded())
} else {
trace!("RocksEngine: delete_cf"; "cf" => cf, "key" => %k);
wb.delete_cf(cf, k.as_encoded())
}
}
Modify::Put(cf, k, v) => {
if cf == CF_DEFAULT {
trace!("RocksEngine: put"; "key" => %k, "value" => escape(&v));
wb.put(k.as_encoded(), &v)
} else {
trace!("RocksEngine: put_cf"; "cf" => cf, "key" => %k, "value" => escape(&v));
wb.put_cf(cf, k.as_encoded(), &v)
}
}
Modify::DeleteRange(cf, start_key, end_key, notify_only) => {
trace!(
"RocksEngine: delete_range_cf";
"cf" => cf,
"start_key" => %start_key,
"end_key" => %end_key,
"notify_only" => notify_only,
);
if !notify_only {
wb.delete_range_cf(cf, start_key.as_encoded(), end_key.as_encoded())
} else {
Ok(())
}
}
};
// TODO: turn the error into an engine error.
if let Err(msg) = res {
return Err(box_err!("{}", msg));
}
}
wb.write()?;
Ok(())
}
impl Engine for RocksEngine {
type Snap = Arc<RocksSnapshot>;
type Local = BaseRocksEngine;
fn kv_engine(&self) -> BaseRocksEngine {
self.engines.kv.clone()
}
fn snapshot_on_kv_engine(&self, _: &[u8], _: &[u8]) -> Result<Self::Snap> {
self.snapshot(Default::default())
}
fn modify_on_kv_engine(&self, modifies: Vec<Modify>) -> Result<()> {
write_modifies(&self.engines.kv, modifies)
}
fn async_write(&self, ctx: &Context, batch: WriteData, cb: Callback<()>) -> Result<()> {
self.async_write_ext(ctx, batch, cb, None, None)
}
fn async_write_ext(
&self,
_: &Context,
batch: WriteData,
cb: Callback<()>,
proposed_cb: Option<ExtCallback>,
committed_cb: Option<ExtCallback>,
) -> Result<()> {
fail_point!("rockskv_async_write", |_| Err(box_err!("write failed")));
if batch.modifies.is_empty() {
return Err(Error::from(ErrorInner::EmptyRequest));
}
if let Some(cb) = proposed_cb {
cb();
}
if let Some(cb) = committed_cb {
cb();
}
box_try!(self.sched.schedule(Task::Write(batch.modifies, cb)));
Ok(())
}
fn async_snapshot(&self, _: SnapContext<'_>, cb: Callback<Self::Snap>) -> Result<()> {
fail_point!("rockskv_async_snapshot", |_| Err(box_err!(
"snapshot failed"
)));
let not_leader = {
let mut header = kvproto::errorpb::Error::default();
header.mut_not_leader().set_region_id(100);
header
};
fail_point!("rockskv_async_snapshot_not_leader", |_| {
Err(Error::from(ErrorInner::Request(not_leader.clone())))
});
if self.not_leader.load(Ordering::SeqCst) {
return Err(Error::from(ErrorInner::Request(not_leader)));
}
box_try!(self.sched.schedule(Task::Snapshot(cb)));
Ok(())
}
}
impl Snapshot for Arc<RocksSnapshot> {
type Iter = RocksEngineIterator;
fn get(&self, key: &Key) -> Result<Option<Value>> {
trace!("RocksSnapshot: get"; "key" => %key);
let v = self.get_value(key.as_encoded())?;
Ok(v.map(|v| v.to_vec()))
}
fn get_cf(&self, cf: CfName, key: &Key) -> Result<Option<Value>> {
trace!("RocksSnapshot: get_cf"; "cf" => cf, "key" => %key);
let v = self.get_value_cf(cf, key.as_encoded())?;
Ok(v.map(|v| v.to_vec()))
}
fn get_cf_opt(&self, opts: ReadOptions, cf: CfName, key: &Key) -> Result<Option<Value>> {
trace!("RocksSnapshot: get_cf"; "cf" => cf, "key" => %key);
let v = self.get_value_cf_opt(&opts, cf, key.as_encoded())?;
Ok(v.map(|v| v.to_vec()))
}
fn iter(&self, iter_opt: IterOptions, mode: ScanMode) -> Result<Cursor<Self::Iter>> {
trace!("RocksSnapshot: create iterator");
let prefix_seek = iter_opt.prefix_seek_used();
let iter = self.iterator_opt(iter_opt)?;
Ok(Cursor::new(iter, mode, prefix_seek))
}
fn iter_cf(
&self,
cf: CfName,
iter_opt: IterOptions,
mode: ScanMode,
) -> Result<Cursor<Self::Iter>> {
trace!("RocksSnapshot: create cf iterator");
let prefix_seek = iter_opt.prefix_seek_used();
let iter = self.iterator_cf_opt(cf, iter_opt)?;
Ok(Cursor::new(iter, mode, prefix_seek))
}
}
impl EngineIterator for RocksEngineIterator {
fn next(&mut self) -> Result<bool> {
Iterator::next(self).map_err(Error::from)
}
fn prev(&mut self) -> Result<bool> {
Iterator::prev(self).map_err(Error::from)
}
fn seek(&mut self, key: &Key) -> Result<bool> {
Iterator::seek(self, key.as_encoded().as_slice().into()).map_err(Error::from)
}
fn seek_for_prev(&mut self, key: &Key) -> Result<bool> {
Iterator::seek_for_prev(self, key.as_encoded().as_slice().into()).map_err(Error::from)
}
fn seek_to_first(&mut self) -> Result<bool> {
Iterator::seek(self, SeekKey::Start).map_err(Error::from)
}
fn seek_to_last(&mut self) -> Result<bool> {
Iterator::seek(self, SeekKey::End).map_err(Error::from)
}
fn valid(&self) -> Result<bool> {
Iterator::valid(self).map_err(Error::from)
}
fn key(&self) -> &[u8] {
Iterator::key(self)
}
fn value(&self) -> &[u8] {
Iterator::value(self)
}
}
#[cfg(test)]
mod tests {
use super::super::perf_context::PerfStatisticsInstant;
use super::super::tests::*;
use super::super::CfStatistics;
use super::*;
use txn_types::TimeStamp;
#[test]
fn test_rocksdb() {
let engine = TestEngineBuilder::new()
.cfs(TEST_ENGINE_CFS)
.build()
.unwrap();
        test_base_curd_options(&engine);
}
#[test]
fn test_rocksdb_linear() {
let engine = TestEngineBuilder::new()
.cfs(TEST_ENGINE_CFS)
.build()
.unwrap();
test_linear(&engine);
}
#[test]
fn test_rocksdb_statistic() {
let engine = TestEngineBuilder::new()
.cfs(TEST_ENGINE_CFS)
.build()
.unwrap();
test_cfs_statistics(&engine);
}
#[test]
fn rocksdb_reopen() {
let dir = tempfile::Builder::new()
.prefix("rocksdb_test")
.tempdir()
.unwrap();
{
let engine = TestEngineBuilder::new()
.path(dir.path())
.cfs(TEST_ENGINE_CFS)
.build()
.unwrap();
must_put_cf(&engine, "cf", b"k", b"v1");
}
{
let engine = TestEngineBuilder::new()
.path(dir.path())
.cfs(TEST_ENGINE_CFS)
.build()
.unwrap();
assert_has_cf(&engine, "cf", b"k", b"v1");
}
}
#[test]
fn test_rocksdb_perf_statistics() {
let engine = TestEngineBuilder::new()
.cfs(TEST_ENGINE_CFS)
.build()
.unwrap();
test_perf_statistics(&engine);
}
#[test]
fn test_max_skippable_internal_keys_error() {
let engine = TestEngineBuilder::new().build().unwrap();
must_put(&engine, b"foo", b"bar");
must_delete(&engine, b"foo");
must_put(&engine, b"foo1", b"bar1");
must_delete(&engine, b"foo1");
must_put(&engine, b"foo2", b"bar2");
let snapshot = engine.snapshot(Default::default()).unwrap();
let iter_opt = IterOptions::default().set_max_skippable_internal_keys(1);
let mut iter = snapshot.iter(iter_opt, ScanMode::Forward).unwrap();
let mut statistics = CfStatistics::default();
let res = iter.seek(&Key::from_raw(b"foo"), &mut statistics);
assert!(res.is_err());
assert!(res
.unwrap_err()
.to_string()
.contains("Result incomplete: Too many internal keys skipped"));
}
fn test_perf_statistics<E: Engine>(engine: &E) {
must_put(engine, b"foo", b"bar1");
must_put(engine, b"foo2", b"bar2");
must_put(engine, b"foo3", b"bar3"); // deleted
must_put(engine, b"foo4", b"bar4");
must_put(engine, b"foo42", b"bar42"); // deleted
must_put(engine, b"foo5", b"bar5"); // deleted
must_put(engine, b"foo6", b"bar6");
must_delete(engine, b"foo3");
must_delete(engine, b"foo42");
must_delete(engine, b"foo5");
let snapshot = engine.snapshot(Default::default()).unwrap();
let mut iter = snapshot
.iter(IterOptions::default(), ScanMode::Forward)
.unwrap();
let mut statistics = CfStatistics::default();
let perf_statistics = PerfStatisticsInstant::new();
iter.seek(&Key::from_raw(b"foo30"), &mut statistics)
.unwrap();
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 0);
let perf_statistics = PerfStatisticsInstant::new();
iter.near_seek(&Key::from_raw(b"foo55"), &mut statistics)
.unwrap();
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 2);
let perf_statistics = PerfStatisticsInstant::new();
iter.prev(&mut statistics);
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 2);
iter.prev(&mut statistics);
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 3);
iter.prev(&mut statistics);
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 3);
}
#[test]
fn test_prefix_seek_skip_tombstone() {
let engine = TestEngineBuilder::new().build().unwrap();
engine
.put_cf(
&Context::default(),
"write",
Key::from_raw(b"aoo").append_ts(TimeStamp::zero()),
b"ba".to_vec(),
)
.unwrap();
for key in &[
b"foo".to_vec(),
b"foo1".to_vec(),
b"foo2".to_vec(),
b"foo3".to_vec(),
] {
engine
.put_cf(
&Context::default(),
"write",
Key::from_raw(key).append_ts(TimeStamp::zero()),
b"bar".to_vec(),
)
.unwrap();
engine
.delete_cf(
&Context::default(),
"write",
Key::from_raw(key).append_ts(TimeStamp::zero()),
)
.unwrap();
}
engine
.put_cf(
&Context::default(),
"write",
Key::from_raw(b"foo4").append_ts(TimeStamp::zero()),
b"bar4".to_vec(),
)
.unwrap();
let snapshot = engine.snapshot(Default::default()).unwrap();
let iter_opt = IterOptions::default()
.use_prefix_seek()
.set_prefix_same_as_start(true);
let mut iter = snapshot
.iter_cf("write", iter_opt, ScanMode::Forward)
.unwrap();
let mut statistics = CfStatistics::default();
let perf_statistics = PerfStatisticsInstant::new();
iter.seek(
&Key::from_raw(b"aoo").append_ts(TimeStamp::zero()),
&mut statistics,
)
.unwrap();
assert_eq!(iter.valid().unwrap(), true);
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 0);
let perf_statistics = PerfStatisticsInstant::new();
iter.seek(
&Key::from_raw(b"foo").append_ts(TimeStamp::zero()),
&mut statistics,
)
.unwrap();
assert_eq!(iter.valid().unwrap(), false);
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 1);
let perf_statistics = PerfStatisticsInstant::new();
iter.seek(
&Key::from_raw(b"foo1").append_ts(TimeStamp::zero()),
&mut statistics,
)
.unwrap();
assert_eq!(iter.valid().unwrap(), false);
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 1);
let perf_statistics = PerfStatisticsInstant::new();
iter.seek(
&Key::from_raw(b"foo2").append_ts(TimeStamp::zero()),
&mut statistics,
)
.unwrap();
assert_eq!(iter.valid().unwrap(), false);
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 1);
let perf_statistics = PerfStatisticsInstant::new();
assert_eq!(
iter.seek(
&Key::from_raw(b"foo4").append_ts(TimeStamp::zero()),
&mut statistics
)
.unwrap(),
true
);
assert_eq!(iter.valid().unwrap(), true);
assert_eq!(
iter.key(&mut statistics),
Key::from_raw(b"foo4")
.append_ts(TimeStamp::zero())
.as_encoded()
.as_slice()
);
assert_eq!(perf_statistics.delta().0.internal_delete_skipped_count, 0);
}
}
| 32.173846 | 98 | 0.550614 |
e882b6e6ef76cd6529c919ae4d377ca85c6ce2d9 | 2,174 | //! Types for the *m.dummy* event.
use std::ops::{Deref, DerefMut};
use ruma_events_macros::BasicEventContent;
use ruma_serde::empty::Empty;
use serde::{Deserialize, Serialize};
use crate::BasicEvent;
/// This event type is used to indicate new Olm sessions for end-to-end encryption.
///
/// Typically it is encrypted as an *m.room.encrypted* event, then sent as a to-device event.
///
/// The event does not have any content associated with it. The sending client is expected to
/// send a key share request shortly after this message, causing the receiving client to process
/// this *m.dummy* event as the most recent event, and to use the key share request to set up the
/// session. The key share request and *m.dummy* combination should result in the original
/// sending client receiving keys over the newly established session.
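///
/// For instance, the whole event serializes to just a type and empty content
/// (mirroring the tests below):
///
/// ```ignore
/// let ev = DummyEvent { content: DummyEventContent(Empty) };
/// // serializes to: {"content": {}, "type": "m.dummy"}
/// ```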
pub type DummyEvent = BasicEvent<DummyEventContent>;
/// The payload for `DummyEvent`.
#[derive(Clone, Debug, Deserialize, Serialize, BasicEventContent)]
#[ruma_event(type = "m.dummy")]
pub struct DummyEventContent(pub Empty);
/// The to-device version of the payload for the `DummyEvent`.
pub type DummyToDeviceEventContent = DummyEventContent;
impl Deref for DummyEventContent {
type Target = Empty;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for DummyEventContent {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[cfg(test)]
mod tests {
use ruma_serde::Raw;
use serde_json::{from_value as from_json_value, json, to_value as to_json_value};
use super::{DummyEvent, DummyEventContent, Empty};
#[test]
fn serialization() {
let dummy_event = DummyEvent { content: DummyEventContent(Empty) };
let actual = to_json_value(dummy_event).unwrap();
let expected = json!({
"content": {},
"type": "m.dummy"
});
assert_eq!(actual, expected);
}
#[test]
fn deserialization() {
let json = json!({
"content": {},
"type": "m.dummy"
});
assert!(from_json_value::<Raw<DummyEvent>>(json).unwrap().deserialize().is_ok());
}
}
| 29.378378 | 96 | 0.668813 |
876e51d939adc8eb2cc0b5bffa9720b3ab50e626 | 4,258 | mod networker;
mod pipe_builder;
use glib::MainLoop;
use gstreamer_rtsp_server::{
RTSPMediaFactory, RTSPMediaFactoryExt, RTSPMountPointsExt, RTSPServer, RTSPServerExt,
RTSPServerExtManual,
};
use structopt::StructOpt;
use pipe_builder::{Encoder, Input, Pipe, VideoSize};
#[derive(Debug, StructOpt)]
#[structopt(
name = "rusty-engine",
about = "rusty-engine RTSP server",
rename_all = "kebab"
)]
struct Opt {
#[structopt(
short,
long = "fps",
help = "Video framerate (FPS)",
default_value = "30"
)]
framerate: u32,
#[structopt(short, long, help = "Video width", default_value = "320")]
width: u32,
#[structopt(short, long, help = "Video height", default_value = "240")]
height: u32,
#[structopt(
short,
long,
help = "Input mode to use",
required_unless = "list",
required_unless = "pipes-as-json",
possible_values(&["v4l2", "shmem", "rpi"])
)]
    input: Option<Input>, // `Option` looks redundant, but structopt's `required_unless` needs it; the flag is still effectively required at runtime
#[structopt(
short,
long,
help = "Input device to use",
default_value_if("input", Some("v4l2"), "/dev/video0"),
default_value_if("input", Some("shmem"), "/tmp/engineering")
)]
device: Option<String>,
#[structopt(
short,
long,
help = "Encoder to use. omx for Pi + USB camera, camera if you know it's available/Pi Camera Module, software any other time.",
possible_values(&["x264enc", "omx", "camera"]),
default_value = "x264enc",
default_value_if("input", Some("rpi"), "camera")
)]
encoder: Encoder,
#[structopt(long, help = "Pipelines to run as JSON.")]
pipes_as_json: Option<String>,
#[structopt(long, help = "List all input modes and exit.", group = "list")]
list_in: bool,
#[structopt(long, help = "List all encoders and exit.", group = "list")]
list_enc: bool,
#[structopt(flatten)]
net_opt: networker::NetOpt,
}
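// Example invocation (device path and sizes are illustrative):
//
//   rusty-engine --input v4l2 --device /dev/video0 --width 640 --height 480 --fps 30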
fn main() {
let opt = Opt::from_args();
if opt.list_in {
println!("Available input modes:");
for inp in Input::all() {
println!("{}", inp);
}
return;
}
if opt.list_enc {
println!("Available encoder modes:");
for enc in Encoder::all() {
println!("{}", enc);
}
return;
}
// set up basic Gst stuff
gstreamer::init().expect("GStreamer could not init!");
let loop_ = MainLoop::new(Option::None, false);
let server = RTSPServer::new();
server.set_service(&opt.net_opt.get_port().to_string());
let mounts = server
.get_mount_points()
.expect("Failed to get mount points");
if opt.pipes_as_json.is_some() {
let config = opt.pipes_as_json.unwrap();
        let pipes: Vec<Pipe> = serde_json::from_str(&config).expect("JSON could not be parsed!");
for pipe in pipes.iter() {
let factory = RTSPMediaFactory::new();
let pipe_str = pipe_builder::create_pipe(pipe);
println!("Pipeline constructed: {}", pipe_str);
factory.set_launch(&pipe_str);
factory.set_shared(true);
mounts.add_factory(pipe.url(), &factory);
}
} else {
// try to set up video size
let size = VideoSize::new(opt.width, opt.height, opt.framerate);
let device = opt.device.unwrap_or_default();
let mut input = opt.input.unwrap();
input = match input {
Input::Video4Linux(_) => Input::Video4Linux(device),
Input::SharedMemory(_) => Input::SharedMemory(device),
_ => input,
};
let encoder = opt.encoder;
let pipe = Pipe::new(input, encoder, size, String::from(opt.net_opt.get_url()));
let pipe_str = pipe_builder::create_pipe(&pipe);
println!("Pipeline constructed: {}", pipe_str);
let factory = RTSPMediaFactory::new();
factory.set_launch(&pipe_str);
factory.set_shared(true);
// set up mounts
mounts.add_factory(pipe.url(), &factory);
}
server.attach(Option::None);
println!("Starting loop...");
loop_.run();
}
| 33.527559 | 135 | 0.591592 |
79b6d38db1a23261ba7324e32a3cc9d679da8e7d | 584 | #![deny(unsafe_code)]
#![deny(warnings)]
#![no_main]
#![no_std]
extern crate cortex_m_rt as rt;
#[macro_use]
extern crate cortex_m;
extern crate panic_itm;
extern crate stm32f103xx_hal;
use rt::{entry, exception, ExceptionFrame};
#[entry]
fn main() -> ! {
let p = cortex_m::Peripherals::take().unwrap();
let mut itm = p.ITM;
iprintln!(&mut itm.stim[0], "Hello, world!");
loop {}
}
#[exception]
fn HardFault(ef: &ExceptionFrame) -> ! {
panic!("{:#?}", ef);
}
#[exception]
fn DefaultHandler(irqn: i16) {
panic!("Unhandled exception (IRQn = {})", irqn);
}
| 17.69697 | 52 | 0.631849 |
e27bed9589322fb31428ec0cf81267cd98332e80 | 386 | extern crate hangeul;
fn main() {
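    // Compose initial ㄱ + medial ㅏ + final ㅄ into the single syllable 값,
    // then decompose it and check we recover the same jamo.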
let cho = 'ㄱ';
let jung = 'ㅏ';
let jong = Some(&'ㅄ');
let composed = hangeul::compose_char(&cho, &jung, jong).unwrap();
assert_eq!(composed, '값');
let (cho2, jung2, jong2) = hangeul::decompose_char(&composed).unwrap();
assert_eq!(cho, cho2);
assert_eq!(jung, jung2);
assert_eq!(jong.unwrap(), &jong2.unwrap());
}
| 24.125 | 75 | 0.601036 |
6716e5c5fcdac2dc12dac8f75193395f29f05a26 | 5,142 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#![crate_name = "enclave1"]
#![crate_type = "staticlib"]
#![cfg_attr(not(target_env = "sgx"), no_std)]
#![cfg_attr(target_env = "sgx", feature(rustc_private))]
#![feature(llvm_asm)]
#[cfg(not(target_env = "sgx"))]
extern crate sgx_tstd as std;
#[cfg(not(target_env = "sgx"))]
extern crate minidow;
#[cfg(not(target_env = "sgx"))]
pub use minidow::MINIDOW_SECRET;
extern crate sgx_types;
use sgx_types::{
sgx_aes_gcm_128bit_tag_t, sgx_get_key, sgx_key_128bit_t, sgx_key_id_t, sgx_key_request_t,
sgx_rijndael128GCM_decrypt, sgx_rijndael128GCM_encrypt, sgx_self_report, sgx_status_t,
SGX_KEYPOLICY_MRENCLAVE, SGX_KEYSELECT_SEAL, SGX_SEAL_IV_SIZE, TSEAL_DEFAULT_FLAGSMASK,
TSEAL_DEFAULT_MISCMASK,
};
extern crate sgx_rand;
static mut PRIVATE_KEY: sgx_key_128bit_t = [0; 16];
#[no_mangle]
static SECRET: u64 = 0x1122334455667788;
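// ECALL used by the Spectre PoC: performs the speculative access at
// `measurement_array_addr + off` from inside the enclave (semantics assumed
// from the minidow crate).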
#[no_mangle]
pub unsafe extern "C" fn spectre_test(measurement_array_addr: u64, off: u64) {
minidow::access_memory_spectre(measurement_array_addr as usize, off as usize);
}
#[no_mangle]
pub unsafe extern "C" fn get_key_addr() -> *const sgx_key_128bit_t {
&PRIVATE_KEY as *const _
}
pub unsafe fn get_seal_key() -> (sgx_key_id_t, *const sgx_key_128bit_t) {
let rand = sgx_rand::random::<[u8; core::mem::size_of::<sgx_key_id_t>()]>();
let key_id = sgx_key_id_t { id: rand };
return (key_id, get_seal_key_from_key_id(key_id.clone()));
}
pub unsafe fn get_seal_key_from_key_id(key_id: sgx_key_id_t) -> *const sgx_key_128bit_t {
let report = sgx_self_report();
let mut key_req = sgx_key_request_t::default();
key_req.key_id = key_id;
    key_req.cpu_svn = (*report).body.cpu_svn;
key_req.config_svn = (*report).body.config_svn;
key_req.key_name = SGX_KEYSELECT_SEAL;
key_req.key_policy = SGX_KEYPOLICY_MRENCLAVE;
key_req.attribute_mask.flags = TSEAL_DEFAULT_FLAGSMASK;
key_req.attribute_mask.xfrm = 0;
key_req.misc_mask = TSEAL_DEFAULT_MISCMASK;
let success = sgx_get_key(&key_req as *const _, &mut PRIVATE_KEY as *mut _);
if success != sgx_status_t::SGX_SUCCESS {
#[cfg(not(target_env = "sgx"))]
std::println!("[enclave] couldn't get a key!");
}
#[cfg(not(target_env = "sgx"))]
std::println!(
"[enclave] Private key value: 0x{:x}",
core::mem::transmute::<sgx_key_128bit_t, u128>(PRIVATE_KEY)
);
return &PRIVATE_KEY as *const _;
}
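// Seals `buf` with a freshly derived, MRENCLAVE-bound key. As implemented
// below, the output layout is the AES-GCM ciphertext followed by the raw
// `sgx_key_id_t` used for key derivation; the GCM tag is written separately
// through `out_tag`.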
#[no_mangle]
pub unsafe extern "C" fn custom_seal_data(
buf: *const u8,
buf_size: u32,
out_buf: *mut u8,
out_size: *mut u32,
out_tag: *mut sgx_aes_gcm_128bit_tag_t,
) -> sgx_status_t {
let (key_id, key) = get_seal_key();
#[cfg(not(target_env = "sgx"))]
std::println!("[enclave] key_id in seal: {:?}", key_id.id);
let additional_data = &key_id.id[0] as *const u8;
let additional_data_len = core::mem::size_of::<sgx_key_id_t>() as u32;
*out_size = (buf_size as usize + additional_data_len as usize) as u32;
let status = sgx_rijndael128GCM_encrypt(
key,
buf,
buf_size,
out_buf,
&[0u8; SGX_SEAL_IV_SIZE] as *const u8,
SGX_SEAL_IV_SIZE as u32,
additional_data,
additional_data_len,
out_tag,
);
core::ptr::copy(
additional_data,
(out_buf as usize + buf_size as usize) as *mut u8,
additional_data_len as usize,
);
status
}
#[no_mangle]
pub unsafe extern "C" fn custom_unseal_data(
buf: *const u8,
buf_size: u32,
out_buf: *mut u8,
tag: *const sgx_aes_gcm_128bit_tag_t,
) -> sgx_status_t {
let key_id = (buf as usize + buf_size as usize) as *const sgx_key_id_t;
#[cfg(not(target_env = "sgx"))]
std::println!("[enclave] tag: {:?}", *tag);
#[cfg(not(target_env = "sgx"))]
std::println!("[enclave] key_id in unseal: {:?}", (*key_id).id);
let key = get_seal_key_from_key_id(*key_id);
let status = sgx_rijndael128GCM_decrypt(
key,
buf,
buf_size,
out_buf,
&[0_u8; SGX_SEAL_IV_SIZE] as *const u8,
SGX_SEAL_IV_SIZE as u32,
&(*key_id).id[0] as *const u8,
core::mem::size_of::<sgx_key_id_t>() as u32,
tag,
);
status
}
#[no_mangle]
pub extern "C" fn get_secret_addr() -> u64 {
return &SECRET as *const _ as u64;
}
| 30.790419 | 93 | 0.677752 |
c16bf229cbb5b88db575668fe6b2018cbd67a8ce | 6,107 | use super::traits::*;
use super::notify_fn::*;
use std::rc::*;
use std::sync::*;
use std::cell::*;
thread_local! {
static CURRENT_CONTEXT: RefCell<Option<BindingContext>> = RefCell::new(None);
}
///
/// Represents the dependencies of a binding context
///
#[derive(Clone)]
pub struct BindingDependencies {
    /// Set to true if any dependency has changed since it was registered with this object
recently_changed: Arc<Mutex<bool>>,
    /// The `when_changed` monitors that keep the `recently_changed` flag up to date
recent_change_monitors: Rc<RefCell<Vec<Box<dyn Releasable>>>>,
    /// The list of `Changeable` values that this context depends on
dependencies: Rc<RefCell<Vec<Box<dyn Changeable>>>>
}
impl BindingDependencies {
///
/// Creates a new binding dependencies object
///
pub fn new() -> BindingDependencies {
BindingDependencies {
recently_changed: Arc::new(Mutex::new(false)),
recent_change_monitors: Rc::new(RefCell::new(vec![])),
dependencies: Rc::new(RefCell::new(vec![]))
}
}
///
/// Adds a new dependency to this object
///
pub fn add_dependency<TChangeable: Changeable+'static>(&mut self, dependency: TChangeable) {
// Set the recently changed flag so that we can tell if the dependencies are already out of date before when_changed is called
let recently_changed = Arc::clone(&self.recently_changed);
let mut recent_change_monitors = self.recent_change_monitors.borrow_mut();
recent_change_monitors.push(dependency.when_changed(notify(move || { *recently_changed.lock().unwrap() = true; })));
// Add this dependency to the list
self.dependencies.borrow_mut().push(Box::new(dependency))
}
///
/// If the dependencies have not changed since they were registered, registers for changes
/// and returns a `Releasable`. If the dependencies are already different, returns `None`.
///
pub fn when_changed_if_unchanged(&self, what: Arc<dyn Notifiable>) -> Option<Box<dyn Releasable>> {
let mut to_release = vec![];
// Register with all of the dependencies
for dep in self.dependencies.borrow_mut().iter_mut() {
to_release.push(dep.when_changed(Arc::clone(&what)));
}
if *self.recently_changed.lock().unwrap() {
// If a value changed while we were building these dependencies, then immediately generate the notification
to_release.into_iter().for_each(|mut releasable| releasable.done());
// Nothing to release
None
} else {
// Otherwise, return the set of releasable values
Some(Box::new(to_release))
}
}
}
impl Changeable for BindingDependencies {
fn when_changed(&self, what: Arc<dyn Notifiable>) -> Box<dyn Releasable> {
let when_changed_or_not = self.when_changed_if_unchanged(Arc::clone(&what));
match when_changed_or_not {
Some(releasable) => releasable,
None => {
what.mark_as_changed();
Box::new(vec![])
}
}
}
}
///
/// Represents a binding context. Binding contexts are
/// per-thread structures, used to track which dependencies
/// are accessed while a bound value is being computed
///
#[derive(Clone)]
pub struct BindingContext {
/// The dependencies for this context
dependencies: BindingDependencies,
/// None, or the binding context that this context was created within
nested: Option<Box<BindingContext>>
}
impl BindingContext {
///
/// Gets the active binding context
///
pub fn current() -> Option<BindingContext> {
CURRENT_CONTEXT.with(|current_context| {
current_context
.borrow()
.as_ref()
.cloned()
})
}
///
/// Panics if we're trying to create a binding, with a particular message
///
pub fn panic_if_in_binding_context(msg: &str) {
if CURRENT_CONTEXT.with(|context| context.borrow().is_some()) {
panic!("Not possible when binding: {}", msg);
}
}
///
/// Executes a function in a new binding context
///
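    ///
    /// A minimal sketch (`some_binding` is a hypothetical `Changeable` value
    /// that registers itself via `add_dependency` when read):
    ///
    /// ```ignore
    /// let (value, deps) = BindingContext::bind(|| some_binding.get());
    /// let lifetime = deps.when_changed(notify(|| println!("recompute")));
    /// ```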
pub fn bind<TResult, TFn>(to_do: TFn) -> (TResult, BindingDependencies)
where TFn: FnOnce() -> TResult {
// Remember the previous context
let previous_context = Self::current();
// Create a new context
let dependencies = BindingDependencies::new();
let new_context = BindingContext {
dependencies: dependencies.clone(),
nested: previous_context.clone().map(Box::new)
};
// Make the current context the same as the new context
CURRENT_CONTEXT.with(|current_context| *current_context.borrow_mut() = Some(new_context));
// Perform the requested action with this context
let result = to_do();
// Reset to the previous context
CURRENT_CONTEXT.with(|current_context| *current_context.borrow_mut() = previous_context);
(result, dependencies)
}
///
/// Performs an action outside of the binding context (dependencies
/// will not be tracked for anything the supplied function does)
///
pub fn out_of_context<TResult, TFn>(to_do: TFn) -> TResult
where TFn: FnOnce() -> TResult {
// Remember the previous context
let previous_context = Self::current();
// Unset the context
CURRENT_CONTEXT.with(|current_context| *current_context.borrow_mut() = None);
// Perform the operations without a binding context
let result = to_do();
// Reset to the previous context
CURRENT_CONTEXT.with(|current_context| *current_context.borrow_mut() = previous_context);
result
}
///
/// Adds a dependency to the current context (if one is found)
///
pub fn add_dependency<TChangeable: Changeable+'static>(dependency: TChangeable) {
Self::current().map(|mut ctx| ctx.dependencies.add_dependency(dependency));
}
}
| 33.740331 | 134 | 0.630752 |
76fa54317fc1123cd59df792356aac98476b7f57 | 38,971 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Verifies that the types and values of const and static items
// are safe. The rules enforced by this module are:
//
// - For each *mutable* static item, it checks that its **type**:
// - doesn't have a destructor
// - doesn't own a box
//
// - For each *immutable* static item, it checks that its **value**:
// - doesn't own a box
// - doesn't contain a struct literal or a call to an enum variant / struct constructor where
// - the type of the struct/enum has a dtor
//
// Rules Enforced Elsewhere:
// - It's not possible to take the address of a static item with unsafe interior. This is enforced
// by borrowck::gather_loans
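//
// For illustration, the mutable-static *type* rule above rejects, e.g.:
//
//     static mut S: Box<u32> = /* ... */; // "mutable statics are not
//                                         //  allowed to have boxes"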
use middle::ty::cast::{CastKind};
use middle::const_eval;
use middle::const_eval::EvalHint::ExprTypeChecked;
use middle::def;
use middle::def_id::DefId;
use middle::expr_use_visitor as euv;
use middle::infer;
use middle::mem_categorization as mc;
use middle::mem_categorization::Categorization;
use middle::traits;
use middle::ty::{self, Ty};
use util::nodemap::NodeMap;
use rustc_front::hir;
use syntax::ast;
use syntax::codemap::Span;
use syntax::feature_gate::UnstableFeatures;
use rustc_front::visit::{self, FnKind, Visitor};
use std::collections::hash_map::Entry;
use std::cmp::Ordering;
// Const qualification, from partial to completely promotable.
bitflags! {
#[derive(RustcEncodable, RustcDecodable)]
flags ConstQualif: u8 {
        // Inner mutability (cannot be placed behind a reference) or behind
// &mut in a non-global expression. Can be copied from static memory.
const MUTABLE_MEM = 1 << 0,
// Constant value with a type that implements Drop. Can be copied
// from static memory, similar to MUTABLE_MEM.
const NEEDS_DROP = 1 << 1,
// Even if the value can be placed in static memory, copying it from
// there is more expensive than in-place instantiation, and/or it may
// be too large. This applies to [T; N] and everything containing it.
// N.B.: references need to clear this flag to not end up on the stack.
const PREFER_IN_PLACE = 1 << 2,
// May use more than 0 bytes of memory, doesn't impact the constness
// directly, but is not allowed to be borrowed mutably in a constant.
const NON_ZERO_SIZED = 1 << 3,
// Actually borrowed, has to always be in static memory. Does not
// propagate, and requires the expression to behave like a 'static
// lvalue. The set of expressions with this flag is the minimum
// that have to be promoted.
const HAS_STATIC_BORROWS = 1 << 4,
// Invalid const for miscellaneous reasons (e.g. not implemented).
const NOT_CONST = 1 << 5,
// Borrowing the expression won't produce &'static T if any of these
// bits are set, though the value could be copied from static memory
// if `NOT_CONST` isn't set.
const NON_STATIC_BORROWS = ConstQualif::MUTABLE_MEM.bits |
ConstQualif::NEEDS_DROP.bits |
ConstQualif::NOT_CONST.bits
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
enum Mode {
Const,
ConstFn,
Static,
StaticMut,
// An expression that occurs outside of any constant context
// (i.e. `const`, `static`, array lengths, etc.). The value
// can be variable at runtime, but will be promotable to
// static memory if we can prove it is actually constant.
Var,
}
struct CheckCrateVisitor<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
mode: Mode,
qualif: ConstQualif,
rvalue_borrows: NodeMap<hir::Mutability>
}
impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
fn with_mode<F, R>(&mut self, mode: Mode, f: F) -> R where
F: FnOnce(&mut CheckCrateVisitor<'a, 'tcx>) -> R,
{
let (old_mode, old_qualif) = (self.mode, self.qualif);
self.mode = mode;
self.qualif = ConstQualif::empty();
let r = f(self);
self.mode = old_mode;
self.qualif = old_qualif;
r
}
fn with_euv<'b, F, R>(&'b mut self, item_id: Option<ast::NodeId>, f: F) -> R where
F: for<'t> FnOnce(&mut euv::ExprUseVisitor<'b, 't, 'b, 'tcx>) -> R,
{
let param_env = match item_id {
Some(item_id) => ty::ParameterEnvironment::for_item(self.tcx, item_id),
None => self.tcx.empty_parameter_environment()
};
let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, Some(param_env), false);
f(&mut euv::ExprUseVisitor::new(self, &infcx))
}
fn global_expr(&mut self, mode: Mode, expr: &hir::Expr) -> ConstQualif {
assert!(mode != Mode::Var);
match self.tcx.const_qualif_map.borrow_mut().entry(expr.id) {
Entry::Occupied(entry) => return *entry.get(),
Entry::Vacant(entry) => {
// Prevent infinite recursion on re-entry.
entry.insert(ConstQualif::empty());
}
}
self.with_mode(mode, |this| {
this.with_euv(None, |euv| euv.consume_expr(expr));
this.visit_expr(expr);
this.qualif
})
}
fn fn_like(&mut self,
fk: FnKind,
fd: &hir::FnDecl,
b: &hir::Block,
s: Span,
fn_id: ast::NodeId)
-> ConstQualif {
match self.tcx.const_qualif_map.borrow_mut().entry(fn_id) {
Entry::Occupied(entry) => return *entry.get(),
Entry::Vacant(entry) => {
// Prevent infinite recursion on re-entry.
entry.insert(ConstQualif::empty());
}
}
let mode = match fk {
FnKind::ItemFn(_, _, _, hir::Constness::Const, _, _) => {
Mode::ConstFn
}
FnKind::Method(_, m, _) => {
if m.constness == hir::Constness::Const {
Mode::ConstFn
} else {
Mode::Var
}
}
_ => Mode::Var
};
// Ensure the arguments are simple, not mutable/by-ref or patterns.
if mode == Mode::ConstFn {
for arg in &fd.inputs {
match arg.pat.node {
hir::PatWild => {}
hir::PatIdent(hir::BindByValue(hir::MutImmutable), _, None) => {}
_ => {
span_err!(self.tcx.sess, arg.pat.span, E0022,
"arguments of constant functions can only \
be immutable by-value bindings");
}
}
}
}
let qualif = self.with_mode(mode, |this| {
this.with_euv(Some(fn_id), |euv| euv.walk_fn(fd, b));
visit::walk_fn(this, fk, fd, b, s);
this.qualif
});
// Keep only bits that aren't affected by function body (NON_ZERO_SIZED),
// and bits that don't change semantics, just optimizations (PREFER_IN_PLACE).
let qualif = qualif & (ConstQualif::NON_ZERO_SIZED | ConstQualif::PREFER_IN_PLACE);
self.tcx.const_qualif_map.borrow_mut().insert(fn_id, qualif);
qualif
}
fn add_qualif(&mut self, qualif: ConstQualif) {
self.qualif = self.qualif | qualif;
}
/// Returns true if the call is to a const fn or method.
fn handle_const_fn_call(&mut self,
expr: &hir::Expr,
def_id: DefId,
ret_ty: Ty<'tcx>)
-> bool {
if let Some(fn_like) = const_eval::lookup_const_fn_by_id(self.tcx, def_id) {
if
// we are in a static/const initializer
self.mode != Mode::Var &&
// feature-gate is not enabled
!self.tcx.sess.features.borrow().const_fn &&
// this doesn't come from a macro that has #[allow_internal_unstable]
!self.tcx.sess.codemap().span_allows_unstable(expr.span)
{
                self.tcx.sess.span_err(
                    expr.span,
                    "const fns are an unstable feature");
fileline_help!(
self.tcx.sess,
expr.span,
"in Nightly builds, add `#![feature(const_fn)]` to the crate \
attributes to enable");
}
let qualif = self.fn_like(fn_like.kind(),
fn_like.decl(),
fn_like.body(),
fn_like.span(),
fn_like.id());
self.add_qualif(qualif);
if ret_ty.type_contents(self.tcx).interior_unsafe() {
self.add_qualif(ConstQualif::MUTABLE_MEM);
}
true
} else {
false
}
}
fn record_borrow(&mut self, id: ast::NodeId, mutbl: hir::Mutability) {
match self.rvalue_borrows.entry(id) {
Entry::Occupied(mut entry) => {
// Merge the two borrows, taking the most demanding
// one, mutability-wise.
if mutbl == hir::MutMutable {
entry.insert(mutbl);
}
}
Entry::Vacant(entry) => {
entry.insert(mutbl);
}
}
}
fn msg(&self) -> &'static str {
match self.mode {
Mode::Const => "constant",
Mode::ConstFn => "constant function",
Mode::StaticMut | Mode::Static => "static",
Mode::Var => unreachable!(),
}
}
fn check_static_mut_type(&self, e: &hir::Expr) {
let node_ty = self.tcx.node_id_to_type(e.id);
let tcontents = node_ty.type_contents(self.tcx);
let suffix = if tcontents.has_dtor() {
"destructors"
} else if tcontents.owns_owned() {
"boxes"
} else {
return
};
span_err!(self.tcx.sess, e.span, E0397,
"mutable statics are not allowed to have {}", suffix);
}
fn check_static_type(&self, e: &hir::Expr) {
let ty = self.tcx.node_id_to_type(e.id);
let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, None, false);
let cause = traits::ObligationCause::new(e.span, e.id, traits::SharedStatic);
let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
fulfill_cx.register_builtin_bound(&infcx, ty, ty::BoundSync, cause);
match fulfill_cx.select_all_or_error(&infcx) {
Ok(()) => { },
Err(ref errors) => {
traits::report_fulfillment_errors(&infcx, errors);
}
}
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &hir::Item) {
debug!("visit_item(item={})", self.tcx.map.node_to_string(i.id));
match i.node {
hir::ItemStatic(_, hir::MutImmutable, ref expr) => {
self.check_static_type(&**expr);
self.global_expr(Mode::Static, &**expr);
}
hir::ItemStatic(_, hir::MutMutable, ref expr) => {
self.check_static_mut_type(&**expr);
self.global_expr(Mode::StaticMut, &**expr);
}
hir::ItemConst(_, ref expr) => {
self.global_expr(Mode::Const, &**expr);
}
hir::ItemEnum(ref enum_definition, _) => {
for var in &enum_definition.variants {
if let Some(ref ex) = var.node.disr_expr {
self.global_expr(Mode::Const, &**ex);
}
}
}
_ => {
self.with_mode(Mode::Var, |v| visit::walk_item(v, i));
}
}
}
fn visit_trait_item(&mut self, t: &'v hir::TraitItem) {
match t.node {
hir::ConstTraitItem(_, ref default) => {
if let Some(ref expr) = *default {
self.global_expr(Mode::Const, &*expr);
} else {
visit::walk_trait_item(self, t);
}
}
_ => self.with_mode(Mode::Var, |v| visit::walk_trait_item(v, t)),
}
}
fn visit_impl_item(&mut self, i: &'v hir::ImplItem) {
match i.node {
hir::ConstImplItem(_, ref expr) => {
self.global_expr(Mode::Const, &*expr);
}
_ => self.with_mode(Mode::Var, |v| visit::walk_impl_item(v, i)),
}
}
fn visit_fn(&mut self,
fk: FnKind<'v>,
fd: &'v hir::FnDecl,
b: &'v hir::Block,
s: Span,
fn_id: ast::NodeId) {
self.fn_like(fk, fd, b, s, fn_id);
}
fn visit_pat(&mut self, p: &hir::Pat) {
match p.node {
hir::PatLit(ref lit) => {
self.global_expr(Mode::Const, &**lit);
}
hir::PatRange(ref start, ref end) => {
self.global_expr(Mode::Const, &**start);
self.global_expr(Mode::Const, &**end);
match const_eval::compare_lit_exprs(self.tcx, start, end) {
Some(Ordering::Less) |
Some(Ordering::Equal) => {}
Some(Ordering::Greater) => {
span_err!(self.tcx.sess, start.span, E0030,
"lower range bound must be less than or equal to upper");
}
None => {
self.tcx.sess.delay_span_bug(start.span,
"non-constant path in constant expr");
}
}
}
_ => visit::walk_pat(self, p)
}
}
fn visit_block(&mut self, block: &hir::Block) {
// Check all statements in the block
for stmt in &block.stmts {
let span = match stmt.node {
hir::StmtDecl(ref decl, _) => {
match decl.node {
hir::DeclLocal(_) => decl.span,
// Item statements are allowed
hir::DeclItem(_) => continue
}
}
hir::StmtExpr(ref expr, _) => expr.span,
hir::StmtSemi(ref semi, _) => semi.span,
};
self.add_qualif(ConstQualif::NOT_CONST);
if self.mode != Mode::Var {
span_err!(self.tcx.sess, span, E0016,
"blocks in {}s are limited to items and \
tail expressions", self.msg());
}
}
visit::walk_block(self, block);
}
fn visit_expr(&mut self, ex: &hir::Expr) {
let mut outer = self.qualif;
self.qualif = ConstQualif::empty();
let node_ty = self.tcx.node_id_to_type(ex.id);
check_expr(self, ex, node_ty);
check_adjustments(self, ex);
// Special-case some expressions to avoid certain flags bubbling up.
match ex.node {
hir::ExprCall(ref callee, ref args) => {
for arg in args {
self.visit_expr(&**arg)
}
let inner = self.qualif;
self.visit_expr(&**callee);
// The callee's size doesn't count in the call.
let added = self.qualif - inner;
self.qualif = inner | (added - ConstQualif::NON_ZERO_SIZED);
}
hir::ExprRepeat(ref element, _) => {
self.visit_expr(&**element);
// The count is checked elsewhere (typeck).
let count = match node_ty.sty {
ty::TyArray(_, n) => n,
_ => unreachable!()
};
// [element; 0] is always zero-sized.
if count == 0 {
self.qualif.remove(ConstQualif::NON_ZERO_SIZED | ConstQualif::PREFER_IN_PLACE);
}
}
hir::ExprMatch(ref discr, ref arms, _) => {
// Compute the most demanding borrow from all the arms'
// patterns and set that on the discriminator.
let mut borrow = None;
for pat in arms.iter().flat_map(|arm| &arm.pats) {
let pat_borrow = self.rvalue_borrows.remove(&pat.id);
match (borrow, pat_borrow) {
(None, _) | (_, Some(hir::MutMutable)) => {
borrow = pat_borrow;
}
_ => {}
}
}
if let Some(mutbl) = borrow {
self.record_borrow(discr.id, mutbl);
}
visit::walk_expr(self, ex);
}
// Division by zero and overflow checking.
hir::ExprBinary(op, _, _) => {
visit::walk_expr(self, ex);
let div_or_rem = op.node == hir::BiDiv || op.node == hir::BiRem;
match node_ty.sty {
ty::TyUint(_) | ty::TyInt(_) if div_or_rem => {
if !self.qualif.intersects(ConstQualif::NOT_CONST) {
match const_eval::eval_const_expr_partial(
self.tcx, ex, ExprTypeChecked, None) {
Ok(_) => {}
Err(msg) => {
self.tcx.sess.add_lint(::lint::builtin::CONST_ERR, ex.id,
msg.span,
msg.description().into_owned())
}
}
}
}
_ => {}
}
}
_ => visit::walk_expr(self, ex)
}
// Handle borrows on (or inside the autorefs of) this expression.
match self.rvalue_borrows.remove(&ex.id) {
Some(hir::MutImmutable) => {
// Constants cannot be borrowed if they contain interior mutability as
// it means that our "silent insertion of statics" could change
// initializer values (very bad).
// If the type doesn't have interior mutability, then `ConstQualif::MUTABLE_MEM` has
// propagated from another error, so erroring again would be just noise.
let tc = node_ty.type_contents(self.tcx);
if self.qualif.intersects(ConstQualif::MUTABLE_MEM) && tc.interior_unsafe() {
outer = outer | ConstQualif::NOT_CONST;
if self.mode != Mode::Var {
span_err!(self.tcx.sess, ex.span, E0492,
"cannot borrow a constant which contains \
interior mutability, create a static instead");
}
}
// If the reference has to be 'static, avoid in-place initialization
// as that will end up pointing to the stack instead.
if !self.qualif.intersects(ConstQualif::NON_STATIC_BORROWS) {
self.qualif = self.qualif - ConstQualif::PREFER_IN_PLACE;
self.add_qualif(ConstQualif::HAS_STATIC_BORROWS);
}
}
Some(hir::MutMutable) => {
// `&mut expr` means expr could be mutated, unless it's zero-sized.
if self.qualif.intersects(ConstQualif::NON_ZERO_SIZED) {
if self.mode == Mode::Var {
outer = outer | ConstQualif::NOT_CONST;
self.add_qualif(ConstQualif::MUTABLE_MEM);
} else {
span_err!(self.tcx.sess, ex.span, E0017,
"references in {}s may only refer \
to immutable values", self.msg())
}
}
if !self.qualif.intersects(ConstQualif::NON_STATIC_BORROWS) {
self.add_qualif(ConstQualif::HAS_STATIC_BORROWS);
}
}
None => {}
}
self.tcx.const_qualif_map.borrow_mut().insert(ex.id, self.qualif);
// Don't propagate certain flags.
self.qualif = outer | (self.qualif - ConstQualif::HAS_STATIC_BORROWS);
}
}
/// This function is used to enforce the constraints on
/// const/static items. It walks through the *value*
/// of the item walking down the expression and evaluating
/// every nested expression. If the expression is not part
/// of a const/static item, it is qualified for promotion
/// instead of producing errors.
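///
/// For example (illustrative), `static X: u32 = f();` where `f` is not a
/// `const fn` reaches the `hir::ExprCall` arm below and is rejected with
/// E0015.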
fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>,
e: &hir::Expr, node_ty: Ty<'tcx>) {
match node_ty.sty {
ty::TyStruct(def, _) |
ty::TyEnum(def, _) if def.has_dtor() => {
v.add_qualif(ConstQualif::NEEDS_DROP);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0493,
"{}s are not allowed to have destructors",
v.msg());
}
}
_ => {}
}
let method_call = ty::MethodCall::expr(e.id);
match e.node {
hir::ExprUnary(..) |
hir::ExprBinary(..) |
hir::ExprIndex(..) if v.tcx.tables.borrow().method_map.contains_key(&method_call) => {
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0011,
"user-defined operators are not allowed in {}s", v.msg());
}
}
hir::ExprBox(_) => {
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0010,
"allocations are not allowed in {}s", v.msg());
}
}
hir::ExprUnary(op, ref inner) => {
match v.tcx.node_id_to_type(inner.id).sty {
ty::TyRawPtr(_) => {
assert!(op == hir::UnDeref);
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0396,
"raw pointers cannot be dereferenced in {}s", v.msg());
}
}
_ => {}
}
}
hir::ExprBinary(op, ref lhs, _) => {
match v.tcx.node_id_to_type(lhs.id).sty {
ty::TyRawPtr(_) => {
assert!(op.node == hir::BiEq || op.node == hir::BiNe ||
op.node == hir::BiLe || op.node == hir::BiLt ||
op.node == hir::BiGe || op.node == hir::BiGt);
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0395,
"raw pointers cannot be compared in {}s", v.msg());
}
}
_ => {}
}
}
hir::ExprCast(ref from, _) => {
debug!("Checking const cast(id={})", from.id);
match v.tcx.cast_kinds.borrow().get(&from.id) {
None => v.tcx.sess.span_bug(e.span, "no kind for cast"),
Some(&CastKind::PtrAddrCast) | Some(&CastKind::FnPtrAddrCast) => {
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0018,
"raw pointers cannot be cast to integers in {}s", v.msg());
}
}
_ => {}
}
}
hir::ExprPath(..) => {
let def = v.tcx.def_map.borrow().get(&e.id).map(|d| d.full_def());
match def {
Some(def::DefVariant(_, _, _)) => {
// Count the discriminator or function pointer.
v.add_qualif(ConstQualif::NON_ZERO_SIZED);
}
Some(def::DefStruct(_)) => {
if let ty::TyBareFn(..) = node_ty.sty {
// Count the function pointer.
v.add_qualif(ConstQualif::NON_ZERO_SIZED);
}
}
Some(def::DefFn(..)) | Some(def::DefMethod(..)) => {
// Count the function pointer.
v.add_qualif(ConstQualif::NON_ZERO_SIZED);
}
Some(def::DefStatic(..)) => {
match v.mode {
Mode::Static | Mode::StaticMut => {}
Mode::Const | Mode::ConstFn => {
span_err!(v.tcx.sess, e.span, E0013,
"{}s cannot refer to other statics, insert \
an intermediate constant instead", v.msg());
}
Mode::Var => v.add_qualif(ConstQualif::NOT_CONST)
}
}
Some(def::DefConst(did)) |
Some(def::DefAssociatedConst(did)) => {
if let Some(expr) = const_eval::lookup_const_by_id(v.tcx, did,
Some(e.id)) {
let inner = v.global_expr(Mode::Const, expr);
v.add_qualif(inner);
} else {
v.tcx.sess.span_bug(e.span,
"DefConst or DefAssociatedConst \
doesn't point to a constant");
}
}
Some(def::DefLocal(..)) if v.mode == Mode::ConstFn => {
// Sadly, we can't determine whether the types are zero-sized.
v.add_qualif(ConstQualif::NOT_CONST | ConstQualif::NON_ZERO_SIZED);
}
def => {
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
debug!("(checking const) found bad def: {:?}", def);
span_err!(v.tcx.sess, e.span, E0014,
"paths in {}s may only refer to constants \
or functions", v.msg());
}
}
}
}
hir::ExprCall(ref callee, _) => {
let mut callee = &**callee;
loop {
callee = match callee.node {
hir::ExprBlock(ref block) => match block.expr {
Some(ref tail) => &**tail,
None => break
},
_ => break
};
}
let def = v.tcx.def_map.borrow().get(&callee.id).map(|d| d.full_def());
let is_const = match def {
Some(def::DefStruct(..)) => true,
Some(def::DefVariant(..)) => {
// Count the discriminator.
v.add_qualif(ConstQualif::NON_ZERO_SIZED);
true
}
Some(def::DefFn(did, _)) => {
v.handle_const_fn_call(e, did, node_ty)
}
Some(def::DefMethod(did)) => {
match v.tcx.impl_or_trait_item(did).container() {
ty::ImplContainer(_) => {
v.handle_const_fn_call(e, did, node_ty)
}
ty::TraitContainer(_) => false
}
}
_ => false
};
if !is_const {
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
fn span_limited_call_error(tcx: &ty::ctxt, span: Span, s: &str) {
span_err!(tcx.sess, span, E0015, "{}", s);
}
// FIXME(#24111) Remove this check when const fn stabilizes
if let UnstableFeatures::Disallow = v.tcx.sess.opts.unstable_features {
span_limited_call_error(&v.tcx, e.span,
&format!("function calls in {}s are limited to \
struct and enum constructors",
v.msg()));
v.tcx.sess.span_note(e.span,
"a limited form of compile-time function \
evaluation is available on a nightly \
compiler via `const fn`");
} else {
span_limited_call_error(&v.tcx, e.span,
&format!("function calls in {}s are limited \
to constant functions, \
struct and enum constructors",
v.msg()));
}
}
}
}
hir::ExprMethodCall(..) => {
let method = v.tcx.tables.borrow().method_map[&method_call];
let is_const = match v.tcx.impl_or_trait_item(method.def_id).container() {
ty::ImplContainer(_) => v.handle_const_fn_call(e, method.def_id, node_ty),
ty::TraitContainer(_) => false
};
if !is_const {
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0378,
"method calls in {}s are limited to \
constant inherent methods", v.msg());
}
}
}
hir::ExprStruct(..) => {
let did = v.tcx.def_map.borrow().get(&e.id).map(|def| def.def_id());
if did == v.tcx.lang_items.unsafe_cell_type() {
v.add_qualif(ConstQualif::MUTABLE_MEM);
}
}
hir::ExprLit(_) |
hir::ExprAddrOf(..) => {
v.add_qualif(ConstQualif::NON_ZERO_SIZED);
}
hir::ExprRepeat(..) => {
v.add_qualif(ConstQualif::PREFER_IN_PLACE);
}
hir::ExprClosure(..) => {
// Paths in constant contexts cannot refer to local variables,
// as there are none, and thus closures can't have upvars there.
if v.tcx.with_freevars(e.id, |fv| !fv.is_empty()) {
assert!(v.mode == Mode::Var,
"global closures can't capture anything");
v.add_qualif(ConstQualif::NOT_CONST);
}
}
hir::ExprBlock(_) |
hir::ExprIndex(..) |
hir::ExprField(..) |
hir::ExprTupField(..) |
hir::ExprVec(_) |
hir::ExprTup(..) => {}
// Conditional control flow (possible to implement).
hir::ExprMatch(..) |
hir::ExprIf(..) |
// Loops (not very meaningful in constants).
hir::ExprWhile(..) |
hir::ExprLoop(..) |
// More control flow (also not very meaningful).
hir::ExprBreak(_) |
hir::ExprAgain(_) |
hir::ExprRet(_) |
// Miscellaneous expressions that could be implemented.
hir::ExprRange(..) |
// Expressions with side-effects.
hir::ExprAssign(..) |
hir::ExprAssignOp(..) |
hir::ExprInlineAsm(_) => {
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0019,
"{} contains unimplemented expression type", v.msg());
}
}
}
}
/// Check the adjustments of an expression
fn check_adjustments<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Expr) {
match v.tcx.tables.borrow().adjustments.get(&e.id) {
None |
Some(&ty::adjustment::AdjustReifyFnPointer) |
Some(&ty::adjustment::AdjustUnsafeFnPointer) => {}
Some(&ty::adjustment::AdjustDerefRef(
ty::adjustment::AutoDerefRef { autoderefs, .. }
)) => {
if (0..autoderefs as u32).any(|autoderef| {
v.tcx.is_overloaded_autoderef(e.id, autoderef)
}) {
v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0400,
"user-defined dereference operators are not allowed in {}s",
v.msg());
}
}
}
}
}
pub fn check_crate(tcx: &ty::ctxt) {
visit::walk_crate(&mut CheckCrateVisitor {
tcx: tcx,
mode: Mode::Var,
qualif: ConstQualif::NOT_CONST,
rvalue_borrows: NodeMap()
}, tcx.map.krate());
tcx.sess.abort_if_errors();
}
impl<'a, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'tcx> {
fn consume(&mut self,
_consume_id: ast::NodeId,
consume_span: Span,
cmt: mc::cmt,
_mode: euv::ConsumeMode) {
let mut cur = &cmt;
loop {
match cur.cat {
Categorization::StaticItem => {
if self.mode != Mode::Var {
// statics cannot be consumed by value at any time, that would imply
// that they're an initializer (what a const is for) or kept in sync
// over time (not feasible), so deny it outright.
span_err!(self.tcx.sess, consume_span, E0394,
"cannot refer to other statics by value, use the \
address-of operator or a constant instead");
}
break;
}
Categorization::Deref(ref cmt, _, _) |
Categorization::Downcast(ref cmt, _) |
Categorization::Interior(ref cmt, _) => cur = cmt,
Categorization::Rvalue(..) |
Categorization::Upvar(..) |
Categorization::Local(..) => break
}
}
}
fn borrow(&mut self,
borrow_id: ast::NodeId,
borrow_span: Span,
cmt: mc::cmt<'tcx>,
_loan_region: ty::Region,
bk: ty::BorrowKind,
loan_cause: euv::LoanCause)
{
// Kind of hacky, but we allow Unsafe coercions in constants.
// These occur when we convert a &T or *T to a *U, as well as
// when making a thin pointer (e.g., `*T`) into a fat pointer
// (e.g., `*Trait`).
match loan_cause {
euv::LoanCause::AutoUnsafe => {
return;
}
_ => { }
}
let mut cur = &cmt;
let mut is_interior = false;
loop {
match cur.cat {
Categorization::Rvalue(..) => {
if loan_cause == euv::MatchDiscriminant {
// Ignore the dummy immutable borrow created by EUV.
break;
}
let mutbl = bk.to_mutbl_lossy();
if mutbl == hir::MutMutable && self.mode == Mode::StaticMut {
// Mutable slices are the only `&mut` allowed in
// globals, but only in `static mut`, nowhere else.
// FIXME: This exception is really weird... there isn't
// any fundamental reason to restrict this based on
// type of the expression. `&mut [1]` has exactly the
// same representation as &mut 1.
match cmt.ty.sty {
ty::TyArray(_, _) | ty::TySlice(_) => break,
_ => {}
}
}
self.record_borrow(borrow_id, mutbl);
break;
}
Categorization::StaticItem => {
if is_interior && self.mode != Mode::Var {
// Borrowed statics can specifically *only* have their address taken,
// not any number of other borrows such as borrowing fields, reading
// elements of an array, etc.
span_err!(self.tcx.sess, borrow_span, E0494,
"cannot refer to the interior of another \
static, use a constant instead");
}
break;
}
Categorization::Deref(ref cmt, _, _) |
Categorization::Downcast(ref cmt, _) |
Categorization::Interior(ref cmt, _) => {
is_interior = true;
cur = cmt;
}
Categorization::Upvar(..) |
Categorization::Local(..) => break
}
}
}
fn decl_without_init(&mut self,
_id: ast::NodeId,
_span: Span) {}
fn mutate(&mut self,
_assignment_id: ast::NodeId,
_assignment_span: Span,
_assignee_cmt: mc::cmt,
_mode: euv::MutateMode) {}
fn matched_pat(&mut self,
_: &hir::Pat,
_: mc::cmt,
_: euv::MatchMode) {}
fn consume_pat(&mut self,
_consume_pat: &hir::Pat,
_cmt: mc::cmt,
_mode: euv::ConsumeMode) {}
}
| 40.300931 | 100 | 0.470991 |
7a7cbeb367a769a9e15908fd8d483943c11c0fdb | 63,859 | /**
* Copyright (c) 2016, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree. An additional
 * grant of patent rights can be found in the PATENTS file in the same
 * directory.
*
**
*
* THIS FILE IS @generated; DO NOT EDIT IT
* To regenerate this file, run
*
* buck run //hphp/hack/src:generate_full_fidelity
*
**
*
*/
use super::{
syntax_children_iterator::*,
syntax_variant_generated::*,
syntax::*
};
impl<'a, T, V> SyntaxChildrenIterator<'a, T, V> {
pub fn next_impl(&mut self, direction : bool) -> Option<&'a Syntax<'a, T, V>> {
use SyntaxVariant::*;
let get_index = |len| {
let back_index_plus_1 = len - self.index_back;
if back_index_plus_1 <= self.index {
return None
}
if direction {
Some (self.index)
} else {
Some (back_index_plus_1 - 1)
}
};
let res = match self.syntax {
Missing => None,
Token (_) => None,
SyntaxList(elems) => {
get_index(elems.len()).and_then(|x| elems.get(x))
},
EndOfFile(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.token),
_ => None,
}
})
},
Script(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.declarations),
_ => None,
}
})
},
QualifiedName(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.parts),
_ => None,
}
})
},
SimpleTypeSpecifier(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.specifier),
_ => None,
}
})
},
LiteralExpression(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.expression),
_ => None,
}
})
},
PrefixedStringExpression(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.name),
1 => Some(&x.str),
_ => None,
}
})
},
PrefixedCodeExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.prefix),
1 => Some(&x.left_backtick),
2 => Some(&x.expression),
3 => Some(&x.right_backtick),
_ => None,
}
})
},
VariableExpression(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.expression),
_ => None,
}
})
},
PipeVariableExpression(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.expression),
_ => None,
}
})
},
FileAttributeSpecification(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.left_double_angle),
1 => Some(&x.keyword),
2 => Some(&x.colon),
3 => Some(&x.attributes),
4 => Some(&x.right_double_angle),
_ => None,
}
})
},
EnumDeclaration(x) => {
get_index(10).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.keyword),
2 => Some(&x.name),
3 => Some(&x.colon),
4 => Some(&x.base),
5 => Some(&x.type_),
6 => Some(&x.left_brace),
7 => Some(&x.use_clauses),
8 => Some(&x.enumerators),
9 => Some(&x.right_brace),
_ => None,
}
})
},
EnumUse(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.names),
2 => Some(&x.semicolon),
_ => None,
}
})
},
Enumerator(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.name),
1 => Some(&x.equal),
2 => Some(&x.value),
3 => Some(&x.semicolon),
_ => None,
}
})
},
EnumClassDeclaration(x) => {
get_index(12).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.modifiers),
2 => Some(&x.enum_keyword),
3 => Some(&x.class_keyword),
4 => Some(&x.name),
5 => Some(&x.colon),
6 => Some(&x.base),
7 => Some(&x.extends),
8 => Some(&x.extends_list),
9 => Some(&x.left_brace),
10 => Some(&x.elements),
11 => Some(&x.right_brace),
_ => None,
}
})
},
EnumClassEnumerator(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.modifiers),
1 => Some(&x.type_),
2 => Some(&x.name),
3 => Some(&x.initializer),
4 => Some(&x.semicolon),
_ => None,
}
})
},
AliasDeclaration(x) => {
get_index(8).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.keyword),
2 => Some(&x.name),
3 => Some(&x.generic_parameter),
4 => Some(&x.constraint),
5 => Some(&x.equal),
6 => Some(&x.type_),
7 => Some(&x.semicolon),
_ => None,
}
})
},
ContextAliasDeclaration(x) => {
get_index(8).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.keyword),
2 => Some(&x.name),
3 => Some(&x.generic_parameter),
4 => Some(&x.as_constraint),
5 => Some(&x.equal),
6 => Some(&x.context),
7 => Some(&x.semicolon),
_ => None,
}
})
},
PropertyDeclaration(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.modifiers),
2 => Some(&x.type_),
3 => Some(&x.declarators),
4 => Some(&x.semicolon),
_ => None,
}
})
},
PropertyDeclarator(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.name),
1 => Some(&x.initializer),
_ => None,
}
})
},
NamespaceDeclaration(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.header),
1 => Some(&x.body),
_ => None,
}
})
},
NamespaceDeclarationHeader(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.name),
_ => None,
}
})
},
NamespaceBody(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_brace),
1 => Some(&x.declarations),
2 => Some(&x.right_brace),
_ => None,
}
})
},
NamespaceEmptyBody(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.semicolon),
_ => None,
}
})
},
NamespaceUseDeclaration(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.kind),
2 => Some(&x.clauses),
3 => Some(&x.semicolon),
_ => None,
}
})
},
NamespaceGroupUseDeclaration(x) => {
get_index(7).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.kind),
2 => Some(&x.prefix),
3 => Some(&x.left_brace),
4 => Some(&x.clauses),
5 => Some(&x.right_brace),
6 => Some(&x.semicolon),
_ => None,
}
})
},
NamespaceUseClause(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.clause_kind),
1 => Some(&x.name),
2 => Some(&x.as_),
3 => Some(&x.alias),
_ => None,
}
})
},
FunctionDeclaration(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.declaration_header),
2 => Some(&x.body),
_ => None,
}
})
},
FunctionDeclarationHeader(x) => {
get_index(12).and_then(|index| { match index {
0 => Some(&x.modifiers),
1 => Some(&x.keyword),
2 => Some(&x.name),
3 => Some(&x.type_parameter_list),
4 => Some(&x.left_paren),
5 => Some(&x.parameter_list),
6 => Some(&x.right_paren),
7 => Some(&x.contexts),
8 => Some(&x.colon),
9 => Some(&x.readonly_return),
10 => Some(&x.type_),
11 => Some(&x.where_clause),
_ => None,
}
})
},
Contexts(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_bracket),
1 => Some(&x.types),
2 => Some(&x.right_bracket),
_ => None,
}
})
},
WhereClause(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.constraints),
_ => None,
}
})
},
WhereConstraint(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_type),
1 => Some(&x.operator),
2 => Some(&x.right_type),
_ => None,
}
})
},
MethodishDeclaration(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.attribute),
1 => Some(&x.function_decl_header),
2 => Some(&x.function_body),
3 => Some(&x.semicolon),
_ => None,
}
})
},
MethodishTraitResolution(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.attribute),
1 => Some(&x.function_decl_header),
2 => Some(&x.equal),
3 => Some(&x.name),
4 => Some(&x.semicolon),
_ => None,
}
})
},
ClassishDeclaration(x) => {
get_index(12).and_then(|index| { match index {
0 => Some(&x.attribute),
1 => Some(&x.modifiers),
2 => Some(&x.xhp),
3 => Some(&x.keyword),
4 => Some(&x.name),
5 => Some(&x.type_parameters),
6 => Some(&x.extends_keyword),
7 => Some(&x.extends_list),
8 => Some(&x.implements_keyword),
9 => Some(&x.implements_list),
10 => Some(&x.where_clause),
11 => Some(&x.body),
_ => None,
}
})
},
ClassishBody(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_brace),
1 => Some(&x.elements),
2 => Some(&x.right_brace),
_ => None,
}
})
},
TraitUsePrecedenceItem(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.name),
1 => Some(&x.keyword),
2 => Some(&x.removed_names),
_ => None,
}
})
},
TraitUseAliasItem(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.aliasing_name),
1 => Some(&x.keyword),
2 => Some(&x.modifiers),
3 => Some(&x.aliased_name),
_ => None,
}
})
},
TraitUseConflictResolution(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.names),
2 => Some(&x.left_brace),
3 => Some(&x.clauses),
4 => Some(&x.right_brace),
_ => None,
}
})
},
TraitUse(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.names),
2 => Some(&x.semicolon),
_ => None,
}
})
},
RequireClause(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.kind),
2 => Some(&x.name),
3 => Some(&x.semicolon),
_ => None,
}
})
},
ConstDeclaration(x) => {
get_index(6).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.modifiers),
2 => Some(&x.keyword),
3 => Some(&x.type_specifier),
4 => Some(&x.declarators),
5 => Some(&x.semicolon),
_ => None,
}
})
},
ConstantDeclarator(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.name),
1 => Some(&x.initializer),
_ => None,
}
})
},
TypeConstDeclaration(x) => {
get_index(10).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.modifiers),
2 => Some(&x.keyword),
3 => Some(&x.type_keyword),
4 => Some(&x.name),
5 => Some(&x.type_parameters),
6 => Some(&x.type_constraints),
7 => Some(&x.equal),
8 => Some(&x.type_specifier),
9 => Some(&x.semicolon),
_ => None,
}
})
},
ContextConstDeclaration(x) => {
get_index(9).and_then(|index| { match index {
0 => Some(&x.modifiers),
1 => Some(&x.const_keyword),
2 => Some(&x.ctx_keyword),
3 => Some(&x.name),
4 => Some(&x.type_parameters),
5 => Some(&x.constraint),
6 => Some(&x.equal),
7 => Some(&x.ctx_list),
8 => Some(&x.semicolon),
_ => None,
}
})
},
DecoratedExpression(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.decorator),
1 => Some(&x.expression),
_ => None,
}
})
},
ParameterDeclaration(x) => {
get_index(7).and_then(|index| { match index {
0 => Some(&x.attribute),
1 => Some(&x.visibility),
2 => Some(&x.call_convention),
3 => Some(&x.readonly),
4 => Some(&x.type_),
5 => Some(&x.name),
6 => Some(&x.default_value),
_ => None,
}
})
},
VariadicParameter(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.call_convention),
1 => Some(&x.type_),
2 => Some(&x.ellipsis),
_ => None,
}
})
},
OldAttributeSpecification(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_double_angle),
1 => Some(&x.attributes),
2 => Some(&x.right_double_angle),
_ => None,
}
})
},
AttributeSpecification(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.attributes),
_ => None,
}
})
},
Attribute(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.at),
1 => Some(&x.attribute_name),
_ => None,
}
})
},
InclusionExpression(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.require),
1 => Some(&x.filename),
_ => None,
}
})
},
InclusionDirective(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.expression),
1 => Some(&x.semicolon),
_ => None,
}
})
},
CompoundStatement(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_brace),
1 => Some(&x.statements),
2 => Some(&x.right_brace),
_ => None,
}
})
},
ExpressionStatement(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.expression),
1 => Some(&x.semicolon),
_ => None,
}
})
},
MarkupSection(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.hashbang),
1 => Some(&x.suffix),
_ => None,
}
})
},
MarkupSuffix(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.less_than_question),
1 => Some(&x.name),
_ => None,
}
})
},
UnsetStatement(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.variables),
3 => Some(&x.right_paren),
4 => Some(&x.semicolon),
_ => None,
}
})
},
UsingStatementBlockScoped(x) => {
get_index(6).and_then(|index| { match index {
0 => Some(&x.await_keyword),
1 => Some(&x.using_keyword),
2 => Some(&x.left_paren),
3 => Some(&x.expressions),
4 => Some(&x.right_paren),
5 => Some(&x.body),
_ => None,
}
})
},
UsingStatementFunctionScoped(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.await_keyword),
1 => Some(&x.using_keyword),
2 => Some(&x.expression),
3 => Some(&x.semicolon),
_ => None,
}
})
},
WhileStatement(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.condition),
3 => Some(&x.right_paren),
4 => Some(&x.body),
_ => None,
}
})
},
IfStatement(x) => {
get_index(6).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.condition),
3 => Some(&x.right_paren),
4 => Some(&x.statement),
5 => Some(&x.else_clause),
_ => None,
}
})
},
ElseClause(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.statement),
_ => None,
}
})
},
TryStatement(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.compound_statement),
2 => Some(&x.catch_clauses),
3 => Some(&x.finally_clause),
_ => None,
}
})
},
CatchClause(x) => {
get_index(6).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.type_),
3 => Some(&x.variable),
4 => Some(&x.right_paren),
5 => Some(&x.body),
_ => None,
}
})
},
FinallyClause(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.body),
_ => None,
}
})
},
DoStatement(x) => {
get_index(7).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.body),
2 => Some(&x.while_keyword),
3 => Some(&x.left_paren),
4 => Some(&x.condition),
5 => Some(&x.right_paren),
6 => Some(&x.semicolon),
_ => None,
}
})
},
ForStatement(x) => {
get_index(9).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.initializer),
3 => Some(&x.first_semicolon),
4 => Some(&x.control),
5 => Some(&x.second_semicolon),
6 => Some(&x.end_of_loop),
7 => Some(&x.right_paren),
8 => Some(&x.body),
_ => None,
}
})
},
ForeachStatement(x) => {
get_index(10).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.collection),
3 => Some(&x.await_keyword),
4 => Some(&x.as_),
5 => Some(&x.key),
6 => Some(&x.arrow),
7 => Some(&x.value),
8 => Some(&x.right_paren),
9 => Some(&x.body),
_ => None,
}
})
},
SwitchStatement(x) => {
get_index(7).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.expression),
3 => Some(&x.right_paren),
4 => Some(&x.left_brace),
5 => Some(&x.sections),
6 => Some(&x.right_brace),
_ => None,
}
})
},
SwitchSection(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.labels),
1 => Some(&x.statements),
2 => Some(&x.fallthrough),
_ => None,
}
})
},
SwitchFallthrough(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.semicolon),
_ => None,
}
})
},
CaseLabel(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.expression),
2 => Some(&x.colon),
_ => None,
}
})
},
DefaultLabel(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.colon),
_ => None,
}
})
},
ReturnStatement(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.expression),
2 => Some(&x.semicolon),
_ => None,
}
})
},
YieldBreakStatement(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.break_),
2 => Some(&x.semicolon),
_ => None,
}
})
},
ThrowStatement(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.expression),
2 => Some(&x.semicolon),
_ => None,
}
})
},
BreakStatement(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.semicolon),
_ => None,
}
})
},
ContinueStatement(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.semicolon),
_ => None,
}
})
},
EchoStatement(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.expressions),
2 => Some(&x.semicolon),
_ => None,
}
})
},
ConcurrentStatement(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.statement),
_ => None,
}
})
},
SimpleInitializer(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.equal),
1 => Some(&x.value),
_ => None,
}
})
},
AnonymousClass(x) => {
get_index(9).and_then(|index| { match index {
0 => Some(&x.class_keyword),
1 => Some(&x.left_paren),
2 => Some(&x.argument_list),
3 => Some(&x.right_paren),
4 => Some(&x.extends_keyword),
5 => Some(&x.extends_list),
6 => Some(&x.implements_keyword),
7 => Some(&x.implements_list),
8 => Some(&x.body),
_ => None,
}
})
},
AnonymousFunction(x) => {
get_index(12).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.async_keyword),
2 => Some(&x.function_keyword),
3 => Some(&x.left_paren),
4 => Some(&x.parameters),
5 => Some(&x.right_paren),
6 => Some(&x.ctx_list),
7 => Some(&x.colon),
8 => Some(&x.readonly_return),
9 => Some(&x.type_),
10 => Some(&x.use_),
11 => Some(&x.body),
_ => None,
}
})
},
AnonymousFunctionUseClause(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.variables),
3 => Some(&x.right_paren),
_ => None,
}
})
},
LambdaExpression(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.async_),
2 => Some(&x.signature),
3 => Some(&x.arrow),
4 => Some(&x.body),
_ => None,
}
})
},
LambdaSignature(x) => {
get_index(7).and_then(|index| { match index {
0 => Some(&x.left_paren),
1 => Some(&x.parameters),
2 => Some(&x.right_paren),
3 => Some(&x.contexts),
4 => Some(&x.colon),
5 => Some(&x.readonly_return),
6 => Some(&x.type_),
_ => None,
}
})
},
CastExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.left_paren),
1 => Some(&x.type_),
2 => Some(&x.right_paren),
3 => Some(&x.operand),
_ => None,
}
})
},
ScopeResolutionExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.qualifier),
1 => Some(&x.operator),
2 => Some(&x.name),
_ => None,
}
})
},
MemberSelectionExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.object),
1 => Some(&x.operator),
2 => Some(&x.name),
_ => None,
}
})
},
SafeMemberSelectionExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.object),
1 => Some(&x.operator),
2 => Some(&x.name),
_ => None,
}
})
},
EmbeddedMemberSelectionExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.object),
1 => Some(&x.operator),
2 => Some(&x.name),
_ => None,
}
})
},
YieldExpression(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.operand),
_ => None,
}
})
},
PrefixUnaryExpression(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.operator),
1 => Some(&x.operand),
_ => None,
}
})
},
PostfixUnaryExpression(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.operand),
1 => Some(&x.operator),
_ => None,
}
})
},
BinaryExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_operand),
1 => Some(&x.operator),
2 => Some(&x.right_operand),
_ => None,
}
})
},
IsExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_operand),
1 => Some(&x.operator),
2 => Some(&x.right_operand),
_ => None,
}
})
},
AsExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_operand),
1 => Some(&x.operator),
2 => Some(&x.right_operand),
_ => None,
}
})
},
NullableAsExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_operand),
1 => Some(&x.operator),
2 => Some(&x.right_operand),
_ => None,
}
})
},
UpcastExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_operand),
1 => Some(&x.operator),
2 => Some(&x.right_operand),
_ => None,
}
})
},
ConditionalExpression(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.test),
1 => Some(&x.question),
2 => Some(&x.consequence),
3 => Some(&x.colon),
4 => Some(&x.alternative),
_ => None,
}
})
},
EvalExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.argument),
3 => Some(&x.right_paren),
_ => None,
}
})
},
IssetExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.argument_list),
3 => Some(&x.right_paren),
_ => None,
}
})
},
FunctionCallExpression(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.receiver),
1 => Some(&x.type_args),
2 => Some(&x.left_paren),
3 => Some(&x.argument_list),
4 => Some(&x.right_paren),
_ => None,
}
})
},
FunctionPointerExpression(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.receiver),
1 => Some(&x.type_args),
_ => None,
}
})
},
ParenthesizedExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_paren),
1 => Some(&x.expression),
2 => Some(&x.right_paren),
_ => None,
}
})
},
BracedExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_brace),
1 => Some(&x.expression),
2 => Some(&x.right_brace),
_ => None,
}
})
},
ETSpliceExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.dollar),
1 => Some(&x.left_brace),
2 => Some(&x.expression),
3 => Some(&x.right_brace),
_ => None,
}
})
},
EmbeddedBracedExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_brace),
1 => Some(&x.expression),
2 => Some(&x.right_brace),
_ => None,
}
})
},
ListExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.members),
3 => Some(&x.right_paren),
_ => None,
}
})
},
CollectionLiteralExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.name),
1 => Some(&x.left_brace),
2 => Some(&x.initializers),
3 => Some(&x.right_brace),
_ => None,
}
})
},
ObjectCreationExpression(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.new_keyword),
1 => Some(&x.object),
_ => None,
}
})
},
ConstructorCall(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.type_),
1 => Some(&x.left_paren),
2 => Some(&x.argument_list),
3 => Some(&x.right_paren),
_ => None,
}
})
},
DarrayIntrinsicExpression(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.explicit_type),
2 => Some(&x.left_bracket),
3 => Some(&x.members),
4 => Some(&x.right_bracket),
_ => None,
}
})
},
DictionaryIntrinsicExpression(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.explicit_type),
2 => Some(&x.left_bracket),
3 => Some(&x.members),
4 => Some(&x.right_bracket),
_ => None,
}
})
},
KeysetIntrinsicExpression(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.explicit_type),
2 => Some(&x.left_bracket),
3 => Some(&x.members),
4 => Some(&x.right_bracket),
_ => None,
}
})
},
VarrayIntrinsicExpression(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.explicit_type),
2 => Some(&x.left_bracket),
3 => Some(&x.members),
4 => Some(&x.right_bracket),
_ => None,
}
})
},
VectorIntrinsicExpression(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.explicit_type),
2 => Some(&x.left_bracket),
3 => Some(&x.members),
4 => Some(&x.right_bracket),
_ => None,
}
})
},
ElementInitializer(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.key),
1 => Some(&x.arrow),
2 => Some(&x.value),
_ => None,
}
})
},
SubscriptExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.receiver),
1 => Some(&x.left_bracket),
2 => Some(&x.index),
3 => Some(&x.right_bracket),
_ => None,
}
})
},
EmbeddedSubscriptExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.receiver),
1 => Some(&x.left_bracket),
2 => Some(&x.index),
3 => Some(&x.right_bracket),
_ => None,
}
})
},
AwaitableCreationExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.async_),
2 => Some(&x.compound_statement),
_ => None,
}
})
},
XHPChildrenDeclaration(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.expression),
2 => Some(&x.semicolon),
_ => None,
}
})
},
XHPChildrenParenthesizedList(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_paren),
1 => Some(&x.xhp_children),
2 => Some(&x.right_paren),
_ => None,
}
})
},
XHPCategoryDeclaration(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.categories),
2 => Some(&x.semicolon),
_ => None,
}
})
},
XHPEnumType(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.like),
1 => Some(&x.keyword),
2 => Some(&x.left_brace),
3 => Some(&x.values),
4 => Some(&x.right_brace),
_ => None,
}
})
},
XHPLateinit(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.at),
1 => Some(&x.keyword),
_ => None,
}
})
},
XHPRequired(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.at),
1 => Some(&x.keyword),
_ => None,
}
})
},
XHPClassAttributeDeclaration(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.attributes),
2 => Some(&x.semicolon),
_ => None,
}
})
},
XHPClassAttribute(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.type_),
1 => Some(&x.name),
2 => Some(&x.initializer),
3 => Some(&x.required),
_ => None,
}
})
},
XHPSimpleClassAttribute(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.type_),
_ => None,
}
})
},
XHPSimpleAttribute(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.name),
1 => Some(&x.equal),
2 => Some(&x.expression),
_ => None,
}
})
},
XHPSpreadAttribute(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.left_brace),
1 => Some(&x.spread_operator),
2 => Some(&x.expression),
3 => Some(&x.right_brace),
_ => None,
}
})
},
XHPOpen(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.left_angle),
1 => Some(&x.name),
2 => Some(&x.attributes),
3 => Some(&x.right_angle),
_ => None,
}
})
},
XHPExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.open),
1 => Some(&x.body),
2 => Some(&x.close),
_ => None,
}
})
},
XHPClose(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_angle),
1 => Some(&x.name),
2 => Some(&x.right_angle),
_ => None,
}
})
},
TypeConstant(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_type),
1 => Some(&x.separator),
2 => Some(&x.right_type),
_ => None,
}
})
},
VectorTypeSpecifier(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_angle),
2 => Some(&x.type_),
3 => Some(&x.trailing_comma),
4 => Some(&x.right_angle),
_ => None,
}
})
},
KeysetTypeSpecifier(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_angle),
2 => Some(&x.type_),
3 => Some(&x.trailing_comma),
4 => Some(&x.right_angle),
_ => None,
}
})
},
TupleTypeExplicitSpecifier(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_angle),
2 => Some(&x.types),
3 => Some(&x.right_angle),
_ => None,
}
})
},
VarrayTypeSpecifier(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_angle),
2 => Some(&x.type_),
3 => Some(&x.trailing_comma),
4 => Some(&x.right_angle),
_ => None,
}
})
},
FunctionCtxTypeSpecifier(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.variable),
_ => None,
}
})
},
TypeParameter(x) => {
get_index(6).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.reified),
2 => Some(&x.variance),
3 => Some(&x.name),
4 => Some(&x.param_params),
5 => Some(&x.constraints),
_ => None,
}
})
},
TypeConstraint(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.type_),
_ => None,
}
})
},
ContextConstraint(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.ctx_list),
_ => None,
}
})
},
DarrayTypeSpecifier(x) => {
get_index(7).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_angle),
2 => Some(&x.key),
3 => Some(&x.comma),
4 => Some(&x.value),
5 => Some(&x.trailing_comma),
6 => Some(&x.right_angle),
_ => None,
}
})
},
DictionaryTypeSpecifier(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_angle),
2 => Some(&x.members),
3 => Some(&x.right_angle),
_ => None,
}
})
},
ClosureTypeSpecifier(x) => {
get_index(11).and_then(|index| { match index {
0 => Some(&x.outer_left_paren),
1 => Some(&x.readonly_keyword),
2 => Some(&x.function_keyword),
3 => Some(&x.inner_left_paren),
4 => Some(&x.parameter_list),
5 => Some(&x.inner_right_paren),
6 => Some(&x.contexts),
7 => Some(&x.colon),
8 => Some(&x.readonly_return),
9 => Some(&x.return_type),
10 => Some(&x.outer_right_paren),
_ => None,
}
})
},
ClosureParameterTypeSpecifier(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.call_convention),
1 => Some(&x.readonly),
2 => Some(&x.type_),
_ => None,
}
})
},
ClassnameTypeSpecifier(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_angle),
2 => Some(&x.type_),
3 => Some(&x.trailing_comma),
4 => Some(&x.right_angle),
_ => None,
}
})
},
FieldSpecifier(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.question),
1 => Some(&x.name),
2 => Some(&x.arrow),
3 => Some(&x.type_),
_ => None,
}
})
},
FieldInitializer(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.name),
1 => Some(&x.arrow),
2 => Some(&x.value),
_ => None,
}
})
},
ShapeTypeSpecifier(x) => {
get_index(5).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.fields),
3 => Some(&x.ellipsis),
4 => Some(&x.right_paren),
_ => None,
}
})
},
ShapeExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.fields),
3 => Some(&x.right_paren),
_ => None,
}
})
},
TupleExpression(x) => {
get_index(4).and_then(|index| { match index {
0 => Some(&x.keyword),
1 => Some(&x.left_paren),
2 => Some(&x.items),
3 => Some(&x.right_paren),
_ => None,
}
})
},
GenericTypeSpecifier(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.class_type),
1 => Some(&x.argument_list),
_ => None,
}
})
},
NullableTypeSpecifier(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.question),
1 => Some(&x.type_),
_ => None,
}
})
},
LikeTypeSpecifier(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.tilde),
1 => Some(&x.type_),
_ => None,
}
})
},
SoftTypeSpecifier(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.at),
1 => Some(&x.type_),
_ => None,
}
})
},
AttributizedSpecifier(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.type_),
_ => None,
}
})
},
ReifiedTypeArgument(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.reified),
1 => Some(&x.type_),
_ => None,
}
})
},
TypeArguments(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_angle),
1 => Some(&x.types),
2 => Some(&x.right_angle),
_ => None,
}
})
},
TypeParameters(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_angle),
1 => Some(&x.parameters),
2 => Some(&x.right_angle),
_ => None,
}
})
},
TupleTypeSpecifier(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_paren),
1 => Some(&x.types),
2 => Some(&x.right_paren),
_ => None,
}
})
},
UnionTypeSpecifier(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_paren),
1 => Some(&x.types),
2 => Some(&x.right_paren),
_ => None,
}
})
},
IntersectionTypeSpecifier(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.left_paren),
1 => Some(&x.types),
2 => Some(&x.right_paren),
_ => None,
}
})
},
ErrorSyntax(x) => {
get_index(1).and_then(|index| { match index {
0 => Some(&x.error),
_ => None,
}
})
},
ListItem(x) => {
get_index(2).and_then(|index| { match index {
0 => Some(&x.item),
1 => Some(&x.separator),
_ => None,
}
})
},
EnumClassLabelExpression(x) => {
get_index(3).and_then(|index| { match index {
0 => Some(&x.qualifier),
1 => Some(&x.hash),
2 => Some(&x.expression),
_ => None,
}
})
},
ModuleDeclaration(x) => {
get_index(6).and_then(|index| { match index {
0 => Some(&x.attribute_spec),
1 => Some(&x.new_keyword),
2 => Some(&x.module_keyword),
3 => Some(&x.name),
4 => Some(&x.left_brace),
5 => Some(&x.right_brace),
_ => None,
}
})
},
};
if res.is_some() {
if direction {
self.index = self.index + 1
} else {
self.index_back = self.index_back + 1
}
}
res
}
}
| 37.084204 | 83 | 0.315367 |
01cb03730b87319e90f634ae3f1e4d5b3c5b4587 | 4,550 | // Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::utils::span_lint;
use crate::rustc::hir::*;
use crate::rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
use crate::rustc::{declare_tool_lint, lint_array};
use std::f64::consts as f64;
use crate::syntax::ast::{FloatTy, Lit, LitKind};
use crate::syntax::symbol;
/// **What it does:** Checks for floating point literals that approximate
/// constants which are defined in
/// [`std::f32::consts`](https://doc.rust-lang.org/stable/std/f32/consts/#constants)
/// or
/// [`std::f64::consts`](https://doc.rust-lang.org/stable/std/f64/consts/#constants),
/// respectively, suggesting to use the predefined constant.
///
/// **Why is this bad?** Usually, the definition in the standard library is more
/// precise than what people come up with. If you find that your definition is
/// actually more precise, please [file a Rust
/// issue](https://github.com/rust-lang/rust/issues).
///
/// **Known problems:** If you happen to have a value that is within 1/8192 of a
/// known constant, but is not *and should not* be the same, this lint will
/// report your value anyway. We have not yet noticed any false positives in
/// code we tested clippy with (this includes servo), but YMMV.
///
/// **Example:**
/// ```rust
/// let x = 3.14;
/// ```
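///
/// Could be written as (illustrative fix):
///
/// ```rust
/// let x = std::f64::consts::PI;
/// ```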
declare_clippy_lint! {
pub APPROX_CONSTANT,
correctness,
"the approximate of a known float constant (in `std::fXX::consts`)"
}
// Tuples are of the form (constant, name, min_digits)
const KNOWN_CONSTS: &[(f64, &str, usize)] = &[
(f64::E, "E", 4),
(f64::FRAC_1_PI, "FRAC_1_PI", 4),
(f64::FRAC_1_SQRT_2, "FRAC_1_SQRT_2", 5),
(f64::FRAC_2_PI, "FRAC_2_PI", 5),
(f64::FRAC_2_SQRT_PI, "FRAC_2_SQRT_PI", 5),
(f64::FRAC_PI_2, "FRAC_PI_2", 5),
(f64::FRAC_PI_3, "FRAC_PI_3", 5),
(f64::FRAC_PI_4, "FRAC_PI_4", 5),
(f64::FRAC_PI_6, "FRAC_PI_6", 5),
(f64::FRAC_PI_8, "FRAC_PI_8", 5),
(f64::LN_10, "LN_10", 5),
(f64::LN_2, "LN_2", 5),
(f64::LOG10_E, "LOG10_E", 5),
(f64::LOG2_E, "LOG2_E", 5),
(f64::PI, "PI", 3),
(f64::SQRT_2, "SQRT_2", 5),
];
#[derive(Copy, Clone)]
pub struct Pass;
impl LintPass for Pass {
fn get_lints(&self) -> LintArray {
lint_array!(APPROX_CONSTANT)
}
}
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for Pass {
fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, e: &'tcx Expr) {
if let ExprKind::Lit(ref lit) = e.node {
check_lit(cx, lit, e);
}
}
}
fn check_lit(cx: &LateContext<'_, '_>, lit: &Lit, e: &Expr) {
match lit.node {
LitKind::Float(s, FloatTy::F32) => check_known_consts(cx, e, s, "f32"),
LitKind::Float(s, FloatTy::F64) => check_known_consts(cx, e, s, "f64"),
LitKind::FloatUnsuffixed(s) => check_known_consts(cx, e, s, "f{32, 64}"),
_ => (),
}
}
fn check_known_consts(cx: &LateContext<'_, '_>, e: &Expr, s: symbol::Symbol, module: &str) {
let s = s.as_str();
if s.parse::<f64>().is_ok() {
for &(constant, name, min_digits) in KNOWN_CONSTS {
if is_approx_const(constant, &s, min_digits) {
span_lint(
cx,
APPROX_CONSTANT,
e.span,
&format!(
"approximate value of `{}::consts::{}` found. \
Consider using it directly",
module, &name
),
);
return;
}
}
}
}
/// Returns `false` if the number of significant figures in `value` is
/// less than `min_digits`; otherwise, returns `true` if `value` is equal
/// to `constant`, rounded to the number of digits present in `value`.
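///
/// For example (illustrative, with values chosen here): with
/// `min_digits = 3`, `is_approx_const(std::f64::consts::PI, "3.1416", 3)`
/// is true because `PI` formatted to 4 decimal places is `"3.1416"`,
/// while `is_approx_const(std::f64::consts::PI, "3.15", 3)` is false.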
fn is_approx_const(constant: f64, value: &str, min_digits: usize) -> bool {
if value.len() <= min_digits {
false
} else {
let round_const = format!("{:.*}", value.len() - 2, constant);
let mut trunc_const = constant.to_string();
if trunc_const.len() > value.len() {
trunc_const.truncate(value.len());
}
(value == round_const) || (value == trunc_const)
}
}
| 35 | 92 | 0.602857 |
91875ba9919f9f207de377efdebdefe9972f116f | 3,207 | use json_utils::json::JsValue;
use crate::core::ruleset::*;
use crate::json::std as j;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum JsonFilter {
#[serde(rename = "or")]
Or(Vec<JsonFilter>),
#[serde(rename = "and")]
And(Vec<JsonFilter>),
#[serde(rename = "eq")]
Eq { path: String, value: JsValue },
#[serde(rename = "neq")]
Neq { path: String, value: JsValue },
#[serde(rename = "list::len_eq")]
ListLenEq { path: String, value: usize },
#[serde(rename = "list::len_gt")]
ListLenGt { path: String, value: usize },
#[serde(rename = "list::len_gte")]
ListLenGte { path: String, value: usize },
#[serde(rename = "list::len_lt")]
ListLenLt { path: String, value: usize },
#[serde(rename = "list::len_lte")]
ListLenLte { path: String, value: usize },
#[serde(rename = "num::eq")]
NumEq { path: String, value: usize },
#[serde(rename = "num::lt")]
NumLt { path: String, value: usize },
#[serde(rename = "num::lte")]
NumLte { path: String, value: usize },
#[serde(rename = "num::gt")]
NumGt { path: String, value: usize },
#[serde(rename = "num::gte")]
NumGte { path: String, value: usize },
#[serde(rename = "str::eq")]
StrEq { path: String, value: String },
}
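// With serde's default externally tagged representation (assumed here),
// a filter deserializes from JSON shaped like the following, where
// `path` segments are separated by `/` (see `parse_path` below):
//
// { "and": [
// { "str::eq": { "path": "user/name", "value": "alice" } },
// { "num::gte": { "path": "user/age", "value": 18 } }
// ] }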
impl JsonFilter {
pub fn into_filter(self) -> Filter<JsValue> {
match self {
Self::Eq { path, value } => Filter::fact(j::eq(parse_path(&path), value)),
Self::Neq { path, value } => Filter::fact(j::neq(parse_path(&path), value)),
Self::And(filters) => {
Filter::And(filters.into_iter().map(|jf| jf.into_filter()).collect())
}
Self::Or(filters) => {
Filter::Or(filters.into_iter().map(|jf| jf.into_filter()).collect())
}
Self::StrEq { path, value } => Filter::fact(j::str::eq(parse_path(&path), value)),
Self::NumEq { path, value } => Filter::fact(j::num::eq(parse_path(&path), value)),
Self::NumLt { path, value } => Filter::fact(j::num::lt(parse_path(&path), value)),
Self::NumLte { path, value } => Filter::fact(j::num::lte(parse_path(&path), value)),
Self::NumGt { path, value } => Filter::fact(j::num::gt(parse_path(&path), value)),
Self::NumGte { path, value } => Filter::fact(j::num::gte(parse_path(&path), value)),
Self::ListLenEq { path, value } => {
Filter::fact(j::list::len_eq(parse_path(&path), value))
}
Self::ListLenLt { path, value } => {
Filter::fact(j::list::len_lt(parse_path(&path), value))
}
Self::ListLenLte { path, value } => {
Filter::fact(j::list::len_lte(parse_path(&path), value))
}
Self::ListLenGt { path, value } => {
Filter::fact(j::list::len_gt(parse_path(&path), value))
}
Self::ListLenGte { path, value } => {
Filter::fact(j::list::len_gte(parse_path(&path), value))
}
}
}
}
fn parse_path(s: &str) -> Vec<String> {
s.split("/").map(|s| s.to_owned()).collect()
}
| 36.443182 | 96 | 0.53851 |
fe95e66c85b6232b63d04ecded63f3f7d26b6a7f | 728 | pub trait Resumable {
fn resumer(&self) -> String;
}
pub struct ArticleDePresse {
pub titre: String,
pub lieu: String,
pub auteur: String,
pub contenu: String,
}
impl Resumable for ArticleDePresse {
fn resumer(&self) -> String {
format!("{}, par {} ({})", self.titre, self.auteur, self.lieu)
}
}
pub struct Tweet {
pub nom_utilisateur: String,
pub contenu: String,
pub reponse: bool,
pub retweet: bool,
}
impl Resumable for Tweet {
fn resumer(&self) -> String {
format!("{} : {}", self.nom_utilisateur, self.contenu)
}
}
// ANCHOR: here
pub fn notifier(element: &impl Resumable) {
println!("Flash info ! {}", element.resumer());
}
// ANCHOR_END: here
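// Usage sketch (illustrative; any type implementing `Resumable` works):
//
// let tweet = Tweet {
// nom_utilisateur: String::from("horse_ebooks"),
// contenu: String::from("bien sur"),
// reponse: false,
// retweet: false,
// };
// notifier(&tweet); // prints "Flash info ! horse_ebooks : bien sur"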
| 20.222222 | 70 | 0.616758 |
75eb9c9906230eb28cf341f450d378b6674f3220 | 1,394 | // Author: [email protected]
#![deny(warnings)]
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Server, StatusCode};
use backend::google_user::get_google_user_info;
use backend::channels::get_all_channels;
// router for v1 interface
// - api/v1/user
// - api/v1/channel
async fn router_v1(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
match (req.method(), req.uri().path()) {
(&Method::GET, "/api/v1/user") => {
let id = get_google_user_info();
Ok(Response::new(Body::from(format!("id={}", id))))
}
(&Method::GET, "/api/v1/channel") => {
let id = get_all_channels();
Ok(Response::new(Body::from(format!("get all channels, id={}", id))))
}
// Return the 404 Not Found for other routes.
_ => {
let mut not_found = Response::default();
*not_found.status_mut() = StatusCode::NOT_FOUND;
Ok(not_found)
}
}
}
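// Example requests against the routes above (illustrative):
//
// curl http://127.0.0.1:3000/api/v1/user -> "id=<user id>"
// curl http://127.0.0.1:3000/api/v1/channel -> "get all channels, id=<id>"
// any other path -> 404 Not Found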
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let adder = ([127, 0, 0, 1], 3000).into();
let service = make_service_fn(|_| async { Ok::<_, hyper::Error>(service_fn(router_v1)) });
let server = Server::bind(&adder).serve(service);
println!("Listening on http://{}", adder);
server.await?;
Ok(())
}
| 29.041667 | 94 | 0.582496 |
c171c4656f8f2933de27369002c283df7eb40ef2 | 9,901 | #[doc = "Register `LSR` reader"]
pub struct R(crate::R<LSR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<LSR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<LSR_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<LSR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "RX Data Error in FIFO\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FIFOERR_A {
#[doc = "1: `1`"]
ERROR = 1,
}
impl From<FIFOERR_A> for bool {
#[inline(always)]
fn from(variant: FIFOERR_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `fifoerr` reader - RX Data Error in FIFO"]
pub struct FIFOERR_R(crate::FieldReader<bool, FIFOERR_A>);
impl FIFOERR_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
FIFOERR_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<FIFOERR_A> {
match self.bits {
true => Some(FIFOERR_A::ERROR),
_ => None,
}
}
#[doc = "Checks if the value of the field is `ERROR`"]
#[inline(always)]
pub fn is_error(&self) -> bool {
**self == FIFOERR_A::ERROR
}
}
impl core::ops::Deref for FIFOERR_R {
type Target = crate::FieldReader<bool, FIFOERR_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Transmitter Empty\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TEMT_A {
#[doc = "1: `1`"]
EMPTY = 1,
}
impl From<TEMT_A> for bool {
#[inline(always)]
fn from(variant: TEMT_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `temt` reader - Transmitter Empty"]
pub struct TEMT_R(crate::FieldReader<bool, TEMT_A>);
impl TEMT_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
TEMT_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<TEMT_A> {
match self.bits {
true => Some(TEMT_A::EMPTY),
_ => None,
}
}
#[doc = "Checks if the value of the field is `EMPTY`"]
#[inline(always)]
pub fn is_empty(&self) -> bool {
**self == TEMT_A::EMPTY
}
}
impl core::ops::Deref for TEMT_R {
type Target = crate::FieldReader<bool, TEMT_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "TX Holding Register Empty\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum THRE_A {
#[doc = "1: `1`"]
EMPTY = 1,
}
impl From<THRE_A> for bool {
#[inline(always)]
fn from(variant: THRE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `thre` reader - TX Holding Register Empty"]
pub struct THRE_R(crate::FieldReader<bool, THRE_A>);
impl THRE_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
THRE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<THRE_A> {
match self.bits {
true => Some(THRE_A::EMPTY),
_ => None,
}
}
#[doc = "Checks if the value of the field is `EMPTY`"]
#[inline(always)]
pub fn is_empty(&self) -> bool {
**self == THRE_A::EMPTY
}
}
impl core::ops::Deref for THRE_R {
type Target = crate::FieldReader<bool, THRE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `bi` reader - Break Interrupt"]
pub struct BI_R(crate::FieldReader<bool, bool>);
impl BI_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
BI_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for BI_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Framing Error\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FE_A {
#[doc = "1: `1`"]
ERROR = 1,
}
impl From<FE_A> for bool {
#[inline(always)]
fn from(variant: FE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `fe` reader - Framing Error"]
pub struct FE_R(crate::FieldReader<bool, FE_A>);
impl FE_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
FE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<FE_A> {
match self.bits {
true => Some(FE_A::ERROR),
_ => None,
}
}
#[doc = "Checks if the value of the field is `ERROR`"]
#[inline(always)]
pub fn is_error(&self) -> bool {
**self == FE_A::ERROR
}
}
impl core::ops::Deref for FE_R {
type Target = crate::FieldReader<bool, FE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Parity Error\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PE_A {
#[doc = "1: `1`"]
ERROR = 1,
}
impl From<PE_A> for bool {
#[inline(always)]
fn from(variant: PE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `pe` reader - Parity Error"]
pub struct PE_R(crate::FieldReader<bool, PE_A>);
impl PE_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
PE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<PE_A> {
match self.bits {
true => Some(PE_A::ERROR),
_ => None,
}
}
#[doc = "Checks if the value of the field is `ERROR`"]
#[inline(always)]
pub fn is_error(&self) -> bool {
**self == PE_A::ERROR
}
}
impl core::ops::Deref for PE_R {
type Target = crate::FieldReader<bool, PE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Overrun Error\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum OE_A {
#[doc = "1: `1`"]
ERROR = 1,
}
impl From<OE_A> for bool {
#[inline(always)]
fn from(variant: OE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `oe` reader - Overrun Error"]
pub struct OE_R(crate::FieldReader<bool, OE_A>);
impl OE_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
OE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<OE_A> {
match self.bits {
true => Some(OE_A::ERROR),
_ => None,
}
}
#[doc = "Checks if the value of the field is `ERROR`"]
#[inline(always)]
pub fn is_error(&self) -> bool {
**self == OE_A::ERROR
}
}
impl core::ops::Deref for OE_R {
type Target = crate::FieldReader<bool, OE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Data Ready\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DR_A {
#[doc = "1: `1`"]
READY = 1,
}
impl From<DR_A> for bool {
#[inline(always)]
fn from(variant: DR_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `dr` reader - Data Ready"]
pub struct DR_R(crate::FieldReader<bool, DR_A>);
impl DR_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
DR_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<DR_A> {
match self.bits {
true => Some(DR_A::READY),
_ => None,
}
}
#[doc = "Checks if the value of the field is `READY`"]
#[inline(always)]
pub fn is_ready(&self) -> bool {
**self == DR_A::READY
}
}
impl core::ops::Deref for DR_R {
type Target = crate::FieldReader<bool, DR_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl R {
#[doc = "Bit 7 - RX Data Error in FIFO"]
#[inline(always)]
pub fn fifoerr(&self) -> FIFOERR_R {
FIFOERR_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bit 6 - Transmitter Empty"]
#[inline(always)]
pub fn temt(&self) -> TEMT_R {
TEMT_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 5 - TX Holding Register Empty"]
#[inline(always)]
pub fn thre(&self) -> THRE_R {
THRE_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 4 - Break Interrupt"]
#[inline(always)]
pub fn bi(&self) -> BI_R {
BI_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 3 - Framing Error"]
#[inline(always)]
pub fn fe(&self) -> FE_R {
FE_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 2 - Parity Error"]
#[inline(always)]
pub fn pe(&self) -> PE_R {
PE_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 1 - Overrun Error"]
#[inline(always)]
pub fn oe(&self) -> OE_R {
OE_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 0 - Data Ready"]
#[inline(always)]
pub fn dr(&self) -> DR_R {
DR_R::new((self.bits & 0x01) != 0)
}
}
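// Typical polling sketch (illustrative; `uart` stands in for a peripheral
// handle from the generated API and is an assumption here):
//
// // wait until the TX holding register is empty before writing a byte
// while !uart.lsr.read().thre().is_empty() {}
// // a received byte is available once the Data Ready bit is set
// if uart.lsr.read().dr().is_ready() { /* read the RX register */ }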
#[doc = "UART Line Status Register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [lsr](index.html) module"]
pub struct LSR_SPEC;
impl crate::RegisterSpec for LSR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [lsr::R](R) reader structure"]
impl crate::Readable for LSR_SPEC {
type Reader = R;
}
#[doc = "`reset()` method sets LSR to value 0"]
impl crate::Resettable for LSR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 26.831978 | 231 | 0.555095 |
39cec0502c538c725087ee216e130a9ba7cff135 | 30,214 | //! # Algorithm
//!
//! For an atom and type on input (when type is not set, `Undefined` is used):
//! * [Atom::Variable] is returned as is.
//! * [Atom::Symbol] and [Atom::Grounded] are type checked:
//! * If type is correct then atom is returned as is.
//! * If type is incorrect then error result is returned.
//! * First atom (operation) of [Atom::Expression] is extracted and a plan to
//! calculate its type is returned. After the type is calculated the expression
//! is interpreted according to its operation type. Note that several alternative
//! interpretations may be found here, one for each type of the operation.
//!
//! For an expression atom and its operation type:
//! * If expected type is `Atom` or `Expression` then expression is returned as is.
//! * If operation type is function:
//! * Check the arity and return type of the function; if the check fails then
//! return an error result.
//! * Return a sequence plan which interprets each argument using the
//! corresponding type and calls the resulting expression. If any argument
//! cannot be cast to its type then an error result is returned. If an
//! argument's bindings are not compatible with the bindings of the
//! expression, such a result is skipped; if no options to interpret the
//! argument are left then an error is returned.
//! * Notice that this step may return more than one result because each
//! argument can be interpreted in more than one way.
//! * If operation type is not function:
//! * Return a sequence plan which interprets each member using the
//! `Undefined` type and calls the resulting expression. If a member's
//! bindings are not compatible with the bindings of the expression, such a
//! result is skipped; if no options to interpret the member are left then
//! an error is returned.
//!
//! Call the expression:
//! * If there is a cached result for this expression then return it
//! * If operation is an instance of [Atom::Grounded] then the operation is executed:
//! * If result is error then error is returned
//! * If result is empty then it is returned as is
//! * If result is not empty, a plan to interpret each alternative further is
//! returned. Note: if every alternative returns an error then the result
//! of execution is also an error.
//! * If operation is not [Atom::Grounded] then expression is matched.
//! Atomspace is queried for `(= <expr> $X)` and expression is replaced by $X.
//! * If no results are returned then an error is returned
//! * If result is not empty, a plan to interpret each alternative further is
//! returned. Note: if every alternative returns an error then the result
//! of execution is also an error.
//! * If one of the previous steps returned an error then the original expression
//! is returned. Otherwise the result of the interpretation is returned.
//! It may be empty if one of the sub-expressions is a grounded expression
//! which returns an empty result.
//!
//! Summary of possible results:
//! * empty result for expression is returned only when grounded operation
//! returns empty result
//! * error is returned when the atom cannot be cast to the expected type
//! or all alternative interpretations are errors; the overall result includes
//! successfully interpreted alternatives only
//! * call of the expression returns either a successful result or the original expression
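//!
//! A minimal step-by-step driver sketch (illustrative; assumes `space` and
//! `expr` are already constructed):
//!
//! ```ignore
//! let mut step = interpret_init(space, &expr);
//! while step.has_next() {
//! step = interpret_step(step);
//! }
//! // `step` now holds either `StepResult::Return(results)` or an error;
//! // `interpret` below wraps exactly this loop.
//! ```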
use crate::*;
use crate::common::plan::*;
use crate::atom::subexpr::*;
use crate::atom::matcher::*;
use crate::space::grounding::*;
use crate::common::collections::ListMap;
use crate::metta::types::{AtomType, is_func, get_arg_types, check_type, get_reducted_types};
use std::ops::Deref;
use std::rc::Rc;
use std::cell::RefCell;
use std::fmt::{Debug, Display, Formatter};
#[inline]
fn equal_symbol() -> Atom { sym!("=") }
/// Result of atom interpretation plus variable bindings found
#[derive(Clone, PartialEq)]
pub struct InterpretedAtom(Atom, Bindings);
impl InterpretedAtom {
fn atom(&self) -> &Atom {
&self.0
}
fn bindings(&self) -> &Bindings {
&self.1
}
/// Convert the instance into tuple of [Atom] and [Bindings]
pub fn into_tuple(self) -> (Atom, Bindings) {
(self.0, self.1)
}
}
impl Display for InterpretedAtom {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
if self.1.is_empty() {
write!(f, "{}", self.0)
} else {
// TODO: it is possible to cleanup all bindings for nested
// expressions which were introduced by matching when all
// sub-expressions are interpreted. This will simplify
// textual representation. For example in test_air_humidity_regulator
// (make air wet) leads to (start kettle), {$y: kettle}) result
// but $y is not present in the expression after interpreting
// (make air wet) and can be removed.
write!(f, "{}|{}", self.0, self.1)
}
}
}
impl Debug for InterpretedAtom {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
Display::fmt(self, f)
}
}
type Results = Vec<InterpretedAtom>;
type NoInputPlan = Box<dyn Plan<(), Results>>;
/// Initializes the interpreter and returns the result of the zero step.
/// It can be an error, an immediate result, or an interpretation plan to be executed.
/// See [crate::metta::interpreter] for algorithm explanation.
///
/// # Arguments
/// * `space` - atomspace to query for interpretation
/// * `expr` - atom to interpret
pub fn interpret_init(space: GroundingSpace, expr: &Atom) -> StepResult<Vec<InterpretedAtom>> {
let context = InterpreterContextRef::new(space);
interpret_as_type_plan(context,
InterpretedAtom(expr.clone(), Bindings::new()),
AtomType::Undefined)
}
/// Perform next step of the interpretation plan and return the result. Panics
/// when [StepResult::Return] or [StepResult::Error] is passed as input.
/// See [crate::metta::interpreter] for algorithm explanation.
///
/// # Arguments
/// * `step` - [StepResult::Execute] result from the previous step.
pub fn interpret_step(step: StepResult<Vec<InterpretedAtom>>) -> StepResult<Vec<InterpretedAtom>> {
log::debug!("current plan:\n{:?}", step);
match step {
StepResult::Execute(plan) => plan.step(()),
StepResult::Return(_) => panic!("Plan execution is finished already"),
StepResult::Error(_) => panic!("Plan execution is finished with error"),
}
}
/// Interpret passed atom and return a new plan, result or error. This function
/// blocks until result is calculated. For step by step interpretation one
/// should use [interpret_init] and [interpret_step] functions.
/// # Arguments
/// * `space` - atomspace to query for interpretation
/// * `expr` - atom to interpret
pub fn interpret(space: GroundingSpace, expr: &Atom) -> Result<Vec<Atom>, String> {
let mut step = interpret_init(space, expr);
while step.has_next() {
step = interpret_step(step);
}
match step {
StepResult::Return(mut result) => Ok(result.drain(0..)
.map(|InterpretedAtom(atom, _)| atom).collect()),
StepResult::Error(message) => Err(message),
_ => panic!("Not expected step result: {:?}", step),
}
}
// TODO: ListMap is not effective but we cannot use HashMap here without
// requiring hash functions for the grounded atoms.
struct InterpreterCache(ListMap<Atom, Results>);
impl InterpreterCache {
fn new() -> Self {
Self(ListMap::new())
}
fn get(&self, key: &Atom, current_bindings: &Bindings) -> Option<Results> {
self.0.get(key).map(|results| -> Option<Results> {
let mut inconsistent = Vec::new();
let mut result = Vec::new();
for res in results {
let merged = Bindings::merge(res.bindings(), ¤t_bindings);
if let Some(merged) = merged {
result.push(InterpretedAtom(res.atom().clone(), merged));
} else {
inconsistent.push(res);
}
}
if inconsistent.is_empty() {
Some(result)
} else {
log::debug!("get_cached: return None as some results has inconsistent bindings");
log::debug!("get_cached: current bindings: {}, inconsistent results: {:?}", current_bindings, inconsistent);
None
}
}).flatten()
}
fn insert(&mut self, key: Atom, value: Results) {
self.0.insert(key, value)
}
fn reset(&mut self) {
self.0.clear();
}
}
impl SpaceObserver for InterpreterCache {
fn notify(&mut self, _event: &SpaceEvent) {
// TODO: implement more specific cache cleanup for each event
self.reset();
}
}
struct InterpreterContext {
space: GroundingSpace,
cache: Rc<RefCell<InterpreterCache>>,
}
#[derive(Clone)]
struct InterpreterContextRef(Rc<InterpreterContext>);
impl InterpreterContextRef {
fn new(mut space: GroundingSpace) -> Self {
let cache = Rc::new(RefCell::new(InterpreterCache::new()));
space.register_observer(Rc::clone(&cache));
Self(Rc::new(InterpreterContext{ space, cache }))
}
}
impl Deref for InterpreterContextRef {
type Target = InterpreterContext;
fn deref(&self) -> &Self::Target {
&self.0
}
}
fn is_grounded(expr: &ExpressionAtom) -> bool {
matches!(expr.children().get(0), Some(Atom::Grounded(_)))
}
fn has_grounded_sub_expr(expr: &Atom) -> bool {
return SubexprStream::from_expr(expr.clone(), TOP_DOWN_DEPTH_WALK)
.any(|sub| if let Atom::Expression(sub) = sub {
is_grounded(&sub)
} else {
panic!("Expression is expected");
});
}
fn interpret_as_type_plan(context: InterpreterContextRef,
input: InterpretedAtom, typ: AtomType) -> StepResult<Results> {
log::debug!("interpret_as_type_plan: input: {}, type: {}", input, typ);
match input.atom() {
Atom::Symbol(_) | Atom::Grounded(_) =>
cast_atom_to_type_plan(context, input, typ),
Atom::Expression(ref expr) if expr.children().is_empty() =>
cast_atom_to_type_plan(context, input, typ),
Atom::Expression(ref expr) => {
let op = &expr.children()[0];
StepResult::execute(SequencePlan::new(
get_type_of_atom_plan(context.clone(), op.clone()),
interpret_expression_as_type_plan(context, input, typ)
))
},
Atom::Variable(_) => {
StepResult::ret(vec![input])
},
}
}
fn cast_atom_to_type_plan(context: InterpreterContextRef,
input: InterpretedAtom, typ: AtomType) -> StepResult<Results> {
// TODO: implement this via interpreting of the (:cast atom typ) expression
if check_type(&context.space, input.atom(), &typ) {
log::debug!("cast_atom_to_type_plan: input: {} is casted to type: {}", input, typ);
StepResult::ret(vec![input])
} else {
log::debug!("cast_atom_to_type_plan: input: {} cannot be casted to type: {}", input, typ);
StepResult::err(format!("Incorrect type, input: {}, type: {}", input, typ))
}
}
fn get_type_of_atom_plan(context: InterpreterContextRef, atom: Atom) -> StepResult<Vec<Atom>> {
// TODO: implement this via interpreting of the (:? atom)
StepResult::ret(get_reducted_types(&context.space, &atom))
}
fn interpret_expression_as_type_plan(context: InterpreterContextRef,
input: InterpretedAtom, typ: AtomType) -> OperatorPlan<Vec<Atom>, Results> {
let descr = format!("form alternative plans for expression {} using types", input);
OperatorPlan::new(move |op_types: Vec<Atom>| {
make_alternives_plan(input.clone(), op_types, move |op_typ| {
interpret_expression_as_type_op(context.clone(),
input.clone(), op_typ, typ.clone())
})
}, descr)
}
fn get_expr(atom: &Atom) -> &ExpressionAtom {
match atom {
Atom::Expression(expr) => expr,
_ => panic!("Atom::Expression is expected, recieved: {}", atom),
}
}
fn get_expr_mut(atom: &mut Atom) -> &mut ExpressionAtom {
match atom {
Atom::Expression(expr) => expr,
_ => panic!("Atom::Expression is expected, recieved: {}", atom),
}
}
fn interpret_expression_as_type_op(context: InterpreterContextRef,
input: InterpretedAtom, op_typ: Atom, ret_typ: AtomType) -> NoInputPlan {
log::debug!("interpret_expression_as_type_op: input: {}, operation type: {}, expected return type: {}", input, op_typ, ret_typ);
let expr = get_expr(input.atom());
if ret_typ == AtomType::Specific(Atom::sym("Atom")) ||
ret_typ == AtomType::Specific(Atom::sym("Expression")) {
Box::new(StepResult::ret(vec![input]))
} else if is_func(&op_typ) {
let (op_arg_types, op_ret_typ) = get_arg_types(&op_typ);
// TODO: supertypes should be checked as well
if !ret_typ.map_or(|typ| *op_ret_typ == *typ, true) {
Box::new(StepResult::err(format!("Operation returns wrong type: {}, expected: {}", op_ret_typ, ret_typ)))
} else if op_arg_types.len() != (expr.children().len() - 1) {
Box::new(StepResult::err(format!("Operation arity is not equal to call arity: operation type, {}, call: {}", op_typ, expr)))
} else {
assert!(!expr.children().is_empty(), "Empty expression is not expected");
let mut plan: NoInputPlan = Box::new(StepResult::ret(vec![input.clone()]));
for expr_idx in 1..(expr.children().len()) {
let arg = expr.children()[expr_idx].clone();
let arg_typ = AtomType::Specific(op_arg_types[expr_idx - 1].clone());
plan = Box::new(SequencePlan::new(
ParallelPlan::new(
plan,
interpret_as_type_plan(context.clone(),
InterpretedAtom(arg, input.bindings().clone()),
arg_typ)),
insert_reducted_arg_plan(expr_idx)
))
}
call_alternatives_plan(plan, context, input)
}
} else {
let mut plan: NoInputPlan = Box::new(StepResult::ret(vec![input.clone()]));
for expr_idx in 0..(expr.children().len()) {
let arg = expr.children()[expr_idx].clone();
plan = Box::new(SequencePlan::new(
ParallelPlan::new(
plan,
interpret_as_type_plan(context.clone(),
InterpretedAtom(arg, input.bindings().clone()),
AtomType::Undefined)),
insert_reducted_arg_plan(expr_idx)
))
}
call_alternatives_plan(plan, context, input)
}
}
fn call_alternatives_plan(plan: NoInputPlan, context: InterpreterContextRef,
input: InterpretedAtom) -> NoInputPlan {
Box::new(SequencePlan::new(plan, OperatorPlan::new(move |results: Results| {
make_alternives_plan(input, results, move |result| {
call_plan(context.clone(), result)
})
}, "interpret each alternative")))
}
fn insert_reducted_arg_plan(atom_idx: usize) -> OperatorPlan<(Results, Results), Results> {
let descr = format!("insert right element as child {} of left element", atom_idx);
OperatorPlan::new(move |prev_result| insert_reducted_arg_op(atom_idx, prev_result), descr)
}
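// For each (expression, reduced-argument) pair, substitutes the argument
// atom at `atom_idx` in the expression and merges both sets of bindings;
// pairs whose bindings cannot be reconciled are dropped from the result.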
fn insert_reducted_arg_op(atom_idx: usize, (mut atoms, args): (Results, Results)) -> StepResult<Results> {
let result = atoms.drain(0..).flat_map(|interpreted_atom| {
args.iter().map(move |arg| {
let mut atom = interpreted_atom.atom().clone();
get_expr_mut(&mut atom).children_mut()[atom_idx] = arg.atom().clone();
let applied_bindings = apply_bindings_to_bindings(arg.bindings(),
interpreted_atom.bindings());
if let Result::Ok(atom_bindings) = applied_bindings {
Bindings::merge(&atom_bindings, arg.bindings())
.map(|bindings| InterpretedAtom(apply_bindings_to_atom(&atom, &bindings), bindings))
} else {
log::debug!("insert_reducted_arg_op: skip bindings: {} which cannot be applied to atom bindings: {}, reason: {:?}",
arg.bindings(), interpreted_atom.bindings(), applied_bindings);
None
}
})
}).filter(Option::is_some).map(Option::unwrap).collect();
log::debug!("insert_reducted_arg_op: result: {:?}", result);
StepResult::ret(result)
}
fn call_plan(context: InterpreterContextRef, input: InterpretedAtom) -> NoInputPlan {
let descr = format!("call {}", input);
Box::new(OperatorPlan::new(|_| call_op(context, input), descr))
}
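// Note: call_op below caches results only for expressions that contain no
// grounded sub-expression; grounded execution may not be pure, so those
// results are recomputed on every call.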
fn call_op(context: InterpreterContextRef, input: InterpretedAtom) -> StepResult<Results> {
log::debug!("call_op: {}", input);
let cached = context.cache.borrow().get(input.atom(), input.bindings());
if let Some(result) = cached {
return_cached_result_plan(result)
} else {
if let Atom::Expression(_) = input.atom() {
if !has_grounded_sub_expr(input.atom()) {
let key = input.atom().clone();
StepResult::execute(SequencePlan::new(
OrPlan::new(
interpret_reducted_plan(context.clone(), input.clone()),
StepResult::ret(vec![input])),
save_result_in_cache_plan(context, key)
))
} else {
StepResult::execute(OrPlan::new(
interpret_reducted_plan(context.clone(), input.clone()),
StepResult::ret(vec![input])))
}
} else {
panic!("Only expressions are expected to be called");
}
}
}
fn return_cached_result_plan(results: Results) -> StepResult<Results> {
let descr = format!("return cached results {:?}", results);
StepResult::execute(OperatorPlan::new(|_| StepResult::ret(results), descr))
}
fn save_result_in_cache_plan(context: InterpreterContextRef, key: Atom) -> OperatorPlan<Results, Results> {
let descr = format!("save results in cache for key {}", key);
OperatorPlan::new(move |results: Results| {
context.cache.borrow_mut().insert(key, results.clone());
StepResult::ret(results)
}, descr)
}
fn interpret_reducted_plan(context: InterpreterContextRef,
input: InterpretedAtom) -> NoInputPlan {
if let Atom::Expression(ref expr) = input.atom() {
if is_grounded(expr) {
// TODO: there is no sense in passing variables as an arguments to
// the grounded atoms, so when grounded atom has a variable as an
// argument it probably should be matched instead.
Box::new(execute_plan(context, input))
} else {
Box::new(match_plan(context, input))
}
} else {
panic!("Only expression is expected, received: {}", input);
}
}
fn execute_plan(context: InterpreterContextRef, input: InterpretedAtom) -> OperatorPlan<(), Results> {
let descr = format!("execute {}", input);
OperatorPlan::new(|_| execute_op(context, input), descr)
}
fn execute_op(context: InterpreterContextRef, input: InterpretedAtom) -> StepResult<Results> {
log::debug!("execute_op: {}", input);
match input {
InterpretedAtom(Atom::Expression(ref expr), ref bindings) => {
let mut expr = expr.clone();
let op = expr.children().get(0).cloned();
if let Some(Atom::Grounded(op)) = op {
let mut args = expr.children_mut().drain(1..).collect();
match op.execute(&mut args) {
Ok(mut vec) => {
let results: Vec<InterpretedAtom> = vec.drain(0..)
.map(|atom| InterpretedAtom(atom, bindings.clone()))
.collect();
if results.is_empty() {
StepResult::ret(results)
} else {
make_alternives_plan(input, results, move |result| {
interpret_as_type_plan(context.clone(),
result, AtomType::Undefined)
})
}
},
Err(msg) => StepResult::err(msg),
}
} else {
panic!("Trying to execute non grounded atom: {}", expr)
}
},
_ => panic!("Unexpected non expression argument: {}", input),
}
}
fn match_plan(context: InterpreterContextRef, input: InterpretedAtom) -> OperatorPlan<(), Results> {
let descr = format!("match {}", input);
OperatorPlan::new(|_| match_op(context, input), descr)
}
fn match_op(context: InterpreterContextRef, input: InterpretedAtom) -> StepResult<Results> {
log::debug!("match_op: {}", input);
let var_x = VariableAtom::new("%X%");
// TODO: unique variable?
let atom_x = Atom::Variable(var_x.clone());
let query = Atom::expr(vec![equal_symbol(), input.atom().clone(), atom_x]);
let mut local_bindings = context.space.query(&query);
let results: Vec<InterpretedAtom> = local_bindings
.drain(0..)
.map(|mut binding| {
let result = binding.remove(&var_x).unwrap();
let result = apply_bindings_to_atom(&result, &binding);
// TODO: sometimes we apply bindings twice: first time here,
// second time when inserting matched argument into nesting
// expression. It should be enough doing it only once.
let bindings = apply_bindings_to_bindings(&binding, input.bindings());
let bindings = bindings.map(|mut bindings| {
binding.drain().for_each(|(k, v)| { bindings.insert(k, v); });
bindings
});
log::debug!("match_op: query: {}, binding: {:?}, result: {}", input, bindings, result);
(result, bindings)
})
.filter(|(_, bindings)| bindings.is_ok())
.map(|(result, bindings)| InterpretedAtom(result, bindings.unwrap()))
.collect();
make_alternives_plan(input, results, move |result| {
interpret_as_type_plan(context.clone(), result, AtomType::Undefined)
})
}
fn make_alternives_plan<T, F, P>(input: InterpretedAtom, mut results: Vec<T>,
plan: F) -> StepResult<Results>
where
F: Fn(T) -> P,
P: 'static + Plan<(), Results>
{
match results.len() {
0 => StepResult::err("No alternatives to interpret further"),
1 => StepResult::execute(plan(results.pop().unwrap())),
_ => {
StepResult::execute(AlternativeInterpretationsPlan::new(
input.0,
results.drain(0..)
.map(|result| -> NoInputPlan { Box::new(plan(result)) })
.collect()))
},
}
}
use std::collections::VecDeque;
/// Plan which interprets the alternatives of an expression in parallel.
/// Each successful result is appended to the overall result of the plan.
/// If no alternative returns a successful result, the plan returns an error.
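///
/// A minimal construction sketch, mirroring the unit tests below:
/// ```ignore
/// let plan = AlternativeInterpretationsPlan::new(sym!("Test"),
///     vec![Box::new(StepResult::ret(vec!["A", "B"])),
///          Box::new(StepResult::ret(vec!["C", "D"]))]);
/// ```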
pub struct AlternativeInterpretationsPlan<T> {
atom: Atom,
plans: VecDeque<Box<dyn Plan<(), Vec<T>>>>,
results: Vec<T>,
success: bool,
}
impl<T> AlternativeInterpretationsPlan<T> {
/// Create new instance of [AlternativeInterpretationsPlan].
///
/// # Arguments
/// `atom` - atom to be printed as root of the alternative interpretations
    /// `plans` - alternative plans for the atom
pub fn new(atom: Atom, plans: Vec<Box<dyn Plan<(), Vec<T>>>>) -> Self {
Self{ atom, plans: plans.into(), results: Vec::new(), success: false }
}
}
impl<T: 'static + Debug> Plan<(), Vec<T>> for AlternativeInterpretationsPlan<T> {
fn step(mut self: Box<Self>, _: ()) -> StepResult<Vec<T>> {
if self.plans.len() == 0 {
if self.success {
StepResult::ret(self.results)
} else {
StepResult::err("No successful alternatives")
}
} else {
let plan = self.plans.pop_front().unwrap();
match plan.step(()) {
StepResult::Execute(next) => {
self.plans.push_front(next);
StepResult::Execute(self)
},
StepResult::Return(mut result) => {
self.results.append(&mut result);
self.success = true;
StepResult::Execute(self)
},
StepResult::Error(message) => {
log::debug!("skip alternative because of error returned: {}", message);
StepResult::Execute(self)
},
}
}
}
}
impl<T: Debug> Debug for AlternativeInterpretationsPlan<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let mut res = write!(f, "interpret alternatives for {} (current results: {:?}):\n", self.atom, self.results);
for (i, plan) in self.plans.iter().enumerate() {
let plan_str = format!("{:?}", plan);
let mut lines = plan_str.lines();
res = res.and_then(|_| write!(f, " {} {}\n",
if i == 0 { ">" } else { "-" }, lines.next().unwrap()));
for line in lines {
res = res.and_then(|_| write!(f, " {}\n", line));
}
}
res
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_match_all() {
let mut space = GroundingSpace::new();
space.add(expr!("=", ("color"), "blue"));
space.add(expr!("=", ("color"), "red"));
space.add(expr!("=", ("color"), "green"));
let expr = expr!(("color"));
assert_eq!(interpret(space, &expr),
Ok(vec![expr!("blue"), expr!("red"), expr!("green")]));
}
#[test]
fn test_frog_reasoning() {
let mut space = GroundingSpace::new();
space.add(expr!("=", ("and", "True", "True"), "True"));
space.add(expr!("=", ("if", "True", then, else), then));
space.add(expr!("=", ("if", "False", then, else), else));
space.add(expr!("=", ("Fritz", "croaks"), "True"));
space.add(expr!("=", ("Fritz", "eats-flies"), "True"));
space.add(expr!("=", ("Tweety", "chirps"), "True"));
space.add(expr!("=", ("Tweety", "yellow"), "True"));
space.add(expr!("=", ("Tweety", "eats-flies"), "True"));
let expr = expr!("if", ("and", (x, "croaks"), (x, "eats-flies")),
("=", (x, "frog"), "True"), "nop");
assert_eq!(interpret(space, &expr),
Ok(vec![expr!("=", ("Fritz", "frog"), "True")]));
}
#[test]
fn test_variable_keeps_value_in_different_sub_expressions() {
let mut space = GroundingSpace::new();
space.add(expr!("=", ("eq", x, x), "True"));
space.add(expr!("=", ("plus", "Z", y), y));
space.add(expr!("=", ("plus", ("S", k), y), ("S", ("plus", k, y))));
assert_eq!(interpret(space.clone(), &expr!("eq", ("plus", "Z", n), n)),
Ok(vec![expr!("True")]));
assert_eq!(interpret(space.clone(), &expr!("eq", ("plus", ("S", "Z"), n), n)),
Ok(vec![expr!("eq", ("S", y), y)]));
}
fn test_interpret<T, R, P: Plan<T, R>>(plan: P, arg: T) -> Result<R, String> {
let mut step = Box::new(plan).step(arg);
loop {
match step {
StepResult::Execute(plan) => step = plan.step(()),
StepResult::Return(result) => return Ok(result),
StepResult::Error(message) => return Err(message),
}
}
}
#[test]
fn test_make_alternatives_plan_no_alternative() {
let plan = make_alternives_plan(InterpretedAtom(sym!("Test"), Bindings::new()),
vec![], |_res: InterpretedAtom| StepResult::ret(vec![]));
let result = test_interpret(plan, ());
assert_eq!(Err("No alternatives to interpret further".into()), result);
}
#[test]
fn test_alternatives_plan_single_alternative() {
let plan = AlternativeInterpretationsPlan::new(sym!("Test"),
vec![Box::new(StepResult::ret(vec!["A", "B"]))]);
let result = test_interpret(plan, ());
assert_eq!(Ok(vec!["A", "B"]), result);
}
#[test]
fn test_alternatives_plan_few_alternatives() {
let plan = AlternativeInterpretationsPlan::new(sym!("Test"),
vec![Box::new(StepResult::ret(vec!["A", "B"])),
Box::new(StepResult::ret(vec!["C", "D"]))]);
let result = test_interpret(plan, ());
assert_eq!(Ok(vec!["A", "B", "C", "D"]), result);
}
#[test]
fn test_alternatives_plan_error_present() {
let plan = AlternativeInterpretationsPlan::new(sym!("Test"),
vec![Box::new(StepResult::err("Expected error")),
Box::new(StepResult::ret(vec!["C", "D"]))]);
let result = test_interpret(plan, ());
assert_eq!(Ok(vec!["C", "D"]), result);
}
#[test]
fn test_alternatives_plan_only_errors() {
let plan: AlternativeInterpretationsPlan<&'static str> =
AlternativeInterpretationsPlan::new(sym!("Test"),
vec![Box::new(StepResult::err("Expected error")),
Box::new(StepResult::err("Another expected error"))]);
let result = test_interpret(plan, ());
assert_eq!(Err("No successful alternatives".into()), result);
}
#[test]
fn test_variable_defined_via_variable() {
let mut space = GroundingSpace::new();
space.add(expr!("=", ("if", "True", y), y));
space.add(expr!("=", ("not", "False"), "True"));
space.add(expr!("=", ("a", z), ("not", ("b", z))));
space.add(expr!("=", ("b", "d"), "False"));
let expr = expr!("if", ("a", x), x);
assert_eq!(interpret(space, &expr), Ok(vec![expr!("d")]));
}
}
| 40.285333 | 136 | 0.589958 |
894dbad97fc0d0897dc889d91bf6ae690d0cd29c | 2,290 | // Copyright (c) the JPEG XL Project Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
use crate::bit_reader::BitReader;
use crate::error::Error;
use std::collections::HashSet;
use crate::entropy_coding::decode::*;
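// Move-to-front decoding helpers: `move_to_front` rotates the entry at
// `index` to the front of the table, and `inverse_move_to_front` rewrites
// each stored index in `v` with the symbol currently at that position,
// updating the table as it goes.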
fn move_to_front(v: &mut [u8], index: u8) {
let value = v[index as usize];
for i in (1..=index as usize).rev() {
v[i] = v[i - 1];
}
v[0] = value;
}
fn inverse_move_to_front(v: &mut [u8]) {
use array_init::array_init;
let mut mtf: [u8; 256] = array_init(|x| x as u8);
for val in v.iter_mut() {
let index = *val;
*val = mtf[index as usize];
if index != 0 {
move_to_front(&mut mtf, index);
}
}
}
fn verify_context_map(ctx_map: &[u8]) -> Result<(), Error> {
let num_histograms = *ctx_map.iter().max().unwrap() as u32 + 1;
let distinct_histograms = ctx_map.iter().collect::<HashSet<_>>().len() as u32;
if distinct_histograms != num_histograms {
return Err(Error::InvalidContextMapHole(
num_histograms,
distinct_histograms,
));
}
Ok(())
}
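// Decodes a context map: either the "simple" form (a fixed number of bits
// per entry) or an entropy-coded form with an optional inverse
// move-to-front transform, then validates that the histogram indices form
// a contiguous range.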
pub fn decode_context_map(num_contexts: usize, br: &mut BitReader) -> Result<Vec<u8>, Error> {
use std::iter::FromIterator;
let is_simple = br.read(1)? != 0;
if is_simple {
let bits_per_entry = br.read(2)? as usize;
if bits_per_entry != 0 {
Result::from_iter((0..num_contexts).map(|_| Ok(br.read(bits_per_entry)? as u8)))
} else {
Ok(vec![0u8; num_contexts])
}
} else {
let use_mtf = br.read(1)? != 0;
let histograms = Histograms::decode(1, br, /*allow_lz77=*/ num_contexts > 2)?;
let reader = histograms.make_reader(br)?;
let mut ctx_map: Vec<u8> = Result::from_iter((0..num_contexts).map(|_| {
let mv = reader.read(br, 0usize)?;
if mv > u8::MAX as u32 {
Err(Error::InvalidContextMap(mv))
} else {
Ok(mv as u8)
}
}))?;
reader.check_final_state()?;
if use_mtf {
inverse_move_to_front(&mut ctx_map[..]);
}
verify_context_map(&ctx_map[..])?;
Ok(ctx_map)
}
}
| 30.533333 | 94 | 0.565939 |
083e401be10af496a630d75cf7be7c703711daaa | 10,448 | use std::borrow::Cow;
use std::fs::OpenOptions;
use std::io::Read;
use std::path::Path;
use std::collections::{HashMap, BTreeMap};
use minidom::{Element, Error};
use serde::{Serialize, Deserialize};
use crate::utils::prelude::*;
use failure::Error as FailError;
mod component;
mod condition;
mod device;
pub use component::{ComponentBuilders, FileRef};
pub use condition::{Condition, Conditions};
pub use device::{Device, Devices, Memories, Algorithm, Processors, Core};
pub struct Release {
pub version: String,
pub text: String,
}
impl FromElem for Release {
fn from_elem(e: &Element) -> Result<Self, Error> {
assert_root_name(e, "release")?;
Ok(Self {
version: attr_map(e, "version", "release")?,
text: e.text(),
})
}
}
#[derive(Default)]
pub struct Releases(Vec<Release>);
impl Releases {
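    // Assumes the PDSC lists releases newest-first, so the first entry is
    // the most recent one.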
pub fn latest_release(&self) -> &Release {
&self.0[0]
}
}
impl FromElem for Releases {
fn from_elem(e: &Element) -> Result<Self, Error> {
assert_root_name(e, "releases")?;
let to_ret: Vec<_> = e.children()
.flat_map(|c| Release::from_elem(c).ok_warn())
.collect();
if to_ret.is_empty() {
Err(err_msg!("There must be at least one release!"))
} else {
Ok(Releases(to_ret))
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DumpDevice<'a> {
name: &'a str,
memories: Cow<'a, Memories>,
algorithms: Cow<'a, Vec<Algorithm>>,
processor: Cow<'a, Processors>,
from_pack: FromPack<'a>,
vendor: Option<&'a str>,
family: &'a str,
sub_family: Option<&'a str>
}
#[derive(Clone, Debug, Serialize, Deserialize)]
struct FromPack<'a> {
vendor: &'a str,
pack: &'a str,
version: &'a str,
url: &'a str,
}
impl<'a> FromPack<'a> {
fn new(vendor: &'a str, pack: &'a str, version: &'a str, url: &'a str) -> Self {
Self {
vendor,
pack,
version,
url,
}
}
}
impl<'a> DumpDevice<'a> {
fn from_device(dev: &'a Device, from_pack: FromPack<'a>) -> Self {
Self {
name: &dev.name,
memories: Cow::Borrowed(&dev.memories),
algorithms: Cow::Borrowed(&dev.algorithms),
processor: Cow::Borrowed(&dev.processor),
from_pack,
vendor: dev.vendor.as_ref().map(String::as_str),
family: &dev.family,
sub_family: dev.sub_family.as_ref().map(String::as_str),
}
}
}
pub struct Package {
pub name: String,
pub description: String,
pub vendor: String,
pub url: String,
pub license: Option<String>,
components: ComponentBuilders,
pub releases: Releases,
pub conditions: Conditions,
pub devices: Devices,
pub boards: Vec<Board>,
}
impl FromElem for Package {
fn from_elem(e: &Element) -> Result<Self, Error> {
assert_root_name(e, "package")?;
let name: String = child_text(e, "name", "package")?;
let description: String = child_text(e, "description", "package")?;
let vendor: String = child_text(e, "vendor", "package")?;
let url: String = child_text(e, "url", "package")?;
log::info!(
"Working on {}::{}",
vendor,
name,
);
let components = get_child_no_ns(e, "components")
.and_then(|c| ComponentBuilders::from_elem(c).ok_warn())
.unwrap_or_default();
let releases = get_child_no_ns(e, "releases")
.and_then(|c| Releases::from_elem(c).ok_warn())
.unwrap_or_default();
let conditions = get_child_no_ns(e, "conditions")
.and_then(|c| Conditions::from_elem(c).ok_warn())
.unwrap_or_default();
let devices = get_child_no_ns(e, "devices")
.and_then(|c| Devices::from_elem(c).ok_warn())
.unwrap_or_default();
let boards = get_child_no_ns(e, "boards")
.map(|c| Board::vec_from_children(c.children()))
.unwrap_or_default();
Ok(Self {
name,
description,
vendor,
url,
components,
license: child_text(e, "license", "package").ok(),
releases,
conditions,
devices,
boards,
})
}
}
#[derive(Debug, Deserialize, Serialize)]
pub struct Board {
name: String,
mounted_devices: Vec<String>,
}
impl FromElem for Board {
fn from_elem(e: &Element) -> Result<Self, Error> {
Ok(Self {
name: attr_map(e, "name", "board")?,
mounted_devices: e.children()
.flat_map(|c| match c.name() {
"mountedDevice" => attr_map(c, "Dname", "mountedDevice").ok(),
_ => None,
})
.collect(),
})
}
}
#[derive(Debug, Serialize)]
pub struct Component {
pub vendor: String,
pub class: String,
pub group: String,
pub sub_group: Option<String>,
pub variant: Option<String>,
pub version: String,
pub api_version: Option<String>,
pub condition: Option<String>,
pub max_instances: Option<u8>,
pub is_default: bool,
pub deprecated: bool,
pub description: String,
pub rte_addition: String,
pub files: Vec<FileRef>,
}
type Components = Vec<Component>;
impl Package {
pub fn make_components(&self) -> Components {
self.components
.0
.clone()
.into_iter()
.map(|comp| {
Component {
vendor: comp.vendor.unwrap_or_else(|| self.vendor.clone()),
class: comp.class.unwrap(),
group: comp.group.unwrap(),
sub_group: comp.sub_group,
variant: comp.variant,
version: comp.version.unwrap_or_else(|| {
self.releases.latest_release().version.clone()
}),
api_version: comp.api_version,
condition: comp.condition,
max_instances: comp.max_instances,
is_default: comp.is_default,
deprecated: comp.deprecated,
description: comp.description,
rte_addition: comp.rte_addition,
files: comp.files,
}
})
.collect()
}
pub fn make_condition_lookup<'a>(&'a self) -> HashMap<&'a str, &'a Condition> {
let mut map = HashMap::with_capacity(self.conditions.0.iter().count());
for cond in self.conditions.0.iter() {
if let Some(dup) = map.insert(cond.id.as_str(), cond) {
log::warn!("Duplicate Condition found {}", dup.id);
}
}
map
}
pub fn make_dump_devices<'a>(&'a self) -> Vec<(&'a str, DumpDevice<'a>)> {
let from_pack = FromPack::new(
&self.vendor,
&self.name,
&self.releases.latest_release().version,
&self.url,
);
self.devices
.0
.iter()
.map(|(name, d)| {
(name.as_str(), DumpDevice::from_device(d, from_pack.clone()))
})
.collect()
}
}
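/// Dump device and board information collected from the given packages as
/// JSON. When a destination path is provided, entries already present in
/// the file are merged with the new ones; otherwise the JSON is printed to
/// stdout.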
pub fn dump_devices<'a, P: AsRef<Path>, I: IntoIterator<Item = &'a Package>>(
pdscs: I,
device_dest: Option<P>,
board_dest: Option<P>,
) -> Result<(), FailError> {
let pdscs: Vec<&Package> = pdscs.into_iter().collect();
let devices = pdscs
.iter()
.flat_map(|pdsc| pdsc.make_dump_devices().into_iter())
.collect::<HashMap<_, _>>();
match device_dest {
Some(to_file) => {
if !devices.is_empty() {
let mut file_contents = Vec::new();
let mut old_devices: HashMap<&str, DumpDevice> = HashMap::new();
if let Ok(mut fd) = OpenOptions::new().read(true).open(to_file.as_ref()) {
fd.read_to_end(&mut file_contents)?;
old_devices = serde_json::from_slice(&file_contents).unwrap_or_default();
}
let mut all_devices = BTreeMap::new();
all_devices.extend(old_devices.iter());
all_devices.extend(devices.iter());
let mut options = OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
if let Ok(fd) = options.open(to_file.as_ref()) {
serde_json::to_writer_pretty(fd, &all_devices).unwrap();
} else {
println!("Could not open file {:?}", to_file.as_ref());
}
}
}
None => println!("{}", &serde_json::to_string_pretty(&devices).unwrap()),
}
let boards = pdscs
.iter()
.flat_map(|pdsc| pdsc.boards.iter())
.map(|b| (&b.name, b))
.collect::<HashMap<_, _>>();
match board_dest {
Some(to_file) => {
let mut file_contents = Vec::new();
let mut old_boards: HashMap<String, Board> = HashMap::new();
if let Ok(mut fd) = OpenOptions::new().read(true).open(to_file.as_ref()) {
fd.read_to_end(&mut file_contents)?;
old_boards = serde_json::from_slice(&file_contents).unwrap_or_default();
}
let mut all_boards = BTreeMap::new();
all_boards.extend(old_boards.iter());
all_boards.extend(boards.iter());
let mut options = OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
if let Ok(fd) = options.open(to_file.as_ref()) {
serde_json::to_writer_pretty(fd, &all_boards).unwrap();
} else {
println!("Could not open file {:?}", to_file.as_ref());
}
}
        None => println!("{}", &serde_json::to_string_pretty(&boards).unwrap()),
}
Ok(())
}
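/// Serialize all components from the given packages into a pretty-printed
/// JSON string.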
pub fn dumps_components<'a, I>(pdscs: I) -> Result<String, FailError>
where I: IntoIterator<Item = &'a Package>,
{
let components = pdscs
.into_iter()
.flat_map(|pdsc| pdsc.make_components().into_iter())
.collect::<Vec<_>>();
Ok(serde_json::to_string_pretty(&components)?)
}
| 31.660606 | 93 | 0.537615 |
9b835b0d14ec954c8d79d7aa697fccbb720919f9 | 4,760 | //! Simple HTTPS echo service based on hyper-rustls
//!
//! First parameter is the mandatory port to use.
//! Certificate and private key are hardcoded to sample files.
//! hyper will automatically use HTTP/2 if a client starts talking HTTP/2,
//! otherwise HTTP/1.1 will be used.
use std::{env, fs, io, sync};
use async_stream::stream;
use futures_util::future::TryFutureExt;
use hyper::server::accept;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Server, StatusCode};
use tokio::net::TcpListener;
use tokio_rustls::TlsAcceptor;
fn main() {
// Serve an echo service over HTTPS, with proper error handling.
if let Err(e) = run_server() {
eprintln!("FAILED: {}", e);
std::process::exit(1);
}
}
fn error(err: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, err)
}
#[tokio::main]
async fn run_server() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// First parameter is port number (optional, defaults to 1337)
let port = match env::args().nth(1) {
Some(ref p) => p.to_owned(),
None => "1337".to_owned(),
};
let addr = format!("127.0.0.1:{}", port);
// Build TLS configuration.
let tls_cfg = {
// Load public certificate.
let certs = load_certs("examples/sample.pem")?;
// Load private key.
let key = load_private_key("examples/sample.rsa")?;
// Do not use client certificate authentication.
let mut cfg = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, key)
.map_err(|e| error(format!("{}", e)))?;
// Configure ALPN to accept HTTP/2, HTTP/1.1 in that order.
cfg.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
sync::Arc::new(cfg)
};
// Create a TCP listener via tokio.
let tcp = TcpListener::bind(&addr).await?;
let tls_acceptor = TlsAcceptor::from(tls_cfg);
// Prepare a long-running future stream to accept and serve clients.
let incoming_tls_stream = stream! {
loop {
let (socket, _) = tcp.accept().await?;
let stream = tls_acceptor.accept(socket).map_err(|e| {
println!("[!] Voluntary server halt due to client-connection error...");
// Errors could be handled here, instead of server aborting.
// Ok(None)
error(format!("TLS Error: {:?}", e))
});
yield stream.await;
}
};
let acceptor = accept::from_stream(incoming_tls_stream);
let service = make_service_fn(|_| async { Ok::<_, io::Error>(service_fn(echo)) });
let server = Server::builder(acceptor).serve(service);
// Run the future, keep going until an error occurs.
println!("Starting to serve on https://{}.", addr);
server.await?;
Ok(())
}
// Custom echo service, handling two different routes and a
// catch-all 404 responder.
async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
let mut response = Response::new(Body::empty());
match (req.method(), req.uri().path()) {
// Help route.
(&Method::GET, "/") => {
*response.body_mut() = Body::from("Try POST /echo\n");
}
// Echo service route.
(&Method::POST, "/echo") => {
*response.body_mut() = req.into_body();
}
// Catch-all 404.
_ => {
*response.status_mut() = StatusCode::NOT_FOUND;
}
};
Ok(response)
}
// Load public certificate from file.
fn load_certs(filename: &str) -> io::Result<Vec<rustls::Certificate>> {
// Open certificate file.
let certfile = fs::File::open(filename)
.map_err(|e| error(format!("failed to open {}: {}", filename, e)))?;
let mut reader = io::BufReader::new(certfile);
// Load and return certificate.
let certs = rustls_pemfile::certs(&mut reader)
.map_err(|_| error("failed to load certificate".into()))?;
Ok(certs
.into_iter()
.map(rustls::Certificate)
.collect())
}
// Load private key from file.
fn load_private_key(filename: &str) -> io::Result<rustls::PrivateKey> {
// Open keyfile.
let keyfile = fs::File::open(filename)
.map_err(|e| error(format!("failed to open {}: {}", filename, e)))?;
let mut reader = io::BufReader::new(keyfile);
// Load and return a single private key.
let keys = rustls_pemfile::rsa_private_keys(&mut reader)
.map_err(|_| error("failed to load private key".into()))?;
if keys.len() != 1 {
return Err(error("expected a single private key".into()));
}
Ok(rustls::PrivateKey(keys[0].clone()))
}
| 35.522388 | 88 | 0.601261 |
764fc9a097b891512292122f32821e478ed4b151 | 3,380 | use pest::Parser;
#[cfg(debug_assertions)]
const _GRAMMAR: &'static str = include_str!("normalise.pest");
#[derive(Parser)]
#[grammar = "parse/normalise.pest"]
struct Normaliser;
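/// Lower-cases the input, maps typographic variants of quotes, dashes,
/// brackets and equals signs to their ASCII forms, and collapses runs of
/// whitespace into single spaces (see the tests below); e.g.
/// `normalise("a — b".into())` yields `"a - b"`.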
pub fn normalise(input: String) -> String {
Normaliser::parse_str(Rule::space_split, &(input.to_lowercase()))
.unwrap()
.filter_map(|pair| match pair.as_rule() {
Rule::not_space => {
let chars = pair.clone().into_inner();
Some(
chars
.map(|c| match c.as_rule() {
Rule::double_quote => "\"".into(),
Rule::single_quote => "'".into(),
Rule::dash => "-".into(),
Rule::left_bracket => "(".into(),
Rule::right_bracket => ")".into(),
Rule::equals => "=".into(),
_ => c.clone().into_span().as_str().into(),
})
.collect::<Vec<String>>()
.join(""),
)
}
_ => None,
})
.collect::<Vec<String>>()
.join(" ")
}
#[cfg(test)]
macro_rules! test_normaliser {
($input:expr, $output:expr) =>
(assert_eq!(normalise(format!("{}", $input)), $output))
}
#[cfg(test)]
macro_rules! test_spaces {
($input:expr) =>
(test_normaliser!(format!("x{}x", $input), "x x"))
}
#[test]
fn spaces() {
// Usual spaces
test_spaces!("\n");
test_spaces!("\t");
test_spaces!(" ");
test_spaces!(" "); // IDEOGRAPHIC SPACE
// Unusual spaces
test_spaces!(" "); // NBSP
test_spaces!(" "); // EN QUAD
test_spaces!(" "); // EM QUAD
test_spaces!(" "); // EN SPACE
test_spaces!(" "); // EM SPACE
test_spaces!(" "); // FIGURE SPACE
// Rare spaces
test_spaces!("\u{85}");
test_spaces!("\u{2004}");
test_spaces!("\u{2005}");
test_spaces!("\u{2006}");
test_spaces!("\u{2008}");
test_spaces!("\u{2009}");
test_spaces!("\u{200A}");
test_spaces!("\u{202F}");
test_spaces!("\u{205F}");
// Space collapsing
test_spaces!(" ");
test_spaces!(" \n\t ");
}
#[test]
fn double_quotes() {
test_normaliser!("\"", "\"");
test_normaliser!("“", "\"");
test_normaliser!("”", "\"");
test_normaliser!("«", "\"");
test_normaliser!("»", "\"");
}
#[test]
fn single_quotes() {
test_normaliser!("'", "'");
test_normaliser!("‘", "'");
test_normaliser!("’", "'");
test_normaliser!("‹", "'");
test_normaliser!("›", "'");
}
#[test]
fn brackets() {
test_normaliser!("(", "(");
test_normaliser!("{", "(");
test_normaliser!("[", "(");
test_normaliser!(")", ")");
test_normaliser!("}", ")");
test_normaliser!("]", ")");
}
#[test]
fn equals() {
test_normaliser!("=", "=");
test_normaliser!(":", "=");
test_normaliser!("≈", "=");
}
#[test]
fn dashes() {
test_normaliser!("-", "-");
test_normaliser!("--", "-");
test_normaliser!("—", "-");
test_normaliser!("–", "-");
test_normaliser!("‒", "-");
test_normaliser!("⁓", "-");
test_normaliser!("ー", "-");
test_normaliser!("―", "-");
test_normaliser!("⸺", "-");
test_normaliser!("⸻", "-");
test_normaliser!("〜", "-");
test_normaliser!("~", "-");
}
| 25.606061 | 71 | 0.469231 |
8a574bcf325a4a4f973a77885f3503dc5288208f | 61,799 | #![allow(
unused_parens,
clippy::excessive_precision,
clippy::missing_safety_doc,
clippy::not_unsafe_ptr_arg_deref,
clippy::should_implement_trait,
clippy::too_many_arguments,
clippy::unused_unit,
)]
//! # Hierarchical Data Format I/O routines
//!
//! This module provides storage routines for Hierarchical Data Format objects.
//! # Hierarchical Data Format version 5
//!
//! Hierarchical Data Format version 5
//! --------------------------------------------------------
//!
//! In order to use it, the hdf5 library has to be installed, which
//! means cmake should find it using `find_package(HDF5)` .
use crate::{mod_prelude::*, core, sys, types};
pub mod prelude {
pub use { super::HDF5Const, super::HDF5 };
}
/// Get the chunk sizes of a dataset. see also: dsgetsize()
pub const HDF5_H5_GETCHUNKDIMS: i32 = 102;
/// Get the dimension information of a dataset. see also: dsgetsize()
pub const HDF5_H5_GETDIMS: i32 = 100;
/// Get the maximum dimension information of a dataset. see also: dsgetsize()
pub const HDF5_H5_GETMAXDIMS: i32 = 101;
/// No compression, see also: dscreate()
pub const HDF5_H5_NONE: i32 = -1;
/// The dimension size is unlimited, see also: dscreate()
pub const HDF5_H5_UNLIMITED: i32 = -1;
/// Open or create hdf5 file
/// ## Parameters
/// * HDF5Filename: specify the HDF5 filename.
///
/// Returns a pointer to the hdf5 object class
///
///
/// Note: If the specified file does not exist, it will be created using default properties.
/// Otherwise, it is opened in read and write mode with default access properties.
/// All operations except the dscreate() functions on the object are thread safe. Multiple datasets can be created
/// inside a single hdf5 file, and can be accessed from the same hdf5 object from multiple instances as long as read
/// or write operations are done over non-overlapping regions of the dataset. A single hdf5 file can also be opened
/// by multiple instances, and reads and writes can be issued at the same time as long as non-overlapping regions
/// are involved. The object is released using close().
///
/// - Example below opens and then releases the file.
/// ```ignore
/// // open / auto create hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // ...
/// // release
/// h5io->close();
/// ```
///
///
/// 
///
/// - Text dump (3x3 Hilbert matrix) of hdf5 dataset using **h5dump** tool:
/// ```ignore
/// $ h5dump test.h5
/// HDF5 "test.h5" {
/// GROUP "/" {
/// DATASET "hilbert" {
/// DATATYPE H5T_ARRAY { [2] H5T_IEEE_F64LE }
/// DATASPACE SIMPLE { ( 3, 3 ) / ( 3, 3 ) }
/// DATA {
/// (0,0): [ 1, -1 ], [ 0.5, -0.5 ], [ 0.333333, -0.333333 ],
/// (1,0): [ 0.5, -0.5 ], [ 0.333333, -0.333333 ], [ 0.25, -0.25 ],
/// (2,0): [ 0.333333, -0.333333 ], [ 0.25, -0.25 ], [ 0.2, -0.2 ]
/// }
/// }
/// }
/// }
/// ```
///
#[inline]
pub fn open(hdf5_filename: &str) -> Result<core::Ptr<dyn crate::hdf::HDF5>> {
extern_container_arg!(hdf5_filename);
let ret = unsafe { sys::cv_hdf_open_const_StringR(hdf5_filename.opencv_as_extern()) }.into_result()?;
let ret = unsafe { core::Ptr::<dyn crate::hdf::HDF5>::opencv_from_extern(ret) };
Ok(ret)
}
/// Hierarchical Data Format version 5 interface.
///
/// Notice that this module is compiled only when hdf5 is correctly installed.
pub trait HDF5Const {
fn as_raw_HDF5(&self) -> *const c_void;
/// Check if label exists or not.
/// ## Parameters
/// * label: specify the hdf5 dataset label.
///
/// Returns **true** if dataset exists, and **false** otherwise.
///
///
/// Note: Checks if dataset, group or other object type (hdf5 link) exists under the label name. It is thread safe.
#[inline]
fn hlexists(&self, label: &str) -> Result<bool> {
extern_container_arg!(label);
let ret = unsafe { sys::cv_hdf_HDF5_hlexists_const_const_StringR(self.as_raw_HDF5(), label.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Check whether a given attribute exits or not in the root group.
///
/// ## Parameters
/// * atlabel: the attribute name to be checked.
/// ## Returns
/// true if the attribute exists, false otherwise.
/// ## See also
/// atdelete, atwrite, atread
#[inline]
fn atexists(&self, atlabel: &str) -> Result<bool> {
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atexists_const_const_StringR(self.as_raw_HDF5(), atlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Create and allocate storage for n-dimensional dataset, single or multichannel type.
/// ## Parameters
/// * n_dims: declare number of dimensions
/// * sizes: array containing sizes for each dimensions
/// * type: type to be used, e.g., CV_8UC3, CV_32FC1, etc.
/// * dslabel: specify the hdf5 dataset label. Existing dataset label will cause an error.
/// * compresslevel: specify the compression level 0-9 to be used, H5_NONE is the default value and means no compression.
/// The value 0 also means no compression.
	/// A value of 9 indicates the best compression ratio. Note
/// that a higher compression level indicates a higher computational cost. It relies
/// on GNU gzip for compression.
/// * dims_chunks: each array member specifies chunking sizes to be used for block I/O,
/// by default NULL means none at all.
///
/// Note: If the dataset already exists, an exception will be thrown. Existence of the dataset can be checked
/// using hlexists().
///
/// - See example below that creates a 6 dimensional storage space:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create space for 6 dimensional CV_64FC2 matrix
/// if ( ! h5io->hlexists( "nddata" ) )
/// int n_dims = 5;
/// int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
/// h5io->dscreate( n_dims, sizes, CV_64FC2, "nddata" );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: Activating compression requires internal chunking. Chunking can significantly improve access
/// speed both at read and write time, especially for windowed access logic that shifts offset inside dataset.
/// If no custom chunking is specified, the default one will be invoked by the size of **whole** dataset
/// as single big chunk of data.
///
/// - See example of level 0 compression (shallow) using chunking against the first
	/// dimension, thus storage will consist of 100 chunks of data:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create space for 6 dimensional CV_64FC2 matrix
/// if ( ! h5io->hlexists( "nddata" ) )
/// int n_dims = 5;
/// int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
/// int chunks[n_dims] = { 1, 100, 20, 10, 5, 5 };
/// h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", 0, chunks );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: A value of H5_UNLIMITED inside the **sizes** array means **unlimited** data on that dimension, thus it is
	/// possible to expand such a dataset anytime along those unlimited directions. Presence of H5_UNLIMITED on any dimension
	/// **requires** defining custom chunking. No default chunking will be defined in the unlimited scenario since the default size
	/// on that dimension will be zero, and will grow once the dataset is written. Writing into a dataset that has H5_UNLIMITED on
	/// some of its dimensions requires dsinsert(), which allows growth on unlimited dimensions, instead of dswrite(), which
	/// only writes within the predefined data space.
///
/// - Example below shows a 3 dimensional dataset using no compression with all unlimited sizes and one unit chunking:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// int n_dims = 3;
/// int chunks[n_dims] = { 1, 1, 1 };
/// int dsdims[n_dims] = { cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED };
/// h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", cv::hdf::HDF5::H5_NONE, chunks );
/// // release
/// h5io->close();
/// ```
///
///
/// ## Overloaded parameters
#[inline]
fn dscreate(&self, rows: i32, cols: i32, typ: i32, dslabel: &str) -> Result<()> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dscreate_const_const_int_const_int_const_int_const_StringR(self.as_raw_HDF5(), rows, cols, typ, dslabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Create and allocate storage for n-dimensional dataset, single or multichannel type.
/// ## Parameters
/// * n_dims: declare number of dimensions
/// * sizes: array containing sizes for each dimensions
/// * type: type to be used, e.g., CV_8UC3, CV_32FC1, etc.
/// * dslabel: specify the hdf5 dataset label. Existing dataset label will cause an error.
/// * compresslevel: specify the compression level 0-9 to be used, H5_NONE is the default value and means no compression.
/// The value 0 also means no compression.
	/// A value of 9 indicates the best compression ratio. Note
/// that a higher compression level indicates a higher computational cost. It relies
/// on GNU gzip for compression.
/// * dims_chunks: each array member specifies chunking sizes to be used for block I/O,
/// by default NULL means none at all.
///
/// Note: If the dataset already exists, an exception will be thrown. Existence of the dataset can be checked
/// using hlexists().
///
/// - See example below that creates a 6 dimensional storage space:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create space for 6 dimensional CV_64FC2 matrix
/// if ( ! h5io->hlexists( "nddata" ) )
/// int n_dims = 5;
/// int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
/// h5io->dscreate( n_dims, sizes, CV_64FC2, "nddata" );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: Activating compression requires internal chunking. Chunking can significantly improve access
/// speed both at read and write time, especially for windowed access logic that shifts offset inside dataset.
/// If no custom chunking is specified, the default one will be invoked by the size of **whole** dataset
/// as single big chunk of data.
///
/// - See example of level 0 compression (shallow) using chunking against the first
	/// dimension, thus storage will consist of 100 chunks of data:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create space for 6 dimensional CV_64FC2 matrix
/// if ( ! h5io->hlexists( "nddata" ) )
/// int n_dims = 5;
/// int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
/// int chunks[n_dims] = { 1, 100, 20, 10, 5, 5 };
/// h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", 0, chunks );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: A value of H5_UNLIMITED inside the **sizes** array means **unlimited** data on that dimension, thus it is
	/// possible to expand such a dataset anytime along those unlimited directions. Presence of H5_UNLIMITED on any dimension
	/// **requires** defining custom chunking. No default chunking will be defined in the unlimited scenario since the default size
	/// on that dimension will be zero, and will grow once the dataset is written. Writing into a dataset that has H5_UNLIMITED on
	/// some of its dimensions requires dsinsert(), which allows growth on unlimited dimensions, instead of dswrite(), which
	/// only writes within the predefined data space.
///
/// - Example below shows a 3 dimensional dataset using no compression with all unlimited sizes and one unit chunking:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// int n_dims = 3;
/// int chunks[n_dims] = { 1, 1, 1 };
/// int dsdims[n_dims] = { cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED };
/// h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", cv::hdf::HDF5::H5_NONE, chunks );
/// // release
/// h5io->close();
/// ```
///
///
/// ## Overloaded parameters
#[inline]
fn dscreate_1(&self, rows: i32, cols: i32, typ: i32, dslabel: &str, compresslevel: i32) -> Result<()> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dscreate_const_const_int_const_int_const_int_const_StringR_const_int(self.as_raw_HDF5(), rows, cols, typ, dslabel.opencv_as_extern(), compresslevel) }.into_result()?;
Ok(ret)
}
/// Create and allocate storage for n-dimensional dataset, single or multichannel type.
/// ## Parameters
/// * n_dims: declare number of dimensions
/// * sizes: array containing sizes for each dimensions
/// * type: type to be used, e.g., CV_8UC3, CV_32FC1, etc.
/// * dslabel: specify the hdf5 dataset label. Existing dataset label will cause an error.
/// * compresslevel: specify the compression level 0-9 to be used, H5_NONE is the default value and means no compression.
/// The value 0 also means no compression.
	/// A value of 9 indicates the best compression ratio. Note
/// that a higher compression level indicates a higher computational cost. It relies
/// on GNU gzip for compression.
/// * dims_chunks: each array member specifies chunking sizes to be used for block I/O,
/// by default NULL means none at all.
///
/// Note: If the dataset already exists, an exception will be thrown. Existence of the dataset can be checked
/// using hlexists().
///
/// - See example below that creates a 6 dimensional storage space:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create space for 6 dimensional CV_64FC2 matrix
/// if ( ! h5io->hlexists( "nddata" ) )
/// int n_dims = 5;
/// int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
/// h5io->dscreate( n_dims, sizes, CV_64FC2, "nddata" );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: Activating compression requires internal chunking. Chunking can significantly improve access
/// speed both at read and write time, especially for windowed access logic that shifts offset inside dataset.
/// If no custom chunking is specified, the default one will be invoked by the size of **whole** dataset
/// as single big chunk of data.
///
/// - See example of level 0 compression (shallow) using chunking against the first
	/// dimension, thus storage will consist of 100 chunks of data:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create space for 6 dimensional CV_64FC2 matrix
/// if ( ! h5io->hlexists( "nddata" ) )
/// int n_dims = 5;
/// int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
/// int chunks[n_dims] = { 1, 100, 20, 10, 5, 5 };
/// h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", 0, chunks );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: A value of H5_UNLIMITED inside the **sizes** array means **unlimited** data on that dimension, thus it is
	/// possible to expand such a dataset anytime along those unlimited directions. Presence of H5_UNLIMITED on any dimension
	/// **requires** defining custom chunking. No default chunking will be defined in the unlimited scenario since the default size
	/// on that dimension will be zero, and will grow once the dataset is written. Writing into a dataset that has H5_UNLIMITED on
	/// some of its dimensions requires dsinsert(), which allows growth on unlimited dimensions, instead of dswrite(), which
	/// only writes within the predefined data space.
///
/// - Example below shows a 3 dimensional dataset using no compression with all unlimited sizes and one unit chunking:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// int n_dims = 3;
/// int chunks[n_dims] = { 1, 1, 1 };
/// int dsdims[n_dims] = { cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED };
/// h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", cv::hdf::HDF5::H5_NONE, chunks );
/// // release
/// h5io->close();
/// ```
///
///
/// ## Overloaded parameters
#[inline]
fn dscreate_2(&self, rows: i32, cols: i32, typ: i32, dslabel: &str, compresslevel: i32, dims_chunks: &core::Vector<i32>) -> Result<()> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dscreate_const_const_int_const_int_const_int_const_StringR_const_int_const_vector_int_R(self.as_raw_HDF5(), rows, cols, typ, dslabel.opencv_as_extern(), compresslevel, dims_chunks.as_raw_VectorOfi32()) }.into_result()?;
Ok(ret)
}
/// Create and allocate storage for two dimensional single or multi channel dataset.
/// ## Parameters
/// * rows: declare amount of rows
/// * cols: declare amount of columns
	/// * type: type to be used, e.g., CV_8UC3, CV_32FC1, etc.
/// * dslabel: specify the hdf5 dataset label. Existing dataset label will cause an error.
/// * compresslevel: specify the compression level 0-9 to be used, H5_NONE is the default value and means no compression.
/// The value 0 also means no compression.
	/// A value of 9 indicates the best compression ratio. Note
/// that a higher compression level indicates a higher computational cost. It relies
/// on GNU gzip for compression.
/// * dims_chunks: each array member specifies the chunking size to be used for block I/O,
/// by default NULL means none at all.
///
///
/// Note: If the dataset already exists, an exception will be thrown (CV_Error() is called).
///
/// - Existence of the dataset can be checked using hlexists(), see in this example:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create space for 100x50 CV_64FC2 matrix
/// if ( ! h5io->hlexists( "hilbert" ) )
/// h5io->dscreate( 100, 50, CV_64FC2, "hilbert" );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: Activating compression requires internal chunking. Chunking can significantly improve access
/// speed both at read and write time, especially for windowed access logic that shifts offset inside dataset.
/// If no custom chunking is specified, the default one will be invoked by the size of the **whole** dataset
/// as a single big chunk of data.
///
/// - See example of level 9 compression using internal default chunking:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create level 9 compressed space for CV_64FC2 matrix
/// if ( ! h5io->hlexists( "hilbert", 9 ) )
/// h5io->dscreate( 100, 50, CV_64FC2, "hilbert", 9 );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: A value of H5_UNLIMITED for **rows** or **cols** or both means **unlimited** data on the specified dimension,
	/// thus, it is possible to expand such a dataset anytime on rows, cols, or both directions. Presence of H5_UNLIMITED on any
	/// dimension **requires** defining custom chunking. No default chunking will be defined in the unlimited scenario since the
	/// default size on that dimension will be zero, and will grow once the dataset is written. Writing into a dataset that has
	/// H5_UNLIMITED on some of its dimensions requires dsinsert(), which allows growth on unlimited dimensions, instead of
	/// dswrite(), which only writes within the predefined data space.
///
/// - Example below shows no compression but unlimited dimension on cols using 100x100 internal chunking:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
	/// // create uncompressed space for a CV_64FC2 matrix with unlimited cols
/// int chunks[2] = { 100, 100 };
/// h5io->dscreate( 100, cv::hdf::HDF5::H5_UNLIMITED, CV_64FC2, "hilbert", cv::hdf::HDF5::H5_NONE, chunks );
/// // release
/// h5io->close();
/// ```
///
///
///
	/// Note: It is **not** thread safe; it must be called only once at dataset creation, otherwise an exception will occur.
/// Multiple datasets inside a single hdf5 file are allowed.
#[inline]
fn dscreate_3(&self, rows: i32, cols: i32, typ: i32, dslabel: &str, compresslevel: i32, dims_chunks: &i32) -> Result<()> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dscreate_const_const_int_const_int_const_int_const_StringR_const_int_const_intX(self.as_raw_HDF5(), rows, cols, typ, dslabel.opencv_as_extern(), compresslevel, dims_chunks) }.into_result()?;
Ok(ret)
}
#[inline]
fn dscreate_4(&self, n_dims: i32, sizes: &i32, typ: i32, dslabel: &str) -> Result<()> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dscreate_const_const_int_const_intX_const_int_const_StringR(self.as_raw_HDF5(), n_dims, sizes, typ, dslabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
#[inline]
fn dscreate_5(&self, n_dims: i32, sizes: &i32, typ: i32, dslabel: &str, compresslevel: i32) -> Result<()> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dscreate_const_const_int_const_intX_const_int_const_StringR_const_int(self.as_raw_HDF5(), n_dims, sizes, typ, dslabel.opencv_as_extern(), compresslevel) }.into_result()?;
Ok(ret)
}
/// ## C++ default parameters
/// * compresslevel: HDF5::H5_NONE
/// * dims_chunks: vector<int>()
#[inline]
fn dscreate_6(&self, sizes: &core::Vector<i32>, typ: i32, dslabel: &str, compresslevel: i32, dims_chunks: &core::Vector<i32>) -> Result<()> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dscreate_const_const_vector_int_R_const_int_const_StringR_const_int_const_vector_int_R(self.as_raw_HDF5(), sizes.as_raw_VectorOfi32(), typ, dslabel.opencv_as_extern(), compresslevel, dims_chunks.as_raw_VectorOfi32()) }.into_result()?;
Ok(ret)
}
/// Create and allocate storage for n-dimensional dataset, single or multichannel type.
/// ## Parameters
/// * n_dims: declare number of dimensions
/// * sizes: array containing sizes for each dimensions
/// * type: type to be used, e.g., CV_8UC3, CV_32FC1, etc.
/// * dslabel: specify the hdf5 dataset label. Existing dataset label will cause an error.
/// * compresslevel: specify the compression level 0-9 to be used, H5_NONE is the default value and means no compression.
/// The value 0 also means no compression.
	/// A value of 9 indicates the best compression ratio. Note
/// that a higher compression level indicates a higher computational cost. It relies
/// on GNU gzip for compression.
/// * dims_chunks: each array member specifies chunking sizes to be used for block I/O,
/// by default NULL means none at all.
///
/// Note: If the dataset already exists, an exception will be thrown. Existence of the dataset can be checked
/// using hlexists().
///
/// - See example below that creates a 6 dimensional storage space:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create space for 6 dimensional CV_64FC2 matrix
/// if ( ! h5io->hlexists( "nddata" ) )
/// int n_dims = 5;
/// int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
/// h5io->dscreate( n_dims, sizes, CV_64FC2, "nddata" );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: Activating compression requires internal chunking. Chunking can significantly improve access
/// speed both at read and write time, especially for windowed access logic that shifts offset inside dataset.
/// If no custom chunking is specified, the default one will be invoked by the size of **whole** dataset
/// as single big chunk of data.
///
/// - See example of level 0 compression (shallow) using chunking against the first
/// dimension, thus storage will consists of 100 chunks of data:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create space for 6 dimensional CV_64FC2 matrix
/// if ( ! h5io->hlexists( "nddata" ) )
/// int n_dims = 5;
/// int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
/// int chunks[n_dims] = { 1, 100, 20, 10, 5, 5 };
/// h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", 0, chunks );
/// else
/// printf("DS already created, skipping\n" );
/// // release
/// h5io->close();
/// ```
///
///
///
/// Note: A value of H5_UNLIMITED inside the **sizes** array means **unlimited** data on that dimension, thus it is
/// possible to expand anytime such dataset on those unlimited directions. Presence of H5_UNLIMITED on any dimension
/// **requires** to define custom chunking. No default chunking will be defined in unlimited scenario since the default size
/// on that dimension will be zero, and will grow once dataset is written. Writing into dataset that has H5_UNLIMITED on
/// some of its dimension requires dsinsert() instead of dswrite() that allows growth on unlimited dimension instead of
/// dswrite() that allows to write only in predefined data space.
///
/// - Example below shows a 3 dimensional dataset using no compression with all unlimited sizes and one unit chunking:
/// ```ignore
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// int n_dims = 3;
/// int chunks[n_dims] = { 1, 1, 1 };
/// int dsdims[n_dims] = { cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED };
/// h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", cv::hdf::HDF5::H5_NONE, chunks );
/// // release
/// h5io->close();
/// ```
///
#[inline]
fn dscreate_7(&self, n_dims: i32, sizes: &i32, typ: i32, dslabel: &str, compresslevel: i32, dims_chunks: &i32) -> Result<()> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dscreate_const_const_int_const_intX_const_int_const_StringR_const_int_const_intX(self.as_raw_HDF5(), n_dims, sizes, typ, dslabel.opencv_as_extern(), compresslevel, dims_chunks) }.into_result()?;
Ok(ret)
}
/// Fetch dataset sizes
/// ## Parameters
/// * dslabel: specify the hdf5 dataset label to be measured.
/// * dims_flag: will fetch dataset dimensions on H5_GETDIMS, dataset maximum dimensions on H5_GETMAXDIMS,
/// and chunk sizes on H5_GETCHUNKDIMS.
///
/// Returns vector object containing sizes of dataset on each dimensions.
///
///
/// Note: Resulting vector size will match the amount of dataset dimensions. By default H5_GETDIMS will return
/// actual dataset dimensions. Using H5_GETMAXDIM flag will get maximum allowed dimension which normally match
/// actual dataset dimension but can hold H5_UNLIMITED value if dataset was prepared in **unlimited** mode on
/// some of its dimension. It can be useful to check existing dataset dimensions before overwrite it as whole or subset.
/// Trying to write with oversized source data into dataset target will thrown exception. The H5_GETCHUNKDIMS will
/// return the dimension of chunk if dataset was created with chunking options otherwise returned vector size
/// will be zero.
///
/// ## C++ default parameters
/// * dims_flag: HDF5::H5_GETDIMS
#[inline]
fn dsgetsize(&self, dslabel: &str, dims_flag: i32) -> Result<core::Vector<i32>> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsgetsize_const_const_StringR_int(self.as_raw_HDF5(), dslabel.opencv_as_extern(), dims_flag) }.into_result()?;
let ret = unsafe { core::Vector::<i32>::opencv_from_extern(ret) };
Ok(ret)
}
/// Fetch dataset type
/// ## Parameters
/// * dslabel: specify the hdf5 dataset label to be checked.
///
/// Returns the stored matrix type. This is an identifier compatible with the CvMat type system,
/// like e.g. CV_16SC5 (16-bit signed 5-channel array), and so on.
///
///
/// Note: Result can be parsed with CV_MAT_CN() to obtain amount of channels and CV_MAT_DEPTH() to obtain native cvdata type.
/// It is thread safe.
#[inline]
fn dsgettype(&self, dslabel: &str) -> Result<i32> {
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsgettype_const_const_StringR(self.as_raw_HDF5(), dslabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
#[inline]
fn dswrite(&self, array: &dyn core::ToInputArray, dslabel: &str) -> Result<()> {
input_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dswrite_const_const__InputArrayR_const_StringR(self.as_raw_HDF5(), array.as_raw__InputArray(), dslabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
#[inline]
fn dswrite_1(&self, array: &dyn core::ToInputArray, dslabel: &str, dims_offset: &i32) -> Result<()> {
input_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dswrite_const_const__InputArrayR_const_StringR_const_intX(self.as_raw_HDF5(), array.as_raw__InputArray(), dslabel.opencv_as_extern(), dims_offset) }.into_result()?;
Ok(ret)
}
/// ## C++ default parameters
/// * dims_counts: vector<int>()
#[inline]
fn dswrite_2(&self, array: &dyn core::ToInputArray, dslabel: &str, dims_offset: &core::Vector<i32>, dims_counts: &core::Vector<i32>) -> Result<()> {
input_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dswrite_const_const__InputArrayR_const_StringR_const_vector_int_R_const_vector_int_R(self.as_raw_HDF5(), array.as_raw__InputArray(), dslabel.opencv_as_extern(), dims_offset.as_raw_VectorOfi32(), dims_counts.as_raw_VectorOfi32()) }.into_result()?;
Ok(ret)
}
/// Write or overwrite a Mat object into specified dataset of hdf5 file.
/// ## Parameters
/// * Array: specify Mat data array to be written.
/// * dslabel: specify the target hdf5 dataset label.
/// * dims_offset: each array member specify the offset location
/// over dataset's each dimensions from where InputArray will be (over)written into dataset.
/// * dims_counts: each array member specifies the amount of data over dataset's
/// each dimensions from InputArray that will be written into dataset.
///
/// Writes Mat object into targeted dataset.
///
///
/// Note: If dataset is not created and does not exist it will be created **automatically**. Only Mat is supported and
/// it must be **continuous**. It is thread safe but it is recommended that writes to happen over separate non-overlapping
/// regions. Multiple datasets can be written inside a single hdf5 file.
///
/// - Example below writes a 100x100 CV_64FC2 matrix into a dataset. No dataset pre-creation required. If routine
/// is called multiple times dataset will be just overwritten:
/// ```ignore
/// // dual channel hilbert matrix
/// cv::Mat H(100, 100, CV_64FC2);
/// for(int i = 0; i < H.rows; i++)
/// for(int j = 0; j < H.cols; j++)
/// {
/// H.at<cv::Vec2d>(i,j)[0] = 1./(i+j+1);
/// H.at<cv::Vec2d>(i,j)[1] = -1./(i+j+1);
/// count++;
/// }
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // write / overwrite dataset
/// h5io->dswrite( H, "hilbert" );
/// // release
/// h5io->close();
/// ```
///
///
/// - Example below writes a smaller 50x100 matrix into 100x100 compressed space optimised by two 50x100 chunks.
/// Matrix is written twice into first half (0->50) and second half (50->100) of data space using offset.
/// ```ignore
/// // dual channel hilbert matrix
/// cv::Mat H(50, 100, CV_64FC2);
/// for(int i = 0; i < H.rows; i++)
/// for(int j = 0; j < H.cols; j++)
/// {
/// H.at<cv::Vec2d>(i,j)[0] = 1./(i+j+1);
/// H.at<cv::Vec2d>(i,j)[1] = -1./(i+j+1);
/// count++;
/// }
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // optimise dataset by two chunks
/// int chunks[2] = { 50, 100 };
/// // create 100x100 CV_64FC2 compressed space
/// h5io->dscreate( 100, 100, CV_64FC2, "hilbert", 9, chunks );
/// // write into first half
/// int offset1[2] = { 0, 0 };
/// h5io->dswrite( H, "hilbert", offset1 );
/// // write into second half
/// int offset2[2] = { 50, 0 };
/// h5io->dswrite( H, "hilbert", offset2 );
/// // release
/// h5io->close();
/// ```
///
#[inline]
fn dswrite_3(&self, array: &dyn core::ToInputArray, dslabel: &str, dims_offset: &i32, dims_counts: &i32) -> Result<()> {
input_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dswrite_const_const__InputArrayR_const_StringR_const_intX_const_intX(self.as_raw_HDF5(), array.as_raw__InputArray(), dslabel.opencv_as_extern(), dims_offset, dims_counts) }.into_result()?;
Ok(ret)
}
#[inline]
fn dsinsert(&self, array: &dyn core::ToInputArray, dslabel: &str) -> Result<()> {
input_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsinsert_const_const__InputArrayR_const_StringR(self.as_raw_HDF5(), array.as_raw__InputArray(), dslabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
#[inline]
fn dsinsert_1(&self, array: &dyn core::ToInputArray, dslabel: &str, dims_offset: &i32) -> Result<()> {
input_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsinsert_const_const__InputArrayR_const_StringR_const_intX(self.as_raw_HDF5(), array.as_raw__InputArray(), dslabel.opencv_as_extern(), dims_offset) }.into_result()?;
Ok(ret)
}
/// ## C++ default parameters
/// * dims_counts: vector<int>()
#[inline]
fn dsinsert_2(&self, array: &dyn core::ToInputArray, dslabel: &str, dims_offset: &core::Vector<i32>, dims_counts: &core::Vector<i32>) -> Result<()> {
input_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsinsert_const_const__InputArrayR_const_StringR_const_vector_int_R_const_vector_int_R(self.as_raw_HDF5(), array.as_raw__InputArray(), dslabel.opencv_as_extern(), dims_offset.as_raw_VectorOfi32(), dims_counts.as_raw_VectorOfi32()) }.into_result()?;
Ok(ret)
}
/// Insert or overwrite a Mat object into specified dataset and auto expand dataset size if **unlimited** property allows.
/// ## Parameters
/// * Array: specify Mat data array to be written.
/// * dslabel: specify the target hdf5 dataset label.
/// * dims_offset: each array member specify the offset location
/// over dataset's each dimensions from where InputArray will be (over)written into dataset.
/// * dims_counts: each array member specify the amount of data over dataset's
/// each dimensions from InputArray that will be written into dataset.
///
/// Writes Mat object into targeted dataset and **autoexpand** dataset dimension if allowed.
///
///
/// Note: Unlike dswrite(), datasets are **not** created **automatically**. Only Mat is supported and it must be **continuous**.
/// If dsinsert() happens over outer regions of dataset dimensions and on that dimension of dataset is in **unlimited** mode then
/// dataset is expanded, otherwise exception is thrown. To create datasets with **unlimited** property on specific or more
/// dimensions see dscreate() and the optional H5_UNLIMITED flag at creation time. It is not thread safe over same dataset
/// but multiple datasets can be merged inside a single hdf5 file.
///
/// - Example below creates **unlimited** rows x 100 cols and expands rows 5 times with dsinsert() using single 100x100 CV_64FC2
/// over the dataset. Final size will have 5x100 rows and 100 cols, reflecting H matrix five times over row's span. Chunks size is
/// 100x100 just optimized against the H matrix size having compression disabled. If routine is called multiple times dataset will be
/// just overwritten:
/// ```ignore
/// // dual channel hilbert matrix
/// cv::Mat H(50, 100, CV_64FC2);
/// for(int i = 0; i < H.rows; i++)
/// for(int j = 0; j < H.cols; j++)
/// {
/// H.at<cv::Vec2d>(i,j)[0] = 1./(i+j+1);
/// H.at<cv::Vec2d>(i,j)[1] = -1./(i+j+1);
/// count++;
/// }
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // optimise dataset by chunks
/// int chunks[2] = { 100, 100 };
/// // create Unlimited x 100 CV_64FC2 space
/// h5io->dscreate( cv::hdf::HDF5::H5_UNLIMITED, 100, CV_64FC2, "hilbert", cv::hdf::HDF5::H5_NONE, chunks );
/// // write into first half
/// int offset[2] = { 0, 0 };
/// for ( int t = 0; t < 5; t++ )
/// {
/// offset[0] += 100 * t;
/// h5io->dsinsert( H, "hilbert", offset );
/// }
/// // release
/// h5io->close();
/// ```
///
#[inline]
fn dsinsert_3(&self, array: &dyn core::ToInputArray, dslabel: &str, dims_offset: &i32, dims_counts: &i32) -> Result<()> {
input_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsinsert_const_const__InputArrayR_const_StringR_const_intX_const_intX(self.as_raw_HDF5(), array.as_raw__InputArray(), dslabel.opencv_as_extern(), dims_offset, dims_counts) }.into_result()?;
Ok(ret)
}
#[inline]
fn dsread(&self, array: &mut dyn core::ToOutputArray, dslabel: &str) -> Result<()> {
output_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsread_const_const__OutputArrayR_const_StringR(self.as_raw_HDF5(), array.as_raw__OutputArray(), dslabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
#[inline]
fn dsread_1(&self, array: &mut dyn core::ToOutputArray, dslabel: &str, dims_offset: &i32) -> Result<()> {
output_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsread_const_const__OutputArrayR_const_StringR_const_intX(self.as_raw_HDF5(), array.as_raw__OutputArray(), dslabel.opencv_as_extern(), dims_offset) }.into_result()?;
Ok(ret)
}
/// ## C++ default parameters
/// * dims_counts: vector<int>()
#[inline]
fn dsread_2(&self, array: &mut dyn core::ToOutputArray, dslabel: &str, dims_offset: &core::Vector<i32>, dims_counts: &core::Vector<i32>) -> Result<()> {
output_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsread_const_const__OutputArrayR_const_StringR_const_vector_int_R_const_vector_int_R(self.as_raw_HDF5(), array.as_raw__OutputArray(), dslabel.opencv_as_extern(), dims_offset.as_raw_VectorOfi32(), dims_counts.as_raw_VectorOfi32()) }.into_result()?;
Ok(ret)
}
/// Read specific dataset from hdf5 file into Mat object.
/// ## Parameters
/// * Array: Mat container where data reads will be returned.
/// * dslabel: specify the source hdf5 dataset label.
/// * dims_offset: each array member specify the offset location over
/// each dimensions from where dataset starts to read into OutputArray.
/// * dims_counts: each array member specify the amount over dataset's each
/// dimensions of dataset to read into OutputArray.
///
/// Reads out Mat object reflecting the stored dataset.
///
///
/// Note: If hdf5 file does not exist an exception will be thrown. Use hlexists() to check dataset presence.
/// It is thread safe.
///
/// - Example below reads a dataset:
/// ```ignore
/// // open hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // blank Mat container
/// cv::Mat H;
/// // read hibert dataset
/// h5io->read( H, "hilbert" );
/// // release
/// h5io->close();
/// ```
///
///
/// - Example below perform read of 3x5 submatrix from second row and third element.
/// ```ignore
/// // open hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // blank Mat container
/// cv::Mat H;
/// int offset[2] = { 1, 2 };
/// int counts[2] = { 3, 5 };
/// // read hibert dataset
/// h5io->read( H, "hilbert", offset, counts );
/// // release
/// h5io->close();
/// ```
///
#[inline]
fn dsread_3(&self, array: &mut dyn core::ToOutputArray, dslabel: &str, dims_offset: &i32, dims_counts: &i32) -> Result<()> {
output_array_arg!(array);
extern_container_arg!(dslabel);
let ret = unsafe { sys::cv_hdf_HDF5_dsread_const_const__OutputArrayR_const_StringR_const_intX_const_intX(self.as_raw_HDF5(), array.as_raw__OutputArray(), dslabel.opencv_as_extern(), dims_offset, dims_counts) }.into_result()?;
Ok(ret)
}
/// Fetch keypoint dataset size
/// ## Parameters
/// * kplabel: specify the hdf5 dataset label to be measured.
/// * dims_flag: will fetch dataset dimensions on H5_GETDIMS, and dataset maximum dimensions on H5_GETMAXDIMS.
///
/// Returns size of keypoints dataset.
///
///
/// Note: Resulting size will match the amount of keypoints. By default H5_GETDIMS will return actual dataset dimension.
/// Using H5_GETMAXDIM flag will get maximum allowed dimension which normally match actual dataset dimension but can hold
/// H5_UNLIMITED value if dataset was prepared in **unlimited** mode. It can be useful to check existing dataset dimension
/// before overwrite it as whole or subset. Trying to write with oversized source data into dataset target will thrown
/// exception. The H5_GETCHUNKDIMS will return the dimension of chunk if dataset was created with chunking options otherwise
/// returned vector size will be zero.
///
/// ## C++ default parameters
/// * dims_flag: HDF5::H5_GETDIMS
#[inline]
fn kpgetsize(&self, kplabel: &str, dims_flag: i32) -> Result<i32> {
extern_container_arg!(kplabel);
let ret = unsafe { sys::cv_hdf_HDF5_kpgetsize_const_const_StringR_int(self.as_raw_HDF5(), kplabel.opencv_as_extern(), dims_flag) }.into_result()?;
Ok(ret)
}
/// Create and allocate special storage for cv::KeyPoint dataset.
/// ## Parameters
/// * size: declare fixed number of KeyPoints
/// * kplabel: specify the hdf5 dataset label, any existing dataset with the same label will be overwritten.
/// * compresslevel: specify the compression level 0-9 to be used, H5_NONE is default and means no compression.
/// * chunks: each array member specifies chunking sizes to be used for block I/O,
/// H5_NONE is default and means no compression.
///
/// Note: If the dataset already exists an exception will be thrown. Existence of the dataset can be checked
/// using hlexists().
///
/// - See example below that creates space for 100 keypoints in the dataset:
/// ```ignore
/// // open hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// if ( ! h5io->hlexists( "keypoints" ) )
/// h5io->kpcreate( 100, "keypoints" );
/// else
/// printf("DS already created, skipping\n" );
/// ```
///
///
///
/// Note: A value of H5_UNLIMITED for **size** means **unlimited** keypoints, thus is possible to expand anytime such
/// dataset by adding or inserting. Presence of H5_UNLIMITED **require** to define custom chunking. No default chunking
/// will be defined in unlimited scenario since default size on that dimension will be zero, and will grow once dataset
/// is written. Writing into dataset that have H5_UNLIMITED on some of its dimension requires kpinsert() that allow
/// growth on unlimited dimension instead of kpwrite() that allows to write only in predefined data space.
///
/// - See example below that creates unlimited space for keypoints chunking size of 100 but no compression:
/// ```ignore
/// // open hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// if ( ! h5io->hlexists( "keypoints" ) )
/// h5io->kpcreate( cv::hdf::HDF5::H5_UNLIMITED, "keypoints", cv::hdf::HDF5::H5_NONE, 100 );
/// else
/// printf("DS already created, skipping\n" );
/// ```
///
///
/// ## C++ default parameters
/// * compresslevel: H5_NONE
/// * chunks: H5_NONE
#[inline]
fn kpcreate(&self, size: i32, kplabel: &str, compresslevel: i32, chunks: i32) -> Result<()> {
extern_container_arg!(kplabel);
let ret = unsafe { sys::cv_hdf_HDF5_kpcreate_const_const_int_const_StringR_const_int_const_int(self.as_raw_HDF5(), size, kplabel.opencv_as_extern(), compresslevel, chunks) }.into_result()?;
Ok(ret)
}
/// Write or overwrite list of KeyPoint into specified dataset of hdf5 file.
/// ## Parameters
/// * keypoints: specify keypoints data list to be written.
/// * kplabel: specify the target hdf5 dataset label.
/// * offset: specify the offset location on dataset from where keypoints will be (over)written into dataset.
/// * counts: specify the amount of keypoints that will be written into dataset.
///
/// Writes vector<KeyPoint> object into targeted dataset.
///
///
/// Note: If dataset is not created and does not exist it will be created **automatically**. It is thread safe but
/// it is recommended that writes to happen over separate non overlapping regions. Multiple datasets can be written
/// inside single hdf5 file.
///
/// - Example below writes a 100 keypoints into a dataset. No dataset precreation required. If routine is called multiple
/// times dataset will be just overwritten:
/// ```ignore
/// // generate 100 dummy keypoints
/// std::vector<cv::KeyPoint> keypoints;
/// for(int i = 0; i < 100; i++)
/// keypoints.push_back( cv::KeyPoint(i, -i, 1, -1, 0, 0, -1) );
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // write / overwrite dataset
/// h5io->kpwrite( keypoints, "keypoints" );
/// // release
/// h5io->close();
/// ```
///
///
/// - Example below uses smaller set of 50 keypoints and writes into compressed space of 100 keypoints optimised by 10 chunks.
/// Same keypoint set is written three times, first into first half (0->50) and at second half (50->75) then into remaining slots
/// (75->99) of data space using offset and count parameters to settle the window for write access.If routine is called multiple times
/// dataset will be just overwritten:
/// ```ignore
/// // generate 50 dummy keypoints
/// std::vector<cv::KeyPoint> keypoints;
/// for(int i = 0; i < 50; i++)
/// keypoints.push_back( cv::KeyPoint(i, -i, 1, -1, 0, 0, -1) );
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create maximum compressed space of size 100 with chunk size 10
/// h5io->kpcreate( 100, "keypoints", 9, 10 );
/// // write into first half
/// h5io->kpwrite( keypoints, "keypoints", 0 );
/// // write first 25 keypoints into second half
/// h5io->kpwrite( keypoints, "keypoints", 50, 25 );
/// // write first 25 keypoints into remained space of second half
/// h5io->kpwrite( keypoints, "keypoints", 75, 25 );
/// // release
/// h5io->close();
/// ```
///
///
/// ## C++ default parameters
/// * offset: H5_NONE
/// * counts: H5_NONE
#[inline]
fn kpwrite(&self, keypoints: core::Vector<core::KeyPoint>, kplabel: &str, offset: i32, counts: i32) -> Result<()> {
extern_container_arg!(kplabel);
let ret = unsafe { sys::cv_hdf_HDF5_kpwrite_const_const_vector_KeyPoint__const_StringR_const_int_const_int(self.as_raw_HDF5(), keypoints.as_raw_VectorOfKeyPoint(), kplabel.opencv_as_extern(), offset, counts) }.into_result()?;
Ok(ret)
}
/// Insert or overwrite list of KeyPoint into specified dataset and autoexpand dataset size if **unlimited** property allows.
/// ## Parameters
/// * keypoints: specify keypoints data list to be written.
/// * kplabel: specify the target hdf5 dataset label.
/// * offset: specify the offset location on dataset from where keypoints will be (over)written into dataset.
/// * counts: specify the amount of keypoints that will be written into dataset.
///
/// Writes vector<KeyPoint> object into targeted dataset and **autoexpand** dataset dimension if allowed.
///
///
/// Note: Unlike kpwrite(), datasets are **not** created **automatically**. If dsinsert() happen over outer region of dataset
/// and dataset has been created in **unlimited** mode then dataset is expanded, otherwise exception is thrown. To create datasets
/// with **unlimited** property see kpcreate() and the optional H5_UNLIMITED flag at creation time. It is not thread safe over same
/// dataset but multiple datasets can be merged inside single hdf5 file.
///
/// - Example below creates **unlimited** space for keypoints storage, and inserts a list of 10 keypoints ten times into that space.
/// Final dataset will have 100 keypoints. Chunks size is 10 just optimized against list of keypoints. If routine is called multiple
/// times dataset will be just overwritten:
/// ```ignore
/// // generate 10 dummy keypoints
/// std::vector<cv::KeyPoint> keypoints;
/// for(int i = 0; i < 10; i++)
/// keypoints.push_back( cv::KeyPoint(i, -i, 1, -1, 0, 0, -1) );
/// // open / autocreate hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // create unlimited size space with chunk size of 10
/// h5io->kpcreate( cv::hdf::HDF5::H5_UNLIMITED, "keypoints", -1, 10 );
/// // insert 10 times same 10 keypoints
/// for(int i = 0; i < 10; i++)
/// h5io->kpinsert( keypoints, "keypoints", i * 10 );
/// // release
/// h5io->close();
/// ```
///
///
/// ## C++ default parameters
/// * offset: H5_NONE
/// * counts: H5_NONE
#[inline]
fn kpinsert(&self, keypoints: core::Vector<core::KeyPoint>, kplabel: &str, offset: i32, counts: i32) -> Result<()> {
extern_container_arg!(kplabel);
let ret = unsafe { sys::cv_hdf_HDF5_kpinsert_const_const_vector_KeyPoint__const_StringR_const_int_const_int(self.as_raw_HDF5(), keypoints.as_raw_VectorOfKeyPoint(), kplabel.opencv_as_extern(), offset, counts) }.into_result()?;
Ok(ret)
}
/// Read specific keypoint dataset from hdf5 file into vector<KeyPoint> object.
/// ## Parameters
/// * keypoints: vector<KeyPoint> container where data reads will be returned.
/// * kplabel: specify the source hdf5 dataset label.
/// * offset: specify the offset location over dataset from where read starts.
/// * counts: specify the amount of keypoints from dataset to read.
///
/// Reads out vector<KeyPoint> object reflecting the stored dataset.
///
///
/// Note: If hdf5 file does not exist an exception will be thrown. Use hlexists() to check dataset presence.
/// It is thread safe.
///
/// - Example below reads a dataset containing keypoints starting with second entry:
/// ```ignore
/// // open hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // blank KeyPoint container
/// std::vector<cv::KeyPoint> keypoints;
/// // read keypoints starting second one
/// h5io->kpread( keypoints, "keypoints", 1 );
/// // release
/// h5io->close();
/// ```
///
///
/// - Example below perform read of 3 keypoints from second entry.
/// ```ignore
/// // open hdf5 file
/// cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
/// // blank KeyPoint container
/// std::vector<cv::KeyPoint> keypoints;
/// // read three keypoints starting second one
/// h5io->kpread( keypoints, "keypoints", 1, 3 );
/// // release
/// h5io->close();
/// ```
///
///
/// ## C++ default parameters
/// * offset: H5_NONE
/// * counts: H5_NONE
#[inline]
fn kpread(&self, keypoints: &mut core::Vector<core::KeyPoint>, kplabel: &str, offset: i32, counts: i32) -> Result<()> {
extern_container_arg!(kplabel);
let ret = unsafe { sys::cv_hdf_HDF5_kpread_const_vector_KeyPoint_R_const_StringR_const_int_const_int(self.as_raw_HDF5(), keypoints.as_raw_mut_VectorOfKeyPoint(), kplabel.opencv_as_extern(), offset, counts) }.into_result()?;
Ok(ret)
}
}
pub trait HDF5: crate::hdf::HDF5Const {
fn as_raw_mut_HDF5(&mut self) -> *mut c_void;
/// Close and release hdf5 object.
#[inline]
fn close(&mut self) -> Result<()> {
let ret = unsafe { sys::cv_hdf_HDF5_close(self.as_raw_mut_HDF5()) }.into_result()?;
Ok(ret)
}
/// Create a group.
/// ## Parameters
/// * grlabel: specify the hdf5 group label.
///
/// Create a hdf5 group with default properties. The group is closed automatically after creation.
///
///
/// Note: Groups are useful for better organising multiple datasets. It is possible to create subgroups within any group.
/// Existence of a particular group can be checked using hlexists(). In case of subgroups, a label would be e.g: 'Group1/SubGroup1'
/// where SubGroup1 is within the root group Group1. Before creating a subgroup, its parent group MUST be created.
///
/// - In this example, Group1 will have one subgroup called SubGroup1:
///
/// [create_group](https://github.com/opencv/opencv_contrib/blob/4.5.4/modules/hdf/samples/create_groups.cpp#L1)
///
/// The corresponding result visualized using the HDFView tool is
///
/// 
///
///
/// Note: When a dataset is created with dscreate() or kpcreate(), it can be created within a group by specifying the
/// full path within the label. In our example, it would be: 'Group1/SubGroup1/MyDataSet'. It is not thread safe.
#[inline]
fn grcreate(&mut self, grlabel: &str) -> Result<()> {
extern_container_arg!(grlabel);
let ret = unsafe { sys::cv_hdf_HDF5_grcreate_const_StringR(self.as_raw_mut_HDF5(), grlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Delete an attribute from the root group.
///
/// ## Parameters
/// * atlabel: the attribute to be deleted.
///
///
/// Note: CV_Error() is called if the given attribute does not exist. Use atexists()
/// to check whether it exists or not beforehand.
/// ## See also
/// atexists, atwrite, atread
#[inline]
fn atdelete(&mut self, atlabel: &str) -> Result<()> {
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atdelete_const_StringR(self.as_raw_mut_HDF5(), atlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Write an attribute inside the root group.
///
/// ## Parameters
/// * value: attribute value.
/// * atlabel: attribute name.
///
/// The following example demonstrates how to write an attribute of type cv::String:
///
/// [snippets_write_str](https://github.com/opencv/opencv_contrib/blob/4.5.4/modules/hdf/samples/read_write_attributes.cpp#L1)
///
///
/// Note: CV_Error() is called if the given attribute already exists. Use atexists()
/// to check whether it exists or not beforehand. And use atdelete() to delete
/// it if it already exists.
/// ## See also
/// atexists, atdelete, atread
#[inline]
fn atwrite(&mut self, value: i32, atlabel: &str) -> Result<()> {
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atwrite_const_int_const_StringR(self.as_raw_mut_HDF5(), value, atlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Read an attribute from the root group.
///
/// ## Parameters
/// * value: address where the attribute is read into
/// * atlabel: attribute name
///
/// The following example demonstrates how to read an attribute of type cv::String:
///
/// [snippets_read_str](https://github.com/opencv/opencv_contrib/blob/4.5.4/modules/hdf/samples/read_write_attributes.cpp#L1)
///
///
/// Note: The attribute MUST exist, otherwise CV_Error() is called. Use atexists()
/// to check if it exists beforehand.
/// ## See also
/// atexists, atdelete, atwrite
#[inline]
fn atread(&mut self, value: &mut i32, atlabel: &str) -> Result<()> {
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atread_intX_const_StringR(self.as_raw_mut_HDF5(), value, atlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Write an attribute into the root group.
///
/// ## Parameters
/// * value: attribute value. Currently, only n-d continuous multi-channel arrays are supported.
/// * atlabel: attribute name.
///
///
/// Note: CV_Error() is called if the given attribute already exists. Use atexists()
/// to check whether it exists or not beforehand. And use atdelete() to delete
/// it if it already exists.
/// ## See also
/// atexists, atdelete, atread.
///
/// ## Overloaded parameters
#[inline]
fn atwrite_1(&mut self, value: f64, atlabel: &str) -> Result<()> {
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atwrite_const_double_const_StringR(self.as_raw_mut_HDF5(), value, atlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Read an attribute from the root group.
///
/// ## Parameters
/// * value: attribute value. Currently, only n-d continuous multi-channel arrays are supported.
/// * atlabel: attribute name.
///
///
/// Note: The attribute MUST exist, otherwise CV_Error() is called. Use atexists()
/// to check if it exists beforehand.
/// ## See also
/// atexists, atdelete, atwrite
///
/// ## Overloaded parameters
#[inline]
fn atread_1(&mut self, value: &mut f64, atlabel: &str) -> Result<()> {
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atread_doubleX_const_StringR(self.as_raw_mut_HDF5(), value, atlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Write an attribute into the root group.
///
/// ## Parameters
/// * value: attribute value. Currently, only n-d continuous multi-channel arrays are supported.
/// * atlabel: attribute name.
///
///
/// Note: CV_Error() is called if the given attribute already exists. Use atexists()
/// to check whether it exists or not beforehand. And use atdelete() to delete
/// it if it already exists.
/// ## See also
/// atexists, atdelete, atread.
///
/// ## Overloaded parameters
#[inline]
fn atwrite_2(&mut self, value: &str, atlabel: &str) -> Result<()> {
extern_container_arg!(value);
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atwrite_const_StringR_const_StringR(self.as_raw_mut_HDF5(), value.opencv_as_extern(), atlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Read an attribute from the root group.
///
/// ## Parameters
/// * value: attribute value. Currently, only n-d continuous multi-channel arrays are supported.
/// * atlabel: attribute name.
///
///
/// Note: The attribute MUST exist, otherwise CV_Error() is called. Use atexists()
/// to check if it exists beforehand.
/// ## See also
/// atexists, atdelete, atwrite
///
/// ## Overloaded parameters
#[inline]
fn atread_2(&mut self, value: &mut String, atlabel: &str) -> Result<()> {
string_arg_output_send!(via value_via);
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atread_StringX_const_StringR(self.as_raw_mut_HDF5(), &mut value_via, atlabel.opencv_as_extern()) }.into_result()?;
string_arg_output_receive!(value_via => value);
Ok(ret)
}
/// Write an attribute into the root group.
///
/// ## Parameters
/// * value: attribute value. Currently, only n-d continuous multi-channel arrays are supported.
/// * atlabel: attribute name.
///
///
/// Note: CV_Error() is called if the given attribute already exists. Use atexists()
/// to check whether it exists or not beforehand. And use atdelete() to delete
/// it if it already exists.
/// ## See also
/// atexists, atdelete, atread.
#[inline]
fn atwrite_3(&mut self, value: &dyn core::ToInputArray, atlabel: &str) -> Result<()> {
input_array_arg!(value);
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atwrite_const__InputArrayR_const_StringR(self.as_raw_mut_HDF5(), value.as_raw__InputArray(), atlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
/// Read an attribute from the root group.
///
/// ## Parameters
/// * value: attribute value. Currently, only n-d continuous multi-channel arrays are supported.
/// * atlabel: attribute name.
///
///
/// Note: The attribute MUST exist, otherwise CV_Error() is called. Use atexists()
/// to check if it exists beforehand.
/// ## See also
/// atexists, atdelete, atwrite
#[inline]
fn atread_3(&mut self, value: &mut dyn core::ToOutputArray, atlabel: &str) -> Result<()> {
output_array_arg!(value);
extern_container_arg!(atlabel);
let ret = unsafe { sys::cv_hdf_HDF5_atread_const__OutputArrayR_const_StringR(self.as_raw_mut_HDF5(), value.as_raw__OutputArray(), atlabel.opencv_as_extern()) }.into_result()?;
Ok(ret)
}
}
| 46.326087 | 285 | 0.669121 |
8a1103c5d557a4c35a9d33495515bba670fcd3e2 | 5,799 | use rustpython_vm::obj::objstr::PyStrRef;
use rustpython_vm::pyobject::{BorrowValue, PyIterable, PyResult, TryFromObject};
use rustpython_vm::scope::{NameProtocol, Scope};
use rustpython_vm::VirtualMachine;
pub struct ShellHelper<'vm> {
vm: &'vm VirtualMachine,
scope: Scope,
}
fn reverse_string(s: &mut String) {
let rev = s.chars().rev().collect();
*s = rev;
}
fn split_idents_on_dot(line: &str) -> Option<(usize, Vec<String>)> {
let mut words = vec![String::new()];
let mut startpos = 0;
for (i, c) in line.chars().rev().enumerate() {
match c {
'.' => {
// check for a double dot
if i != 0 && words.last().map_or(false, |s| s.is_empty()) {
return None;
}
reverse_string(words.last_mut().unwrap());
if words.len() == 1 {
startpos = line.len() - i;
}
words.push(String::new());
}
c if c.is_alphanumeric() || c == '_' => words.last_mut().unwrap().push(c),
_ => {
if words.len() == 1 {
if words.last().unwrap().is_empty() {
return None;
}
startpos = line.len() - i;
}
break;
}
}
}
if words == [String::new()] {
return None;
}
reverse_string(words.last_mut().unwrap());
words.reverse();
Some((startpos, words))
}
impl<'vm> ShellHelper<'vm> {
pub fn new(vm: &'vm VirtualMachine, scope: Scope) -> Self {
ShellHelper { vm, scope }
}
#[allow(clippy::type_complexity)]
fn get_available_completions<'w>(
&self,
words: &'w [String],
) -> Option<(&'w str, Box<dyn Iterator<Item = PyResult<PyStrRef>> + 'vm>)> {
// the very first word and then all the ones after the dot
let (first, rest) = words.split_first().unwrap();
let str_iter_method = |obj, name| {
let iter = self.vm.call_method(obj, name, ())?;
PyIterable::<PyStrRef>::try_from_object(self.vm, iter)?.iter(self.vm)
};
if let Some((last, parents)) = rest.split_last() {
// we need to get an attribute based off of the dir() of an object
// last: the last word, could be empty if it ends with a dot
// parents: the words before the dot
let mut current = self.scope.load_global(self.vm, first)?;
for attr in parents {
current = self.vm.get_attribute(current.clone(), attr.as_str()).ok()?;
}
let current_iter = str_iter_method(¤t, "__dir__").ok()?;
Some((&last, Box::new(current_iter) as _))
} else {
// we need to get a variable based off of globals/builtins
let globals = str_iter_method(self.scope.globals.as_object(), "keys").ok()?;
let builtins = str_iter_method(&self.vm.builtins, "__dir__").ok()?;
Some((&first, Box::new(Iterator::chain(globals, builtins)) as _))
}
}
fn complete_opt(&self, line: &str) -> Option<(usize, Vec<String>)> {
let (startpos, words) = split_idents_on_dot(line)?;
let (word_start, iter) = self.get_available_completions(&words)?;
let all_completions = iter
.filter(|res| {
res.as_ref()
.ok()
.map_or(true, |s| s.borrow_value().starts_with(word_start))
})
.collect::<Result<Vec<_>, _>>()
.ok()?;
let mut completions = if word_start.starts_with('_') {
// if they're already looking for something starting with a '_', just give
// them all the completions
all_completions
} else {
// only the completions that don't start with a '_'
let no_underscore = all_completions
.iter()
.cloned()
.filter(|s| !s.borrow_value().starts_with('_'))
.collect::<Vec<_>>();
// if there are only completions that start with a '_', give them all of the
// completions, otherwise only the ones that don't start with '_'
if no_underscore.is_empty() {
all_completions
} else {
no_underscore
}
};
// sort the completions alphabetically
completions.sort_by(|a, b| std::cmp::Ord::cmp(a.borrow_value(), b.borrow_value()));
Some((
startpos,
completions
.into_iter()
.map(|s| s.borrow_value().to_owned())
.collect(),
))
}
}
cfg_if::cfg_if! {
if #[cfg(not(target_os = "wasi"))] {
use rustyline::{
completion::Completer, highlight::Highlighter, hint::Hinter, validate::Validator, Context,
Helper,
};
impl Completer for ShellHelper<'_> {
type Candidate = String;
fn complete(
&self,
line: &str,
pos: usize,
_ctx: &Context,
) -> rustyline::Result<(usize, Vec<String>)> {
Ok(self
.complete_opt(&line[0..pos])
// as far as I can tell, there's no better way to do both completion
// and indentation (or even just indentation)
.unwrap_or_else(|| (line.len(), vec!["\t".to_owned()])))
}
}
impl Hinter for ShellHelper<'_> {}
impl Highlighter for ShellHelper<'_> {}
impl Validator for ShellHelper<'_> {}
impl Helper for ShellHelper<'_> {}
}
}
| 33.912281 | 102 | 0.509226 |
bb39662b903941dabe6fe72268674e7ba4ff66c9 | 1,106 | use std::pin::Pin;
use pin_project_lite::pin_project;
use crate::stream::Stream;
use crate::task::{Context, Poll};
pin_project! {
/// A stream that does something with each element of another stream.
///
/// This `struct` is created by the [`inspect`] method on [`Stream`]. See its
/// documentation for more.
///
/// [`inspect`]: trait.Stream.html#method.inspect
/// [`Stream`]: trait.Stream.html
#[derive(Debug)]
pub struct Inspect<S, F> {
#[pin]
stream: S,
f: F,
}
}
impl<S, F> Inspect<S, F> {
pub(super) fn new(stream: S, f: F) -> Self {
Self {
stream,
f,
}
}
}
impl<S, F> Stream for Inspect<S, F>
where
S: Stream,
F: FnMut(&S::Item),
{
type Item = S::Item;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.project();
let next = futures_core::ready!(this.stream.as_mut().poll_next(cx));
Poll::Ready(next.and_then(|x| {
(this.f)(&x);
Some(x)
}))
}
}
| 22.12 | 90 | 0.534358 |
79b06f086f6cf5296e60261392efdb029cf74465 | 107 | use super::component_prelude::*;
#[derive(Default, Component)]
#[storage(NullStorage)]
pub struct Player;
| 17.833333 | 32 | 0.747664 |
76c636f6e560b7ca0cfb8765ea533505ad0f7975 | 4,084 | use super::{Value, ValueInternal};
use crate::context::internal::Env;
use crate::context::Context;
use crate::handle::{Handle, Managed};
use crate::object::Object;
use crate::result::{JsResult, JsResultExt};
use neon_runtime;
use neon_runtime::raw;
use std::error::Error;
use std::fmt;
use std::fmt::Debug;
/// A JavaScript Date object
#[repr(C)]
#[derive(Debug, Copy, Clone)]
#[cfg_attr(docsrs, doc(cfg(feature = "napi-5")))]
pub struct JsDate(raw::Local);
impl Value for JsDate {}
impl Managed for JsDate {
fn to_raw(self) -> raw::Local {
self.0
}
fn from_raw(_: Env, h: raw::Local) -> Self {
JsDate(h)
}
}
/// The Error struct for a Date
#[derive(Debug)]
#[cfg_attr(docsrs, doc(cfg(feature = "napi-5")))]
pub struct DateError(DateErrorKind);
impl DateError {
pub fn kind(&self) -> DateErrorKind {
self.0
}
}
impl fmt::Display for DateError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(self.0.as_str())
}
}
impl Error for DateError {}
/// The error kinds corresponding to `DateError`
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(docsrs, doc(cfg(feature = "napi-5")))]
pub enum DateErrorKind {
Overflow,
Underflow,
}
impl DateErrorKind {
fn as_str(&self) -> &'static str {
match *self {
DateErrorKind::Overflow => "Date overflow",
DateErrorKind::Underflow => "Date underflow",
}
}
}
impl<'a, T: Value> JsResultExt<'a, T> for Result<Handle<'a, T>, DateError> {
/// Creates an `Error` on error
fn or_throw<'b, C: Context<'b>>(self, cx: &mut C) -> JsResult<'a, T> {
self.or_else(|e| cx.throw_range_error(e.0.as_str()))
}
}
impl JsDate {
/// The smallest possible Date value, defined by ECMAScript. See <https://www.ecma-international.org/ecma-262/5.1/#sec-15.7.3.3>
pub const MIN_VALUE: f64 = -8.64e15;
/// The largest possible Date value, defined by ECMAScript. See <https://www.ecma-international.org/ecma-262/5.1/#sec-15.7.3.2>
pub const MAX_VALUE: f64 = 8.64e15;
/// Creates a new Date. It errors when `value` is outside the range of valid JavaScript Date values. When `value`
/// is `NaN`, the operation will succeed but with an invalid Date
pub fn new<'a, C: Context<'a>, T: Into<f64>>(
cx: &mut C,
value: T,
) -> Result<Handle<'a, JsDate>, DateError> {
let env = cx.env().to_raw();
let time = value.into();
if time > JsDate::MAX_VALUE {
return Err(DateError(DateErrorKind::Overflow));
} else if time < JsDate::MIN_VALUE {
return Err(DateError(DateErrorKind::Underflow));
}
let local = unsafe { neon_runtime::date::new_date(env, time) };
let date = Handle::new_internal(JsDate(local));
Ok(date)
}
/// Creates a new Date with lossy conversion for out of bounds Date values. Out of bounds
/// values will be treated as NaN
pub fn new_lossy<'a, C: Context<'a>, V: Into<f64>>(cx: &mut C, value: V) -> Handle<'a, JsDate> {
let env = cx.env().to_raw();
let local = unsafe { neon_runtime::date::new_date(env, value.into()) };
Handle::new_internal(JsDate(local))
}
/// Gets the Date's value. An invalid Date will return `std::f64::NaN`
pub fn value<'a, C: Context<'a>>(self, cx: &mut C) -> f64 {
let env = cx.env().to_raw();
unsafe { neon_runtime::date::value(env, self.to_raw()) }
}
/// Checks if the Date's value is valid. A Date is valid if its value is between
/// `JsDate::MIN_VALUE` and `JsDate::MAX_VALUE` or if it is `NaN`
pub fn is_valid<'a, C: Context<'a>>(self, cx: &mut C) -> bool {
let value = self.value(cx);
(JsDate::MIN_VALUE..=JsDate::MAX_VALUE).contains(&value)
}
}
impl ValueInternal for JsDate {
fn name() -> String {
"object".to_string()
}
fn is_typeof<Other: Value>(env: Env, other: Other) -> bool {
unsafe { neon_runtime::tag::is_date(env.to_raw(), other.to_raw()) }
}
}
impl Object for JsDate {}
| 30.706767 | 132 | 0.615328 |
bff0717c678201bddeacc73448e4cc8de224b983 | 19,685 | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use std::cmp::Ordering;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering};
use std::sync::Arc;
use crate::storage::mvcc::{Lock, LockType, WriteRef, WriteType};
use engine_traits::{
IterOptions, Iterable, Iterator as EngineIterator, KvEngine, Peekable, SeekKey,
};
use engine_traits::{CF_DEFAULT, CF_LOCK, CF_RAFT, CF_WRITE};
use kvproto::kvrpcpb::{MvccInfo, MvccLock, MvccValue, MvccWrite, Op};
use raftstore::coprocessor::{ConsistencyCheckMethod, ConsistencyCheckObserver, Coprocessor};
use raftstore::Result;
use tikv_util::keybuilder::KeyBuilder;
use txn_types::Key;
const PHYSICAL_SHIFT_BITS: usize = 18;
const SAFE_POINT_WINDOW: usize = 120;
// When leader broadcasts a ComputeHash command to followers, it's possible that the safe point
// becomes stale when the command reaches followers. So use a 2 minutes window to reduce this.
fn get_safe_point_for_check(mut safe_point: u64) -> u64 {
safe_point >>= PHYSICAL_SHIFT_BITS;
safe_point += (SAFE_POINT_WINDOW * 1000) as u64; // 120s * 1000ms/s.
safe_point << PHYSICAL_SHIFT_BITS
}
const fn zero_safe_point_for_check() -> u64 {
let mut safe_point = 0;
safe_point >>= PHYSICAL_SHIFT_BITS;
safe_point += (SAFE_POINT_WINDOW * 1000) as u64; // 120s * 1000ms/s.
safe_point << PHYSICAL_SHIFT_BITS
}
#[derive(Clone)]
pub struct Mvcc<E: KvEngine> {
_engine: PhantomData<E>,
local_safe_point: Arc<AtomicU64>,
}
impl<E: KvEngine> Coprocessor for Mvcc<E> {}
impl<E: KvEngine> Mvcc<E> {
pub fn new(safe_point: Arc<AtomicU64>) -> Self {
Mvcc {
_engine: Default::default(),
local_safe_point: safe_point,
}
}
}
impl<E: KvEngine> ConsistencyCheckObserver<E> for Mvcc<E> {
fn update_context(&self, context: &mut Vec<u8>) -> bool {
context.push(ConsistencyCheckMethod::Mvcc as u8);
context.reserve(8);
let len = context.len();
let mut safe_point = self.local_safe_point.load(AtomicOrdering::Acquire);
safe_point = get_safe_point_for_check(safe_point);
unsafe {
context.set_len(len + 8);
std::ptr::copy_nonoverlapping(
safe_point.to_le_bytes().as_ptr(),
&mut context[len] as _,
8,
);
}
// Skiped all other observers.
true
}
fn compute_hash(
&self,
region: &kvproto::metapb::Region,
context: &mut &[u8],
snap: &E::Snapshot,
) -> Result<Option<u32>> {
if context.is_empty() {
return Ok(None);
}
assert_eq!(context[0], ConsistencyCheckMethod::Mvcc as u8);
let safe_point = u64::from_le_bytes(context[1..9].try_into().unwrap());
*context = &context[9..];
let local_safe_point = self.local_safe_point.load(AtomicOrdering::Acquire);
if safe_point < local_safe_point || safe_point <= zero_safe_point_for_check() {
warn!(
"skip consistency check"; "region_id" => region.get_id(),
"safe_ponit" => safe_point,
"local_safe_point" => local_safe_point,
"zero" => zero_safe_point_for_check(),
);
return Ok(None);
}
let mut scanner = MvccInfoScanner::new(
|cf, opts| snap.iterator_cf_opt(cf, opts).map_err(|e| box_err!(e)),
Some(&keys::data_key(region.get_start_key())),
Some(&keys::data_end_key(region.get_end_key())),
MvccChecksum::new(safe_point),
)?;
while scanner.next_item()?.is_some() {}
// Computes the hash from the Region state too.
let mut digest = scanner.observer.digest;
let region_state_key = keys::region_state_key(region.get_id());
digest.update(®ion_state_key);
match snap.get_value_cf(CF_RAFT, ®ion_state_key) {
Err(e) => return Err(e.into()),
Ok(Some(v)) => digest.update(&v),
Ok(None) => {}
}
Ok(Some(digest.finalize()))
}
}
pub trait MvccInfoObserver {
type Target;
// Meet a new mvcc record prefixed `key`.
fn on_new_item(&mut self, key: &[u8]);
// Emit a complete mvcc record.
fn emit(&mut self) -> Self::Target;
fn on_write(&mut self, key: &[u8], value: &[u8]) -> Result<bool>;
fn on_lock(&mut self, key: &[u8], value: &[u8]) -> Result<bool>;
fn on_default(&mut self, key: &[u8], value: &[u8]) -> Result<bool>;
}
pub struct MvccInfoScanner<Iter: EngineIterator, Ob: MvccInfoObserver> {
lock_iter: Iter,
default_iter: Iter,
write_iter: Iter,
observer: Ob,
}
impl<Iter: EngineIterator, Ob: MvccInfoObserver> MvccInfoScanner<Iter, Ob> {
pub fn new<F>(f: F, from: Option<&[u8]>, to: Option<&[u8]>, ob: Ob) -> Result<Self>
where
F: Fn(&str, IterOptions) -> Result<Iter>,
{
let from = from.unwrap_or(keys::DATA_MIN_KEY);
let to = to.unwrap_or(keys::DATA_MAX_KEY);
let key_builder = |key: &[u8]| -> Result<Option<KeyBuilder>> {
if !keys::validate_data_key(key) && key != keys::DATA_MAX_KEY {
return Err(box_err!("non-mvcc area {}", hex::encode_upper(key)));
}
Ok(Some(KeyBuilder::from_vec(key.to_vec(), 0, 0)))
};
let iter_opts = IterOptions::new(key_builder(from)?, key_builder(to)?, false);
let gen_iter = |cf: &str| -> Result<Iter> {
let mut iter = f(cf, iter_opts.clone())?;
box_try!(iter.seek(SeekKey::Key(from)));
Ok(iter)
};
Ok(MvccInfoScanner {
lock_iter: gen_iter(CF_LOCK)?,
default_iter: gen_iter(CF_DEFAULT)?,
write_iter: gen_iter(CF_WRITE)?,
observer: ob,
})
}
fn next_item(&mut self) -> Result<Option<Ob::Target>> {
let mut lock_ok = box_try!(self.lock_iter.valid());
let mut writes_ok = box_try!(self.write_iter.valid());
let prefix = match (lock_ok, writes_ok) {
(false, false) => return Ok(None),
(true, false) => self.lock_iter.key(),
(false, true) => box_try!(Key::truncate_ts_for(self.write_iter.key())),
(true, true) => {
let prefix1 = self.lock_iter.key();
let prefix2 = box_try!(Key::truncate_ts_for(self.write_iter.key()));
match prefix1.cmp(prefix2) {
Ordering::Less => {
writes_ok = false;
prefix1
}
Ordering::Greater => {
lock_ok = false;
prefix2
}
Ordering::Equal => prefix1,
}
}
};
self.observer.on_new_item(prefix);
while writes_ok {
let (key, value) = (self.write_iter.key(), self.write_iter.value());
writes_ok = self.observer.on_write(key, value)? && box_try!(self.write_iter.next());
}
while lock_ok {
let (key, value) = (self.lock_iter.key(), self.lock_iter.value());
lock_ok = self.observer.on_lock(key, value)? && box_try!(self.lock_iter.next());
}
let mut ok = box_try!(self.default_iter.valid());
while ok {
let (key, value) = (self.default_iter.key(), self.default_iter.value());
ok = self.observer.on_default(key, value)? && box_try!(self.default_iter.next());
}
Ok(Some(self.observer.emit()))
}
}
#[derive(Clone, Default)]
struct MvccInfoCollector {
current_item: Vec<u8>,
mvcc_info: MvccInfo,
}
impl MvccInfoObserver for MvccInfoCollector {
type Target = (Vec<u8>, MvccInfo);
fn on_new_item(&mut self, key: &[u8]) {
self.current_item = key.to_vec();
}
fn emit(&mut self) -> Self::Target {
let item = std::mem::take(&mut self.current_item);
let info = std::mem::take(&mut self.mvcc_info);
(item, info)
}
fn on_write(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let (prefix, commit_ts) = box_try!(Key::split_on_ts_for(key));
if prefix != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
let write = box_try!(WriteRef::parse(&value));
let mut write_info = MvccWrite::default();
match write.write_type {
WriteType::Put => write_info.set_type(Op::Put),
WriteType::Delete => write_info.set_type(Op::Del),
WriteType::Lock => write_info.set_type(Op::Lock),
WriteType::Rollback => write_info.set_type(Op::Rollback),
}
write_info.set_start_ts(write.start_ts.into_inner());
write_info.set_commit_ts(commit_ts.into_inner());
if let Some(ref value) = write.short_value {
write_info.set_short_value(value.to_vec());
}
self.mvcc_info.mut_writes().push(write_info);
Ok(true)
}
fn on_lock(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
if key != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
let lock = box_try!(Lock::parse(value));
let mut lock_info = MvccLock::default();
match lock.lock_type {
LockType::Put => lock_info.set_type(Op::Put),
LockType::Delete => lock_info.set_type(Op::Del),
LockType::Lock => lock_info.set_type(Op::Lock),
LockType::Pessimistic => lock_info.set_type(Op::PessimisticLock),
}
lock_info.set_start_ts(lock.ts.into_inner());
lock_info.set_primary(lock.primary);
lock_info.set_short_value(lock.short_value.unwrap_or_default());
self.mvcc_info.set_lock(lock_info);
Ok(true)
}
fn on_default(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let (prefix, start_ts) = box_try!(Key::split_on_ts_for(key));
if prefix != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
let mut value_info = MvccValue::default();
value_info.set_start_ts(start_ts.into_inner());
value_info.set_value(value.to_vec());
self.mvcc_info.mut_values().push(value_info);
Ok(true)
}
}
pub struct MvccInfoIterator<Iter: EngineIterator> {
scanner: MvccInfoScanner<Iter, MvccInfoCollector>,
limit: usize,
count: usize,
}
impl<Iter: EngineIterator> MvccInfoIterator<Iter> {
pub fn new<F>(f: F, from: Option<&[u8]>, to: Option<&[u8]>, limit: usize) -> Result<Self>
where
F: Fn(&str, IterOptions) -> Result<Iter>,
{
let scanner = MvccInfoScanner::new(f, from, to, MvccInfoCollector::default())?;
Ok(Self {
scanner,
limit,
count: 0,
})
}
}
impl<Iter: EngineIterator> Iterator for MvccInfoIterator<Iter> {
type Item = Result<(Vec<u8>, MvccInfo)>;
fn next(&mut self) -> Option<Result<(Vec<u8>, MvccInfo)>> {
if self.limit != 0 && self.count >= self.limit {
return None;
}
match self.scanner.next_item() {
Ok(Some(item)) => {
self.count += 1;
Some(Ok(item))
}
Ok(None) => None,
Err(e) => Some(Err(e)),
}
}
}
struct MvccChecksum {
safe_point: u64,
digest: crc32fast::Hasher,
current_item: Vec<u8>,
committed_txns: Vec<u64>,
committed_txns_sorted: bool,
}
impl MvccChecksum {
fn new(safe_point: u64) -> Self {
Self {
safe_point,
digest: crc32fast::Hasher::new(),
current_item: vec![],
committed_txns: vec![],
committed_txns_sorted: false,
}
}
}
impl MvccInfoObserver for MvccChecksum {
type Target = ();
fn on_new_item(&mut self, key: &[u8]) {
self.current_item = key.to_vec();
}
fn emit(&mut self) -> Self::Target {
self.current_item.clear();
self.committed_txns.clear();
}
fn on_write(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let (prefix, commit_ts) = box_try!(Key::split_on_ts_for(key));
if prefix != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
let commit_ts = commit_ts.into_inner();
if commit_ts <= self.safe_point {
// Skip stale records.
return Ok(true);
}
let write = box_try!(WriteRef::parse(&value));
let start_ts = write.start_ts.into_inner();
self.digest.update(key);
self.digest.update(value);
self.committed_txns.push(start_ts);
Ok(true)
}
fn on_lock(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let lock = box_try!(Lock::parse(value));
if lock.ts.into_inner() <= self.safe_point {
// Skip stale records.
return Ok(true);
}
self.digest.update(key);
self.digest.update(value);
Ok(true)
}
fn on_default(&mut self, key: &[u8], value: &[u8]) -> Result<bool> {
let (prefix, start_ts) = box_try!(Key::split_on_ts_for(key));
if prefix != AsRef::<[u8]>::as_ref(&self.current_item) {
return Ok(false);
}
if !self.committed_txns_sorted {
self.committed_txns.sort();
self.committed_txns_sorted = true;
}
let start_ts = start_ts.into_inner();
if start_ts > self.safe_point && self.committed_txns.binary_search(&start_ts).is_ok() {
self.digest.update(key);
self.digest.update(value);
}
Ok(true)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::kv::TestEngineBuilder;
use crate::storage::mvcc::tests::{must_prewrite_delete, must_prewrite_put, must_rollback};
use crate::storage::txn::tests::must_commit;
use engine_rocks::RocksEngine;
#[test]
fn test_update_context() {
let safe_point = Arc::new(AtomicU64::new((123 << PHYSICAL_SHIFT_BITS) * 1000));
let observer = Mvcc::<RocksEngine>::new(safe_point);
let mut context = Vec::new();
assert!(observer.update_context(&mut context));
assert_eq!(context.len(), 9);
assert_eq!(context[0], ConsistencyCheckMethod::Mvcc as u8);
let safe_point = u64::from_le_bytes(context[1..9].try_into().unwrap());
assert_eq!(safe_point, (243 << PHYSICAL_SHIFT_BITS) * 1000);
}
#[test]
fn test_mvcc_checksum() {
let engine = TestEngineBuilder::new().build().unwrap();
must_prewrite_put(&engine, b"zAAAAA", b"value", b"PRIMARY", 100);
must_commit(&engine, b"zAAAAA", 100, 101);
must_prewrite_put(&engine, b"zCCCCC", b"value", b"PRIMARY", 110);
must_commit(&engine, b"zCCCCC", 110, 111);
must_prewrite_put(&engine, b"zBBBBB", b"value", b"PRIMARY", 200);
must_commit(&engine, b"zBBBBB", 200, 201);
must_prewrite_put(&engine, b"zDDDDD", b"value", b"PRIMARY", 200);
must_rollback(&engine, b"zDDDDD", 200);
must_prewrite_put(&engine, b"zFFFFF", b"value", b"PRIMARY", 200);
must_prewrite_delete(&engine, b"zGGGGG", b"PRIMARY", 200);
let mut checksums = Vec::with_capacity(3);
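        // Safe points 150 and 160 see the same live records, so their digests must
        // match; safe point 100 additionally covers the writes committed at 101 and 111.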
for &safe_point in &[150, 160, 100] {
let raw = engine.get_rocksdb();
let mut scanner = MvccInfoScanner::new(
|cf, opts| raw.iterator_cf_opt(cf, opts).map_err(|e| box_err!(e)),
Some(&keys::data_key(b"")),
Some(&keys::data_end_key(b"")),
MvccChecksum::new(safe_point),
)
.unwrap();
while scanner.next_item().unwrap().is_some() {}
let digest = scanner.observer.digest;
checksums.push(digest.finalize());
}
assert_eq!(checksums[0], checksums[1]);
assert_ne!(checksums[0], checksums[2]);
}
#[test]
fn test_mvcc_info_collector() {
use crate::storage::mvcc::Write;
use engine_rocks::raw::{ColumnFamilyOptions, DBOptions};
use engine_rocks::raw_util::CFOptions;
use engine_traits::SyncMutable;
use txn_types::TimeStamp;
let tmp = tempfile::Builder::new()
.prefix("test_debug")
.tempdir()
.unwrap();
let path = tmp.path().to_str().unwrap();
let engine = Arc::new(
engine_rocks::raw_util::new_engine_opt(
path,
DBOptions::new(),
vec![
CFOptions::new(CF_DEFAULT, ColumnFamilyOptions::new()),
CFOptions::new(CF_WRITE, ColumnFamilyOptions::new()),
CFOptions::new(CF_LOCK, ColumnFamilyOptions::new()),
CFOptions::new(CF_RAFT, ColumnFamilyOptions::new()),
],
)
.unwrap(),
);
let engine = RocksEngine::from_db(engine);
let cf_default_data = vec![
(b"k1", b"v", 5.into()),
(b"k2", b"x", 10.into()),
(b"k3", b"y", 15.into()),
];
for &(prefix, value, ts) in &cf_default_data {
let encoded_key = Key::from_raw(prefix).append_ts(ts);
let key = keys::data_key(encoded_key.as_encoded().as_slice());
engine.put(key.as_slice(), value).unwrap();
}
let cf_lock_data = vec![
(b"k1", LockType::Put, b"v", 5.into()),
(b"k4", LockType::Lock, b"x", 10.into()),
(b"k5", LockType::Delete, b"y", 15.into()),
];
for &(prefix, tp, value, version) in &cf_lock_data {
let encoded_key = Key::from_raw(prefix);
let key = keys::data_key(encoded_key.as_encoded().as_slice());
let lock = Lock::new(
tp,
value.to_vec(),
version,
0,
None,
TimeStamp::zero(),
0,
TimeStamp::zero(),
);
let value = lock.to_bytes();
engine
.put_cf(CF_LOCK, key.as_slice(), value.as_slice())
.unwrap();
}
let cf_write_data = vec![
(b"k2", WriteType::Put, 5.into(), 10.into()),
(b"k3", WriteType::Put, 15.into(), 20.into()),
(b"k6", WriteType::Lock, 25.into(), 30.into()),
(b"k7", WriteType::Rollback, 35.into(), 40.into()),
];
for &(prefix, tp, start_ts, commit_ts) in &cf_write_data {
let encoded_key = Key::from_raw(prefix).append_ts(commit_ts);
let key = keys::data_key(encoded_key.as_encoded().as_slice());
let write = Write::new(tp, start_ts, None);
let value = write.as_ref().to_bytes();
engine
.put_cf(CF_WRITE, key.as_slice(), value.as_slice())
.unwrap();
}
let scan_mvcc = |start: &[u8], end: &[u8], limit: u64| {
MvccInfoIterator::new(
|cf, opts| engine.iterator_cf_opt(cf, opts).map_err(|e| box_err!(e)),
if start.is_empty() { None } else { Some(start) },
if end.is_empty() { None } else { Some(end) },
limit as usize,
)
.unwrap()
};
let mut count = 0;
for key_and_mvcc in scan_mvcc(b"z", &[], 30) {
assert!(key_and_mvcc.is_ok());
count += 1;
}
assert_eq!(count, 7);
}
}
use crate::Analyzer;
use anyhow::Result;
use common::filters::{bandpass_filter, convolve, cutoff_from_frequency};
use common::synth::quantize_samples;
use ndarray::prelude::*;
use ndarray::{ScalarOperand, Slice};
use num::{traits::FloatConst, Float, NumCast, One};
use std::error;
use std::fmt;
type Peak = i16;
#[derive(Debug)]
enum BpmDetectionError {
NoMatches(),
}
impl fmt::Display for BpmDetectionError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::NoMatches() => {
write!(f, "did not find sufficient matches")
}
}
}
}
impl error::Error for BpmDetectionError {}
// todo: replace with proto
#[derive(Debug)]
pub struct BpmDetectionAnalyzerConfig {
pub lowpass_cutoff_freq_hz: f64,
pub highpass_cutoff_freq_hz: f64,
pub band: f64,
pub sample_rate: u32,
pub nchannels: u16,
}
impl Default for BpmDetectionAnalyzerConfig {
fn default() -> Self {
Self {
lowpass_cutoff_freq_hz: 150.0,
highpass_cutoff_freq_hz: 100.0,
band: 0.01,
nchannels: 2,
sample_rate: 44100,
}
}
}
#[derive(Debug)]
pub struct BpmDetectionAnalyzer {
pub config: BpmDetectionAnalyzerConfig,
pub chunk_size: usize,
pub buffer_window_size: usize,
buffer: Array1<f64>,
}
struct PeakIntervalGroup {
pub tempo: f32,
pub count: usize,
}
impl BpmDetectionAnalyzer {
pub fn new(config: BpmDetectionAnalyzerConfig) -> Result<Self> {
let sample_rate: f32 = NumCast::from(config.sample_rate).unwrap();
        // the larger the total window, the more precise the BPM
let chunk_size: usize = NumCast::from(sample_rate * 0.5).unwrap();
let buffer_window_size: usize = NumCast::from(sample_rate * 10.0).unwrap();
Ok(Self {
config,
chunk_size,
buffer_window_size,
buffer: Array1::zeros(0),
})
}
fn find_intervals(&self, peaks: Vec<(usize, Peak)>) -> Vec<PeakIntervalGroup> {
// What we now do is get all of our peaks, and then measure the distance to
// other peaks, to create intervals. Then based on the distance between
// those peaks (the distance of the intervals) we can calculate the BPM of
// that particular interval.
// The interval that is seen the most should have the BPM that corresponds
// to the track itself.
let mut groups: Vec<PeakIntervalGroup> = Vec::new();
for (idx, _) in peaks.iter().enumerate() {
for i in (idx + 1)..(peaks.len().min(idx + 10)) {
let minute: f32 = NumCast::from(60 * self.config.sample_rate).unwrap();
let distance: f32 = NumCast::from(peaks[i].0 - peaks[idx].0).unwrap();
let tempo = minute / distance;
let mut group = PeakIntervalGroup { tempo, count: 1 };
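                // Fold the raw tempo into the 90-180 BPM range by octave doubling/halving.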
while group.tempo < 90.0 {
group.tempo *= 2.0;
}
while group.tempo > 180.0 {
group.tempo /= 2.0;
}
group.tempo = group.tempo.round();
for other_group in groups.iter_mut() {
if (other_group.tempo - group.tempo).abs() < f32::EPSILON {
other_group.count += 1;
}
}
if !groups.iter().any(|other_group: &PeakIntervalGroup| {
(other_group.tempo - group.tempo).abs() < f32::EPSILON
}) {
groups.push(group);
}
}
}
groups
}
fn detect_peaks(&self, samples: Array1<Peak>) -> Vec<(usize, Peak)> {
let part_size = self.chunk_size;
let parts: f32 = samples.len() as f32 / part_size as f32;
let parts: usize = NumCast::from(parts).unwrap();
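        // Truncation here drops a trailing partial chunk, if any.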
let mut peaks: Vec<(usize, Peak)> = Vec::new();
for part in 0..parts {
let chunk = samples.slice_axis(
Axis(0),
Slice::from(part * part_size..(part + 1) * part_size),
);
let mut max: Option<(usize, &Peak)> = None;
for new in chunk.indexed_iter() {
max = match max {
Some(old) => {
if old.1 > new.1 {
Some(old)
} else {
Some(new)
}
}
None => Some(new),
};
}
if let Some((idx, peak)) = max {
peaks.push((part * part_size + idx, *peak));
}
}
peaks.sort_by(|a, b| b.1.cmp(&a.1));
// println!("peaks sorted by volume: {:?}", peaks);
        // ...take the loudest half of those...
let center: f32 = NumCast::from(peaks.len()).unwrap();
let center = center * 0.5;
let center: usize = NumCast::from(center).unwrap();
let loudest_peaks = &peaks[..center];
let mut loudest_peaks = loudest_peaks.to_vec();
// ...and re-sort it back based on position.
loudest_peaks.sort_by(|a, b| a.0.cmp(&b.0));
// println!("peaks sorted back by position: {:?}", loudest_peaks);
loudest_peaks
}
}
impl<T> Analyzer<Array2<T>> for BpmDetectionAnalyzer
where
T: Float + FloatConst + One + Send + Sync + Default + std::fmt::Debug + ScalarOperand,
{
fn window_size(&self) -> usize {
self.buffer_window_size
}
fn descriptor(&self) -> proto::grpc::AudioAnalyzerDescriptor {
proto::grpc::AudioAnalyzerDescriptor {
name: "BpmDetectionAnalyzer".to_string(),
input: None,
}
}
fn analyze_samples(
&mut self,
samples: Array2<T>,
) -> Result<proto::audio::analysis::AudioAnalysisResult> {
let samples = samples.mapv(|v| {
let v: f64 = NumCast::from(v.abs()).unwrap();
v
});
// combine channels and choose the maximum
let samples = samples.map_axis(Axis(1), |row| row.iter().fold(0f64, |acc, v| acc.max(*v)));
// let start = Instant::now();
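        // Band-pass the configured (default 100-150 Hz) band to isolate
        // low-frequency (kick/bass) content.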
let filter = bandpass_filter(
cutoff_from_frequency(
self.config.highpass_cutoff_freq_hz,
NumCast::from(self.config.sample_rate).unwrap(),
),
cutoff_from_frequency(
self.config.lowpass_cutoff_freq_hz,
NumCast::from(self.config.sample_rate).unwrap(),
),
self.config.band,
);
let filtered_samples = Array::from_iter(quantize_samples::<Peak>(&convolve(
&filter,
samples.as_slice().unwrap(),
)));
// println!("filtered: {:?}", filtered_samples);
let peaks = self.detect_peaks(filtered_samples);
// println!("found {:?} peaks", peaks.len());
// println!("found {:?} peaks: {:?}", peaks.len(), peaks);
let mut intervals = self.find_intervals(peaks);
// println!("found {:?} intervals", intervals.len());
intervals.sort_by(|a, b| b.count.cmp(&a.count));
// let duration = start.elapsed();
// println!("computing bpm took: {:?}", duration);
        if !intervals.is_empty() {
let top_guess = &intervals[0];
println!("guessed {} BPM", top_guess.tempo);
let result = proto::audio::analysis::BpmDetectionAudioAnalysisResult {
bpm: top_guess.tempo,
};
return Ok(proto::audio::analysis::AudioAnalysisResult {
seq_num: 0,
window_size: NumCast::from(self.buffer_window_size).unwrap(),
result: Some(proto::audio::analysis::audio_analysis_result::Result::Bpm(
result,
)),
});
}
        Err(BpmDetectionError::NoMatches().into())
}
}
use std::error::Error;
use std::io;
use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, RwLock};
use super::events::{Event, Events};
use crate::network::{NetworkInfo, PacketInfo, PacketType};
use termion::{event::Key, raw::IntoRawMode};
use tui::{
backend::TermionBackend,
layout::{Alignment, Constraint, Direction, Layout},
style::{Color, Modifier, Style},
text::{Span, Spans},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph},
Terminal,
};
#[allow(unused_imports)]
use pnet::packet::{
arp::ArpPacket, icmp::IcmpPacket, icmpv6::Icmpv6Packet, tcp::TcpPacket, udp::UdpPacket,
};
/// Main function which renders UI on the terminal
pub fn draw_ui(
net_info: Arc<RwLock<NetworkInfo>>,
running: Arc<AtomicBool>,
) -> Result<(), Box<dyn Error>> {
let stdout = io::stdout().into_raw_mode()?;
let backend = TermionBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
let events = Events::new();
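    // Tracks which pane has keyboard focus: the packet list (true) or the
    // packet-information pane (false).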
let mut packets_state_selected = true;
let mut packets_state = ListState::default();
let mut packets_info_state = ListState::default();
let mut packets_info_len: usize = 0;
while running.load(Ordering::Relaxed) {
terminal.draw(|f| {
// Setting the layout of the UI
let chunks = Layout::default()
.direction(Direction::Vertical)
.constraints(
[
Constraint::Percentage(80),
Constraint::Percentage(15),
Constraint::Percentage(5),
]
.as_ref(),
)
.split(f.size());
// Header for packet capture view
let header = Spans::from(Span::styled(
get_packets_ui_header(),
Style::default().fg(Color::Black).bg(Color::White),
));
// Getting info about packets captured
let items: Vec<ListItem> = net_info
.read()
.unwrap()
.packets
.iter()
.enumerate()
.map(|(current_num, p)| {
let ptype = get_packet_info(p, current_num + 1);
ListItem::new(Spans::from(ptype))
.style(Style::default().fg(Color::White).bg(Color::Black))
})
.collect();
let items = List::new(items)
.block(
Block::default()
.title(header)
.borders(Borders::ALL)
.style(Style::default().bg(Color::Black)),
)
.highlight_style(Style::default().bg(Color::Red).add_modifier(Modifier::BOLD));
// Rendering the packets that are captured
f.render_stateful_widget(items, chunks[0], &mut packets_state);
// Rendering logic for displaying packet information in the bottom window pane
if let Some(i) = packets_state.selected() {
if i < net_info.read().unwrap().packets.len() {
let items: Vec<ListItem> =
get_packet_description(&net_info.read().unwrap().packets[i])
.iter()
.map(|field| {
let field_val = field.to_string();
ListItem::new(Spans::from(field_val))
.style(Style::default().fg(Color::White).bg(Color::Black))
})
.collect();
packets_info_len = items.len();
let items = List::new(items)
.block(
Block::default()
.title("Packet Information")
.borders(Borders::ALL)
.style(Style::default().bg(Color::Black)),
)
.highlight_style(
Style::default().bg(Color::Red).add_modifier(Modifier::BOLD),
);
f.render_stateful_widget(items, chunks[1], &mut packets_info_state);
}
} else {
let items = List::new(vec![])
.block(
Block::default()
.title("Packet Information")
.borders(Borders::ALL)
.style(Style::default().bg(Color::Black)),
)
.highlight_style(Style::default().bg(Color::Red).add_modifier(Modifier::BOLD));
f.render_stateful_widget(items, chunks[1], &mut packets_info_state);
}
// Footer info rendering
let footer = vec![Spans::from(vec![
Span::raw(format!(
"Captured Packets: {} ",
net_info.read().unwrap().captured_packets
)),
Span::raw(format!(
"Dropped Packets: {} ",
net_info.read().unwrap().dropped_packets
)),
])];
let footer_para = Paragraph::new(footer)
.block(Block::default())
.style(Style::default().fg(Color::White).bg(Color::Black))
.alignment(Alignment::Left);
f.render_widget(footer_para, chunks[2]);
})?;
// Capture events from the keyboard
match events.next()? {
Event::Input(input) => match input {
Key::Char('q') => {
terminal.clear()?;
running.store(false, Ordering::SeqCst);
}
Key::Left | Key::Esc => {
packets_state.select(None);
}
Key::Down | Key::Char('j') => {
if packets_state_selected {
let i = match packets_state.selected() {
Some(i) => {
if i >= net_info.read().unwrap().packets.len() {
0
} else {
i + 1
}
}
None => 0,
};
packets_state.select(Some(i));
} else {
let i = match packets_info_state.selected() {
Some(i) => {
if i >= packets_info_len {
0
} else {
i + 1
}
}
None => 0,
};
packets_info_state.select(Some(i));
}
}
Key::Up | Key::Char('k') => {
if packets_state_selected {
let i = match packets_state.selected() {
Some(i) => {
if i == 0 {
net_info.read().unwrap().packets.len().saturating_sub(1)
} else {
i - 1
}
}
None => 0,
};
packets_state.select(Some(i));
} else {
let i = match packets_info_state.selected() {
Some(i) => {
if i == 0 {
packets_info_len.saturating_sub(1)
} else {
i - 1
}
}
None => 0,
};
packets_info_state.select(Some(i));
}
}
Key::Char('g') => {
if packets_state_selected {
packets_state.select(Some(0));
} else {
packets_info_state.select(Some(0));
}
}
Key::Char('G') => {
if packets_state_selected {
packets_state.select(Some(
net_info.read().unwrap().packets.len().saturating_sub(1),
));
} else {
packets_info_state.select(Some(packets_info_len.saturating_sub(1)));
}
}
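                // Tab or 'J' toggles keyboard focus between the two panes.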
Key::Char('\t') | Key::Char('J') => {
packets_state_selected = !packets_state_selected;
}
_ => {}
},
Event::Tick => {}
}
}
Ok(())
}
/// Get header of packet capture UI
fn get_packets_ui_header() -> String {
format!(
"{:<10} {:<40} {:<40} {:<10} {:<6} {:<20}",
"Num", "Source", "Destination", "Protocol", "Length", "Info"
)
}
/// Get brief packet info
fn get_packet_info(packet: &PacketInfo, current_num: usize) -> String {
match packet.packet_type {
PacketType::TCP => {
let raw_packet = packet.packet_data.packet();
let payload = packet.packet_data.payload();
let source_ip = if let Some(ip) = packet.source_ip {
ip.to_string()
} else {
"NA".to_string()
};
let dest_ip = if let Some(ip) = packet.dest_ip {
ip.to_string()
} else {
"NA".to_string()
};
let tcp = TcpPacket::new(raw_packet);
if let Some(tcp) = tcp {
format!(
"{:<10} {:<40} {:<40} {:<10} {:<6} {:<6} -> {:<6}",
current_num,
source_ip,
dest_ip,
"TCP",
payload.to_vec().len(),
tcp.get_source(),
tcp.get_destination()
)
} else {
"TCP packet malformed".to_string()
}
}
PacketType::UDP => {
let raw_packet = packet.packet_data.packet();
let payload = packet.packet_data.payload();
let source_ip = if let Some(ip) = packet.source_ip {
ip.to_string()
} else {
"NA".to_string()
};
let dest_ip = if let Some(ip) = packet.dest_ip {
ip.to_string()
} else {
"NA".to_string()
};
let udp = UdpPacket::new(raw_packet);
if let Some(udp) = udp {
format!(
"{:<10} {:<40} {:<40} {:<10} {:<6} {:<6} -> {:<6}",
current_num,
source_ip,
dest_ip,
"UDP",
payload.to_vec().len(),
udp.get_source(),
udp.get_destination()
)
} else {
"UDP packet malformed".to_string()
}
}
PacketType::ARP => {
let raw_packet = packet.packet_data.packet();
let payload = packet.packet_data.payload();
let arp = ArpPacket::new(raw_packet);
if let Some(arp) = arp {
format!(
"{:<10} {:<40} {:<40} {:<10} {:<6} {:?}",
current_num,
arp.get_sender_hw_addr(),
arp.get_target_hw_addr(),
"ARP",
payload.to_vec().len(),
arp.get_operation()
)
} else {
"ARP malformed".to_string()
}
}
PacketType::ICMP => {
let raw_packet = packet.packet_data.packet();
let payload = packet.packet_data.payload();
let source_ip = if let Some(ip) = packet.source_ip {
ip.to_string()
} else {
"NA".to_string()
};
let dest_ip = if let Some(ip) = packet.dest_ip {
ip.to_string()
} else {
"NA".to_string()
};
let icmp = IcmpPacket::new(raw_packet);
// TODO: Improve print information based on ICMP Type
if let Some(icmp) = icmp {
format!(
"{:<10} {:<40} {:<40} {:<10} {:<6} {:?}",
current_num,
source_ip,
dest_ip,
"ICMP",
payload.to_vec().len(),
icmp.get_icmp_code()
)
} else {
"ICMP packet malformed".to_string()
}
}
        // TODO: Print information for ICMPv6
PacketType::ICMPv6 => {
let raw_packet = packet.packet_data.packet();
let payload = packet.packet_data.payload();
let source_ip = if let Some(ip) = packet.source_ip {
ip.to_string()
} else {
"NA".to_string()
};
let dest_ip = if let Some(ip) = packet.dest_ip {
ip.to_string()
} else {
"NA".to_string()
};
let icmpv6 = Icmpv6Packet::new(raw_packet);
// TODO: Improve print information based on ICMP Type
if let Some(icmpv6) = icmpv6 {
format!(
"{:<10} {:<40} {:<40} {:<10} {:<6} {:?}",
current_num,
source_ip,
dest_ip,
"ICMPv6",
payload.to_vec().len(),
icmpv6.get_icmpv6_code()
)
} else {
"ICMPv6 packet malformed".to_string()
}
}
}
}
/// Get detailed packet description
fn get_packet_description(packet: &PacketInfo) -> Vec<String> {
let mut pkt_desc: Vec<String> = vec![];
match packet.packet_type {
PacketType::TCP => {
let raw_packet = packet.packet_data.packet();
// let payload = packet.packet_data.payload().to_ascii_lowercase();
            match packet.source_ip {
                Some(ip) => pkt_desc.push(format!("Source IP: {}", ip)),
                None => pkt_desc.push("Source IP: NA".to_string()),
            }
            match packet.dest_ip {
                Some(ip) => pkt_desc.push(format!("Destination IP: {}", ip)),
                None => pkt_desc.push("Destination IP: NA".to_string()),
            }
let tcp = TcpPacket::new(raw_packet);
if let Some(tcp) = tcp {
pkt_desc.push(format!("Source Port: {}", tcp.get_source()));
pkt_desc.push(format!("Destination Port: {}", tcp.get_destination()));
pkt_desc.push(format!("Sequence Number: {}", tcp.get_sequence()));
pkt_desc.push(format!(
"Acknowledgement Number: {}",
tcp.get_acknowledgement()
));
pkt_desc.push(format!("Flags: {:b}", tcp.get_flags()));
pkt_desc.push(format!("Window: {}", tcp.get_window()));
}
}
PacketType::UDP => {
let raw_packet = packet.packet_data.packet();
// let payload = packet.packet_data.payload();
            match packet.source_ip {
                Some(ip) => pkt_desc.push(format!("Source IP: {}", ip)),
                None => pkt_desc.push("Source IP: NA".to_string()),
            }
            match packet.dest_ip {
                Some(ip) => pkt_desc.push(format!("Destination IP: {}", ip)),
                None => pkt_desc.push("Destination IP: NA".to_string()),
            }
let udp = UdpPacket::new(raw_packet);
if let Some(udp) = udp {
pkt_desc.push(format!("Source Port: {}", udp.get_source()));
pkt_desc.push(format!("Destination Port: {}", udp.get_destination()));
}
}
PacketType::ARP => {
let raw_packet = packet.packet_data.packet();
// let payload = packet.packet_data.payload();
let arp = ArpPacket::new(raw_packet);
if let Some(arp) = arp {
pkt_desc.push(format!("Hardware Type: {:?}", arp.get_hardware_type()));
pkt_desc.push(format!("Protocol Type: {:?}", arp.get_protocol_type()));
// TODO: Elaborate on the ARP option
pkt_desc.push(format!("Operation: {:?}", arp.get_operation()));
pkt_desc.push(format!(
"Sender Hardware Address: {}",
arp.get_sender_hw_addr()
));
pkt_desc.push(format!(
"Target Hardware Address: {}",
arp.get_target_hw_addr()
));
pkt_desc.push(format!(
"Sender IP Address: {}",
arp.get_sender_proto_addr()
));
pkt_desc.push(format!(
"Target IP Address: {}",
arp.get_target_proto_addr()
));
}
}
PacketType::ICMP => {
let raw_packet = packet.packet_data.packet();
// let payload = packet.packet_data.payload();
            match packet.source_ip {
                Some(ip) => pkt_desc.push(format!("Source IP: {}", ip)),
                None => pkt_desc.push("Source IP: NA".to_string()),
            }
            match packet.dest_ip {
                Some(ip) => pkt_desc.push(format!("Destination IP: {}", ip)),
                None => pkt_desc.push("Destination IP: NA".to_string()),
            }
let icmp = IcmpPacket::new(raw_packet);
// TODO: Expand description based on ICMP type
if let Some(icmp) = icmp {
pkt_desc.push(format!("ICMP Type: {:?}", icmp.get_icmp_type()));
pkt_desc.push(format!("ICMP Code: {:?}", icmp.get_icmp_code()));
}
}
// TODO: Packet description for ICMPv6 packets
PacketType::ICMPv6 => {
let raw_packet = packet.packet_data.packet();
// let payload = packet.packet_data.payload();
            match packet.source_ip {
                Some(ip) => pkt_desc.push(format!("Source IP: {}", ip)),
                None => pkt_desc.push("Source IP: NA".to_string()),
            }
            match packet.dest_ip {
                Some(ip) => pkt_desc.push(format!("Destination IP: {}", ip)),
                None => pkt_desc.push("Destination IP: NA".to_string()),
            }
let icmpv6 = Icmpv6Packet::new(raw_packet);
// TODO: Expand description based on ICMP type
if let Some(icmpv6) = icmpv6 {
pkt_desc.push(format!("ICMPv6 Type: {:?}", icmpv6.get_icmpv6_type()));
pkt_desc.push(format!("ICMPv6 Code: {:?}", icmpv6.get_icmpv6_code()));
}
}
};
pkt_desc
}
//!
//! Asynchronous serial communication using UART/USART peripherals
//!
//! # Word length
//!
//! By default, the UART/USART uses 8 data bits. The `Serial`, `Rx`, and `Tx` structs implement
//! the embedded-hal read and write traits with `u8` as the word type.
//!
//! You can also configure the hardware to use 9 data bits with the `Config` `wordlength_9()`
//! function. After creating a `Serial` with this option, use the `with_u16_data()` function to
//! convert the `Serial<_, _, u8>` object into a `Serial<_, _, u16>` that can send and receive
//! `u16`s.
//!
//! In this mode, the `Serial<_, _, u16>`, `Rx<_, u16>`, and `Tx<_, u16>` structs instead implement
//! the embedded-hal read and write traits with `u16` as the word type. You can use these
//! implementations for 9-bit words.
//!
use core::fmt;
use core::marker::PhantomData;
use crate::rcc;
use embedded_hal::blocking;
use embedded_hal::prelude::*;
use embedded_hal::serial;
use nb::block;
use crate::gpio::{Const, SetAlternate};
#[cfg(feature = "gpiod")]
use crate::gpio::gpiod;
#[allow(unused)]
#[cfg(feature = "gpioe")]
use crate::gpio::gpioe;
#[allow(unused)]
#[cfg(feature = "gpiof")]
use crate::gpio::gpiof;
#[allow(unused)]
#[cfg(feature = "gpiog")]
use crate::gpio::gpiog;
use crate::gpio::{gpioa, gpiob, gpioc};
use crate::pac::{RCC, USART1, USART2, USART6};
#[cfg(feature = "usart3")]
use crate::pac::USART3;
#[cfg(feature = "uart10")]
use crate::pac::UART10;
#[cfg(feature = "uart4")]
use crate::pac::UART4;
#[cfg(feature = "uart5")]
use crate::pac::UART5;
#[cfg(feature = "uart7")]
use crate::pac::UART7;
#[cfg(feature = "uart8")]
use crate::pac::UART8;
#[cfg(feature = "uart9")]
use crate::pac::UART9;
use crate::gpio::NoPin;
use crate::rcc::Clocks;
use crate::dma::traits::PeriAddress;
/// Serial error
#[non_exhaustive]
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum Error {
/// Framing error
Framing,
/// Noise error
Noise,
/// RX buffer overrun
Overrun,
/// Parity check error
Parity,
}
/// Interrupt event
pub enum Event {
/// New data has been received
Rxne,
/// New data can be sent
Txe,
/// Idle line state detected
Idle,
}
pub mod config {
use crate::time::Bps;
use crate::time::U32Ext;
pub enum WordLength {
DataBits8,
DataBits9,
}
pub enum Parity {
ParityNone,
ParityEven,
ParityOdd,
}
pub enum StopBits {
#[doc = "1 stop bit"]
STOP1,
#[doc = "0.5 stop bits"]
STOP0P5,
#[doc = "2 stop bits"]
STOP2,
#[doc = "1.5 stop bits"]
STOP1P5,
}
pub enum DmaConfig {
None,
Tx,
Rx,
TxRx,
}
pub struct Config {
pub baudrate: Bps,
pub wordlength: WordLength,
pub parity: Parity,
pub stopbits: StopBits,
pub dma: DmaConfig,
}
impl Config {
pub fn baudrate(mut self, baudrate: Bps) -> Self {
self.baudrate = baudrate;
self
}
pub fn parity_none(mut self) -> Self {
self.parity = Parity::ParityNone;
self
}
pub fn parity_even(mut self) -> Self {
self.parity = Parity::ParityEven;
self
}
pub fn parity_odd(mut self) -> Self {
self.parity = Parity::ParityOdd;
self
}
pub fn wordlength_8(mut self) -> Self {
self.wordlength = WordLength::DataBits8;
self
}
pub fn wordlength_9(mut self) -> Self {
self.wordlength = WordLength::DataBits9;
self
}
pub fn stopbits(mut self, stopbits: StopBits) -> Self {
self.stopbits = stopbits;
self
}
}
#[derive(Debug)]
pub struct InvalidConfig;
impl Default for Config {
fn default() -> Config {
let baudrate = 115_200_u32.bps();
Config {
baudrate,
wordlength: WordLength::DataBits8,
parity: Parity::ParityNone,
stopbits: StopBits::STOP1,
dma: DmaConfig::None,
}
}
}
}
pub trait Pins<USART> {}
pub trait PinTx<USART> {
type A;
}
pub trait PinRx<USART> {
type A;
}
impl<USART, TX, RX> Pins<USART> for (TX, RX)
where
TX: PinTx<USART>,
RX: PinRx<USART>,
{
}
/// A filler type for when the Tx pin is unnecessary
pub type NoTx = NoPin;
/// A filler type for when the Rx pin is unnecessary
pub type NoRx = NoPin;
impl<USART> PinTx<USART> for NoPin
where
USART: Instance,
{
type A = Const<0>;
}
impl<USART> PinRx<USART> for NoPin
where
USART: Instance,
{
type A = Const<0>;
}
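// Maps a concrete GPIO pin (with its alternate-function number) to a UART signal.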
macro_rules! pin {
($trait:ident<$USART:ident> for $gpio:ident::$PX:ident<$A:literal>) => {
impl<MODE> $trait<$USART> for $gpio::$PX<MODE> {
type A = Const<$A>;
}
};
}
pin!(PinTx<USART1> for gpioa::PA9<7>);
pin!(PinRx<USART1> for gpioa::PA10<7>);
#[cfg(any(
feature = "stm32f410",
feature = "stm32f411",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f423"
))]
pin!(PinTx<USART1> for gpioa::PA15<7>);
#[cfg(any(
feature = "stm32f410",
feature = "stm32f411",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f423"
))]
pin!(PinRx<USART1> for gpiob::PB3<7>);
pin!(PinTx<USART1> for gpiob::PB6<7>);
pin!(PinRx<USART1> for gpiob::PB7<7>);
pin!(PinTx<USART2> for gpioa::PA2<7>);
pin!(PinRx<USART2> for gpioa::PA3<7>);
#[cfg(any(
feature = "stm32f401",
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f411",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinTx<USART2> for gpiod::PD5<7>);
#[cfg(any(
feature = "stm32f401",
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f411",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinRx<USART2> for gpiod::PD6<7>);
#[cfg(feature = "usart3")]
pin!(PinTx<USART3> for gpiob::PB10<7>);
#[cfg(feature = "usart3")]
pin!(PinRx<USART3> for gpiob::PB11<7>);
#[cfg(any(
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f423",
feature = "stm32f446"
))]
pin!(PinRx<USART3> for gpioc::PC5<7>);
#[cfg(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinTx<USART3> for gpioc::PC10<7>);
#[cfg(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinRx<USART3> for gpioc::PC11<7>);
#[cfg(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinTx<USART3> for gpiod::PD8<7>);
#[cfg(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinRx<USART3> for gpiod::PD9<7>);
#[cfg(feature = "uart4")]
pin!(PinTx<UART4> for gpioa::PA0<8>);
#[cfg(feature = "uart4")]
pin!(PinRx<UART4> for gpioa::PA1<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART4> for gpioa::PA12<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART4> for gpioa::PA11<11>);
#[cfg(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinTx<UART4> for gpioc::PC10<8>);
#[cfg(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinRx<UART4> for gpioc::PC11<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART4> for gpiod::PD1<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART4> for gpiod::PD0<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART4> for gpiod::PD10<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART4> for gpioc::PC11<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART5> for gpiob::PB6<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART5> for gpiob::PB5<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART5> for gpiob::PB9<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART5> for gpiob::PB8<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART5> for gpiob::PB13<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART5> for gpiob::PB12<11>);
#[cfg(feature = "uart5")]
pin!(PinTx<UART5> for gpioc::PC12<8>);
#[cfg(feature = "uart5")]
pin!(PinRx<UART5> for gpiod::PD2<8>);
#[cfg(any(feature = "stm32f446"))]
pin!(PinTx<UART5> for gpioe::PE8<8>);
#[cfg(any(feature = "stm32f446"))]
pin!(PinRx<UART5> for gpioe::PE7<8>);
#[cfg(any(
feature = "stm32f401",
feature = "stm32f410",
feature = "stm32f411",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f423"
))]
pin!(PinTx<USART6> for gpioa::PA11<8>);
#[cfg(any(
feature = "stm32f401",
feature = "stm32f410",
feature = "stm32f411",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f423"
))]
pin!(PinRx<USART6> for gpioa::PA12<8>);
pin!(PinTx<USART6> for gpioc::PC6<8>);
pin!(PinRx<USART6> for gpioc::PC7<8>);
#[cfg(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinTx<USART6> for gpiog::PG14<8>);
#[cfg(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f412",
feature = "stm32f413",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f423",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
pin!(PinRx<USART6> for gpiog::PG9<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART7> for gpioa::PA15<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART7> for gpioa::PA8<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART7> for gpiob::PB4<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART7> for gpiob::PB3<8>);
#[cfg(all(feature = "uart7", feature = "gpioe"))]
pin!(PinTx<UART7> for gpioe::PE8<8>);
#[cfg(all(feature = "uart7", feature = "gpioe"))]
pin!(PinRx<UART7> for gpioe::PE7<8>);
#[cfg(all(feature = "uart7", feature = "gpiof"))]
pin!(PinTx<UART7> for gpiof::PF7<8>);
#[cfg(all(feature = "uart7", feature = "gpiof"))]
pin!(PinRx<UART7> for gpiof::PF6<8>);
#[cfg(all(feature = "uart8", feature = "gpioe"))]
pin!(PinTx<UART8> for gpioe::PE1<8>);
#[cfg(all(feature = "uart8", feature = "gpioe"))]
pin!(PinRx<UART8> for gpioe::PE0<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART8> for gpiof::PF9<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART8> for gpiof::PF8<8>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART9> for gpiod::PD15<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART9> for gpiod::PD14<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART9> for gpiog::PG1<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART9> for gpiog::PG0<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART10> for gpioe::PE3<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART10> for gpioe::PE2<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinTx<UART10> for gpiog::PG12<11>);
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
pin!(PinRx<UART10> for gpiog::PG11<11>);
/// Serial abstraction
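///
/// A minimal usage sketch; `dp`, `clocks`, and the exact GPIO `split()` API are
/// assumptions that vary by chip and board support crate:
///
/// ```ignore
/// let gpioa = dp.GPIOA.split();
/// let serial = Serial::new(
///     dp.USART1,
///     (gpioa.pa9, gpioa.pa10),
///     config::Config::default().baudrate(115_200.bps()),
///     clocks,
/// )
/// .unwrap();
/// let (mut tx, mut rx) = serial.split();
/// ```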
pub struct Serial<USART, PINS, WORD = u8> {
usart: USART,
pins: PINS,
tx: Tx<USART, WORD>,
rx: Rx<USART, WORD>,
}
/// Serial receiver
pub struct Rx<USART, WORD = u8> {
_usart: PhantomData<USART>,
_word: PhantomData<WORD>,
}
/// Serial transmitter
pub struct Tx<USART, WORD = u8> {
_usart: PhantomData<USART>,
_word: PhantomData<WORD>,
}
impl<USART, WORD> Rx<USART, WORD>
where
USART: Instance,
{
fn new() -> Self {
Self {
_usart: PhantomData,
_word: PhantomData,
}
}
/// Start listening for an rx not empty interrupt event
///
/// Note, you will also have to enable the corresponding interrupt
/// in the NVIC to start receiving events.
pub fn listen(&mut self) {
unsafe { (*USART::ptr()).cr1.modify(|_, w| w.rxneie().set_bit()) }
}
/// Stop listening for the rx not empty interrupt event
pub fn unlisten(&mut self) {
unsafe { (*USART::ptr()).cr1.modify(|_, w| w.rxneie().clear_bit()) }
}
/// Start listening for a line idle interrupt event
///
/// Note, you will also have to enable the corresponding interrupt
/// in the NVIC to start receiving events.
pub fn listen_idle(&mut self) {
unsafe { (*USART::ptr()).cr1.modify(|_, w| w.idleie().set_bit()) }
}
/// Stop listening for the line idle interrupt event
pub fn unlisten_idle(&mut self) {
unsafe { (*USART::ptr()).cr1.modify(|_, w| w.idleie().clear_bit()) }
}
/// Return true if the line idle status is set
pub fn is_idle(&self) -> bool {
unsafe { (*USART::ptr()).sr.read().idle().bit_is_set() }
}
/// Return true if the rx register is not empty (and can be read)
pub fn is_rx_not_empty(&self) -> bool {
unsafe { (*USART::ptr()).sr.read().rxne().bit_is_set() }
}
/// Clear idle line interrupt flag
pub fn clear_idle_interrupt(&self) {
unsafe {
let _ = (*USART::ptr()).sr.read();
let _ = (*USART::ptr()).dr.read();
}
}
}
impl<USART, WORD> Tx<USART, WORD>
where
USART: Instance,
{
fn new() -> Self {
Self {
_usart: PhantomData,
_word: PhantomData,
}
}
/// Start listening for a tx empty interrupt event
///
/// Note, you will also have to enable the corresponding interrupt
/// in the NVIC to start receiving events.
pub fn listen(&mut self) {
unsafe { (*USART::ptr()).cr1.modify(|_, w| w.txeie().set_bit()) }
}
/// Stop listening for the tx empty interrupt event
pub fn unlisten(&mut self) {
unsafe { (*USART::ptr()).cr1.modify(|_, w| w.txeie().clear_bit()) }
}
/// Return true if the tx register is empty (and can accept data)
pub fn is_tx_empty(&self) -> bool {
unsafe { (*USART::ptr()).sr.read().txe().bit_is_set() }
}
}
impl<USART, PINS, WORD> AsRef<Tx<USART, WORD>> for Serial<USART, PINS, WORD> {
fn as_ref(&self) -> &Tx<USART, WORD> {
&self.tx
}
}
impl<USART, PINS, WORD> AsRef<Rx<USART, WORD>> for Serial<USART, PINS, WORD> {
fn as_ref(&self) -> &Rx<USART, WORD> {
&self.rx
}
}
impl<USART, PINS, WORD> AsMut<Tx<USART, WORD>> for Serial<USART, PINS, WORD> {
fn as_mut(&mut self) -> &mut Tx<USART, WORD> {
&mut self.tx
}
}
impl<USART, PINS, WORD> AsMut<Rx<USART, WORD>> for Serial<USART, PINS, WORD> {
fn as_mut(&mut self) -> &mut Rx<USART, WORD> {
&mut self.rx
}
}
impl<USART, TX, RX, WORD, const TXA: u8, const RXA: u8> Serial<USART, (TX, RX), WORD>
where
TX: PinTx<USART, A = Const<TXA>> + SetAlternate<TXA>,
RX: PinRx<USART, A = Const<RXA>> + SetAlternate<RXA>,
USART: Instance,
{
/*
StopBits::STOP0P5 and StopBits::STOP1P5 aren't supported when using UART
STOP_A::STOP1 and STOP_A::STOP2 will be used, respectively
*/
pub fn new(
usart: USART,
mut pins: (TX, RX),
config: config::Config,
clocks: Clocks,
) -> Result<Self, config::InvalidConfig> {
use self::config::*;
unsafe {
// NOTE(unsafe) this reference will only be used for atomic writes with no side effects.
let rcc = &(*RCC::ptr());
// Enable clock.
USART::enable(rcc);
USART::reset(rcc);
}
let pclk_freq = USART::get_frequency(&clocks).0;
let baud = config.baudrate.0;
// The frequency to calculate USARTDIV is this:
//
// (Taken from STM32F411xC/E Reference Manual,
// Section 19.3.4, Equation 1)
//
// 16 bit oversample: OVER8 = 0
// 8 bit oversample: OVER8 = 1
//
// USARTDIV = (pclk)
// ------------------------
// 8 x (2 - OVER8) x (baud)
//
// BUT, the USARTDIV has 4 "fractional" bits, which effectively
// means that we need to "correct" the equation as follows:
//
// USARTDIV = (pclk) * 16
// ------------------------
// 8 x (2 - OVER8) x (baud)
//
// When OVER8 is enabled, we can only use the lowest three
// fractional bits, so we'll need to shift those last four bits
// right one bit
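        //
        // Quick sanity check (not from the reference manual): with pclk = 16 MHz
        // and baud = 115_200, the 16-bit oversampling branch applies and
        // div = (16_000_000 + 57_600) / 115_200 = 139 = 0x8B, i.e. a mantissa of
        // 8 and a fraction of 11/16 ~ 8.69, close to the ideal
        // 16e6 / (16 * 115_200) ~ 8.68.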
// Calculate correct baudrate divisor on the fly
let (over8, div) = if (pclk_freq / 16) >= baud {
// We have the ability to oversample to 16 bits, take
// advantage of it.
//
// We also add `baud / 2` to the `pclk_freq` to ensure
// rounding of values to the closest scale, rather than the
// floored behavior of normal integer division.
let div = (pclk_freq + (baud / 2)) / baud;
(false, div)
} else if (pclk_freq / 8) >= baud {
// We are close enough to pclk where we can only
// oversample 8.
//
// See note above regarding `baud` and rounding.
let div = ((pclk_freq * 2) + (baud / 2)) / baud;
            // Ensure that the fractional bits (only 3) are
// right-aligned.
let frac = div & 0xF;
let div = (div & !0xF) | (frac >> 1);
(true, div)
} else {
return Err(config::InvalidConfig);
};
unsafe { (*USART::ptr()).brr.write(|w| w.bits(div)) };
// Reset other registers to disable advanced USART features
unsafe { (*USART::ptr()).cr2.reset() };
unsafe { (*USART::ptr()).cr3.reset() };
// Enable transmission and receiving
// and configure frame
unsafe {
(*USART::ptr()).cr1.write(|w| {
w.ue()
.set_bit()
.over8()
.bit(over8)
.te()
.set_bit()
.re()
.set_bit()
.m()
.bit(match config.wordlength {
WordLength::DataBits8 => false,
WordLength::DataBits9 => true,
})
.pce()
.bit(!matches!(config.parity, Parity::ParityNone))
.ps()
.bit(matches!(config.parity, Parity::ParityOdd))
})
};
match config.dma {
DmaConfig::Tx => unsafe { (*USART::ptr()).cr3.write(|w| w.dmat().enabled()) },
DmaConfig::Rx => unsafe { (*USART::ptr()).cr3.write(|w| w.dmar().enabled()) },
DmaConfig::TxRx => unsafe {
(*USART::ptr())
.cr3
.write(|w| w.dmar().enabled().dmat().enabled())
},
DmaConfig::None => {}
}
pins.0.set_alt_mode();
pins.1.set_alt_mode();
Ok(Serial {
usart,
pins,
tx: Tx::new(),
rx: Rx::new(),
}
.config_stop(config))
}
pub fn release(mut self) -> (USART, (TX, RX)) {
self.pins.0.restore_mode();
self.pins.1.restore_mode();
(self.usart, self.pins)
}
}
impl<USART, TX, WORD, const TXA: u8> Serial<USART, (TX, NoPin), WORD>
where
TX: PinTx<USART, A = Const<TXA>> + SetAlternate<TXA>,
USART: Instance,
{
pub fn tx(
usart: USART,
tx_pin: TX,
config: config::Config,
clocks: Clocks,
) -> Result<Tx<USART, WORD>, config::InvalidConfig> {
Self::new(usart, (tx_pin, NoPin), config, clocks).map(|s| s.split().0)
}
}
impl<USART, RX, WORD, const RXA: u8> Serial<USART, (NoPin, RX), WORD>
where
RX: PinRx<USART, A = Const<RXA>> + SetAlternate<RXA>,
USART: Instance,
{
pub fn rx(
usart: USART,
rx_pin: RX,
config: config::Config,
clocks: Clocks,
) -> Result<Rx<USART, WORD>, config::InvalidConfig> {
Self::new(usart, (NoPin, rx_pin), config, clocks).map(|s| s.split().1)
}
}
impl<USART, PINS, WORD> Serial<USART, PINS, WORD>
where
USART: Instance,
{
/// Starts listening for an interrupt event
///
/// Note, you will also have to enable the corresponding interrupt
/// in the NVIC to start receiving events.
pub fn listen(&mut self, event: Event) {
match event {
Event::Rxne => unsafe { (*USART::ptr()).cr1.modify(|_, w| w.rxneie().set_bit()) },
Event::Txe => unsafe { (*USART::ptr()).cr1.modify(|_, w| w.txeie().set_bit()) },
Event::Idle => unsafe { (*USART::ptr()).cr1.modify(|_, w| w.idleie().set_bit()) },
}
}
/// Stop listening for an interrupt event
pub fn unlisten(&mut self, event: Event) {
match event {
Event::Rxne => unsafe { (*USART::ptr()).cr1.modify(|_, w| w.rxneie().clear_bit()) },
Event::Txe => unsafe { (*USART::ptr()).cr1.modify(|_, w| w.txeie().clear_bit()) },
Event::Idle => unsafe { (*USART::ptr()).cr1.modify(|_, w| w.idleie().clear_bit()) },
}
}
/// Return true if the line idle status is set
pub fn is_idle(&self) -> bool {
unsafe { (*USART::ptr()).sr.read().idle().bit_is_set() }
}
/// Return true if the tx register is empty (and can accept data)
pub fn is_tx_empty(&self) -> bool {
unsafe { (*USART::ptr()).sr.read().txe().bit_is_set() }
}
/// Return true if the tx register is empty (and can accept data)
#[deprecated(since = "0.10.0")]
pub fn is_txe(&self) -> bool {
unsafe { (*USART::ptr()).sr.read().txe().bit_is_set() }
}
/// Return true if the rx register is not empty (and can be read)
pub fn is_rx_not_empty(&self) -> bool {
unsafe { (*USART::ptr()).sr.read().rxne().bit_is_set() }
}
/// Return true if the rx register is not empty (and can be read)
#[deprecated(since = "0.10.0")]
pub fn is_rxne(&self) -> bool {
unsafe { (*USART::ptr()).sr.read().rxne().bit_is_set() }
}
/// Clear idle line interrupt flag
pub fn clear_idle_interrupt(&self) {
unsafe {
let _ = (*USART::ptr()).sr.read();
let _ = (*USART::ptr()).dr.read();
}
}
pub fn split(self) -> (Tx<USART, WORD>, Rx<USART, WORD>) {
(self.tx, self.rx)
}
}
impl<USART, PINS> Serial<USART, PINS, u8>
where
USART: Instance,
{
/// Converts this Serial into a version that can read and write `u16` values instead of `u8`s
///
/// This can be used with a word length of 9 bits.
pub fn with_u16_data(self) -> Serial<USART, PINS, u16> {
Serial {
usart: self.usart,
pins: self.pins,
tx: Tx::new(),
rx: Rx::new(),
}
}
}
impl<USART, PINS> Serial<USART, PINS, u16>
where
USART: Instance,
{
/// Converts this Serial into a version that can read and write `u8` values instead of `u16`s
///
/// This can be used with a word length of 8 bits.
pub fn with_u8_data(self) -> Serial<USART, PINS, u8> {
Serial {
usart: self.usart,
pins: self.pins,
tx: Tx::new(),
rx: Rx::new(),
}
}
}
impl<USART, PINS, WORD> serial::Read<WORD> for Serial<USART, PINS, WORD>
where
USART: Instance,
Rx<USART, WORD>: serial::Read<WORD, Error = Error>,
{
type Error = Error;
fn read(&mut self) -> nb::Result<WORD, Error> {
self.rx.read()
}
}
impl<USART> serial::Read<u8> for Rx<USART, u8>
where
USART: Instance,
{
type Error = Error;
fn read(&mut self) -> nb::Result<u8, Self::Error> {
// Delegate to the Read<u16> implementation, then truncate to 8 bits
Rx::<USART, u16>::new().read().map(|word16| word16 as u8)
}
}
/// Reads 9-bit words from the UART/USART
///
/// If the UART/USART was configured with `WordLength::DataBits9`, the returned value will contain
/// 9 received data bits and all other bits set to zero. Otherwise, the returned value will contain
/// 8 received data bits and all other bits set to zero.
impl<USART> serial::Read<u16> for Rx<USART, u16>
where
USART: Instance,
{
type Error = Error;
fn read(&mut self) -> nb::Result<u16, Error> {
// NOTE(unsafe) atomic read with no side effects
let sr = unsafe { (*USART::ptr()).sr.read() };
// Any error requires the dr to be read to clear
if sr.pe().bit_is_set()
|| sr.fe().bit_is_set()
|| sr.nf().bit_is_set()
|| sr.ore().bit_is_set()
{
unsafe { (*USART::ptr()).dr.read() };
}
Err(if sr.pe().bit_is_set() {
Error::Parity.into()
} else if sr.fe().bit_is_set() {
Error::Framing.into()
} else if sr.nf().bit_is_set() {
Error::Noise.into()
} else if sr.ore().bit_is_set() {
Error::Overrun.into()
} else if sr.rxne().bit_is_set() {
// NOTE(unsafe) atomic read from stateless register
return Ok(unsafe { &*USART::ptr() }.dr.read().dr().bits());
} else {
nb::Error::WouldBlock
})
}
}
unsafe impl<USART> PeriAddress for Rx<USART, u8>
where
USART: Instance,
{
#[inline(always)]
fn address(&self) -> u32 {
&(unsafe { &(*USART::ptr()) }.dr) as *const _ as u32
}
type MemSize = u8;
}
impl<USART, PINS, WORD> serial::Write<WORD> for Serial<USART, PINS, WORD>
where
USART: Instance,
Tx<USART, WORD>: serial::Write<WORD, Error = Error>,
{
type Error = Error;
fn flush(&mut self) -> nb::Result<(), Self::Error> {
self.tx.flush()
}
fn write(&mut self, byte: WORD) -> nb::Result<(), Self::Error> {
self.tx.write(byte)
}
}
unsafe impl<USART> PeriAddress for Tx<USART, u8>
where
USART: Instance,
{
#[inline(always)]
fn address(&self) -> u32 {
&(unsafe { &(*USART::ptr()) }.dr) as *const _ as u32
}
type MemSize = u8;
}
impl<USART> serial::Write<u8> for Tx<USART, u8>
where
USART: Instance,
{
type Error = Error;
fn write(&mut self, word: u8) -> nb::Result<(), Self::Error> {
// Delegate to u16 version
Tx::<USART, u16>::new().write(u16::from(word))
}
fn flush(&mut self) -> nb::Result<(), Self::Error> {
// Delegate to u16 version
Tx::<USART, u16>::new().flush()
}
}
/// Writes 9-bit words to the UART/USART
///
/// If the UART/USART was configured with `WordLength::DataBits9`, the 9 least significant bits will
/// be transmitted and the other 7 bits will be ignored. Otherwise, the 8 least significant bits
/// will be transmitted and the other 8 bits will be ignored.
impl<USART> serial::Write<u16> for Tx<USART, u16>
where
USART: Instance,
{
type Error = Error;
fn flush(&mut self) -> nb::Result<(), Self::Error> {
// NOTE(unsafe) atomic read with no side effects
let sr = unsafe { (*USART::ptr()).sr.read() };
if sr.tc().bit_is_set() {
Ok(())
} else {
Err(nb::Error::WouldBlock)
}
}
fn write(&mut self, word: u16) -> nb::Result<(), Self::Error> {
// NOTE(unsafe) atomic read with no side effects
let sr = unsafe { (*USART::ptr()).sr.read() };
if sr.txe().bit_is_set() {
// NOTE(unsafe) atomic write to stateless register
unsafe { &*USART::ptr() }.dr.write(|w| w.dr().bits(word));
Ok(())
} else {
Err(nb::Error::WouldBlock)
}
}
}
impl<USART> blocking::serial::Write<u16> for Tx<USART, u16>
where
USART: Instance,
{
type Error = Error;
fn bwrite_all(&mut self, buffer: &[u16]) -> Result<(), Self::Error> {
for &b in buffer {
loop {
match self.write(b) {
Err(nb::Error::WouldBlock) => continue,
Err(nb::Error::Other(err)) => return Err(err),
Ok(()) => break,
}
}
}
Ok(())
}
fn bflush(&mut self) -> Result<(), Self::Error> {
loop {
match <Self as serial::Write<u16>>::flush(self) {
Ok(()) => return Ok(()),
Err(nb::Error::WouldBlock) => continue,
Err(nb::Error::Other(err)) => return Err(err),
}
}
}
}
impl<USART> blocking::serial::Write<u8> for Tx<USART, u8>
where
USART: Instance,
{
type Error = Error;
fn bwrite_all(&mut self, bytes: &[u8]) -> Result<(), Self::Error> {
for &b in bytes {
loop {
match self.write(b) {
Err(nb::Error::WouldBlock) => continue,
Err(nb::Error::Other(err)) => return Err(err),
Ok(()) => break,
}
}
}
Ok(())
}
fn bflush(&mut self) -> Result<(), Self::Error> {
loop {
match <Self as serial::Write<u8>>::flush(self) {
Ok(()) => return Ok(()),
Err(nb::Error::WouldBlock) => continue,
Err(nb::Error::Other(err)) => return Err(err),
}
}
}
}
impl<USART, PINS> blocking::serial::Write<u16> for Serial<USART, PINS, u16>
where
USART: Instance,
{
type Error = Error;
fn bwrite_all(&mut self, bytes: &[u16]) -> Result<(), Self::Error> {
self.tx.bwrite_all(bytes)
}
fn bflush(&mut self) -> Result<(), Self::Error> {
self.tx.bflush()
}
}
impl<USART, PINS> blocking::serial::Write<u8> for Serial<USART, PINS, u8>
where
USART: Instance,
{
type Error = Error;
fn bwrite_all(&mut self, bytes: &[u8]) -> Result<(), Self::Error> {
self.tx.bwrite_all(bytes)
}
fn bflush(&mut self) -> Result<(), Self::Error> {
self.tx.bflush()
}
}
impl<USART, PINS, WORD> Serial<USART, PINS, WORD>
where
USART: Instance,
{
fn config_stop(self, config: config::Config) -> Self {
self.usart.set_stopbits(config.stopbits);
self
}
}
#[cfg(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
))]
use crate::pac::uart4 as uart_base;
#[cfg(not(any(
feature = "stm32f405",
feature = "stm32f407",
feature = "stm32f415",
feature = "stm32f417",
feature = "stm32f427",
feature = "stm32f429",
feature = "stm32f437",
feature = "stm32f439",
feature = "stm32f446",
feature = "stm32f469",
feature = "stm32f479"
)))]
use crate::pac::usart1 as uart_base;
// Implemented by all USART instances
pub trait Instance: crate::Sealed + rcc::Enable + rcc::Reset + rcc::GetBusFreq {
#[doc(hidden)]
fn ptr() -> *const uart_base::RegisterBlock;
#[doc(hidden)]
fn set_stopbits(&self, bits: config::StopBits);
}
macro_rules! halUsart {
($USARTX:ty: ($usartX:ident)) => {
impl Instance for $USARTX {
fn ptr() -> *const uart_base::RegisterBlock {
<$USARTX>::ptr() as *const _
}
fn set_stopbits(&self, bits: config::StopBits) {
use crate::pac::usart1::cr2::STOP_A;
use config::StopBits;
self.cr2.write(|w| {
w.stop().variant(match bits {
StopBits::STOP0P5 => STOP_A::STOP0P5,
StopBits::STOP1 => STOP_A::STOP1,
StopBits::STOP1P5 => STOP_A::STOP1P5,
StopBits::STOP2 => STOP_A::STOP2,
})
});
}
}
impl<USART, TX, RX, const TXA: u8, const RXA: u8> Serial<USART, (TX, RX)>
where
TX: PinTx<USART, A = Const<TXA>> + SetAlternate<TXA>,
RX: PinRx<USART, A = Const<RXA>> + SetAlternate<RXA>,
USART: Instance,
{
#[deprecated(since = "0.10.0")]
pub fn $usartX(
usart: USART,
pins: (TX, RX),
config: config::Config,
clocks: Clocks,
) -> Result<Self, config::InvalidConfig> {
Self::new(usart, pins, config, clocks)
}
}
};
}
// TODO: fix stm32f413 UARTs
#[cfg(any(
feature = "uart4",
feature = "uart5",
feature = "uart7",
feature = "uart8",
feature = "uart9",
feature = "uart10"
))]
#[cfg(not(any(feature = "stm32f413", feature = "stm32f423",)))]
macro_rules! halUart {
($USARTX:ty: ($usartX:ident)) => {
impl Instance for $USARTX {
fn ptr() -> *const uart_base::RegisterBlock {
<$USARTX>::ptr() as *const _
}
fn set_stopbits(&self, bits: config::StopBits) {
use crate::pac::uart4::cr2::STOP_A;
use config::StopBits;
self.cr2.write(|w| {
w.stop().variant(match bits {
StopBits::STOP0P5 => STOP_A::STOP1,
StopBits::STOP1 => STOP_A::STOP1,
StopBits::STOP1P5 => STOP_A::STOP2,
StopBits::STOP2 => STOP_A::STOP2,
})
});
}
}
impl<USART, TX, RX, const TXA: u8, const RXA: u8> Serial<USART, (TX, RX)>
where
TX: PinTx<USART, A = Const<TXA>> + SetAlternate<TXA>,
RX: PinRx<USART, A = Const<RXA>> + SetAlternate<RXA>,
USART: Instance,
{
#[deprecated(since = "0.10.0")]
pub fn $usartX(
usart: USART,
pins: (TX, RX),
config: config::Config,
clocks: Clocks,
) -> Result<Self, config::InvalidConfig> {
Self::new(usart, pins, config, clocks)
}
}
};
}
halUsart! { USART1: (usart1) }
halUsart! { USART2: (usart2) }
halUsart! { USART6: (usart6) }
#[cfg(feature = "usart3")]
halUsart! { USART3: (usart3) }
#[cfg(feature = "uart4")]
#[cfg(not(any(feature = "stm32f413", feature = "stm32f423")))]
halUart! { UART4: (uart4) }
#[cfg(feature = "uart5")]
#[cfg(not(any(feature = "stm32f413", feature = "stm32f423")))]
halUart! { UART5: (uart5) }
#[cfg(feature = "uart4")]
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
halUsart! { UART4: (uart4) }
#[cfg(feature = "uart5")]
#[cfg(any(feature = "stm32f413", feature = "stm32f423"))]
halUsart! { UART5: (uart5) }
#[cfg(feature = "uart7")]
halUsart! { UART7: (uart7) }
#[cfg(feature = "uart8")]
halUsart! { UART8: (uart8) }
#[cfg(feature = "uart9")]
halUsart! { UART9: (uart9) }
#[cfg(feature = "uart10")]
halUsart! { UART10: (uart10) }
impl<USART, PINS> fmt::Write for Serial<USART, PINS>
where
Tx<USART>: serial::Write<u8>,
{
fn write_str(&mut self, s: &str) -> fmt::Result {
self.tx.write_str(s)
}
}
impl<USART> fmt::Write for Tx<USART>
where
Tx<USART>: serial::Write<u8>,
{
fn write_str(&mut self, s: &str) -> fmt::Result {
s.bytes()
.try_for_each(|c| block!(self.write(c)))
.map_err(|_| fmt::Error)
}
}
//! Mutators that can handle recursive types.
//!
//! There are two main mutators:
//! 1. [`RecursiveMutator`] is the top-level mutator for the recursive type
//! 2. [`RecurToMutator`] is the mutator used at points of recursion. It is essentially a weak reference to [`RecursiveMutator`]
//!
//! In practice, you will want to use the [`make_mutator!`](crate::make_mutator) procedural macro to create recursive mutators.
//! For example:
//! ```
//! # #![feature(no_coverage)]
//! use fuzzcheck::mutators::{option::OptionMutator, boxed::BoxMutator};
//! use fuzzcheck::mutators::recursive::{RecursiveMutator, RecurToMutator};
//! use fuzzcheck::DefaultMutator;
//! use fuzzcheck::make_mutator;
//!
//! #[derive(Clone)]
//! struct S {
//! content: bool,
//! next: Option<Box<S>> // the type recurses here
//! }
//!
//! make_mutator! {
//! name: SMutator,
//! recursive: true, // this is important
//! default: false,
//! type: struct S {
//! content: bool,
//! // We need to specify a concrete sub-mutator for this field to avoid creating an infinite type.
//! // We use the standard Option and Box mutators, but replace what would be SMutator<M0, M1> by
//! // RecurToMutator<SMutator<M0>>, which indicates that this is a point of recursion
//! // and the mutator should be a weak reference to a RecursiveMutator
//! // The M0 part refers to the mutator for the `content: bool` field.
//! #[field_mutator(OptionMutator<Box<S>, BoxMutator<RecurToMutator<SMutator<M0>>>>)]
//! next: Option<Box<S>>
//! }
//! }
//! # fn main() {
//!
//! let s_mutator = RecursiveMutator::new(|mutator| {
//! SMutator::new(
//! /*content_mutator:*/ bool::default_mutator(),
//! /*next_mutator:*/ OptionMutator::new(BoxMutator::new(RecurToMutator::from(mutator)))
//! )
//! });
//! // s_mutator impl Mutator<S>
//! # }
//! ```
use crate::Mutator;
use std::{
any::Any,
fmt::Debug,
rc::{Rc, Weak},
};
/// The ArbitraryStep that is used for recursive mutators
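///
/// It starts out as `Default` and is initialized lazily on the first call to
/// `ordered_arbitrary`, once the weak reference to the inner mutator can be upgraded.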
#[derive(Clone, Debug, PartialEq)]
pub enum RecursingArbitraryStep<AS> {
Default,
Initialized(AS),
}
impl<AS> Default for RecursingArbitraryStep<AS> {
#[no_coverage]
fn default() -> Self {
Self::Default
}
}
/**
A wrapper that allows a mutator to call itself recursively.
For example, it is used to provide mutators for types such as:
```
struct S {
content: bool,
// to mutate this field, a mutator must be able to recursively call itself
next: Option<Box<S>>
}
```
`RecursiveMutator` is only the top-level type. It must be used in conjunction
with [`RecurToMutator`](crate::mutators::recursive::RecurToMutator) at points of recursion.
For example:
```
# #![feature(no_coverage)]
use fuzzcheck::DefaultMutator;
use fuzzcheck::mutators::{option::OptionMutator, boxed::BoxMutator};
use fuzzcheck::mutators::recursive::{RecursiveMutator, RecurToMutator};
# use fuzzcheck::make_mutator;
# #[derive(Clone)]
# struct S {
# content: bool,
# next: Option<Box<S>>
# }
# make_mutator! {
# name: SMutator,
# recursive: true,
# default: false,
# type: struct S {
# content: bool,
# #[field_mutator(OptionMutator<Box<S>, BoxMutator<RecurToMutator<SMutator<M0>>>>)]
# next: Option<Box<S>>
# }
# }
let s_mutator = RecursiveMutator::new(|mutator| {
SMutator::new(
/*content_mutator:*/ bool::default_mutator(),
/*next_mutator:*/ OptionMutator::new(BoxMutator::new(RecurToMutator::from(mutator)))
)
});
```
*/
pub struct RecursiveMutator<M> {
pub mutator: Rc<M>,
rng: fastrand::Rng,
}
impl<M> RecursiveMutator<M> {
/// Create a new `RecursiveMutator` using a weak reference to itself.
#[no_coverage]
pub fn new(data_fn: impl FnOnce(&Weak<M>) -> M) -> Self {
Self {
mutator: Rc::new_cyclic(data_fn),
rng: fastrand::Rng::new(),
}
}
}
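// Illustrative sketch (hypothetical names): `Rc::new_cyclic` hands `data_fn` a
// `&Weak<M>` that points at the `Rc` still under construction, which is what
// lets the inner mutator keep a back-reference to the `RecursiveMutator`
// without forming a strong reference cycle:
//
//     let mutator = RecursiveMutator::new(|weak_self| {
//         // `RecurToMutator::from(weak_self)` clones the weak pointer
//         build_inner_mutator(RecurToMutator::from(weak_self))
//     });
//
// where `build_inner_mutator` is a placeholder for code that assembles the
// concrete sub-mutator, as in the module-level example.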
/// A mutator that defers to a weak reference of a
/// [`RecursiveMutator`](crate::mutators::recursive::RecursiveMutator)
pub struct RecurToMutator<M> {
reference: Weak<M>,
}
impl<M> From<&Weak<M>> for RecurToMutator<M> {
#[no_coverage]
fn from(reference: &Weak<M>) -> Self {
Self {
reference: reference.clone(),
}
}
}
impl<T, M> Mutator<T> for RecurToMutator<M>
where
M: Mutator<T>,
T: Clone + 'static,
{
#[doc(hidden)]
type Cache = <M as Mutator<T>>::Cache;
#[doc(hidden)]
type MutationStep = <M as Mutator<T>>::MutationStep;
#[doc(hidden)]
type ArbitraryStep = RecursingArbitraryStep<<M as Mutator<T>>::ArbitraryStep>;
#[doc(hidden)]
type UnmutateToken = <M as Mutator<T>>::UnmutateToken;
#[doc(hidden)]
#[no_coverage]
fn default_arbitrary_step(&self) -> Self::ArbitraryStep {
RecursingArbitraryStep::Default
}
#[doc(hidden)]
#[no_coverage]
fn validate_value(&self, value: &T) -> Option<Self::Cache> {
self.reference.upgrade().unwrap().validate_value(value)
}
#[doc(hidden)]
#[no_coverage]
fn default_mutation_step(&self, value: &T, cache: &Self::Cache) -> Self::MutationStep {
self.reference.upgrade().unwrap().default_mutation_step(value, cache)
}
#[doc(hidden)]
#[no_coverage]
fn max_complexity(&self) -> f64 {
std::f64::INFINITY
}
#[doc(hidden)]
#[no_coverage]
fn min_complexity(&self) -> f64 {
        // Ideally this returns the min complexity of the wrapped mutator; if
        // the weak reference is already gone, there is no better estimate.
        if let Some(m) = self.reference.upgrade() {
            m.as_ref().min_complexity()
        } else {
            1.0 // placeholder; the referenced mutator has been dropped
        }
}
#[doc(hidden)]
#[no_coverage]
fn complexity(&self, value: &T, cache: &Self::Cache) -> f64 {
self.reference.upgrade().unwrap().complexity(value, cache)
}
#[doc(hidden)]
#[no_coverage]
fn ordered_arbitrary(&self, step: &mut Self::ArbitraryStep, max_cplx: f64) -> Option<(T, f64)> {
match step {
RecursingArbitraryStep::Default => {
let mutator = self.reference.upgrade().unwrap();
let inner_step = mutator.default_arbitrary_step();
*step = RecursingArbitraryStep::Initialized(inner_step);
self.ordered_arbitrary(step, max_cplx)
}
RecursingArbitraryStep::Initialized(inner_step) => self
.reference
.upgrade()
.unwrap()
.ordered_arbitrary(inner_step, max_cplx),
}
}
#[doc(hidden)]
#[no_coverage]
fn random_arbitrary(&self, max_cplx: f64) -> (T, f64) {
self.reference.upgrade().unwrap().random_arbitrary(max_cplx)
}
#[doc(hidden)]
#[no_coverage]
fn ordered_mutate(
&self,
value: &mut T,
cache: &mut Self::Cache,
step: &mut Self::MutationStep,
max_cplx: f64,
) -> Option<(Self::UnmutateToken, f64)> {
self.reference
.upgrade()
.unwrap()
.ordered_mutate(value, cache, step, max_cplx)
}
#[doc(hidden)]
#[no_coverage]
fn random_mutate(&self, value: &mut T, cache: &mut Self::Cache, max_cplx: f64) -> (Self::UnmutateToken, f64) {
self.reference.upgrade().unwrap().random_mutate(value, cache, max_cplx)
}
#[doc(hidden)]
#[no_coverage]
fn unmutate(&self, value: &mut T, cache: &mut Self::Cache, t: Self::UnmutateToken) {
self.reference.upgrade().unwrap().unmutate(value, cache, t)
}
#[doc(hidden)]
type RecursingPartIndex = bool;
#[doc(hidden)]
#[no_coverage]
fn default_recursing_part_index(&self, _value: &T, _cache: &Self::Cache) -> Self::RecursingPartIndex {
false
}
#[doc(hidden)]
#[no_coverage]
fn recursing_part<'a, V, N>(&self, parent: &N, value: &'a T, index: &mut Self::RecursingPartIndex) -> Option<&'a V>
where
V: Clone + 'static,
N: Mutator<V>,
{
if *index {
None
} else {
*index = true;
let parent_any: &dyn Any = parent;
if let Some(parent) = parent_any.downcast_ref::<RecursiveMutator<M>>() {
if Rc::downgrade(&parent.mutator).ptr_eq(&self.reference) {
let v: &dyn Any = value;
let v = v.downcast_ref::<V>().unwrap();
Some(v)
} else {
None
}
} else {
None
}
}
}
}
#[derive(Clone, Debug, PartialEq)]
pub struct RecursiveMutatorMutationStep<MS, RPI> {
recursing_part_index: Option<RPI>,
mutation_step: MS,
}
pub enum RecursiveMutatorUnmutateToken<T, UnmutateToken> {
Replace(T),
Token(UnmutateToken),
}
impl<M, T: Clone + 'static> Mutator<T> for RecursiveMutator<M>
where
M: Mutator<T>,
{
type Cache = M::Cache;
type MutationStep = RecursiveMutatorMutationStep<M::MutationStep, M::RecursingPartIndex>;
type ArbitraryStep = M::ArbitraryStep;
type UnmutateToken = RecursiveMutatorUnmutateToken<T, M::UnmutateToken>;
#[doc(hidden)]
#[no_coverage]
fn default_arbitrary_step(&self) -> Self::ArbitraryStep {
self.mutator.default_arbitrary_step()
}
#[doc(hidden)]
#[no_coverage]
fn validate_value(&self, value: &T) -> Option<Self::Cache> {
self.mutator.validate_value(value)
}
#[doc(hidden)]
#[no_coverage]
fn default_mutation_step(&self, value: &T, cache: &Self::Cache) -> Self::MutationStep {
let mutation_step = self.mutator.default_mutation_step(value, cache);
let recursing_part_index = Some(self.default_recursing_part_index(value, cache));
RecursiveMutatorMutationStep {
mutation_step,
recursing_part_index,
}
}
#[doc(hidden)]
#[no_coverage]
fn max_complexity(&self) -> f64 {
self.mutator.max_complexity()
}
#[doc(hidden)]
#[no_coverage]
fn min_complexity(&self) -> f64 {
self.mutator.min_complexity()
}
#[doc(hidden)]
#[no_coverage]
fn complexity(&self, value: &T, cache: &Self::Cache) -> f64 {
self.mutator.complexity(value, cache)
}
#[doc(hidden)]
#[no_coverage]
fn ordered_arbitrary(&self, step: &mut Self::ArbitraryStep, max_cplx: f64) -> Option<(T, f64)> {
self.mutator.ordered_arbitrary(step, max_cplx)
}
#[doc(hidden)]
#[no_coverage]
fn random_arbitrary(&self, max_cplx: f64) -> (T, f64) {
self.mutator.random_arbitrary(max_cplx)
}
#[doc(hidden)]
#[no_coverage]
fn ordered_mutate(
&self,
value: &mut T,
cache: &mut Self::Cache,
step: &mut Self::MutationStep,
max_cplx: f64,
) -> Option<(Self::UnmutateToken, f64)> {
if let Some(recursing_part_index) = &mut step.recursing_part_index {
if let Some(new) = self
.mutator
.recursing_part::<T, Self>(self, value, recursing_part_index)
{
let mut new = new.clone();
let cache = self.validate_value(&new).unwrap();
let cplx = self.complexity(&new, &cache);
std::mem::swap(value, &mut new);
let token = RecursiveMutatorUnmutateToken::Replace(new);
Some((token, cplx))
} else {
step.recursing_part_index = None;
self.ordered_mutate(value, cache, step, max_cplx)
}
        } else {
            self.mutator
                .ordered_mutate(value, cache, &mut step.mutation_step, max_cplx)
                .map(|(token, cplx)| (RecursiveMutatorUnmutateToken::Token(token), cplx))
        }
}
#[doc(hidden)]
#[no_coverage]
fn random_mutate(&self, value: &mut T, cache: &mut Self::Cache, max_cplx: f64) -> (Self::UnmutateToken, f64) {
if self.rng.usize(..100) == 0 {
let mut recursing_part_index = self.default_recursing_part_index(value, cache);
if let Some(new) = self
.mutator
.recursing_part::<T, Self>(self, value, &mut recursing_part_index)
{
let mut new = new.clone();
let cache = self.validate_value(&new).unwrap();
let cplx = self.complexity(&new, &cache);
std::mem::swap(value, &mut new);
let token = RecursiveMutatorUnmutateToken::Replace(new);
return (token, cplx);
}
}
let (token, cplx) = self.mutator.random_mutate(value, cache, max_cplx);
let token = RecursiveMutatorUnmutateToken::Token(token);
(token, cplx)
}
#[doc(hidden)]
#[no_coverage]
fn unmutate(&self, value: &mut T, cache: &mut Self::Cache, t: Self::UnmutateToken) {
match t {
RecursiveMutatorUnmutateToken::Replace(x) => {
let _ = std::mem::replace(value, x);
}
RecursiveMutatorUnmutateToken::Token(t) => self.mutator.unmutate(value, cache, t),
}
}
#[doc(hidden)]
type RecursingPartIndex = M::RecursingPartIndex;
#[doc(hidden)]
#[no_coverage]
fn default_recursing_part_index(&self, value: &T, cache: &Self::Cache) -> Self::RecursingPartIndex {
self.mutator.default_recursing_part_index(value, cache)
}
#[doc(hidden)]
#[no_coverage]
fn recursing_part<'a, V, N>(&self, parent: &N, value: &'a T, index: &mut Self::RecursingPartIndex) -> Option<&'a V>
where
V: Clone + 'static,
N: Mutator<V>,
{
self.mutator.recursing_part::<V, N>(parent, value, index)
}
}
| 31.257336 | 128 | 0.589658 |
9cb59a463c3c41a3d4f8146ef51268171a1d2ebe | 4,397 | use std::ffi::{c_void, CString};
use ultralight_sys::{
ulBitmapErase, ulBitmapGetBpp, ulBitmapGetFormat, ulBitmapGetHeight, ulBitmapGetRowBytes,
ulBitmapGetSize, ulBitmapGetWidth, ulBitmapIsEmpty, ulBitmapLockPixels, ulBitmapOwnsPixels,
ulBitmapSwapRedBlueChannels, ulBitmapUnlockPixels, ulBitmapWritePNG, ulCreateBitmap,
ulCreateBitmapFromCopy, ulCreateBitmapFromPixels, ulCreateEmptyBitmap, ulDestroyBitmap,
ULBitmap, ULBitmapFormat,
};
pub struct Bitmap {
pub raw: ULBitmap,
created: bool,
}
pub type BitmapFormat = ULBitmapFormat;
impl Bitmap {
    /// Create a bitmap with the given dimensions and pixel format.
pub fn new(width: u32, height: u32, format: BitmapFormat) -> Self {
unsafe {
Bitmap {
raw: ulCreateBitmap(width, height, format),
created: true,
}
}
}
    /// Create a bitmap from an existing pixel buffer.
pub fn new_from_pixels(
width: u32,
height: u32,
format: BitmapFormat,
row_bytes: u32,
pixels: *mut c_void,
size: u64,
should_copy: bool,
) -> Self {
unsafe {
Bitmap {
raw: ulCreateBitmapFromPixels(
width,
height,
format,
row_bytes,
pixels,
size,
should_copy,
),
created: true,
}
}
}
    /// Create an empty bitmap.
pub fn new_empty() -> Self {
unsafe {
Bitmap {
raw: ulCreateEmptyBitmap(),
created: true,
}
}
}
/// Get the width in pixels.
pub fn width(&self) -> u32 {
unsafe { ulBitmapGetWidth(self.raw) }
}
/// Get the height in pixels.
pub fn height(&self) -> u32 {
unsafe { ulBitmapGetHeight(self.raw) }
}
/// Get the pixel format.
pub fn format(&self) -> BitmapFormat {
unsafe { ulBitmapGetFormat(self.raw) }
}
/// Get the number of bytes per row.
pub fn row_bytes(&self) -> u32 {
unsafe { ulBitmapGetRowBytes(self.raw) }
}
/// Get the size in bytes of the underlying pixel buffer.
pub fn size(&self) -> u64 {
unsafe { ulBitmapGetSize(self.raw) }
}
/// Whether or not this bitmap is empty.
pub fn is_empty(&self) -> bool {
unsafe { ulBitmapIsEmpty(self.raw) }
}
/// Get the bytes per pixel.
pub fn bpp(&self) -> u32 {
unsafe { ulBitmapGetBpp(self.raw) }
}
/// Whether or not this bitmap owns its own pixel buffer.
pub fn owns_pixels(&self) -> bool {
unsafe { ulBitmapOwnsPixels(self.raw) }
}
    /// Lock the pixels for reading/writing; they remain locked until the returned guard is dropped.
pub fn lock_pixels(&self) -> BitmapPixelsGuard {
unsafe {
BitmapPixelsGuard {
pixels: ulBitmapLockPixels(self.raw),
bitmap: self,
}
}
}
/// Write bitmap to a PNG on disk.
pub fn write_to_png(&self, path: &str) -> bool {
unsafe {
let cstr = CString::new(path).unwrap();
ulBitmapWritePNG(self.raw, cstr.as_ptr())
}
}
/// Reset bitmap pixels to 0.
pub fn erase(&mut self) {
unsafe {
ulBitmapErase(self.raw);
}
}
    /// Converts a BGRA bitmap to an RGBA bitmap (and vice versa) by swapping the red and blue channels.
pub fn swap_red_blue(&mut self) {
unsafe {
ulBitmapSwapRedBlueChannels(self.raw);
}
}
}
impl Clone for Bitmap {
fn clone(&self) -> Self {
unsafe {
Bitmap {
raw: ulCreateBitmapFromCopy(self.raw),
created: true,
}
}
}
}
impl From<ULBitmap> for Bitmap {
fn from(raw: ULBitmap) -> Self {
Bitmap {
raw,
created: false,
}
}
}
impl Drop for Bitmap {
fn drop(&mut self) {
unsafe {
if self.created {
ulDestroyBitmap(self.raw);
}
}
}
}
pub struct BitmapPixelsGuard<'a> {
pub pixels: *mut c_void,
bitmap: &'a Bitmap,
}
impl Drop for BitmapPixelsGuard<'_> {
fn drop(&mut self) {
unsafe { ulBitmapUnlockPixels(self.bitmap.raw) }
}
}
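// Illustrative usage sketch (the pixel-format variant name is assumed from the
// Ultralight C API; error handling omitted):
//
//     let mut bmp = Bitmap::new(64, 64, BitmapFormat::kBitmapFormat_BGRA8_UNORM_SRGB);
//     {
//         let guard = bmp.lock_pixels(); // pixels unlock when `guard` drops
//         // write raw pixel data through `guard.pixels` (`bmp.size()` bytes)
//     }
//     bmp.swap_red_blue(); // BGRA -> RGBA
//     assert!(bmp.write_to_png("out.png"));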
| 24.702247 | 104 | 0.541506 |
67a6221df017cb08a9b8e801967e7971a49a2e12 | 155 | // error-pattern: attempted dynamic environment-capture
fn foo(x: int) {
fn mth() {
fn bar() { log(debug, x); }
}
}
fn main() { foo(2); }
| 17.222222 | 55 | 0.541935 |
2f0c668f87a7e7724a966a34bc4ce437e6b7430f | 5,435 | use crate::{assert_eq, *};
use currency::Amount;
use frame_support::transactional;
use redeem::RedeemRequestStatus;
pub const USER: [u8; 32] = ALICE;
pub const VAULT: [u8; 32] = BOB;
pub const USER_BTC_ADDRESS: BtcAddress = BtcAddress::P2PKH(H160([2u8; 20]));
pub struct ExecuteRedeemBuilder {
redeem_id: H256,
redeem: RedeemRequest<AccountId32, u32, u128>,
amount: Amount<Runtime>,
submitter: AccountId32,
inclusion_fee: Amount<Runtime>,
}
impl ExecuteRedeemBuilder {
pub fn new(redeem_id: H256) -> Self {
let redeem = RedeemPallet::get_open_redeem_request_from_id(&redeem_id).unwrap();
Self {
redeem_id,
redeem: redeem.clone(),
amount: redeem.amount_btc(),
submitter: redeem.redeemer,
inclusion_fee: wrapped(0),
}
}
pub fn with_amount(&mut self, amount: Amount<Runtime>) -> &mut Self {
self.amount = amount;
self
}
pub fn with_submitter(&mut self, submitter: [u8; 32]) -> &mut Self {
self.submitter = account_of(submitter);
self
}
pub fn with_inclusion_fee(&mut self, inclusion_fee: Amount<Runtime>) -> &mut Self {
self.inclusion_fee = inclusion_fee;
self
}
#[transactional]
pub fn execute(&self) -> DispatchResultWithPostInfo {
// send the btc from the user to the vault
let (_tx_id, _height, proof, raw_tx, _) = TransactionGenerator::new()
.with_address(self.redeem.btc_address)
.with_amount(self.amount)
.with_op_return(Some(self.redeem_id))
.mine();
SecurityPallet::set_active_block_number(SecurityPallet::active_block_number() + CONFIRMATIONS);
        // Alice executes the redeem request by confirming the btc transaction
Call::Redeem(RedeemCall::execute_redeem(self.redeem_id, proof, raw_tx))
.dispatch(origin_of(self.submitter.clone()))
}
pub fn assert_execute(&self) {
assert_ok!(self.execute());
}
pub fn assert_noop(&self, error: RedeemError) {
assert_noop!(self.execute(), error);
}
}
pub fn setup_cancelable_redeem(user: [u8; 32], vault: [u8; 32], issued_tokens: Amount<Runtime>) -> H256 {
let redeem_id = setup_redeem(issued_tokens, user, vault);
// expire request without transferring btc
mine_blocks((RedeemPallet::redeem_period() + 99) / 100 + 1);
SecurityPallet::set_active_block_number(RedeemPallet::redeem_period() + 1 + 1);
redeem_id
}
pub fn set_redeem_state(
currency_id: CurrencyId,
vault_to_be_redeemed: Amount<Runtime>,
user_to_redeem: Amount<Runtime>,
user: [u8; 32],
vault: [u8; 32],
) {
let burned_tokens = user_to_redeem - FeePallet::get_redeem_fee(&user_to_redeem).unwrap();
let vault_issued_tokens = vault_to_be_redeemed + burned_tokens;
CoreVaultData::force_to(
vault,
CoreVaultData {
issued: vault_issued_tokens,
to_be_redeemed: vault_to_be_redeemed,
..CoreVaultData::get_default(currency_id)
},
);
let mut user_state = UserData::get(user);
(*user_state.balances.get_mut(&INTERBTC).unwrap()).free = user_to_redeem;
    UserData::force_to(user, user_state);
}
pub fn setup_redeem(issued_tokens: Amount<Runtime>, user: [u8; 32], vault: [u8; 32]) -> H256 {
    // Alice requests to redeem `issued_tokens` from Bob
assert_ok!(Call::Redeem(RedeemCall::request_redeem(
issued_tokens.amount(),
USER_BTC_ADDRESS,
account_of(vault)
))
.dispatch(origin_of(account_of(user))));
// assert that request happened and extract the id
assert_redeem_request_event()
}
// asserts that a redeem request event happened and extracts its id for further testing
pub fn assert_redeem_request_event() -> H256 {
let events = SystemModule::events();
let ids = events
.iter()
.filter_map(|r| match r.event {
Event::Redeem(RedeemEvent::RequestRedeem(id, _, _, _, _, _, _, _)) => Some(id),
_ => None,
})
.collect::<Vec<H256>>();
assert_eq!(ids.len(), 1);
ids[0]
}
pub fn execute_redeem(redeem_id: H256) {
ExecuteRedeemBuilder::new(redeem_id).assert_execute();
}
pub fn cancel_redeem(redeem_id: H256, redeemer: [u8; 32], reimburse: bool) {
assert_ok!(Call::Redeem(RedeemCall::cancel_redeem(redeem_id, reimburse)).dispatch(origin_of(account_of(redeemer))));
}
pub fn assert_redeem_error(
redeem_id: H256,
user_btc_address: BtcAddress,
amount: Amount<Runtime>,
return_data: H256,
current_block_number: u32,
error: BTCRelayError,
) -> u32 {
// send the btc from the vault to the user
let (_tx_id, _tx_block_height, merkle_proof, raw_tx) =
generate_transaction_and_mine(user_btc_address, amount, Some(return_data));
SecurityPallet::set_active_block_number(current_block_number + 1 + CONFIRMATIONS);
assert_noop!(
Call::Redeem(RedeemCall::execute_redeem(redeem_id, merkle_proof.clone(), raw_tx))
.dispatch(origin_of(account_of(VAULT))),
error
);
    current_block_number + 1 + CONFIRMATIONS
}
pub fn check_redeem_status(user: [u8; 32], status: RedeemRequestStatus) {
let redeems = RedeemPallet::get_redeem_requests_for_account(account_of(user));
assert_eq!(redeems.len(), 1);
let (_, redeem) = redeems[0].clone();
assert_eq!(redeem.status, status);
}
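// Illustrative test-flow sketch built from the helpers above (the amount and
// fee values are made up for the example):
//
//     let redeem_id = setup_redeem(wrapped(10_000), USER, VAULT);
//     ExecuteRedeemBuilder::new(redeem_id)
//         .with_inclusion_fee(wrapped(100))
//         .assert_execute();
//     check_redeem_status(USER, RedeemRequestStatus::Completed);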
| 32.740964 | 120 | 0.662741 |
9021de5372e59ba59727ddb938a75014886bf118 | 3,778 | #![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! Liquid crystal display controller
//!
//! Used by: stm32l4x3, stm32l4x6
#[cfg(not(feature = "nosync"))]
pub use crate::stm32l4::peripherals::lcd_v2::Instance;
pub use crate::stm32l4::peripherals::lcd_v2::{RegisterBlock, ResetValues};
pub use crate::stm32l4::peripherals::lcd_v2::{
CLR, CR, FCR, RAM_COM0, RAM_COM1, RAM_COM2, RAM_COM3, RAM_COM4, RAM_COM5, RAM_COM6, RAM_COM7,
SR,
};
/// Access functions for the LCD peripheral instance
pub mod LCD {
use super::ResetValues;
#[cfg(not(feature = "nosync"))]
use super::Instance;
#[cfg(not(feature = "nosync"))]
const INSTANCE: Instance = Instance {
addr: 0x40002400,
_marker: ::core::marker::PhantomData,
};
/// Reset values for each field in LCD
pub const reset: ResetValues = ResetValues {
CR: 0x00000000,
FCR: 0x00000000,
SR: 0x00000020,
CLR: 0x00000000,
RAM_COM0: 0x00000000,
RAM_COM1: 0x00000000,
RAM_COM2: 0x00000000,
RAM_COM3: 0x00000000,
RAM_COM4: 0x00000000,
RAM_COM5: 0x00000000,
RAM_COM6: 0x00000000,
RAM_COM7: 0x00000000,
};
#[cfg(not(feature = "nosync"))]
#[allow(renamed_and_removed_lints)]
#[allow(private_no_mangle_statics)]
#[no_mangle]
static mut LCD_TAKEN: bool = false;
/// Safe access to LCD
///
/// This function returns `Some(Instance)` if this instance is not
/// currently taken, and `None` if it is. This ensures that if you
/// do get `Some(Instance)`, you are ensured unique access to
/// the peripheral and there cannot be data races (unless other
/// code uses `unsafe`, of course). You can then pass the
/// `Instance` around to other functions as required. When you're
/// done with it, you can call `release(instance)` to return it.
///
/// `Instance` itself dereferences to a `RegisterBlock`, which
/// provides access to the peripheral's registers.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn take() -> Option<Instance> {
external_cortex_m::interrupt::free(|_| unsafe {
if LCD_TAKEN {
None
} else {
LCD_TAKEN = true;
Some(INSTANCE)
}
})
}
/// Release exclusive access to LCD
///
/// This function allows you to return an `Instance` so that it
/// is available to `take()` again. This function will panic if
/// you return a different `Instance` or if this instance is not
/// already taken.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn release(inst: Instance) {
external_cortex_m::interrupt::free(|_| unsafe {
if LCD_TAKEN && inst.addr == INSTANCE.addr {
LCD_TAKEN = false;
} else {
panic!("Released a peripheral which was not taken");
}
});
}
/// Unsafely steal LCD
///
/// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
/// state.
#[cfg(not(feature = "nosync"))]
#[inline]
pub unsafe fn steal() -> Instance {
LCD_TAKEN = true;
INSTANCE
}
}
/// Raw pointer to LCD
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const LCD: *const RegisterBlock = 0x40002400 as *const _;
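// Illustrative usage sketch of the take/release protocol documented above:
//
//     if let Some(lcd) = LCD::take() {
//         // `lcd` dereferences to the `RegisterBlock`, so its registers can
//         // be read and written through it here.
//         LCD::release(lcd);
//     }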
| 32.568966 | 97 | 0.619375 |
9cbaf35acd33fe978ae9fa0e73496a941352c249 | 82,099 | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This query borrow-checks the MIR to (further) ensure it is not broken.
use borrow_check::nll::region_infer::RegionInferenceContext;
use rustc::hir;
use rustc::hir::Node;
use rustc::hir::def_id::DefId;
use rustc::hir::map::definitions::DefPathData;
use rustc::infer::InferCtxt;
use rustc::lint::builtin::UNUSED_MUT;
use rustc::middle::borrowck::SignalledError;
use rustc::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind};
use rustc::mir::{ClearCrossCrate, Local, Location, Mir, Mutability, Operand, Place};
use rustc::mir::{Field, Projection, ProjectionElem, Rvalue, Statement, StatementKind};
use rustc::mir::{Terminator, TerminatorKind};
use rustc::ty::query::Providers;
use rustc::ty::{self, TyCtxt};
use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, Level};
use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::graph::dominators::Dominators;
use smallvec::SmallVec;
use std::rc::Rc;
use std::collections::BTreeMap;
use syntax_pos::Span;
use dataflow::indexes::BorrowIndex;
use dataflow::move_paths::{HasMoveData, LookupResult, MoveData, MoveError, MovePathIndex};
use dataflow::move_paths::indexes::MoveOutIndex;
use dataflow::Borrows;
use dataflow::DataflowResultsConsumer;
use dataflow::FlowAtLocation;
use dataflow::MoveDataParamEnv;
use dataflow::{do_dataflow, DebugFormatted};
use dataflow::EverInitializedPlaces;
use dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
use util::borrowck_errors::{BorrowckErrors, Origin};
use self::borrow_set::{BorrowData, BorrowSet};
use self::flows::Flows;
use self::location::LocationTable;
use self::prefixes::PrefixSet;
use self::MutateMode::{JustWrite, WriteAndRead};
use self::mutability_errors::AccessKind;
use self::path_utils::*;
crate mod borrow_set;
mod error_reporting;
mod flows;
mod location;
mod move_errors;
mod mutability_errors;
mod path_utils;
crate mod place_ext;
mod places_conflict;
mod prefixes;
mod used_muts;
pub(crate) mod nll;
pub fn provide(providers: &mut Providers) {
*providers = Providers {
mir_borrowck,
..*providers
};
}
fn mir_borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> BorrowCheckResult<'tcx> {
let input_mir = tcx.mir_validated(def_id);
debug!("run query mir_borrowck: {}", tcx.item_path_str(def_id));
    // Return early if we are not supposed to use the MIR borrow checker for this function.
    let mut return_early = !tcx.has_attr(def_id, "rustc_mir") && !tcx.use_mir_borrowck();
if tcx.is_struct_constructor(def_id) {
// We are not borrow checking the automatically generated struct constructors
// because we want to accept structs such as this (taken from the `linked-hash-map`
// crate):
// ```rust
// struct Qey<Q: ?Sized>(Q);
// ```
// MIR of this struct constructor looks something like this:
// ```rust
// fn Qey(_1: Q) -> Qey<Q>{
// let mut _0: Qey<Q>; // return place
//
// bb0: {
// (_0.0: Q) = move _1; // bb0[0]: scope 0 at src/main.rs:1:1: 1:26
// return; // bb0[1]: scope 0 at src/main.rs:1:1: 1:26
// }
// }
// ```
// The problem here is that `(_0.0: Q) = move _1;` is valid only if `Q` is
// of statically known size, which is not known to be true because of the
// `Q: ?Sized` constraint. However, it is true because the constructor can be
// called only when `Q` is of statically known size.
return_early = true;
}
if return_early {
return BorrowCheckResult {
closure_requirements: None,
used_mut_upvars: SmallVec::new(),
};
}
let opt_closure_req = tcx.infer_ctxt().enter(|infcx| {
let input_mir: &Mir = &input_mir.borrow();
do_mir_borrowck(&infcx, input_mir, def_id)
});
debug!("mir_borrowck done");
opt_closure_req
}
fn do_mir_borrowck<'a, 'gcx, 'tcx>(
infcx: &InferCtxt<'a, 'gcx, 'tcx>,
input_mir: &Mir<'gcx>,
def_id: DefId,
) -> BorrowCheckResult<'gcx> {
debug!("do_mir_borrowck(def_id = {:?})", def_id);
let tcx = infcx.tcx;
let attributes = tcx.get_attrs(def_id);
let param_env = tcx.param_env(def_id);
let id = tcx
.hir
.as_local_node_id(def_id)
.expect("do_mir_borrowck: non-local DefId");
// Replace all regions with fresh inference variables. This
// requires first making our own copy of the MIR. This copy will
// be modified (in place) to contain non-lexical lifetimes. It
// will have a lifetime tied to the inference context.
let mut mir: Mir<'tcx> = input_mir.clone();
let free_regions = nll::replace_regions_in_mir(infcx, def_id, param_env, &mut mir);
let mir = &mir; // no further changes
let location_table = &LocationTable::new(mir);
let mut errors_buffer = Vec::new();
let (move_data, move_errors): (MoveData<'tcx>, Option<Vec<(Place<'tcx>, MoveError<'tcx>)>>) =
match MoveData::gather_moves(mir, tcx) {
Ok(move_data) => (move_data, None),
Err((move_data, move_errors)) => (move_data, Some(move_errors)),
};
    let mdpe = MoveDataParamEnv {
        move_data,
        param_env,
    };
let body_id = match tcx.def_key(def_id).disambiguated_data.data {
DefPathData::StructCtor | DefPathData::EnumVariant(_) => None,
_ => Some(tcx.hir.body_owned_by(id)),
};
let dead_unwinds = BitSet::new_empty(mir.basic_blocks().len());
let mut flow_inits = FlowAtLocation::new(do_dataflow(
tcx,
mir,
id,
&attributes,
&dead_unwinds,
MaybeInitializedPlaces::new(tcx, mir, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let locals_are_invalidated_at_exit = match tcx.hir.body_owner_kind(id) {
hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => false,
hir::BodyOwnerKind::Fn => true,
};
let borrow_set = Rc::new(BorrowSet::build(
tcx, mir, locals_are_invalidated_at_exit, &mdpe.move_data));
// If we are in non-lexical mode, compute the non-lexical lifetimes.
let (regioncx, polonius_output, opt_closure_req) = nll::compute_regions(
infcx,
def_id,
free_regions,
mir,
location_table,
param_env,
&mut flow_inits,
&mdpe.move_data,
&borrow_set,
&mut errors_buffer,
);
// The various `flow_*` structures can be large. We drop `flow_inits` here
// so it doesn't overlap with the others below. This reduces peak memory
// usage significantly on some benchmarks.
drop(flow_inits);
let regioncx = Rc::new(regioncx);
let flow_borrows = FlowAtLocation::new(do_dataflow(
tcx,
mir,
id,
&attributes,
&dead_unwinds,
Borrows::new(tcx, mir, regioncx.clone(), def_id, body_id, &borrow_set),
|rs, i| DebugFormatted::new(&rs.location(i)),
));
let flow_uninits = FlowAtLocation::new(do_dataflow(
tcx,
mir,
id,
&attributes,
&dead_unwinds,
MaybeUninitializedPlaces::new(tcx, mir, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let flow_ever_inits = FlowAtLocation::new(do_dataflow(
tcx,
mir,
id,
&attributes,
&dead_unwinds,
EverInitializedPlaces::new(tcx, mir, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().inits[i]),
));
let movable_generator = match tcx.hir.get(id) {
Node::Expr(&hir::Expr {
node: hir::ExprKind::Closure(.., Some(hir::GeneratorMovability::Static)),
..
}) => false,
_ => true,
};
let dominators = mir.dominators();
let mut mbcx = MirBorrowckCtxt {
infcx,
mir,
mir_def_id: def_id,
move_data: &mdpe.move_data,
location_table,
movable_generator,
locals_are_invalidated_at_exit,
access_place_error_reported: FxHashSet(),
reservation_error_reported: FxHashSet(),
move_error_reported: BTreeMap::new(),
uninitialized_error_reported: FxHashSet(),
errors_buffer,
nonlexical_regioncx: regioncx,
used_mut: FxHashSet(),
used_mut_upvars: SmallVec::new(),
borrow_set,
dominators,
};
let mut state = Flows::new(
flow_borrows,
flow_uninits,
flow_ever_inits,
polonius_output,
);
if let Some(errors) = move_errors {
mbcx.report_move_errors(errors);
}
mbcx.analyze_results(&mut state); // entry point for DataflowResultsConsumer
// For each non-user used mutable variable, check if it's been assigned from
// a user-declared local. If so, then put that local into the used_mut set.
// Note that this set is expected to be small - only upvars from closures
// would have a chance of erroneously adding non-user-defined mutable vars
// to the set.
let temporary_used_locals: FxHashSet<Local> = mbcx
.used_mut
.iter()
.filter(|&local| !mbcx.mir.local_decls[*local].is_user_variable.is_some())
.cloned()
.collect();
mbcx.gather_used_muts(temporary_used_locals);
debug!("mbcx.used_mut: {:?}", mbcx.used_mut);
let used_mut = mbcx.used_mut;
for local in mbcx
.mir
.mut_vars_and_args_iter()
.filter(|local| !used_mut.contains(local))
{
if let ClearCrossCrate::Set(ref vsi) = mbcx.mir.source_scope_local_data {
let local_decl = &mbcx.mir.local_decls[local];
// Skip implicit `self` argument for closures
if local.index() == 1 && tcx.is_closure(mbcx.mir_def_id) {
continue;
}
// Skip over locals that begin with an underscore or have no name
match local_decl.name {
Some(name) => if name.as_str().starts_with("_") {
continue;
},
None => continue,
}
let span = local_decl.source_info.span;
if span.compiler_desugaring_kind().is_some() {
// If the `mut` arises as part of a desugaring, we should ignore it.
continue;
}
let mut err = tcx.struct_span_lint_node(
UNUSED_MUT,
vsi[local_decl.source_info.scope].lint_root,
span,
"variable does not need to be mutable",
);
let mut_span = tcx.sess.source_map().span_until_non_whitespace(span);
err.span_suggestion_short_with_applicability(
mut_span,
"remove this `mut`",
String::new(),
Applicability::MachineApplicable);
err.buffer(&mut mbcx.errors_buffer);
}
}
// Buffer any move errors that we collected and de-duplicated.
for (_, (_, diag)) in mbcx.move_error_reported {
diag.buffer(&mut mbcx.errors_buffer);
}
    if !mbcx.errors_buffer.is_empty() {
mbcx.errors_buffer.sort_by_key(|diag| diag.span.primary_span());
if tcx.migrate_borrowck() {
match tcx.borrowck(def_id).signalled_any_error {
SignalledError::NoErrorsSeen => {
// if AST-borrowck signalled no errors, then
// downgrade all the buffered MIR-borrowck errors
// to warnings.
for err in &mut mbcx.errors_buffer {
if err.is_error() {
err.level = Level::Warning;
err.warn("This error has been downgraded to a warning \
for backwards compatibility with previous releases.\n\
It represents potential unsoundness in your code.\n\
This warning will become a hard error in the future.");
}
}
}
SignalledError::SawSomeError => {
// if AST-borrowck signalled a (cancelled) error,
// then we will just emit the buffered
// MIR-borrowck errors as normal.
}
}
}
for diag in mbcx.errors_buffer.drain(..) {
DiagnosticBuilder::new_diagnostic(mbcx.infcx.tcx.sess.diagnostic(), diag).emit();
}
}
let result = BorrowCheckResult {
closure_requirements: opt_closure_req,
used_mut_upvars: mbcx.used_mut_upvars,
};
debug!("do_mir_borrowck: result = {:#?}", result);
result
}
pub struct MirBorrowckCtxt<'cx, 'gcx: 'tcx, 'tcx: 'cx> {
infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
mir: &'cx Mir<'tcx>,
mir_def_id: DefId,
move_data: &'cx MoveData<'tcx>,
/// Map from MIR `Location` to `LocationIndex`; created
/// when MIR borrowck begins.
location_table: &'cx LocationTable,
movable_generator: bool,
    /// This keeps track of whether local variables are freed when the function
/// exits even without a `StorageDead`, which appears to be the case for
/// constants.
///
/// I'm not sure this is the right approach - @eddyb could you try and
/// figure this out?
locals_are_invalidated_at_exit: bool,
/// This field keeps track of when borrow errors are reported in the access_place function
/// so that there is no duplicate reporting. This field cannot also be used for the conflicting
/// borrow errors that is handled by the `reservation_error_reported` field as the inclusion
/// of the `Span` type (while required to mute some errors) stops the muting of the reservation
/// errors.
access_place_error_reported: FxHashSet<(Place<'tcx>, Span)>,
/// This field keeps track of when borrow conflict errors are reported
/// for reservations, so that we don't report seemingly duplicate
/// errors for corresponding activations
///
/// FIXME: Ideally this would be a set of BorrowIndex, not Places,
/// but it is currently inconvenient to track down the BorrowIndex
/// at the time we detect and report a reservation error.
reservation_error_reported: FxHashSet<Place<'tcx>>,
    /// This field keeps track of move errors that are to be reported for given move indices.
///
/// There are situations where many errors can be reported for a single move out (see #53807)
/// and we want only the best of those errors.
///
/// The `report_use_of_moved_or_uninitialized` function checks this map and replaces the
/// diagnostic (if there is one) if the `Place` of the error being reported is a prefix of the
    /// `Place` of the previously stored diagnostic. This happens instead of buffering the error. Once
/// all move errors have been reported, any diagnostics in this map are added to the buffer
/// to be emitted.
///
/// `BTreeMap` is used to preserve the order of insertions when iterating. This is necessary
/// when errors in the map are being re-added to the error buffer so that errors with the
/// same primary span come out in a consistent order.
move_error_reported: BTreeMap<Vec<MoveOutIndex>, (Place<'tcx>, DiagnosticBuilder<'cx>)>,
/// This field keeps track of errors reported in the checking of uninitialized variables,
/// so that we don't report seemingly duplicate errors.
uninitialized_error_reported: FxHashSet<Place<'tcx>>,
/// Errors to be reported buffer
errors_buffer: Vec<Diagnostic>,
/// This field keeps track of all the local variables that are declared mut and are mutated.
/// Used for the warning issued by an unused mutable local variable.
used_mut: FxHashSet<Local>,
/// If the function we're checking is a closure, then we'll need to report back the list of
/// mutable upvars that have been used. This field keeps track of them.
used_mut_upvars: SmallVec<[Field; 8]>,
/// Non-lexical region inference context, if NLL is enabled. This
/// contains the results from region inference and lets us e.g.
/// find out which CFG points are contained in each borrow region.
nonlexical_regioncx: Rc<RegionInferenceContext<'tcx>>,
/// The set of borrows extracted from the MIR
borrow_set: Rc<BorrowSet<'tcx>>,
/// Dominators for MIR
dominators: Dominators<BasicBlock>,
}
// Check that:
// 1. assignments are always made to mutable locations (FIXME: does that still really go here?)
// 2. loans made in overlapping scopes do not conflict
// 3. assignments do not affect things loaned out as immutable
// 4. moves do not affect things loaned out in any way
impl<'cx, 'gcx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
type FlowState = Flows<'cx, 'gcx, 'tcx>;
fn mir(&self) -> &'cx Mir<'tcx> {
self.mir
}
fn visit_block_entry(&mut self, bb: BasicBlock, flow_state: &Self::FlowState) {
debug!("MirBorrowckCtxt::process_block({:?}): {}", bb, flow_state);
}
fn visit_statement_entry(
&mut self,
location: Location,
stmt: &Statement<'tcx>,
flow_state: &Self::FlowState,
) {
debug!(
"MirBorrowckCtxt::process_statement({:?}, {:?}): {}",
location, stmt, flow_state
);
let span = stmt.source_info.span;
self.check_activations(location, span, flow_state);
match stmt.kind {
StatementKind::Assign(ref lhs, ref rhs) => {
self.consume_rvalue(
ContextKind::AssignRhs.new(location),
(rhs, span),
location,
flow_state,
);
self.mutate_place(
ContextKind::AssignLhs.new(location),
(lhs, span),
Shallow(None),
JustWrite,
flow_state,
);
}
StatementKind::FakeRead(_, ref place) => {
// Read for match doesn't access any memory and is used to
// assert that a place is safe and live. So we don't have to
// do any checks here.
//
// FIXME: Remove check that the place is initialized. This is
// needed for now because matches don't have never patterns yet.
// So this is the only place we prevent
// let x: !;
// match x {};
// from compiling.
self.check_if_path_or_subpath_is_moved(
ContextKind::FakeRead.new(location),
InitializationRequiringAction::Use,
(place, span),
flow_state,
);
}
StatementKind::SetDiscriminant {
ref place,
variant_index: _,
} => {
self.mutate_place(
ContextKind::SetDiscrim.new(location),
(place, span),
Shallow(Some(ArtificialField::Discriminant)),
JustWrite,
flow_state,
);
}
StatementKind::InlineAsm {
ref asm,
ref outputs,
ref inputs,
} => {
let context = ContextKind::InlineAsm.new(location);
for (o, output) in asm.outputs.iter().zip(outputs.iter()) {
if o.is_indirect {
// FIXME(eddyb) indirect inline asm outputs should
                        // be encoded through MIR place derefs instead.
self.access_place(
context,
(output, o.span),
(Deep, Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
flow_state,
);
self.check_if_path_or_subpath_is_moved(
context,
InitializationRequiringAction::Use,
(output, o.span),
flow_state,
);
} else {
self.mutate_place(
context,
(output, o.span),
if o.is_rw { Deep } else { Shallow(None) },
if o.is_rw { WriteAndRead } else { JustWrite },
flow_state,
);
}
}
for input in inputs.iter() {
self.consume_operand(context, (input, span), flow_state);
}
}
StatementKind::EndRegion(ref _rgn) => {
// ignored when consuming results (update to
// flow_state already handled).
}
StatementKind::Nop
| StatementKind::AscribeUserType(..)
| StatementKind::Validate(..)
| StatementKind::StorageLive(..) => {
// `Nop`, `AscribeUserType`, `Validate`, and `StorageLive` are irrelevant
// to borrow check.
}
StatementKind::StorageDead(local) => {
self.access_place(
ContextKind::StorageDead.new(location),
(&Place::Local(local), span),
(Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
flow_state,
);
}
}
}
fn visit_terminator_entry(
&mut self,
location: Location,
term: &Terminator<'tcx>,
flow_state: &Self::FlowState,
) {
let loc = location;
debug!(
"MirBorrowckCtxt::process_terminator({:?}, {:?}): {}",
location, term, flow_state
);
let span = term.source_info.span;
self.check_activations(location, span, flow_state);
match term.kind {
TerminatorKind::SwitchInt {
ref discr,
switch_ty: _,
values: _,
targets: _,
} => {
self.consume_operand(ContextKind::SwitchInt.new(loc), (discr, span), flow_state);
}
TerminatorKind::Drop {
location: ref drop_place,
target: _,
unwind: _,
} => {
let gcx = self.infcx.tcx.global_tcx();
// Compute the type with accurate region information.
let drop_place_ty = drop_place.ty(self.mir, self.infcx.tcx);
// Erase the regions.
let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty)
.to_ty(self.infcx.tcx);
// "Lift" into the gcx -- once regions are erased, this type should be in the
// global arenas; this "lift" operation basically just asserts that is true, but
// that is useful later.
let drop_place_ty = gcx.lift(&drop_place_ty).unwrap();
debug!("visit_terminator_drop \
loc: {:?} term: {:?} drop_place: {:?} drop_place_ty: {:?} span: {:?}",
loc, term, drop_place, drop_place_ty, span);
self.access_place(
ContextKind::Drop.new(loc),
(drop_place, span),
(AccessDepth::Drop, Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
flow_state,
);
}
TerminatorKind::DropAndReplace {
location: ref drop_place,
value: ref new_value,
target: _,
unwind: _,
} => {
self.mutate_place(
ContextKind::DropAndReplace.new(loc),
(drop_place, span),
Deep,
JustWrite,
flow_state,
);
self.consume_operand(
ContextKind::DropAndReplace.new(loc),
(new_value, span),
flow_state,
);
}
TerminatorKind::Call {
ref func,
ref args,
ref destination,
cleanup: _,
from_hir_call: _,
} => {
self.consume_operand(ContextKind::CallOperator.new(loc), (func, span), flow_state);
for arg in args {
self.consume_operand(
ContextKind::CallOperand.new(loc),
(arg, span),
flow_state,
);
}
if let Some((ref dest, _ /*bb*/)) = *destination {
self.mutate_place(
ContextKind::CallDest.new(loc),
(dest, span),
Deep,
JustWrite,
flow_state,
);
}
}
TerminatorKind::Assert {
ref cond,
expected: _,
ref msg,
target: _,
cleanup: _,
} => {
self.consume_operand(ContextKind::Assert.new(loc), (cond, span), flow_state);
use rustc::mir::interpret::EvalErrorKind::BoundsCheck;
if let BoundsCheck { ref len, ref index } = *msg {
self.consume_operand(ContextKind::Assert.new(loc), (len, span), flow_state);
self.consume_operand(ContextKind::Assert.new(loc), (index, span), flow_state);
}
}
TerminatorKind::Yield {
ref value,
resume: _,
drop: _,
} => {
self.consume_operand(ContextKind::Yield.new(loc), (value, span), flow_state);
if self.movable_generator {
// Look for any active borrows to locals
let borrow_set = self.borrow_set.clone();
flow_state.with_outgoing_borrows(|borrows| {
for i in borrows {
let borrow = &borrow_set[i];
self.check_for_local_borrow(borrow, span);
}
});
}
}
TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
// Returning from the function implicitly kills storage for all locals and statics.
// Often, the storage will already have been killed by an explicit
// StorageDead, but we don't always emit those (notably on unwind paths),
// so this "extra check" serves as a kind of backup.
let borrow_set = self.borrow_set.clone();
flow_state.with_outgoing_borrows(|borrows| {
for i in borrows {
let borrow = &borrow_set[i];
let context = ContextKind::StorageDead.new(loc);
self.check_for_invalidation_at_exit(context, borrow, span);
}
});
}
TerminatorKind::Goto { target: _ }
| TerminatorKind::Abort
| TerminatorKind::Unreachable
| TerminatorKind::FalseEdges {
real_target: _,
imaginary_targets: _,
}
| TerminatorKind::FalseUnwind {
real_target: _,
unwind: _,
} => {
// no data used, thus irrelevant to borrowck
}
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum MutateMode {
JustWrite,
WriteAndRead,
}
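// Note: within this module, `WriteAndRead` currently only arises for inline-asm
// outputs marked read-write (`o.is_rw` in `visit_statement_entry`), which must
// therefore already be initialized; ordinary assignment destinations are
// checked as `JustWrite`.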
use self::ReadOrWrite::{Activation, Read, Reservation, Write};
use self::AccessDepth::{Deep, Shallow};
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ArtificialField {
Discriminant,
ArrayLength,
ShallowBorrow,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum AccessDepth {
/// From the RFC: "A *shallow* access means that the immediate
/// fields reached at P are accessed, but references or pointers
/// found within are not dereferenced. Right now, the only access
/// that is shallow is an assignment like `x = ...;`, which would
/// be a *shallow write* of `x`."
Shallow(Option<ArtificialField>),
/// From the RFC: "A *deep* access means that all data reachable
/// through the given place may be invalidated or accesses by
/// this action."
Deep,
/// Access is Deep only when there is a Drop implementation that
/// can reach the data behind the reference.
Drop,
}
/// Kind of access to a value: read or write
/// (For informational purposes only)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ReadOrWrite {
/// From the RFC: "A *read* means that the existing data may be
/// read, but will not be changed."
Read(ReadKind),
/// From the RFC: "A *write* means that the data may be mutated to
/// new values or otherwise invalidated (for example, it could be
/// de-initialized, as in a move operation).
Write(WriteKind),
/// For two-phase borrows, we distinguish a reservation (which is treated
/// like a Read) from an activation (which is treated like a write), and
/// each of those is furthermore distinguished from Reads/Writes above.
Reservation(WriteKind),
Activation(WriteKind, BorrowIndex),
}
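// Illustrative sketch of the user-facing pattern that motivates the
// Reservation/Activation split above (example code, not compiler code):
//
//     let mut v = vec![0];
//     v.push(v.len());
//     //     ^^^^^^^ the implicit `&mut v` for `push` is *reserved* before the
//     //     argument is evaluated, the shared read `v.len()` then happens,
//     //     and the mutable borrow is only *activated* at the call itself.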
/// Kind of read access to a value
/// (For informational purposes only)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ReadKind {
Borrow(BorrowKind),
Copy,
}
/// Kind of write access to a value
/// (For informational purposes only)
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum WriteKind {
StorageDeadOrDrop,
MutableBorrow(BorrowKind),
Mutate,
Move,
}
/// When checking permissions for a place access, this flag is used to indicate that an immutable
/// local place can be mutated.
///
/// FIXME: @nikomatsakis suggested that this flag could be removed with the following modifications:
/// - Merge `check_access_permissions()` and `check_if_reassignment_to_immutable_state()`
/// - Split `is_mutable()` into `is_assignable()` (can be directly assigned) and
/// `is_declared_mutable()`
/// - Take flow state into consideration in `is_assignable()` for local variables
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum LocalMutationIsAllowed {
Yes,
/// We want use of immutable upvars to cause a "write to immutable upvar"
    /// error, not a "reassignment" error.
ExceptUpvars,
No,
}
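// Illustrative sketch for `ExceptUpvars` (user code; error wording is
// approximate): assigning to an immutable variable captured by a closure
// should report the capture as the problem, not a plain reassignment:
//
//     let x = 0;
//     let mut c = || x = 1; // error: cannot assign to `x`, as it is not
//                           // declared as mutable (an upvar error, not
//                           // "cannot assign twice to immutable variable")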
#[derive(Copy, Clone, Debug)]
enum InitializationRequiringAction {
Update,
Borrow,
MatchOn,
Use,
Assignment,
}
struct RootPlace<'d, 'tcx: 'd> {
place: &'d Place<'tcx>,
is_local_mutation_allowed: LocalMutationIsAllowed,
}
impl InitializationRequiringAction {
fn as_noun(self) -> &'static str {
match self {
InitializationRequiringAction::Update => "update",
InitializationRequiringAction::Borrow => "borrow",
InitializationRequiringAction::MatchOn => "use", // no good noun
InitializationRequiringAction::Use => "use",
InitializationRequiringAction::Assignment => "assign",
}
}
fn as_verb_in_past_tense(self) -> &'static str {
match self {
InitializationRequiringAction::Update => "updated",
InitializationRequiringAction::Borrow => "borrowed",
InitializationRequiringAction::MatchOn => "matched on",
InitializationRequiringAction::Use => "used",
InitializationRequiringAction::Assignment => "assigned",
}
}
}
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
/// Checks an access to the given place to see if it is allowed. Examines the set of borrows
/// that are in scope, as well as which paths have been initialized, to ensure that (a) the
/// place is initialized and (b) it is not borrowed in some way that would prevent this
/// access.
///
/// Returns true if an error is reported, false otherwise.
fn access_place(
&mut self,
context: Context,
place_span: (&Place<'tcx>, Span),
kind: (AccessDepth, ReadOrWrite),
is_local_mutation_allowed: LocalMutationIsAllowed,
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
let (sd, rw) = kind;
if let Activation(_, borrow_index) = rw {
if self.reservation_error_reported.contains(&place_span.0) {
debug!(
"skipping access_place for activation of invalid reservation \
place: {:?} borrow_index: {:?}",
place_span.0, borrow_index
);
return;
}
}
        // Check is_empty() first because it's the common case; doing it that
        // way, we avoid the clone() call.
if !self.access_place_error_reported.is_empty() &&
self
.access_place_error_reported
.contains(&(place_span.0.clone(), place_span.1))
{
debug!(
"access_place: suppressing error place_span=`{:?}` kind=`{:?}`",
place_span, kind
);
return;
}
let mutability_error =
self.check_access_permissions(
place_span,
rw,
is_local_mutation_allowed,
flow_state,
context.loc,
);
let conflict_error =
self.check_access_for_conflict(context, place_span, sd, rw, flow_state);
if conflict_error || mutability_error {
debug!(
"access_place: logging error place_span=`{:?}` kind=`{:?}`",
place_span, kind
);
self.access_place_error_reported
.insert((place_span.0.clone(), place_span.1));
}
}
fn check_access_for_conflict(
&mut self,
context: Context,
place_span: (&Place<'tcx>, Span),
sd: AccessDepth,
rw: ReadOrWrite,
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) -> bool {
debug!(
"check_access_for_conflict(context={:?}, place_span={:?}, sd={:?}, rw={:?})",
context, place_span, sd, rw,
);
let mut error_reported = false;
let tcx = self.infcx.tcx;
let mir = self.mir;
let location = self.location_table.start_index(context.loc);
let borrow_set = self.borrow_set.clone();
each_borrow_involving_path(
self,
tcx,
mir,
context,
(sd, place_span.0),
&borrow_set,
flow_state.borrows_in_scope(location),
|this, borrow_index, borrow| match (rw, borrow.kind) {
// Obviously an activation is compatible with its own
// reservation (or even prior activating uses of same
// borrow); so don't check if they interfere.
//
                // NOTE: *reservations* do conflict with themselves,
                // so we aren't injecting unsoundness with this check.
(Activation(_, activating), _) if activating == borrow_index => {
debug!(
"check_access_for_conflict place_span: {:?} sd: {:?} rw: {:?} \
skipping {:?} b/c activation of same borrow_index",
place_span,
sd,
rw,
(borrow_index, borrow),
);
Control::Continue
}
(Read(_), BorrowKind::Shared) | (Reservation(..), BorrowKind::Shared)
| (Read(_), BorrowKind::Shallow) | (Reservation(..), BorrowKind::Shallow) => {
Control::Continue
}
(Write(WriteKind::Move), BorrowKind::Shallow) => {
// Handled by initialization checks.
Control::Continue
}
(Read(kind), BorrowKind::Unique) | (Read(kind), BorrowKind::Mut { .. }) => {
// Reading from mere reservations of mutable-borrows is OK.
if !is_active(&this.dominators, borrow, context.loc) {
assert!(allow_two_phase_borrow(&this.infcx.tcx, borrow.kind));
return Control::Continue;
}
match kind {
ReadKind::Copy => {
error_reported = true;
this.report_use_while_mutably_borrowed(context, place_span, borrow)
}
ReadKind::Borrow(bk) => {
error_reported = true;
this.report_conflicting_borrow(context, place_span, bk, &borrow)
}
}
Control::Break
}
(Reservation(kind), BorrowKind::Unique)
| (Reservation(kind), BorrowKind::Mut { .. })
| (Activation(kind, _), _)
| (Write(kind), _) => {
match rw {
Reservation(_) => {
debug!(
"recording invalid reservation of \
place: {:?}",
place_span.0
);
this.reservation_error_reported.insert(place_span.0.clone());
}
Activation(_, activating) => {
debug!(
"observing check_place for activation of \
borrow_index: {:?}",
activating
);
}
Read(..) | Write(..) => {}
}
match kind {
WriteKind::MutableBorrow(bk) => {
error_reported = true;
this.report_conflicting_borrow(context, place_span, bk, &borrow)
}
WriteKind::StorageDeadOrDrop => {
error_reported = true;
this.report_borrowed_value_does_not_live_long_enough(
context,
borrow,
place_span,
Some(kind))
}
WriteKind::Mutate => {
error_reported = true;
this.report_illegal_mutation_of_borrowed(context, place_span, borrow)
}
WriteKind::Move => {
error_reported = true;
this.report_move_out_while_borrowed(context, place_span, &borrow)
}
}
Control::Break
}
},
);
error_reported
}
fn mutate_place(
&mut self,
context: Context,
place_span: (&Place<'tcx>, Span),
kind: AccessDepth,
mode: MutateMode,
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
// Write of P[i] or *P, or WriteAndRead of any P, requires P init'd.
match mode {
MutateMode::WriteAndRead => {
self.check_if_path_or_subpath_is_moved(
context,
InitializationRequiringAction::Update,
place_span,
flow_state,
);
}
MutateMode::JustWrite => {
self.check_if_assigned_path_is_moved(context, place_span, flow_state);
}
}
        // Special case: you can assign an immutable local variable
// (e.g., `x = ...`) so long as it has never been initialized
// before (at this point in the flow).
if let &Place::Local(local) = place_span.0 {
if let Mutability::Not = self.mir.local_decls[local].mutability {
// check for reassignments to immutable local variables
self.check_if_reassignment_to_immutable_state(
context,
local,
place_span,
flow_state,
);
return;
}
}
// Otherwise, use the normal access permission rules.
self.access_place(
context,
place_span,
(kind, Write(WriteKind::Mutate)),
LocalMutationIsAllowed::No,
flow_state,
);
}
fn consume_rvalue(
&mut self,
context: Context,
(rvalue, span): (&Rvalue<'tcx>, Span),
_location: Location,
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
match *rvalue {
Rvalue::Ref(_ /*rgn*/, bk, ref place) => {
let access_kind = match bk {
BorrowKind::Shallow => {
(Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
},
BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
BorrowKind::Unique | BorrowKind::Mut { .. } => {
let wk = WriteKind::MutableBorrow(bk);
if allow_two_phase_borrow(&self.infcx.tcx, bk) {
(Deep, Reservation(wk))
} else {
(Deep, Write(wk))
}
}
};
self.access_place(
context,
(place, span),
access_kind,
LocalMutationIsAllowed::No,
flow_state,
);
let action = if bk == BorrowKind::Shallow {
InitializationRequiringAction::MatchOn
} else {
InitializationRequiringAction::Borrow
};
self.check_if_path_or_subpath_is_moved(
context,
action,
(place, span),
flow_state,
);
}
Rvalue::Use(ref operand)
| Rvalue::Repeat(ref operand, _)
| Rvalue::UnaryOp(_ /*un_op*/, ref operand)
| Rvalue::Cast(_ /*cast_kind*/, ref operand, _ /*ty*/) => {
self.consume_operand(context, (operand, span), flow_state)
}
Rvalue::Len(ref place) | Rvalue::Discriminant(ref place) => {
let af = match *rvalue {
Rvalue::Len(..) => ArtificialField::ArrayLength,
Rvalue::Discriminant(..) => ArtificialField::Discriminant,
_ => unreachable!(),
};
self.access_place(
context,
(place, span),
(Shallow(Some(af)), Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
flow_state,
);
self.check_if_path_or_subpath_is_moved(
context,
InitializationRequiringAction::Use,
(place, span),
flow_state,
);
}
Rvalue::BinaryOp(_bin_op, ref operand1, ref operand2)
| Rvalue::CheckedBinaryOp(_bin_op, ref operand1, ref operand2) => {
self.consume_operand(context, (operand1, span), flow_state);
self.consume_operand(context, (operand2, span), flow_state);
}
Rvalue::NullaryOp(_op, _ty) => {
// nullary ops take no dynamic input; no borrowck effect.
//
// FIXME: is above actually true? Do we want to track
// the fact that uninitialized data can be created via
// `NullOp::Box`?
}
Rvalue::Aggregate(ref aggregate_kind, ref operands) => {
// We need to report back the list of mutable upvars that were
// moved into the closure and subsequently used by the closure,
// in order to populate our used_mut set.
match **aggregate_kind {
AggregateKind::Closure(def_id, _)
| AggregateKind::Generator(def_id, _, _) => {
let BorrowCheckResult {
used_mut_upvars, ..
} = self.infcx.tcx.mir_borrowck(def_id);
debug!("{:?} used_mut_upvars={:?}", def_id, used_mut_upvars);
for field in used_mut_upvars {
// This relies on the current way that by-value
// captures of a closure are copied/moved directly
// when generating MIR.
match operands[field.index()] {
Operand::Move(Place::Local(local))
| Operand::Copy(Place::Local(local)) => {
self.used_mut.insert(local);
}
Operand::Move(ref place @ Place::Projection(_))
| Operand::Copy(ref place @ Place::Projection(_)) => {
if let Some(field) = place.is_upvar_field_projection(
self.mir, &self.infcx.tcx) {
self.used_mut_upvars.push(field);
}
}
Operand::Move(Place::Static(..))
| Operand::Copy(Place::Static(..))
| Operand::Move(Place::Promoted(..))
| Operand::Copy(Place::Promoted(..))
| Operand::Constant(..) => {}
}
}
}
AggregateKind::Adt(..)
| AggregateKind::Array(..)
| AggregateKind::Tuple { .. } => (),
}
for operand in operands {
self.consume_operand(context, (operand, span), flow_state);
}
}
}
}
fn consume_operand(
&mut self,
context: Context,
(operand, span): (&Operand<'tcx>, Span),
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
match *operand {
Operand::Copy(ref place) => {
// copy of place: check if this is "copy of frozen path"
// (FIXME: see check_loans.rs)
self.access_place(
context,
(place, span),
(Deep, Read(ReadKind::Copy)),
LocalMutationIsAllowed::No,
flow_state,
);
// Finally, check if path was already moved.
self.check_if_path_or_subpath_is_moved(
context,
InitializationRequiringAction::Use,
(place, span),
flow_state,
);
}
Operand::Move(ref place) => {
// move of place: check if this is move of already borrowed path
self.access_place(
context,
(place, span),
(Deep, Write(WriteKind::Move)),
LocalMutationIsAllowed::Yes,
flow_state,
);
// Finally, check if path was already moved.
self.check_if_path_or_subpath_is_moved(
context,
InitializationRequiringAction::Use,
(place, span),
flow_state,
);
}
Operand::Constant(_) => {}
}
}
/// Checks whether a borrow of this place is invalidated when the function
/// exits
fn check_for_invalidation_at_exit(
&mut self,
context: Context,
borrow: &BorrowData<'tcx>,
span: Span,
) {
debug!("check_for_invalidation_at_exit({:?})", borrow);
let place = &borrow.borrowed_place;
let root_place = self.prefixes(place, PrefixSet::All).last().unwrap();
// FIXME(nll-rfc#40): do more precise destructor tracking here. For now
// we just know that all locals are dropped at function exit (otherwise
// we'll have a memory leak) and assume that all statics have a destructor.
//
// FIXME: allow thread-locals to borrow other thread locals?
let (might_be_alive, will_be_dropped) = match root_place {
Place::Promoted(_) => (true, false),
Place::Static(_) => {
// Thread-locals might be dropped after the function exits, but
// "true" statics will never be.
let is_thread_local = self.is_place_thread_local(&root_place);
(true, is_thread_local)
}
Place::Local(_) => {
// Locals are always dropped at function exit, and if they
// have a destructor it would've been called already.
(false, self.locals_are_invalidated_at_exit)
}
Place::Projection(..) => {
bug!("root of {:?} is a projection ({:?})?", place, root_place)
}
};
if !will_be_dropped {
debug!(
"place_is_invalidated_at_exit({:?}) - won't be dropped",
place
);
return;
}
let sd = if might_be_alive { Deep } else { Shallow(None) };
if places_conflict::borrow_conflicts_with_place(
self.infcx.tcx,
self.mir,
place,
borrow.kind,
root_place,
sd
) {
debug!("check_for_invalidation_at_exit({:?}): INVALID", place);
// FIXME: should be talking about the region lifetime instead
// of just a span here.
let span = self.infcx.tcx.sess.source_map().end_point(span);
self.report_borrowed_value_does_not_live_long_enough(
context,
borrow,
(place, span),
None,
)
}
}
/// Reports an error if this is a borrow of local data.
/// This is called for all Yield statements on movable generators
fn check_for_local_borrow(&mut self, borrow: &BorrowData<'tcx>, yield_span: Span) {
debug!("check_for_local_borrow({:?})", borrow);
if borrow_of_local_data(&borrow.borrowed_place) {
let err = self.infcx.tcx
.cannot_borrow_across_generator_yield(
self.retrieve_borrow_spans(borrow).var_or_use(),
yield_span,
Origin::Mir,
);
err.buffer(&mut self.errors_buffer);
}
}
fn check_activations(
&mut self,
location: Location,
span: Span,
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
if !self.infcx.tcx.two_phase_borrows() {
return;
}
// Two-phase borrow support: For each activation that is newly
// generated at this statement, check if it interferes with
// another borrow.
let borrow_set = self.borrow_set.clone();
for &borrow_index in borrow_set.activations_at_location(location) {
let borrow = &borrow_set[borrow_index];
// only mutable borrows should be 2-phase
assert!(match borrow.kind {
BorrowKind::Shared | BorrowKind::Shallow => false,
BorrowKind::Unique | BorrowKind::Mut { .. } => true,
});
self.access_place(
ContextKind::Activation.new(location),
(&borrow.borrowed_place, span),
(
Deep,
Activation(WriteKind::MutableBorrow(borrow.kind), borrow_index),
),
LocalMutationIsAllowed::No,
flow_state,
);
// We do not need to call `check_if_path_or_subpath_is_moved`
// again, as we already called it when we made the
// initial reservation.
}
}
}
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
fn check_if_reassignment_to_immutable_state(
&mut self,
context: Context,
local: Local,
place_span: (&Place<'tcx>, Span),
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
debug!("check_if_reassignment_to_immutable_state({:?})", local);
// Check if any of the initializations of `local` have happened yet:
let mpi = self.move_data.rev_lookup.find_local(local);
let init_indices = &self.move_data.init_path_map[mpi];
let first_init_index = init_indices.iter().find(|&ii| flow_state.ever_inits.contains(*ii));
if let Some(&init_index) = first_init_index {
// And, if so, report an error.
let init = &self.move_data.inits[init_index];
let span = init.span(&self.mir);
self.report_illegal_reassignment(
context, place_span, span, place_span.0
);
}
}
fn check_if_full_path_is_moved(
&mut self,
context: Context,
desired_action: InitializationRequiringAction,
place_span: (&Place<'tcx>, Span),
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
let maybe_uninits = &flow_state.uninits;
// Bad scenarios:
//
// 1. Move of `a.b.c`, use of `a.b.c`
// 2. Move of `a.b.c`, use of `a.b.c.d` (without first reinitializing `a.b.c.d`)
// 3. Uninitialized `(a.b.c: &_)`, use of `*a.b.c`; note that with
// partial initialization support, one might have `a.x`
// initialized but not `a.b`.
//
// OK scenarios:
//
// 4. Move of `a.b.c`, use of `a.b.d`
// 5. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
// 6. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
// must have been initialized for the use to be sound.
// 7. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`
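//
// For instance (an illustrative sketch, not taken from the test
// suite), bad scenario 1 corresponds to source like:
//
//     let x = a.b.c;   // move of `a.b.c`
//     drop(a.b.c);     // ERROR: use of moved value `a.b.c`
//
// while OK scenario 4 is:
//
//     let x = a.b.c;   // move of `a.b.c`
//     drop(a.b.d);     // fine: the sibling path is untouched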
// The dataflow tracks shallow prefixes distinctly (that is,
// field-accesses on P distinctly from P itself), in order to
// track substructure initialization separately from the whole
// structure.
//
// E.g., when looking at (*a.b.c).d, if the closest prefix for
// which we have a MovePath is `a.b`, then that means that the
// initialization state of `a.b` is all we need to inspect to
// know if `a.b.c` is valid (and from that we infer that the
// dereference and `.d` access is also valid, since we assume
// `a.b.c` is assigned a reference to an initialized and
// well-formed record structure.)
// Therefore, if we seek out the *closest* prefix for which we
// have a MovePath, that should capture the initialization
// state for the place scenario.
//
// This code covers scenarios 1, 2, and 3.
debug!("check_if_full_path_is_moved place: {:?}", place_span.0);
match self.move_path_closest_to(place_span.0) {
Ok(mpi) => {
if maybe_uninits.contains(mpi) {
self.report_use_of_moved_or_uninitialized(
context,
desired_action,
place_span,
mpi,
);
return; // don't bother finding other problems.
}
}
Err(NoMovePathFound::ReachedStatic) => {
// Okay: we do not build MoveData for static variables
}
// Only query the longest prefix with a MovePath, not further
// ancestors; dataflow recurs on children when parents move
// (to support partial (re)inits).
//
// (I.e., querying parents breaks scenario 7; but we may want
// to do such a query based on the partial-init feature-gate.)
}
}
fn check_if_path_or_subpath_is_moved(
&mut self,
context: Context,
desired_action: InitializationRequiringAction,
place_span: (&Place<'tcx>, Span),
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
let maybe_uninits = &flow_state.uninits;
// Bad scenarios:
//
// 1. Move of `a.b.c`, use of `a` or `a.b`; note that with
//    partial initialization support, one might have `a.x`
//    initialized but not `a.b`.
// 2. All bad scenarios from `check_if_full_path_is_moved`
//
// OK scenarios:
//
// 3. Move of `a.b.c`, use of `a.b.d`
// 4. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
// 5. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
// must have been initialized for the use to be sound.
// 6. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`
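//
// For instance (illustrative sketch), bad scenario 1 is:
//
//     let x = a.b.c;   // move of `a.b.c`
//     drop(a.b);       // ERROR: use of partially moved value `a.b`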
self.check_if_full_path_is_moved(context, desired_action, place_span, flow_state);
// A move of any shallow suffix of `place` also interferes
// with an attempt to use `place`. This is bad scenario 1 above,
// and it is the case this code covers.
//
// (Distinct from the `check_if_full_path_is_moved` call above
// because `place` does not interfere with suffixes of its
// prefixes; e.g. `a.b.c` does not interfere with `a.b.d`,
// which is OK scenario 3.)
debug!("check_if_path_or_subpath_is_moved place: {:?}", place_span.0);
if let Some(mpi) = self.move_path_for_place(place_span.0) {
if let Some(child_mpi) = maybe_uninits.has_any_child_of(mpi) {
self.report_use_of_moved_or_uninitialized(
context,
desired_action,
place_span,
child_mpi,
);
return; // don't bother finding other problems.
}
}
}
/// Currently MoveData does not store entries for all places in
/// the input MIR. For example, it currently filters out
/// places that are Copy; thus we do not track places of shared
/// reference type. This routine will walk up a place along its
/// prefixes, searching for a foundational place that *is*
/// tracked in the MoveData.
///
/// An Err result includes a tag indicating why the search failed.
/// Currently this can only occur if the place is built off of a
/// static variable, as we do not track those in the MoveData.
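///
/// For example (illustrative): for the place `(*a.b).c`, the walk
/// visits `(*a.b).c`, `*a.b`, `a.b`, and finally `a`, returning
/// the first prefix for which a MovePath exists.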
fn move_path_closest_to(
&mut self,
place: &Place<'tcx>,
) -> Result<MovePathIndex, NoMovePathFound> {
let mut last_prefix = place;
for prefix in self.prefixes(place, PrefixSet::All) {
if let Some(mpi) = self.move_path_for_place(prefix) {
return Ok(mpi);
}
last_prefix = prefix;
}
match *last_prefix {
Place::Local(_) => panic!("should have move path for every Local"),
Place::Projection(_) => panic!("PrefixSet::All meant don't stop for Projection"),
Place::Promoted(_) |
Place::Static(_) => return Err(NoMovePathFound::ReachedStatic),
}
}
fn move_path_for_place(&mut self, place: &Place<'tcx>) -> Option<MovePathIndex> {
// If returns None, then there is no move path corresponding
// to a direct owner of `place` (which means there is nothing
// that borrowck tracks for its analysis).
match self.move_data.rev_lookup.find(place) {
LookupResult::Parent(_) => None,
LookupResult::Exact(mpi) => Some(mpi),
}
}
fn check_if_assigned_path_is_moved(
&mut self,
context: Context,
(place, span): (&Place<'tcx>, Span),
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
debug!("check_if_assigned_path_is_moved place: {:?}", place);
// recur down place; dispatch to external checks when necessary
let mut place = place;
loop {
match *place {
Place::Promoted(_) |
Place::Local(_) | Place::Static(_) => {
// assigning to `x` does not require `x` be initialized.
break;
}
Place::Projection(ref proj) => {
let Projection { ref base, ref elem } = **proj;
match *elem {
ProjectionElem::Index(_/*operand*/) |
ProjectionElem::ConstantIndex { .. } |
// assigning to P[i] requires P to be valid.
ProjectionElem::Downcast(_/*adt_def*/, _/*variant_idx*/) =>
// assigning to (P->variant) is okay if assigning to `P` is okay
//
// FIXME: is this true even if P is an adt with a dtor?
{ }
// assigning to (*P) requires P to be initialized
ProjectionElem::Deref => {
self.check_if_full_path_is_moved(
context, InitializationRequiringAction::Use,
(base, span), flow_state);
// (base initialized; no need to
// recur further)
break;
}
ProjectionElem::Subslice { .. } => {
panic!("we don't allow assignments to subslices, context: {:?}",
context);
}
ProjectionElem::Field(..) => {
// if type of `P` has a dtor, then
// assigning to `P.f` requires `P` itself
// be already initialized
let tcx = self.infcx.tcx;
match base.ty(self.mir, tcx).to_ty(tcx).sty {
ty::Adt(def, _) if def.has_dtor(tcx) => {
self.check_if_path_or_subpath_is_moved(
context, InitializationRequiringAction::Assignment,
(base, span), flow_state);
// (base initialized; no need to
// recur further)
break;
}
_ => {}
}
}
}
place = base;
continue;
}
}
}
}
/// Check the permissions for the given place and read or write kind
///
/// Returns true if an error is reported, false otherwise.
fn check_access_permissions(
&mut self,
(place, span): (&Place<'tcx>, Span),
kind: ReadOrWrite,
is_local_mutation_allowed: LocalMutationIsAllowed,
flow_state: &Flows<'cx, 'gcx, 'tcx>,
location: Location,
) -> bool {
debug!(
"check_access_permissions({:?}, {:?}, {:?})",
place, kind, is_local_mutation_allowed
);
let error_access;
let the_place_err;
match kind {
Reservation(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Unique))
| Reservation(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Mut { .. }))
| Write(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Unique))
| Write(WriteKind::MutableBorrow(borrow_kind @ BorrowKind::Mut { .. })) => {
let is_local_mutation_allowed = match borrow_kind {
BorrowKind::Unique => LocalMutationIsAllowed::Yes,
BorrowKind::Mut { .. } => is_local_mutation_allowed,
BorrowKind::Shared | BorrowKind::Shallow => unreachable!(),
};
match self.is_mutable(place, is_local_mutation_allowed) {
Ok(root_place) => {
self.add_used_mut(root_place, flow_state);
return false;
}
Err(place_err) => {
error_access = AccessKind::MutableBorrow;
the_place_err = place_err;
}
}
}
Reservation(WriteKind::Mutate) | Write(WriteKind::Mutate) => {
match self.is_mutable(place, is_local_mutation_allowed) {
Ok(root_place) => {
self.add_used_mut(root_place, flow_state);
return false;
}
Err(place_err) => {
error_access = AccessKind::Mutate;
the_place_err = place_err;
}
}
}
Reservation(wk @ WriteKind::Move)
| Write(wk @ WriteKind::Move)
| Reservation(wk @ WriteKind::StorageDeadOrDrop)
| Reservation(wk @ WriteKind::MutableBorrow(BorrowKind::Shared))
| Reservation(wk @ WriteKind::MutableBorrow(BorrowKind::Shallow))
| Write(wk @ WriteKind::StorageDeadOrDrop)
| Write(wk @ WriteKind::MutableBorrow(BorrowKind::Shared))
| Write(wk @ WriteKind::MutableBorrow(BorrowKind::Shallow)) => {
if let Err(_place_err) = self.is_mutable(place, is_local_mutation_allowed) {
if self.infcx.tcx.migrate_borrowck() {
// rust-lang/rust#46908: In pure NLL mode this
// code path should be unreachable (and thus
// we signal an ICE in the else branch
// here). But we can legitimately get here
// under borrowck=migrate mode, so instead of
// ICE'ing we instead report a legitimate
// error (which will then be downgraded to a
// warning by the migrate machinery).
error_access = match wk {
WriteKind::MutableBorrow(_) => AccessKind::MutableBorrow,
WriteKind::Move => AccessKind::Move,
WriteKind::StorageDeadOrDrop |
WriteKind::Mutate => AccessKind::Mutate,
};
self.report_mutability_error(
place,
span,
_place_err,
error_access,
location,
);
} else {
self.infcx.tcx.sess.delay_span_bug(
span,
&format!(
"Accessing `{:?}` with the kind `{:?}` shouldn't be possible",
place, kind
),
);
}
}
return false;
}
Activation(..) => {
// permission checks are done at Reservation point.
return false;
}
Read(ReadKind::Borrow(BorrowKind::Unique))
| Read(ReadKind::Borrow(BorrowKind::Mut { .. }))
| Read(ReadKind::Borrow(BorrowKind::Shared))
| Read(ReadKind::Borrow(BorrowKind::Shallow))
| Read(ReadKind::Copy) => {
// Access authorized
return false;
}
}
// at this point, we have set up the error reporting state.
self.report_mutability_error(
place,
span,
the_place_err,
error_access,
location,
);
return true;
}
/// Adds the place into the used mutable variables set
fn add_used_mut<'d>(
&mut self,
root_place: RootPlace<'d, 'tcx>,
flow_state: &Flows<'cx, 'gcx, 'tcx>,
) {
match root_place {
RootPlace {
place: Place::Local(local),
is_local_mutation_allowed,
} => {
if is_local_mutation_allowed != LocalMutationIsAllowed::Yes {
// If the local may already be initialized and it is now being
// mutated, then annotating it with the `mut` keyword is
// justified, since the mutation may be a reassignment.
let mpi = self.move_data.rev_lookup.find_local(*local);
let ii = &self.move_data.init_path_map[mpi];
for &index in ii {
if flow_state.ever_inits.contains(index) {
self.used_mut.insert(*local);
break;
}
}
}
}
RootPlace {
place: _,
is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
} => {}
RootPlace {
place: place @ Place::Projection(_),
is_local_mutation_allowed: _,
} => {
if let Some(field) = place.is_upvar_field_projection(self.mir, &self.infcx.tcx) {
self.used_mut_upvars.push(field);
}
}
RootPlace {
place: Place::Promoted(..),
is_local_mutation_allowed: _,
} => {}
RootPlace {
place: Place::Static(..),
is_local_mutation_allowed: _,
} => {}
}
}
/// Whether this value can be written or borrowed mutably.
/// Returns the root place if the place passed in is a projection.
fn is_mutable<'d>(
&self,
place: &'d Place<'tcx>,
is_local_mutation_allowed: LocalMutationIsAllowed,
) -> Result<RootPlace<'d, 'tcx>, &'d Place<'tcx>> {
match *place {
Place::Local(local) => {
let local = &self.mir.local_decls[local];
match local.mutability {
Mutability::Not => match is_local_mutation_allowed {
LocalMutationIsAllowed::Yes => Ok(RootPlace {
place,
is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
}),
LocalMutationIsAllowed::ExceptUpvars => Ok(RootPlace {
place,
is_local_mutation_allowed: LocalMutationIsAllowed::ExceptUpvars,
}),
LocalMutationIsAllowed::No => Err(place),
},
Mutability::Mut => Ok(RootPlace {
place,
is_local_mutation_allowed,
}),
}
}
// The rules for promotion are made by `qualify_consts`; there
// wouldn't even be a `Place::Promoted` if the promotion weren't
// 100% legal, so we simply accept it as a root place here.
Place::Promoted(_) => Ok(RootPlace {
place,
is_local_mutation_allowed,
}),
Place::Static(ref static_) => {
if self.infcx.tcx.is_static(static_.def_id) != Some(hir::Mutability::MutMutable) {
Err(place)
} else {
Ok(RootPlace {
place,
is_local_mutation_allowed,
})
}
}
Place::Projection(ref proj) => {
match proj.elem {
ProjectionElem::Deref => {
let base_ty = proj.base.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx);
// Check the kind of deref to decide
match base_ty.sty {
ty::Ref(_, _, mutbl) => {
match mutbl {
// Shared borrowed data is never mutable
hir::MutImmutable => Err(place),
// Mutably borrowed data is mutable, but only if we have a
// unique path to the `&mut`
hir::MutMutable => {
let mode = match place.is_upvar_field_projection(
self.mir, &self.infcx.tcx)
{
Some(field)
if {
self.mir.upvar_decls[field.index()].by_ref
} =>
{
is_local_mutation_allowed
}
_ => LocalMutationIsAllowed::Yes,
};
self.is_mutable(&proj.base, mode)
}
}
}
ty::RawPtr(tnm) => {
match tnm.mutbl {
// `*const` raw pointers are not mutable
hir::MutImmutable => return Err(place),
// `*mut` raw pointers are always mutable, regardless of
// context; users must uphold safety themselves.
hir::MutMutable => {
return Ok(RootPlace {
place,
is_local_mutation_allowed,
});
}
}
}
// `Box<T>` owns its content, so mutable if its location is mutable
_ if base_ty.is_box() => {
self.is_mutable(&proj.base, is_local_mutation_allowed)
}
// Deref should only be for references, pointers, or boxes
_ => bug!("Deref of unexpected type: {:?}", base_ty),
}
}
// All other projections are owned by their base path, so mutable if
// base path is mutable
ProjectionElem::Field(..)
| ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
| ProjectionElem::Downcast(..) => {
let upvar_field_projection = place.is_upvar_field_projection(
self.mir, &self.infcx.tcx);
if let Some(field) = upvar_field_projection {
let decl = &self.mir.upvar_decls[field.index()];
debug!(
"decl.mutability={:?} local_mutation_is_allowed={:?} place={:?}",
decl, is_local_mutation_allowed, place
);
match (decl.mutability, is_local_mutation_allowed) {
(Mutability::Not, LocalMutationIsAllowed::No)
| (Mutability::Not, LocalMutationIsAllowed::ExceptUpvars) => {
Err(place)
}
(Mutability::Not, LocalMutationIsAllowed::Yes)
| (Mutability::Mut, _) => {
// Subtle: this is an upvar
// reference, so it looks like
// `self.foo` -- we want to double
// check that the context `*self`
// is mutable (i.e., this is not a
// `Fn` closure). But if that
// check succeeds, we want to
// *blame* the mutability on
// `place` (that is,
// `self.foo`). This is used to
// propagate the info about
// whether mutability declarations
// are used outwards, so that we register
// the outer variable as mutable. Otherwise a
// test like this fails to record the `mut`
// as needed:
//
// ```
// fn foo<F: FnOnce()>(_f: F) { }
// fn main() {
// let var = Vec::new();
// foo(move || {
// var.push(1);
// });
// }
// ```
let _ = self.is_mutable(&proj.base, is_local_mutation_allowed)?;
Ok(RootPlace {
place,
is_local_mutation_allowed,
})
}
}
} else {
self.is_mutable(&proj.base, is_local_mutation_allowed)
}
}
}
}
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum NoMovePathFound {
ReachedStatic,
}
/// The degree of overlap between 2 places for borrow-checking.
enum Overlap {
/// The places might partially overlap - in this case, we give
/// up and say that they might conflict. This occurs when
/// different fields of a union are borrowed. For example,
/// if `u` is a union, we have no way of telling how disjoint
/// `u.a.x` and `u.b.y` are.
Arbitrary,
/// The places have the same type, and are either completely disjoint
/// or equal - i.e. they can't "partially" overlap as can occur with
/// unions. This is the "base case" on which we recur for extensions
/// of the place.
EqualOrDisjoint,
/// The places are disjoint, so we know all extensions of them
/// will also be disjoint.
Disjoint,
}
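// Illustratively: a place compared with itself is `EqualOrDisjoint`
// (equal); two distinct fields such as `t.0` and `t.1` are
// `Disjoint`; and two fields of the same union are `Arbitrary`,
// since they alias in memory.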
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Context {
kind: ContextKind,
loc: Location,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ContextKind {
Activation,
AssignLhs,
AssignRhs,
SetDiscrim,
InlineAsm,
SwitchInt,
Drop,
DropAndReplace,
CallOperator,
CallOperand,
CallDest,
Assert,
Yield,
FakeRead,
StorageDead,
}
impl ContextKind {
fn new(self, loc: Location) -> Context {
Context {
kind: self,
loc,
}
}
}
| 39.661353 | 100 | 0.502917 |
5b47b78a1840b82a0ffb72011f33de8d7b87050c | 2,898 | use ic_metrics::{buckets::decimal_buckets, MetricsRegistry, Timer};
use prometheus::HistogramVec;
const LABEL_STATUS: &str = "status";
const LABEL_TYPE: &str = "type";
const METRIC_BUILD_PAYLOAD_DURATION: &str = "bitcoin_builder_build_payload_duration_seconds";
const METRIC_VALIDATE_PAYLOAD_DURATION: &str = "bitcoin_builder_validate_payload_duration_seconds";
const METRIC_ADAPTER_REQUEST_DURATION: &str = "bitcoin_builder_adapter_request_duration_seconds";
pub struct BitcoinPayloadBuilderMetrics {
// Records the time it took to build the payload, by status.
build_payload_duration: HistogramVec,
// Records the time it took to validate a payload, by status.
validate_payload_duration: HistogramVec,
// Records the time it took to send a request to the Bitcoin
// Adapter and receive the response, by status and type.
adapter_request_duration: HistogramVec,
}
impl BitcoinPayloadBuilderMetrics {
pub fn new(metrics_registry: &MetricsRegistry) -> Self {
Self {
build_payload_duration: metrics_registry.histogram_vec(
METRIC_BUILD_PAYLOAD_DURATION,
"The time it took to build the payload, by status.",
// 0.1ms - 5s
decimal_buckets(-4, 0),
&[LABEL_STATUS],
),
validate_payload_duration: metrics_registry.histogram_vec(
METRIC_VALIDATE_PAYLOAD_DURATION,
"The time it took to validate a payload, by status.",
// 0.1ms - 5s
decimal_buckets(-4, 0),
&[LABEL_STATUS],
),
adapter_request_duration: metrics_registry.histogram_vec(
METRIC_ADAPTER_REQUEST_DURATION,
"The time it took to send a request to the Bitcoin Adapter and receive the response, by status and type.",
// 1μs - 5s
decimal_buckets(-6, 0),
&[LABEL_STATUS, LABEL_TYPE],
),
}
}
// Records the status and duration of a `get_self_validating_payload()` call.
pub fn observe_build_duration(&self, status: &str, timer: Timer) {
self.build_payload_duration
.with_label_values(&[status])
.observe(timer.elapsed());
}
// Records the status and duration of a `validate_self_validating_payload()` call.
pub fn observe_validate_duration(&self, status: &str, timer: Timer) {
self.validate_payload_duration
.with_label_values(&[status])
.observe(timer.elapsed());
}
// Records the status, type and duration of a request made to the BitcoinAdapter.
pub fn observe_adapter_request_duration(&self, status: &str, request_type: &str, timer: Timer) {
self.adapter_request_duration
.with_label_values(&[status, request_type])
.observe(timer.elapsed());
}
}
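// Illustrative usage (a sketch; the `Timer::start()` constructor and
// the `registry` variable are assumptions, not part of this file):
//
//     let metrics = BitcoinPayloadBuilderMetrics::new(&registry);
//     let timer = Timer::start();
//     // ... build the payload ...
//     metrics.observe_build_duration("success", timer);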
| 42.617647 | 122 | 0.65666 |
e4237df592394f9dd5370250dcaa9f63527a967d | 19,239 | //! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
use crate::dep_graph::DepGraph;
use crate::ty::query::Query;
use crate::ty::tls::{self, ImplicitCtxt};
use crate::ty::{self, TyCtxt};
use rustc_query_system::query::QueryContext;
use rustc_query_system::query::{CycleError, QueryJobId, QueryJobInfo};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
use rustc_span::def_id::DefId;
use rustc_span::Span;
impl QueryContext for TyCtxt<'tcx> {
type Query = Query<'tcx>;
fn incremental_verify_ich(&self) -> bool {
self.sess.opts.debugging_opts.incremental_verify_ich
}
fn verbose(&self) -> bool {
self.sess.verbose()
}
fn def_path_str(&self, def_id: DefId) -> String {
TyCtxt::def_path_str(*self, def_id)
}
fn dep_graph(&self) -> &DepGraph {
&self.dep_graph
}
fn current_query_job(&self) -> Option<QueryJobId<Self::DepKind>> {
tls::with_related_context(*self, |icx| icx.query)
}
fn try_collect_active_jobs(
&self,
) -> Option<FxHashMap<QueryJobId<Self::DepKind>, QueryJobInfo<Self>>> {
self.queries.try_collect_active_jobs()
}
/// Executes a job by changing the `ImplicitCtxt` to point to the
/// new query job while it executes. It returns the diagnostics
/// captured during execution and the actual result.
#[inline(always)]
fn start_query<R>(
&self,
token: QueryJobId<Self::DepKind>,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
compute: impl FnOnce(Self) -> R,
) -> R {
// The `TyCtxt` stored in TLS has the same global interner lifetime
// as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
// when accessing the `ImplicitCtxt`.
tls::with_related_context(*self, move |current_icx| {
// Update the `ImplicitCtxt` to point to our new query job.
let new_icx = ImplicitCtxt {
tcx: *self,
query: Some(token),
diagnostics,
layout_depth: current_icx.layout_depth,
task_deps: current_icx.task_deps,
};
// Use the `ImplicitCtxt` while we execute the query.
tls::enter_context(&new_icx, |_| compute(*self))
})
}
}
impl<'tcx> TyCtxt<'tcx> {
#[inline(never)]
#[cold]
pub(super) fn report_cycle(
self,
CycleError { usage, cycle: stack }: CycleError<Query<'tcx>>,
) -> DiagnosticBuilder<'tcx> {
assert!(!stack.is_empty());
let fix_span = |span: Span, query: &Query<'tcx>| {
self.sess.source_map().guess_head_span(query.default_span(self, span))
};
// Disable naming impls with types in this path, since that
// sometimes cycles itself, leading to extra cycle errors.
// (And cycle errors around impls tend to occur during the
// collect/coherence phases anyhow.)
ty::print::with_forced_impl_filename_line(|| {
let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
let mut err = struct_span_err!(
self.sess,
span,
E0391,
"cycle detected when {}",
stack[0].query.describe(self)
);
for i in 1..stack.len() {
let query = &stack[i].query;
let span = fix_span(stack[(i + 1) % stack.len()].span, query);
err.span_note(span, &format!("...which requires {}...", query.describe(self)));
}
err.note(&format!(
"...which again requires {}, completing the cycle",
stack[0].query.describe(self)
));
if let Some((span, query)) = usage {
err.span_note(
fix_span(span, &query),
&format!("cycle used when {}", query.describe(self)),
);
}
err
})
}
pub fn try_print_query_stack(handler: &Handler) {
eprintln!("query stack during panic:");
// Be careful relying on global state here: this code is called from
// a panic hook, which means that the global `Handler` may be in a weird
// state if it was responsible for triggering the panic.
ty::tls::with_context_opt(|icx| {
if let Some(icx) = icx {
let query_map = icx.tcx.queries.try_collect_active_jobs();
let mut current_query = icx.query;
let mut i = 0;
while let Some(query) = current_query {
let query_info =
if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
info
} else {
break;
};
let mut diag = Diagnostic::new(
Level::FailureNote,
&format!(
"#{} [{}] {}",
i,
query_info.info.query.name(),
query_info.info.query.describe(icx.tcx)
),
);
diag.span =
icx.tcx.sess.source_map().guess_head_span(query_info.info.span).into();
handler.force_print_diagnostic(diag);
current_query = query_info.job.parent;
i += 1;
}
}
});
eprintln!("end of query stack");
}
}
macro_rules! handle_cycle_error {
([][$tcx: expr, $error:expr]) => {{
$tcx.report_cycle($error).emit();
Value::from_cycle_error($tcx)
}};
([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
$tcx.report_cycle($error).emit();
$tcx.sess.abort_if_errors();
unreachable!()
}};
([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
$tcx.report_cycle($error).delay_as_bug();
Value::from_cycle_error($tcx)
}};
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
handle_cycle_error!([$($($modifiers)*)*][$($args)*])
};
}
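// These modifier-dispatch macros all follow the same pattern: match the
// known modifiers first, then fall through to an arm that drops the
// leading modifier token and recurs on the rest. E.g. (illustrative)
// `handle_cycle_error!([fatal_cycle][tcx, err])` expands to the arm
// that reports the cycle and aborts, while an unrecognized leading
// modifier is skipped and matching continues on the remainder.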
macro_rules! is_anon {
([]) => {{
false
}};
([anon $($rest:tt)*]) => {{
true
}};
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
is_anon!([$($($modifiers)*)*])
};
}
macro_rules! is_eval_always {
([]) => {{
false
}};
([eval_always $($rest:tt)*]) => {{
true
}};
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
is_eval_always!([$($($modifiers)*)*])
};
}
macro_rules! query_storage {
([][$K:ty, $V:ty]) => {
<<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache
};
([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
<$ty as CacheSelector<$K, $V>>::Cache
};
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
query_storage!([$($($modifiers)*)*][$($args)*])
};
}
macro_rules! hash_result {
([][$hcx:expr, $result:expr]) => {{
dep_graph::hash_result($hcx, &$result)
}};
([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
None
}};
([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
hash_result!([$($($modifiers)*)*][$($args)*])
};
}
macro_rules! define_queries {
(<$tcx:tt> $($category:tt {
$($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($($K:tt)*) -> $V:ty,)*
},)*) => {
define_queries_inner! { <$tcx>
$($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($($K)*) -> $V,)*)*
}
}
}
macro_rules! query_helper_param_ty {
(DefId) => { impl IntoQueryParam<DefId> };
($K:ty) => { $K };
}
macro_rules! define_queries_inner {
(<$tcx:tt>
$($(#[$attr:meta])* category<$category:tt>
[$($modifiers:tt)*] fn $name:ident: $node:ident($($K:tt)*) -> $V:ty,)*) => {
use std::mem;
use crate::{
rustc_data_structures::stable_hasher::HashStable,
rustc_data_structures::stable_hasher::StableHasher,
ich::StableHashingContext
};
use rustc_data_structures::profiling::ProfileCategory;
define_queries_struct! {
tcx: $tcx,
input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
}
#[allow(nonstandard_style)]
#[derive(Clone, Debug)]
pub enum Query<$tcx> {
$($(#[$attr])* $name($($K)*)),*
}
impl<$tcx> Query<$tcx> {
pub fn name(&self) -> &'static str {
match *self {
$(Query::$name(_) => stringify!($name),)*
}
}
pub fn describe(&self, tcx: TyCtxt<$tcx>) -> Cow<'static, str> {
let (r, name) = match *self {
$(Query::$name(key) => {
(queries::$name::describe(tcx, key), stringify!($name))
})*
};
if tcx.sess.verbose() {
format!("{} [{}]", r, name).into()
} else {
r
}
}
// FIXME(eddyb) Get more valid `Span`s on queries.
pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
if !span.is_dummy() {
return span;
}
// The `def_span` query is used to calculate `default_span`,
// so exit to avoid infinite recursion.
if let Query::def_span(..) = *self {
return span
}
match *self {
$(Query::$name(key) => key.default_span(tcx),)*
}
}
}
impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
$(Query::$name(key) => key.hash_stable(hcx, hasher),)*
}
}
}
pub mod queries {
use std::marker::PhantomData;
$(#[allow(nonstandard_style)]
pub struct $name<$tcx> {
data: PhantomData<&$tcx ()>
})*
}
$(impl<$tcx> QueryConfig<TyCtxt<$tcx>> for queries::$name<$tcx> {
type Key = $($K)*;
type Value = $V;
type Stored = <
query_storage!([$($modifiers)*][$($K)*, $V])
as QueryStorage
>::Stored;
const NAME: &'static str = stringify!($name);
const CATEGORY: ProfileCategory = $category;
}
impl<$tcx> QueryAccessors<TyCtxt<$tcx>> for queries::$name<$tcx> {
const ANON: bool = is_anon!([$($modifiers)*]);
const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node;
type Cache = query_storage!([$($modifiers)*][$($K)*, $V]);
#[inline(always)]
fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<TyCtxt<$tcx>, Self::Cache> {
&tcx.queries.$name
}
#[allow(unused)]
#[inline(always)]
fn to_dep_node(tcx: TyCtxt<$tcx>, key: &Self::Key) -> DepNode {
DepConstructor::$node(tcx, *key)
}
#[inline]
fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
let provider = tcx.queries.providers.get(key.query_crate())
// HACK(eddyb) it's possible crates may be loaded after
// the query engine is created, and because crate loading
// is not yet integrated with the query engine, such crates
// would be missing appropriate entries in `providers`.
.unwrap_or(&tcx.queries.fallback_extern_providers)
.$name;
provider(tcx, key)
}
fn hash_result(
_hcx: &mut StableHashingContext<'_>,
_result: &Self::Value
) -> Option<Fingerprint> {
hash_result!([$($modifiers)*][_hcx, _result])
}
fn handle_cycle_error(
tcx: TyCtxt<'tcx>,
error: CycleError<Query<'tcx>>
) -> Self::Value {
handle_cycle_error!([$($modifiers)*][tcx, error])
}
})*
#[derive(Copy, Clone)]
pub struct TyCtxtEnsure<'tcx> {
pub tcx: TyCtxt<'tcx>,
}
impl TyCtxtEnsure<$tcx> {
$($(#[$attr])*
#[inline(always)]
pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
ensure_query::<queries::$name<'_>, _>(self.tcx, key.into_query_param())
})*
}
#[derive(Copy, Clone)]
pub struct TyCtxtAt<'tcx> {
pub tcx: TyCtxt<'tcx>,
pub span: Span,
}
impl Deref for TyCtxtAt<'tcx> {
type Target = TyCtxt<'tcx>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.tcx
}
}
impl TyCtxt<$tcx> {
/// Returns a transparent wrapper for `TyCtxt`, which ensures queries
/// are executed instead of just returning their results.
#[inline(always)]
pub fn ensure(self) -> TyCtxtEnsure<$tcx> {
TyCtxtEnsure {
tcx: self,
}
}
/// Returns a transparent wrapper for `TyCtxt` which uses
/// `span` as the location of queries performed through it.
#[inline(always)]
pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
TyCtxtAt {
tcx: self,
span
}
}
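// Illustrative use (not part of this file):
// `tcx.at(span).type_of(def_id)` runs the `type_of` query but
// attributes any resulting diagnostics to `span` rather than
// `DUMMY_SP`.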
$($(#[$attr])*
#[inline(always)]
pub fn $name(self, key: query_helper_param_ty!($($K)*))
-> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
{
self.at(DUMMY_SP).$name(key.into_query_param())
})*
/// All self-profiling events generated by the query engine use
/// virtual `StringId`s for their `event_id`. This method makes all
/// those virtual `StringId`s point to actual strings.
///
/// If we are recording only summary data, the ids will point to
/// just the query names. If we are recording query keys too, we
/// allocate the corresponding strings here.
pub fn alloc_self_profile_query_strings(self) {
use crate::ty::query::profiling_support::{
alloc_self_profile_query_strings_for_query_cache,
QueryKeyStringCache,
};
if !self.prof.enabled() {
return;
}
let mut string_cache = QueryKeyStringCache::new();
$({
alloc_self_profile_query_strings_for_query_cache(
self,
stringify!($name),
&self.queries.$name,
&mut string_cache,
);
})*
}
}
impl TyCtxtAt<$tcx> {
$($(#[$attr])*
#[inline(always)]
pub fn $name(self, key: query_helper_param_ty!($($K)*))
-> <queries::$name<$tcx> as QueryConfig<TyCtxt<$tcx>>>::Stored
{
get_query::<queries::$name<'_>, _>(self.tcx, self.span, key.into_query_param())
})*
}
define_provider_struct! {
tcx: $tcx,
input: ($(([$($modifiers)*] [$name] [$($K)*] [$V]))*)
}
impl<$tcx> Copy for Providers<$tcx> {}
impl<$tcx> Clone for Providers<$tcx> {
fn clone(&self) -> Self { *self }
}
}
}
macro_rules! define_queries_struct {
(tcx: $tcx:tt,
input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
pub struct Queries<$tcx> {
/// This provides access to the incremental compilation on-disk cache for query results.
/// Do not access this directly. It is only meant to be used by
/// `DepGraph::try_mark_green()` and the query infrastructure.
pub(crate) on_disk_cache: OnDiskCache<'tcx>,
providers: IndexVec<CrateNum, Providers<$tcx>>,
fallback_extern_providers: Box<Providers<$tcx>>,
$($(#[$attr])* $name: QueryState<
TyCtxt<$tcx>,
<queries::$name<$tcx> as QueryAccessors<TyCtxt<'tcx>>>::Cache,
>,)*
}
impl<$tcx> Queries<$tcx> {
pub(crate) fn new(
providers: IndexVec<CrateNum, Providers<$tcx>>,
fallback_extern_providers: Providers<$tcx>,
on_disk_cache: OnDiskCache<'tcx>,
) -> Self {
Queries {
providers,
fallback_extern_providers: Box::new(fallback_extern_providers),
on_disk_cache,
$($name: Default::default()),*
}
}
pub(crate) fn try_collect_active_jobs(
&self
) -> Option<FxHashMap<QueryJobId<crate::dep_graph::DepKind>, QueryJobInfo<TyCtxt<'tcx>>>> {
let mut jobs = FxHashMap::default();
$(
self.$name.try_collect_active_jobs(
<queries::$name<'tcx> as QueryAccessors<TyCtxt<'tcx>>>::DEP_KIND,
Query::$name,
&mut jobs,
)?;
)*
Some(jobs)
}
}
};
}
macro_rules! define_provider_struct {
(tcx: $tcx:tt,
input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => {
pub struct Providers<$tcx> {
$(pub $name: fn(TyCtxt<$tcx>, $K) -> $R,)*
}
impl<$tcx> Default for Providers<$tcx> {
fn default() -> Self {
$(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R {
bug!("`tcx.{}({:?})` unsupported by its crate",
stringify!($name), key);
})*
Providers { $($name),* }
}
}
};
}
| 34.355357 | 103 | 0.485212 |
91db9302f6414dbc0549f54bb6b2457c5f6ea26e | 46,974 | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[doc(hidden)];
// Support code for rustc's built in test runner generator. Currently,
// none of this is meant for users. It is intended to support the
// simplest interface possible for representing and running tests
// while providing a base that other test frameworks may build off of.
use getopts;
use getopts::groups;
use json::ToJson;
use json;
use serialize::Decodable;
use sort;
use stats::Stats;
use stats;
use term;
use time::precise_time_ns;
use treemap::TreeMap;
use std::clone::Clone;
use std::comm::{stream, SharedChan, GenericPort, GenericChan};
use std::libc;
use std::either;
use std::io;
use std::result;
use std::task;
use std::to_str::ToStr;
use std::f64;
use std::os;
use std::uint;
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[deriving(Clone)]
pub enum TestName {
StaticTestName(&'static str),
DynTestName(~str)
}
impl ToStr for TestName {
fn to_str(&self) -> ~str {
match (*self).clone() {
StaticTestName(s) => s.to_str(),
DynTestName(s) => s.to_str()
}
}
}
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function fails then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
StaticTestFn(extern fn()),
StaticBenchFn(extern fn(&mut BenchHarness)),
StaticMetricFn(~fn(&mut MetricMap)),
DynTestFn(~fn()),
DynMetricFn(~fn(&mut MetricMap)),
DynBenchFn(~fn(&mut BenchHarness))
}
// Structure passed to BenchFns
pub struct BenchHarness {
iterations: u64,
ns_start: u64,
ns_end: u64,
bytes: u64
}
// The definition of a single test. A test runner will run a list of
// these.
#[deriving(Clone)]
pub struct TestDesc {
name: TestName,
ignore: bool,
should_fail: bool
}
pub struct TestDescAndFn {
desc: TestDesc,
testfn: TestFn,
}
#[deriving(Clone, Encodable, Decodable, Eq)]
pub struct Metric {
value: f64,
noise: f64
}
#[deriving(Eq)]
pub struct MetricMap(TreeMap<~str,Metric>);
impl Clone for MetricMap {
fn clone(&self) -> MetricMap {
MetricMap((**self).clone())
}
}
/// Analysis of a single change in metric
#[deriving(Eq)]
pub enum MetricChange {
LikelyNoise,
MetricAdded,
MetricRemoved,
Improvement(f64),
Regression(f64)
}
pub type MetricDiff = TreeMap<~str,MetricChange>;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
let opts =
match parse_opts(args) {
either::Left(o) => o,
either::Right(m) => fail!(m)
};
if !run_tests_console(&opts, tests) { fail!("Some tests failed"); }
}
// A variant optimized for invocation with a static test vector.
// This will fail (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a ~[TestDescAndFn] is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
let owned_tests = do tests.map |t| {
match t.testfn {
StaticTestFn(f) =>
TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
StaticBenchFn(f) =>
TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
_ => {
fail!("non-static tests passed to test::test_main_static");
}
}
};
test_main(args, owned_tests)
}
pub struct TestOpts {
filter: Option<~str>,
run_ignored: bool,
run_tests: bool,
run_benchmarks: bool,
ratchet_metrics: Option<Path>,
ratchet_noise_percent: Option<f64>,
save_metrics: Option<Path>,
test_shard: Option<(uint,uint)>,
logfile: Option<Path>
}
type OptRes = Either<TestOpts, ~str>;
fn optgroups() -> ~[getopts::groups::OptGroup] {
~[groups::optflag("", "ignored", "Run ignored tests"),
groups::optflag("", "test", "Run tests and not benchmarks"),
groups::optflag("", "bench", "Run benchmarks instead of tests"),
groups::optflag("h", "help", "Display this message (longer with --help)"),
groups::optopt("", "save-metrics", "Location to save bench metrics",
"PATH"),
groups::optopt("", "ratchet-metrics",
"Location to load and save metrics from. The metrics \
loaded cause benchmarks to fail if they run too \
slowly", "PATH"),
groups::optopt("", "ratchet-noise-percent",
"Tests within N% of the recorded metrics will be \
considered as passing", "PERCENTAGE"),
groups::optopt("", "logfile", "Write logs to the specified file instead \
of stdout", "PATH"),
groups::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite",
"A.B")]
}
fn usage(binary: &str, helpstr: &str) -> ! {
#[fixed_stack_segment]; #[inline(never)];
let message = fmt!("Usage: %s [OPTIONS] [FILTER]", binary);
println(groups::usage(message, optgroups()));
println("");
if helpstr == "help" {
println("\
The FILTER is matched against the name of all tests to run, and if any tests
have a substring match, only those tests are run.
By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).
Test Attributes:
#[test] - Indicates a function is a test to be run. This function
takes no arguments.
#[bench] - Indicates a function is a benchmark to be run. This
function takes one argument (extra::test::BenchHarness).
#[should_fail] - This function (also labeled with #[test]) will only pass if
the code causes a failure (an assertion failure or fail!)
#[ignore] - When applied to a function which is already attributed as a
test, then the test runner will ignore these tests during
normal test runs. Running with --ignored will run these
tests. This may also be written as #[ignore(cfg(...))] to
ignore the test on certain configurations.");
}
unsafe { libc::exit(0) }
}
// Parses command line arguments into test options
pub fn parse_opts(args: &[~str]) -> OptRes {
let args_ = args.tail();
let matches =
match groups::getopts(args_, optgroups()) {
Ok(m) => m,
Err(f) => return either::Right(getopts::fail_str(f))
};
if getopts::opt_present(&matches, "h") { usage(args[0], "h"); }
if getopts::opt_present(&matches, "help") { usage(args[0], "help"); }
let filter =
if matches.free.len() > 0 {
Some((matches).free[0].clone())
} else {
None
};
let run_ignored = getopts::opt_present(&matches, "ignored");
let logfile = getopts::opt_maybe_str(&matches, "logfile");
let logfile = logfile.map_move(|s| Path(s));
let run_benchmarks = getopts::opt_present(&matches, "bench");
let run_tests = ! run_benchmarks ||
getopts::opt_present(&matches, "test");
let ratchet_metrics = getopts::opt_maybe_str(&matches, "ratchet-metrics");
let ratchet_metrics = ratchet_metrics.map_move(|s| Path(s));
let ratchet_noise_percent = getopts::opt_maybe_str(&matches, "ratchet-noise-percent");
let ratchet_noise_percent = ratchet_noise_percent.map_move(|s| f64::from_str(s).unwrap());
let save_metrics = getopts::opt_maybe_str(&matches, "save-metrics");
let save_metrics = save_metrics.map_move(|s| Path(s));
let test_shard = getopts::opt_maybe_str(&matches, "test-shard");
let test_shard = opt_shard(test_shard);
let test_opts = TestOpts {
filter: filter,
run_ignored: run_ignored,
run_tests: run_tests,
run_benchmarks: run_benchmarks,
ratchet_metrics: ratchet_metrics,
ratchet_noise_percent: ratchet_noise_percent,
save_metrics: save_metrics,
test_shard: test_shard,
logfile: logfile
};
either::Left(test_opts)
}
pub fn opt_shard(maybestr: Option<~str>) -> Option<(uint,uint)> {
match maybestr {
None => None,
Some(s) => {
match s.split_iter('.').to_owned_vec() {
[a, b] => match (uint::from_str(a), uint::from_str(b)) {
(Some(a), Some(b)) => Some((a,b)),
_ => None
},
_ => None
}
}
}
}
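// Illustrative behavior: `opt_shard(Some(~"2.4"))` yields `Some((2u, 4u))`,
// while malformed input such as `"2"` or `"a.b"` yields `None`.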
#[deriving(Clone, Eq)]
pub struct BenchSamples {
ns_iter_summ: stats::Summary,
mb_s: uint
}
#[deriving(Clone, Eq)]
pub enum TestResult {
TrOk,
TrFailed,
TrIgnored,
TrMetrics(MetricMap),
TrBench(BenchSamples),
}
struct ConsoleTestState {
out: @io::Writer,
log_out: Option<@io::Writer>,
term: Option<term::Terminal>,
use_color: bool,
total: uint,
passed: uint,
failed: uint,
ignored: uint,
measured: uint,
metrics: MetricMap,
failures: ~[TestDesc]
}
impl ConsoleTestState {
pub fn new(opts: &TestOpts) -> ConsoleTestState {
let log_out = match opts.logfile {
Some(ref path) => match io::file_writer(path,
[io::Create,
io::Truncate]) {
result::Ok(w) => Some(w),
result::Err(ref s) => {
fail!("can't open output file: %s", *s)
}
},
None => None
};
let out = io::stdout();
let term = match term::Terminal::new(out) {
Err(_) => None,
Ok(t) => Some(t)
};
ConsoleTestState {
out: out,
log_out: log_out,
use_color: use_color(),
term: term,
total: 0u,
passed: 0u,
failed: 0u,
ignored: 0u,
measured: 0u,
metrics: MetricMap::new(),
failures: ~[]
}
}
pub fn write_ok(&self) {
self.write_pretty("ok", term::color::GREEN);
}
pub fn write_failed(&self) {
self.write_pretty("FAILED", term::color::RED);
}
pub fn write_ignored(&self) {
self.write_pretty("ignored", term::color::YELLOW);
}
pub fn write_metric(&self) {
self.write_pretty("metric", term::color::CYAN);
}
pub fn write_bench(&self) {
self.write_pretty("bench", term::color::CYAN);
}
pub fn write_added(&self) {
self.write_pretty("added", term::color::GREEN);
}
pub fn write_improved(&self) {
self.write_pretty("improved", term::color::GREEN);
}
pub fn write_removed(&self) {
self.write_pretty("removed", term::color::YELLOW);
}
pub fn write_regressed(&self) {
self.write_pretty("regressed", term::color::RED);
}
pub fn write_pretty(&self,
word: &str,
color: term::color::Color) {
match self.term {
None => self.out.write_str(word),
Some(ref t) => {
if self.use_color {
t.fg(color);
}
self.out.write_str(word);
if self.use_color {
t.reset();
}
}
}
}
pub fn write_run_start(&mut self, len: uint) {
self.total = len;
let noun = if len != 1 { &"tests" } else { &"test" };
self.out.write_line(fmt!("\nrunning %u %s", len, noun));
}
pub fn write_test_start(&self, test: &TestDesc) {
self.out.write_str(fmt!("test %s ... ", test.name.to_str()));
}
pub fn write_result(&self, result: &TestResult) {
match *result {
TrOk => self.write_ok(),
TrFailed => self.write_failed(),
TrIgnored => self.write_ignored(),
TrMetrics(ref mm) => {
self.write_metric();
self.out.write_str(": " + fmt_metrics(mm));
}
TrBench(ref bs) => {
self.write_bench();
self.out.write_str(": " + fmt_bench_samples(bs))
}
}
self.out.write_str(&"\n");
}
pub fn write_log(&self, test: &TestDesc, result: &TestResult) {
match self.log_out {
None => (),
Some(out) => {
out.write_line(fmt!("%s %s",
match *result {
TrOk => ~"ok",
TrFailed => ~"failed",
TrIgnored => ~"ignored",
TrMetrics(ref mm) => fmt_metrics(mm),
TrBench(ref bs) => fmt_bench_samples(bs)
}, test.name.to_str()));
}
}
}
pub fn write_failures(&self) {
self.out.write_line("\nfailures:");
let mut failures = ~[];
for f in self.failures.iter() {
failures.push(f.name.to_str());
}
sort::tim_sort(failures);
for name in failures.iter() {
self.out.write_line(fmt!(" %s", name.to_str()));
}
}
pub fn write_metric_diff(&self, diff: &MetricDiff) {
let mut noise = 0;
let mut improved = 0;
let mut regressed = 0;
let mut added = 0;
let mut removed = 0;
for (k, v) in diff.iter() {
match *v {
LikelyNoise => noise += 1,
MetricAdded => {
added += 1;
self.write_added();
self.out.write_line(fmt!(": %s", *k));
}
MetricRemoved => {
removed += 1;
self.write_removed();
self.out.write_line(fmt!(": %s", *k));
}
Improvement(pct) => {
improved += 1;
self.out.write_str(*k);
self.out.write_str(": ");
self.write_improved();
self.out.write_line(fmt!(" by %.2f%%", pct as float))
}
Regression(pct) => {
regressed += 1;
self.out.write_str(*k);
self.out.write_str(": ");
self.write_regressed();
self.out.write_line(fmt!(" by %.2f%%", pct as float))
}
}
}
self.out.write_line(fmt!("result of ratchet: %u matrics added, %u removed, \
%u improved, %u regressed, %u noise",
added, removed, improved, regressed, noise));
if regressed == 0 {
self.out.write_line("updated ratchet file")
} else {
self.out.write_line("left ratchet file untouched")
}
}
pub fn write_run_finish(&self,
ratchet_metrics: &Option<Path>,
ratchet_pct: Option<f64>) -> bool {
assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
let ratchet_success = match *ratchet_metrics {
None => true,
Some(ref pth) => {
self.out.write_str(fmt!("\nusing metrics ratchet: %s\n", pth.to_str()));
match ratchet_pct {
None => (),
Some(pct) =>
self.out.write_str(fmt!("with noise-tolerance forced to: %f%%\n",
pct as float))
}
let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
self.write_metric_diff(&diff);
ok
}
};
let test_success = self.failed == 0u;
if !test_success {
self.write_failures();
}
let success = ratchet_success && test_success;
self.out.write_str("\ntest result: ");
if success {
// There's no parallelism at this point so it's safe to use color
self.write_ok();
} else {
self.write_failed();
}
self.out.write_str(fmt!(". %u passed; %u failed; %u ignored; %u measured\n\n",
self.passed, self.failed, self.ignored, self.measured));
return success;
}
}
pub fn fmt_metrics(mm: &MetricMap) -> ~str {
let v : ~[~str] = mm.iter()
.map(|(k,v)| fmt!("%s: %f (+/- %f)",
*k,
v.value as float,
v.noise as float))
.collect();
v.connect(", ")
}
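// Illustrative output (modulo exact float formatting): a map with
// entries ("alloc", 100.0 +/- 5.0) and ("speed", 2.5 +/- 0.1)
// formats as "alloc: 100 (+/- 5), speed: 2.5 (+/- 0.1)".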
pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
if bs.mb_s != 0 {
fmt!("%u ns/iter (+/- %u) = %u MB/s",
bs.ns_iter_summ.median as uint,
(bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
bs.mb_s)
} else {
fmt!("%u ns/iter (+/- %u)",
bs.ns_iter_summ.median as uint,
(bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
}
}
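// Illustrative output: with a median of 1500 ns, a min..max spread of
// 200 ns, and `mb_s` of 10, this yields "1500 ns/iter (+/- 200) = 10 MB/s";
// when `mb_s` is 0, the trailing throughput figure is omitted.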
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts,
tests: ~[TestDescAndFn]) -> bool {
fn callback(event: &TestEvent, st: &mut ConsoleTestState) {
debug!("callback(event=%?)", event);
match (*event).clone() {
TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
TeWait(ref test) => st.write_test_start(test),
TeResult(test, result) => {
st.write_log(&test, &result);
st.write_result(&result);
match result {
TrOk => st.passed += 1,
TrIgnored => st.ignored += 1,
TrMetrics(mm) => {
let tname = test.name.to_str();
for (k,v) in mm.iter() {
st.metrics.insert_metric(tname + "." + *k,
v.value, v.noise);
}
st.measured += 1
}
TrBench(bs) => {
st.metrics.insert_metric(test.name.to_str(),
bs.ns_iter_summ.median,
bs.ns_iter_summ.max - bs.ns_iter_summ.min);
st.measured += 1
}
TrFailed => {
st.failed += 1;
st.failures.push(test);
}
}
}
}
}
let st = @mut ConsoleTestState::new(opts);
run_tests(opts, tests, |x| callback(&x, st));
match opts.save_metrics {
None => (),
Some(ref pth) => {
st.metrics.save(pth);
st.out.write_str(fmt!("\nmetrics saved to: %s", pth.to_str()));
}
}
return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
}
#[test]
fn should_sort_failures_before_printing_them() {
fn dummy() {}
let s = do io::with_str_writer |wr| {
let test_a = TestDesc {
name: StaticTestName("a"),
ignore: false,
should_fail: false
};
let test_b = TestDesc {
name: StaticTestName("b"),
ignore: false,
should_fail: false
};
let st = @ConsoleTestState {
out: wr,
log_out: None,
term: None,
use_color: false,
total: 0u,
passed: 0u,
failed: 0u,
ignored: 0u,
measured: 0u,
metrics: MetricMap::new(),
failures: ~[test_b, test_a]
};
st.write_failures();
};
let apos = s.find_str("a").unwrap();
let bpos = s.find_str("b").unwrap();
assert!(apos < bpos);
}
fn use_color() -> bool { return get_concurrency() == 1; }
#[deriving(Clone)]
enum TestEvent {
TeFiltered(~[TestDesc]),
TeWait(TestDesc),
TeResult(TestDesc, TestResult),
}
type MonitorMsg = (TestDesc, TestResult);
fn run_tests(opts: &TestOpts,
tests: ~[TestDescAndFn],
callback: &fn(e: TestEvent)) {
let filtered_tests = filter_tests(opts, tests);
let filtered_descs = filtered_tests.map(|t| t.desc.clone());
callback(TeFiltered(filtered_descs));
let (filtered_tests, filtered_benchs_and_metrics) =
do filtered_tests.partition |e| {
match e.testfn {
StaticTestFn(_) | DynTestFn(_) => true,
_ => false
}
};
// It's tempting to just spawn all the tests at once, but since we have
// many tests that run in other processes we would be making a big mess.
let concurrency = get_concurrency();
debug!("using %u test tasks", concurrency);
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (p, ch) = stream();
let ch = SharedChan::new(ch);
while pending > 0 || !remaining.is_empty() {
while pending < concurrency && !remaining.is_empty() {
let test = remaining.pop();
if concurrency == 1 {
// We are doing one test at a time so we can print the name
// of the test before we run it. Useful for debugging tests
// that hang forever.
callback(TeWait(test.desc.clone()));
}
run_test(!opts.run_tests, test, ch.clone());
pending += 1;
}
let (desc, result) = p.recv();
if concurrency != 1 {
callback(TeWait(desc.clone()));
}
callback(TeResult(desc, result));
pending -= 1;
}
// All benchmarks run at the end, in serial.
// (this includes metric fns)
for b in filtered_benchs_and_metrics.move_iter() {
callback(TeWait(b.desc.clone()));
run_test(!opts.run_benchmarks, b, ch.clone());
let (test, result) = p.recv();
callback(TeResult(test, result));
}
}
fn get_concurrency() -> uint {
use std::rt;
match os::getenv("RUST_TEST_TASKS") {
Some(s) => {
let opt_n: Option<uint> = FromStr::from_str(s);
match opt_n {
Some(n) if n > 0 => n,
_ => fail!("RUST_TEST_TASKS is `%s`, should be a positive integer.", s)
}
}
None => {
rt::util::default_sched_threads()
}
}
}
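// Illustrative: `RUST_TEST_TASKS=4 ./mytests` runs at most four test
// tasks concurrently; when unset, the scheduler default is used.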
pub fn filter_tests(
opts: &TestOpts,
tests: ~[TestDescAndFn]) -> ~[TestDescAndFn]
{
let mut filtered = tests;
// Remove tests that don't match the test filter
filtered = if opts.filter.is_none() {
filtered
} else {
let filter_str = match opts.filter {
Some(ref f) => (*f).clone(),
None => ~""
};
fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
Option<TestDescAndFn> {
if test.desc.name.to_str().contains(filter_str) {
return Some(test);
} else {
return None;
}
}
filtered.move_iter().filter_map(|x| filter_fn(x, filter_str)).collect()
};
// Maybe pull out the ignored tests and unignore them
filtered = if !opts.run_ignored {
filtered
} else {
fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
if test.desc.ignore {
let TestDescAndFn {desc, testfn} = test;
Some(TestDescAndFn {
desc: TestDesc {ignore: false, ..desc},
testfn: testfn
})
} else {
None
}
};
filtered.move_iter().filter_map(|x| filter(x)).collect()
};
// Sort the tests alphabetically
fn lteq(t1: &TestDescAndFn, t2: &TestDescAndFn) -> bool {
t1.desc.name.to_str() < t2.desc.name.to_str()
}
sort::quick_sort(filtered, lteq);
// Shard the remaining tests, if sharding requested.
match opts.test_shard {
None => filtered,
Some((a,b)) =>
filtered.move_iter().enumerate()
.filter(|&(i,_)| i % b == a)
.map(|(_,t)| t)
.to_owned_vec()
}
}
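// Illustrative sharding: with `--test-shard 1.3`, only the tests whose
// (post-sort) index `i` satisfies `i % 3 == 1` are kept.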
struct TestFuture {
test: TestDesc,
wait: @fn() -> TestResult,
}
pub fn run_test(force_ignore: bool,
test: TestDescAndFn,
monitor_ch: SharedChan<MonitorMsg>) {
let TestDescAndFn {desc, testfn} = test;
if force_ignore || desc.ignore {
monitor_ch.send((desc, TrIgnored));
return;
}
fn run_test_inner(desc: TestDesc,
monitor_ch: SharedChan<MonitorMsg>,
testfn: ~fn()) {
let testfn_cell = ::std::cell::Cell::new(testfn);
do task::spawn {
let mut result_future = None; // task::future_result(builder);
let mut task = task::task();
task.unlinked();
task.future_result(|r| { result_future = Some(r) });
task.spawn(testfn_cell.take());
let task_result = result_future.unwrap().recv();
let test_result = calc_result(&desc,
task_result == task::Success);
monitor_ch.send((desc.clone(), test_result));
}
}
match testfn {
DynBenchFn(benchfn) => {
let bs = ::test::bench::benchmark(benchfn);
monitor_ch.send((desc, TrBench(bs)));
return;
}
StaticBenchFn(benchfn) => {
let bs = ::test::bench::benchmark(benchfn);
monitor_ch.send((desc, TrBench(bs)));
return;
}
DynMetricFn(f) => {
let mut mm = MetricMap::new();
f(&mut mm);
monitor_ch.send((desc, TrMetrics(mm)));
return;
}
StaticMetricFn(f) => {
let mut mm = MetricMap::new();
f(&mut mm);
monitor_ch.send((desc, TrMetrics(mm)));
return;
}
DynTestFn(f) => run_test_inner(desc, monitor_ch, f),
StaticTestFn(f) => run_test_inner(desc, monitor_ch, || f())
}
}
fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
if task_succeeded {
if desc.should_fail { TrFailed }
else { TrOk }
} else {
if desc.should_fail { TrOk }
else { TrFailed }
}
}
impl ToJson for Metric {
fn to_json(&self) -> json::Json {
let mut map = ~TreeMap::new();
map.insert(~"value", json::Number(self.value as float));
map.insert(~"noise", json::Number(self.noise as float));
json::Object(map)
}
}
impl MetricMap {
pub fn new() -> MetricMap {
MetricMap(TreeMap::new())
}
/// Load MetricDiff from a file.
pub fn load(p: &Path) -> MetricMap {
assert!(os::path_exists(p));
let f = io::file_reader(p).unwrap();
let mut decoder = json::Decoder(json::from_reader(f).unwrap());
MetricMap(Decodable::decode(&mut decoder))
}
/// Write MetricDiff to a file.
pub fn save(&self, p: &Path) {
let f = io::file_writer(p, [io::Create, io::Truncate]).unwrap();
self.to_json().to_pretty_writer(f);
}
/// Compare against another MetricMap. Optionally compare all
/// measurements in the maps using the provided `noise_pct` as a
/// percentage of each value to consider noise. If `None`, each
/// measurement's noise threshold is independently chosen as the
/// maximum of that measurement's recorded noise quantity in either
/// map.
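    ///
    /// For example (values borrowed from the tests below): comparing a new
    /// measurement of `1100 +/- 200` against an old `1000 +/- 200` with
    /// `noise_pct = None` uses `max(200, 200) = 200` as the threshold; the
    /// delta of `100` falls within it, so the result is `LikelyNoise`.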
pub fn compare_to_old(&self, old: &MetricMap,
noise_pct: Option<f64>) -> MetricDiff {
let mut diff : MetricDiff = TreeMap::new();
for (k, vold) in old.iter() {
let r = match self.find(k) {
None => MetricRemoved,
Some(v) => {
let delta = (v.value - vold.value);
let noise = match noise_pct {
None => f64::max(vold.noise.abs(), v.noise.abs()),
Some(pct) => vold.value * pct / 100.0
};
if delta.abs() <= noise {
LikelyNoise
} else {
let pct = delta.abs() / (vold.value).max(&f64::epsilon) * 100.0;
if vold.noise < 0.0 {
// When 'noise' is negative, it means we want
// to see deltas that go up over time, and can
// only tolerate slight negative movement.
if delta < 0.0 {
Regression(pct)
} else {
Improvement(pct)
}
} else {
// When 'noise' is positive, it means we want
// to see deltas that go down over time, and
// can only tolerate slight positive movements.
if delta < 0.0 {
Improvement(pct)
} else {
Regression(pct)
}
}
}
}
};
diff.insert((*k).clone(), r);
}
for (k, _) in self.iter() {
if !diff.contains_key(k) {
diff.insert((*k).clone(), MetricAdded);
}
}
diff
}
/// Insert a named `value` (+/- `noise`) metric into the map. The value
/// must be non-negative. The `noise` indicates the uncertainty of the
/// metric, which doubles as the "noise range" of acceptable
/// pairwise-regressions on this named value, when comparing from one
/// metric to the next using `compare_to_old`.
///
/// If `noise` is positive, then it means this metric is of a value
/// you want to see grow smaller, so a change larger than `noise` in the
/// positive direction represents a regression.
///
/// If `noise` is negative, then it means this metric is of a value
/// you want to see grow larger, so a change larger than `noise` in the
/// negative direction represents a regression.
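    ///
    /// For example, `insert_metric("runtime", 1000.0, 2.0)` records a value
    /// expected to shrink over time, while a hypothetical
    /// `insert_metric("throughput", 50.0, -2.0)` would mark one expected to
    /// grow.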
pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
let m = Metric {
value: value,
noise: noise
};
self.insert(name.to_owned(), m);
}
/// Attempt to "ratchet" an external metric file. This involves loading
/// metrics from a metric file (if it exists), comparing against
/// the metrics in `self` using `compare_to_old`, and rewriting the
/// file to contain the metrics in `self` if none of the
/// `MetricChange`s are `Regression`. Returns the diff as well
/// as a boolean indicating whether the ratchet succeeded.
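    ///
    /// For example (a sketch): given a path `p` to a metrics file,
    /// `m.ratchet(&p, Some(10.0))` returns `(diff, true)` and rewrites the
    /// file only if no metric regressed beyond the 10% noise override.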
pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
let old = if os::path_exists(p) {
MetricMap::load(p)
} else {
MetricMap::new()
};
let diff : MetricDiff = self.compare_to_old(&old, pct);
let ok = do diff.iter().all() |(_, v)| {
match *v {
Regression(_) => false,
_ => true
}
};
if ok {
debug!("rewriting file '%s' with updated metrics");
self.save(p);
}
return (diff, ok)
}
}
// Benchmarking
impl BenchHarness {
/// Callback for benchmark functions to run in their body.
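    /// A typical benchmark body (hypothetical) passes the hot loop as the
    /// closure, e.g. `bh.iter(|| run_workload())`, where `run_workload`
    /// stands in for the code under measurement.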
pub fn iter(&mut self, inner:&fn()) {
self.ns_start = precise_time_ns();
let k = self.iterations;
for _ in range(0u64, k) {
inner();
}
self.ns_end = precise_time_ns();
}
pub fn ns_elapsed(&mut self) -> u64 {
if self.ns_start == 0 || self.ns_end == 0 {
0
} else {
self.ns_end - self.ns_start
}
}
pub fn ns_per_iter(&mut self) -> u64 {
if self.iterations == 0 {
0
} else {
self.ns_elapsed() / self.iterations.max(&1)
}
}
pub fn bench_n(&mut self, n: u64, f: &fn(&mut BenchHarness)) {
self.iterations = n;
debug!("running benchmark for %u iterations",
n as uint);
f(self);
}
// This is a more statistics-driven benchmark algorithm
pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> stats::Summary {
// Initial bench run to get ballpark figure.
let mut n = 1_u64;
self.bench_n(n, |x| f(x));
// Try to estimate iter count for 1ms falling back to 1m
// iterations if first run took < 1ns.
if self.ns_per_iter() == 0 {
n = 1_000_000;
} else {
n = 1_000_000 / self.ns_per_iter().max(&1);
}
let mut total_run = 0;
let samples : &mut [f64] = [0.0_f64, ..50];
loop {
let loop_start = precise_time_ns();
for p in samples.mut_iter() {
self.bench_n(n as u64, |x| f(x));
*p = self.ns_per_iter() as f64;
};
stats::winsorize(samples, 5.0);
let summ = stats::Summary::new(samples);
for p in samples.mut_iter() {
self.bench_n(5 * n as u64, |x| f(x));
*p = self.ns_per_iter() as f64;
};
stats::winsorize(samples, 5.0);
let summ5 = stats::Summary::new(samples);
debug!("%u samples, median %f, MAD=%f, MADP=%f",
samples.len(),
summ.median as float,
summ.median_abs_dev as float,
summ.median_abs_dev_pct as float);
let now = precise_time_ns();
let loop_run = now - loop_start;
            // If we've run for 100ms and seem to have converged to a
// stable median.
if loop_run > 100_000_000 &&
summ.median_abs_dev_pct < 1.0 &&
summ.median - summ5.median < summ5.median_abs_dev {
return summ5;
}
total_run += loop_run;
// Longest we ever run for is 3s.
if total_run > 3_000_000_000 {
return summ5;
}
n *= 2;
}
}
}
pub mod bench {
use test::{BenchHarness, BenchSamples};
pub fn benchmark(f: &fn(&mut BenchHarness)) -> BenchSamples {
let mut bs = BenchHarness {
iterations: 0,
ns_start: 0,
ns_end: 0,
bytes: 0
};
let ns_iter_summ = bs.auto_bench(f);
let ns_iter = (ns_iter_summ.median as u64).max(&1);
let iter_s = 1_000_000_000 / ns_iter;
let mb_s = (bs.bytes * iter_s) / 1_000_000;
BenchSamples {
ns_iter_summ: ns_iter_summ,
mb_s: mb_s as uint
}
}
}
#[cfg(test)]
mod tests {
use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
TestDesc, TestDescAndFn,
Metric, MetricMap, MetricAdded, MetricRemoved,
Improvement, Regression, LikelyNoise,
StaticTestName, DynTestName, DynTestFn};
use test::{TestOpts, run_test};
use std::either;
use std::comm::{stream, SharedChan};
use tempfile;
use std::os;
#[test]
pub fn do_not_run_ignored_tests() {
fn f() { fail!(); }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_fail: false
},
testfn: DynTestFn(|| f()),
};
let (p, ch) = stream();
let ch = SharedChan::new(ch);
run_test(false, desc, ch);
let (_, res) = p.recv();
assert!(res != TrOk);
}
#[test]
pub fn ignored_tests_result_in_ignored() {
fn f() { }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_fail: false
},
testfn: DynTestFn(|| f()),
};
let (p, ch) = stream();
let ch = SharedChan::new(ch);
run_test(false, desc, ch);
let (_, res) = p.recv();
assert_eq!(res, TrIgnored);
}
#[test]
fn test_should_fail() {
fn f() { fail!(); }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_fail: true
},
testfn: DynTestFn(|| f()),
};
let (p, ch) = stream();
let ch = SharedChan::new(ch);
run_test(false, desc, ch);
let (_, res) = p.recv();
assert_eq!(res, TrOk);
}
#[test]
fn test_should_fail_but_succeeds() {
fn f() { }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_fail: true
},
testfn: DynTestFn(|| f()),
};
let (p, ch) = stream();
let ch = SharedChan::new(ch);
run_test(false, desc, ch);
let (_, res) = p.recv();
assert_eq!(res, TrFailed);
}
#[test]
fn first_free_arg_should_be_a_filter() {
let args = ~[~"progname", ~"filter"];
let opts = match parse_opts(args) {
either::Left(o) => o,
_ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
};
assert!("filter" == opts.filter.clone().unwrap());
}
#[test]
fn parse_ignored_flag() {
let args = ~[~"progname", ~"filter", ~"--ignored"];
let opts = match parse_opts(args) {
either::Left(o) => o,
_ => fail!("Malformed arg in parse_ignored_flag")
};
assert!((opts.run_ignored));
}
#[test]
pub fn filter_for_ignored_option() {
fn dummy() {}
// When we run ignored tests the test filter should filter out all the
// unignored tests and flip the ignore flag on the rest to false
let opts = TestOpts {
filter: None,
run_ignored: true,
logfile: None,
run_tests: true,
run_benchmarks: false,
ratchet_noise_percent: None,
ratchet_metrics: None,
save_metrics: None,
test_shard: None
};
let tests = ~[
TestDescAndFn {
desc: TestDesc {
name: StaticTestName("1"),
ignore: true,
should_fail: false,
},
testfn: DynTestFn(|| {}),
},
TestDescAndFn {
desc: TestDesc {
name: StaticTestName("2"),
ignore: false,
should_fail: false
},
testfn: DynTestFn(|| {}),
},
];
let filtered = filter_tests(&opts, tests);
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].desc.name.to_str(), ~"1");
assert!(filtered[0].desc.ignore == false);
}
#[test]
pub fn sort_tests() {
let opts = TestOpts {
filter: None,
run_ignored: false,
logfile: None,
run_tests: true,
run_benchmarks: false,
ratchet_noise_percent: None,
ratchet_metrics: None,
save_metrics: None,
test_shard: None
};
let names =
~[~"sha1::test", ~"int::test_to_str", ~"int::test_pow",
~"test::do_not_run_ignored_tests",
~"test::ignored_tests_result_in_ignored",
~"test::first_free_arg_should_be_a_filter",
~"test::parse_ignored_flag", ~"test::filter_for_ignored_option",
~"test::sort_tests"];
let tests =
{
fn testfn() { }
let mut tests = ~[];
for name in names.iter() {
let test = TestDescAndFn {
desc: TestDesc {
name: DynTestName((*name).clone()),
ignore: false,
should_fail: false
},
testfn: DynTestFn(testfn),
};
tests.push(test);
}
tests
};
let filtered = filter_tests(&opts, tests);
let expected =
~[~"int::test_pow", ~"int::test_to_str", ~"sha1::test",
~"test::do_not_run_ignored_tests",
~"test::filter_for_ignored_option",
~"test::first_free_arg_should_be_a_filter",
~"test::ignored_tests_result_in_ignored",
~"test::parse_ignored_flag",
~"test::sort_tests"];
for (a, b) in expected.iter().zip(filtered.iter()) {
assert!(*a == b.desc.name.to_str());
}
}
#[test]
pub fn test_metricmap_compare() {
let mut m1 = MetricMap::new();
let mut m2 = MetricMap::new();
m1.insert_metric("in-both-noise", 1000.0, 200.0);
m2.insert_metric("in-both-noise", 1100.0, 200.0);
m1.insert_metric("in-first-noise", 1000.0, 2.0);
m2.insert_metric("in-second-noise", 1000.0, 2.0);
m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
let diff1 = m2.compare_to_old(&m1, None);
assert_eq!(*(diff1.find(&~"in-both-noise").unwrap()), LikelyNoise);
assert_eq!(*(diff1.find(&~"in-first-noise").unwrap()), MetricRemoved);
assert_eq!(*(diff1.find(&~"in-second-noise").unwrap()), MetricAdded);
assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").unwrap()),
Regression(100.0));
assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").unwrap()),
Improvement(50.0));
assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").unwrap()),
Regression(50.0));
assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").unwrap()),
Improvement(100.0));
assert_eq!(diff1.len(), 7);
let diff2 = m2.compare_to_old(&m1, Some(200.0));
assert_eq!(*(diff2.find(&~"in-both-noise").unwrap()), LikelyNoise);
assert_eq!(*(diff2.find(&~"in-first-noise").unwrap()), MetricRemoved);
assert_eq!(*(diff2.find(&~"in-second-noise").unwrap()), MetricAdded);
assert_eq!(*(diff2.find(&~"in-both-want-downwards-but-regressed").unwrap()), LikelyNoise);
assert_eq!(*(diff2.find(&~"in-both-want-downwards-and-improved").unwrap()), LikelyNoise);
assert_eq!(*(diff2.find(&~"in-both-want-upwards-but-regressed").unwrap()), LikelyNoise);
assert_eq!(*(diff2.find(&~"in-both-want-upwards-and-improved").unwrap()), LikelyNoise);
assert_eq!(diff2.len(), 7);
}
    #[test]
    pub fn ratchet_test() {
let dpth = tempfile::mkdtemp(&os::tmpdir(),
"test-ratchet").expect("missing test for ratchet");
let pth = dpth.push("ratchet.json");
let mut m1 = MetricMap::new();
m1.insert_metric("runtime", 1000.0, 2.0);
m1.insert_metric("throughput", 50.0, 2.0);
let mut m2 = MetricMap::new();
m2.insert_metric("runtime", 1100.0, 2.0);
m2.insert_metric("throughput", 50.0, 2.0);
m1.save(&pth);
// Ask for a ratchet that should fail to advance.
let (diff1, ok1) = m2.ratchet(&pth, None);
assert_eq!(ok1, false);
assert_eq!(diff1.len(), 2);
assert_eq!(*(diff1.find(&~"runtime").unwrap()), Regression(10.0));
assert_eq!(*(diff1.find(&~"throughput").unwrap()), LikelyNoise);
// Check that it was not rewritten.
let m3 = MetricMap::load(&pth);
assert_eq!(m3.len(), 2);
assert_eq!(*(m3.find(&~"runtime").unwrap()), Metric { value: 1000.0, noise: 2.0 });
assert_eq!(*(m3.find(&~"throughput").unwrap()), Metric { value: 50.0, noise: 2.0 });
// Ask for a ratchet with an explicit noise-percentage override,
// that should advance.
let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
assert_eq!(ok2, true);
assert_eq!(diff2.len(), 2);
assert_eq!(*(diff2.find(&~"runtime").unwrap()), LikelyNoise);
assert_eq!(*(diff2.find(&~"throughput").unwrap()), LikelyNoise);
// Check that it was rewritten.
let m4 = MetricMap::load(&pth);
assert_eq!(m4.len(), 2);
assert_eq!(*(m4.find(&~"runtime").unwrap()), Metric { value: 1100.0, noise: 2.0 });
assert_eq!(*(m4.find(&~"throughput").unwrap()), Metric { value: 50.0, noise: 2.0 });
os::remove_dir_recursive(&dpth);
}
}
| 32.440608 | 98 | 0.521182 |
e6c138ee6c3518c50bacba2cb12c9a48640e4af0 | 6,232 | use clap::value_t_or_exit;
use noria_server::{Builder, ReuseConfigType, ZookeeperAuthority};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
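// Example invocation (hypothetical deployment name and addresses):
//
//     noria-server --deployment myapp -a 127.0.0.1 -z 127.0.0.1:2181 --shards 4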
fn main() {
use clap::{App, Arg};
let matches = App::new("noria-server")
.version("0.0.1")
.arg(
Arg::with_name("address")
.short("a")
.long("address")
.takes_value(true)
.default_value("127.0.0.1")
.help("IP address to listen on"),
)
.arg(
Arg::with_name("deployment")
.long("deployment")
.required(true)
.takes_value(true)
.help("Noria deployment ID."),
)
.arg(
Arg::with_name("durability")
.long("durability")
.takes_value(true)
.possible_values(&["persistent", "ephemeral", "memory"])
.default_value("persistent")
.help("How to maintain base logs."),
)
.arg(
Arg::with_name("persistence-threads")
.long("persistence-threads")
.takes_value(true)
.default_value("1")
.help("Number of background threads used by RocksDB."),
)
.arg(
Arg::with_name("flush-timeout")
.long("flush-timeout")
.takes_value(true)
.default_value("100000")
.help("Time to wait before processing a merged packet, in nanoseconds."),
)
.arg(
Arg::with_name("log-dir")
.long("log-dir")
.takes_value(true)
.help("Absolute path to the directory where the log files will be written."),
)
.arg(
Arg::with_name("zookeeper")
.short("z")
.long("zookeeper")
.takes_value(true)
.default_value("127.0.0.1:2181")
.help("Zookeeper connection info."),
)
.arg(
Arg::with_name("memory")
.short("m")
.long("memory")
.takes_value(true)
.default_value("0")
.help("Memory, in bytes, available for partially materialized state [0 = unlimited]."),
)
.arg(
Arg::with_name("memory_check_freq")
.long("memory-check-every")
.takes_value(true)
.default_value("10")
.requires("memory")
.help("Frequency at which to check the state size against the memory limit [in milliseconds]."),
)
.arg(
Arg::with_name("noreuse")
.long("no-reuse")
.help("Disable reuse"),
)
.arg(
Arg::with_name("nopartial")
.long("no-partial")
.help("Disable partial"),
)
.arg(
Arg::with_name("quorum")
.short("q")
.long("quorum")
.takes_value(true)
.default_value("1")
.help("Number of workers to wait for before starting (including this one)."),
)
.arg(
Arg::with_name("shards")
.long("shards")
.takes_value(true)
.default_value("0")
.help("Shard the graph this many ways (0 = disable sharding)."),
)
.arg(
Arg::with_name("verbose")
.short("v")
.long("verbose")
.takes_value(false)
.help("Verbose log output."),
)
.get_matches();
let log = noria_server::logger_pls();
let durability = matches.value_of("durability").unwrap();
let listen_addr = matches.value_of("address").unwrap().parse().unwrap();
let zookeeper_addr = matches.value_of("zookeeper").unwrap();
let memory = value_t_or_exit!(matches, "memory", usize);
let memory_check_freq = value_t_or_exit!(matches, "memory_check_freq", u64);
let quorum = value_t_or_exit!(matches, "quorum", usize);
let persistence_threads = value_t_or_exit!(matches, "persistence-threads", i32);
let flush_ns = value_t_or_exit!(matches, "flush-timeout", u32);
let sharding = match value_t_or_exit!(matches, "shards", usize) {
0 => None,
x => Some(x),
};
let verbose = matches.is_present("verbose");
let deployment_name = matches.value_of("deployment").unwrap();
let mut authority =
ZookeeperAuthority::new(&format!("{}/{}", zookeeper_addr, deployment_name)).unwrap();
let mut builder = Builder::default();
builder.set_listen_addr(listen_addr);
if memory > 0 {
builder.set_memory_limit(memory, Duration::from_millis(memory_check_freq));
}
builder.set_sharding(sharding);
builder.set_quorum(quorum);
if matches.is_present("nopartial") {
builder.disable_partial();
}
if matches.is_present("noreuse") {
builder.set_reuse(ReuseConfigType::NoReuse);
}
let mut persistence_params = noria_server::PersistenceParameters::new(
match durability {
"persistent" => noria_server::DurabilityMode::Permanent,
"ephemeral" => noria_server::DurabilityMode::DeleteOnExit,
"memory" => noria_server::DurabilityMode::MemoryOnly,
_ => unreachable!(),
},
Duration::new(0, flush_ns),
Some(deployment_name.to_string()),
persistence_threads,
);
    persistence_params.log_dir = matches.value_of("log-dir").map(PathBuf::from);
builder.set_persistence(persistence_params);
if verbose {
authority.log_with(log.clone());
builder.log_with(log);
}
let mut rt = tokio::runtime::Builder::new();
rt.enable_all();
rt.threaded_scheduler();
rt.thread_name("worker");
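    // Note: no core-thread override is currently wired up, so this branch is
    // a placeholder that never fires.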
if let Some(threads) = None {
rt.core_threads(threads);
}
let mut rt = rt.build().unwrap();
let (_server, done) = rt.block_on(builder.start(Arc::new(authority))).unwrap();
rt.block_on(done);
drop(rt);
}
| 35.011236 | 112 | 0.532413 |
62c4b596318243eb758c0b6dbb53191d3387ac4b | 6,010 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::block_storage::{BlockReader, BlockStore};
use consensus_types::{
block::{block_test_utils::certificate_for_genesis, Block},
common::Round,
executed_block::ExecutedBlock,
quorum_cert::QuorumCert,
sync_info::SyncInfo,
};
use diem_crypto::HashValue;
use diem_logger::Level;
use diem_types::{ledger_info::LedgerInfo, validator_signer::ValidatorSigner};
use std::{future::Future, sync::Arc, time::Duration};
use tokio::{runtime, time::timeout};
mod mock_state_computer;
mod mock_storage;
#[cfg(any(test, feature = "fuzzing"))]
mod mock_txn_manager;
use crate::util::mock_time_service::SimulatedTimeService;
use consensus_types::{block::block_test_utils::gen_test_certificate, common::Payload};
use diem_types::block_info::BlockInfo;
pub use mock_state_computer::{EmptyStateComputer, MockStateComputer};
pub use mock_storage::{EmptyStorage, MockSharedStorage, MockStorage};
pub use mock_txn_manager::MockTransactionManager;
pub const TEST_TIMEOUT: Duration = Duration::from_secs(60);
pub fn build_simple_tree() -> (Vec<Arc<ExecutedBlock>>, Arc<BlockStore>) {
let mut inserter = TreeInserter::default();
let block_store = inserter.block_store();
let genesis = block_store.root();
let genesis_block_id = genesis.id();
let genesis_block = block_store
.get_block(genesis_block_id)
.expect("genesis block must exist");
assert_eq!(block_store.len(), 1);
assert_eq!(block_store.child_links(), block_store.len() - 1);
assert_eq!(block_store.block_exists(genesis_block.id()), true);
// ╭--> A1--> A2--> A3
// Genesis--> B1--> B2
// ╰--> C1
let a1 = inserter.insert_block_with_qc(certificate_for_genesis(), &genesis_block, 1);
let a2 = inserter.insert_block(&a1, 2, None);
let a3 = inserter.insert_block(&a2, 3, Some(genesis.block_info()));
let b1 = inserter.insert_block_with_qc(certificate_for_genesis(), &genesis_block, 4);
let b2 = inserter.insert_block(&b1, 5, None);
let c1 = inserter.insert_block(&b1, 6, None);
assert_eq!(block_store.len(), 7);
assert_eq!(block_store.child_links(), block_store.len() - 1);
(vec![genesis_block, a1, a2, a3, b1, b2, c1], block_store)
}
pub fn build_empty_tree() -> Arc<BlockStore> {
let (initial_data, storage) = EmptyStorage::start_for_testing();
Arc::new(BlockStore::new(
storage,
initial_data,
Arc::new(EmptyStateComputer),
10, // max pruned blocks in mem
Arc::new(SimulatedTimeService::new()),
))
}
pub struct TreeInserter {
signer: ValidatorSigner,
block_store: Arc<BlockStore>,
}
impl TreeInserter {
pub fn default() -> Self {
Self::new(ValidatorSigner::random(None))
}
pub fn new(signer: ValidatorSigner) -> Self {
let block_store = build_empty_tree();
Self {
signer,
block_store,
}
}
pub fn new_with_store(signer: ValidatorSigner, block_store: Arc<BlockStore>) -> Self {
Self {
signer,
block_store,
}
}
pub fn signer(&self) -> &ValidatorSigner {
&self.signer
}
pub fn block_store(&self) -> Arc<BlockStore> {
Arc::clone(&self.block_store)
}
    /// This function generates a placeholder QC for a block's parent that is signed by the
    /// single signer kept by the block store. If a more sophisticated QC is required, use
    /// `insert_block_with_qc`.
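    ///
    /// A typical call chain (mirroring `build_simple_tree` above):
    /// `let a1 = inserter.insert_block_with_qc(certificate_for_genesis(), &genesis_block, 1);`
    /// followed by `let a2 = inserter.insert_block(&a1, 2, None);`.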
pub fn insert_block(
&mut self,
parent: &ExecutedBlock,
round: Round,
committed_block: Option<BlockInfo>,
) -> Arc<ExecutedBlock> {
// Node must carry a QC to its parent
let parent_qc = self.create_qc_for_block(parent, committed_block);
self.insert_block_with_qc(parent_qc, parent, round)
}
pub fn insert_block_with_qc(
&mut self,
parent_qc: QuorumCert,
parent: &ExecutedBlock,
round: Round,
) -> Arc<ExecutedBlock> {
self.block_store
.insert_block_with_qc(self.create_block_with_qc(
parent_qc,
parent.timestamp_usecs() + 1,
round,
vec![],
))
.unwrap()
}
pub fn create_qc_for_block(
&self,
block: &ExecutedBlock,
committed_block: Option<BlockInfo>,
) -> QuorumCert {
gen_test_certificate(
vec![&self.signer],
block.block_info(),
block.quorum_cert().certified_block().clone(),
committed_block,
)
}
pub fn insert_qc_for_block(&self, block: &ExecutedBlock, committed_block: Option<BlockInfo>) {
self.block_store
.insert_single_quorum_cert(self.create_qc_for_block(block, committed_block))
.unwrap()
}
pub fn create_block_with_qc(
&self,
parent_qc: QuorumCert,
timestamp_usecs: u64,
round: Round,
payload: Payload,
) -> Block {
Block::new_proposal(payload, round, timestamp_usecs, parent_qc, &self.signer)
}
}
pub fn placeholder_ledger_info() -> LedgerInfo {
LedgerInfo::new(BlockInfo::empty(), HashValue::zero())
}
pub fn placeholder_sync_info() -> SyncInfo {
SyncInfo::new(certificate_for_genesis(), certificate_for_genesis(), None)
}
fn nocapture() -> bool {
::std::env::args().any(|arg| arg == "--nocapture")
}
pub fn consensus_runtime() -> runtime::Runtime {
if nocapture() {
::diem_logger::Logger::new().level(Level::Debug).init();
}
runtime::Builder::new()
.threaded_scheduler()
.enable_all()
.build()
.expect("Failed to create Tokio runtime!")
}
pub fn timed_block_on<F>(runtime: &mut runtime::Runtime, f: F) -> <F as Future>::Output
where
F: Future,
{
runtime
.block_on(async { timeout(TEST_TIMEOUT, f).await })
.expect("test timed out")
}
| 30.663265 | 100 | 0.641265 |
039715822a4c1ffa807701c185284de3be046149 | 2,550 | //! Process/thread subsystem.
//!
//! The subsystem implements process/thread-related system calls, which are
//! mainly based on the three concepts below:
//!
//! * [`Process`]. A process has a parent and may have multiple child processes and
//!   can own multiple threads.
//! * [`Thread`]. A thread belongs to one and only one process and owns a set
//!   of OS resources, e.g., virtual memory, file tables, etc.
//! * [`Task`]. A task belongs to one and only one thread, for which it deals with
//!   the low-level details about thread execution.
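//!
//! In terms of the aliases defined at the bottom of this module, a
//! `ProcessRef` owns multiple `ThreadRef`s, and each `Thread` drives exactly
//! one `Task`.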
use crate::fs::{FileRef, FileTable, FsView};
use crate::misc::ResourceLimits;
use crate::prelude::*;
use crate::sched::SchedAgent;
use crate::signal::{SigDispositions, SigQueues};
use crate::vm::ProcessVM;
use self::pgrp::ProcessGrp;
use self::process::{ProcessBuilder, ProcessInner};
use self::thread::{ThreadBuilder, ThreadId, ThreadInner};
use self::wait::{WaitQueue, Waiter};
pub use self::do_exit::handle_force_exit;
pub use self::do_futex::{futex_wait, futex_wake};
pub use self::do_robust_list::RobustListHead;
pub use self::do_spawn::do_spawn_without_exec;
pub use self::do_vfork::do_vfork;
pub use self::do_wait4::idle_reap_zombie_children;
pub use self::process::{Process, ProcessFilter, ProcessStatus, IDLE};
pub use self::spawn_attribute::posix_spawnattr_t;
pub use self::spawn_attribute::SpawnAttr;
pub use self::syscalls::*;
pub use self::task::Task;
pub use self::term_status::{ForcedExitStatus, TermStatus};
pub use self::thread::{Thread, ThreadStatus};
mod do_arch_prctl;
mod do_clone;
mod do_exec;
mod do_exit;
mod do_futex;
mod do_getpid;
mod do_robust_list;
mod do_set_tid_address;
mod do_spawn;
mod do_vfork;
mod do_wait4;
mod pgrp;
mod prctl;
mod process;
mod spawn_attribute;
mod syscalls;
mod term_status;
mod thread;
mod wait;
pub mod current;
pub mod elf_file;
pub mod table;
pub mod task;
// TODO: need to separate C's version pid_t with Rust version Pid.
// pid_t must be signed as negative values may have special meaning
// (check wait4 and kill for examples), while Pid should be a
// non-negative value.
#[allow(non_camel_case_types)]
pub type pid_t = u32;
#[allow(non_camel_case_types)]
pub type uid_t = u32;
pub type ProcessRef = Arc<Process>;
pub type ThreadRef = Arc<Thread>;
pub type FileTableRef = Arc<SgxMutex<FileTable>>;
pub type ProcessVMRef = Arc<ProcessVM>;
pub type FsViewRef = Arc<RwLock<FsView>>;
pub type SchedAgentRef = Arc<SgxMutex<SchedAgent>>;
pub type ResourceLimitsRef = Arc<SgxMutex<ResourceLimits>>;
pub type ProcessGrpRef = Arc<ProcessGrp>;
| 31.875 | 83 | 0.756863 |
de42cfd33031e765c113b6ef7b02ae0bb4285dca | 3,364 | use crate::stream::{Fuse, FuturesUnordered, StreamExt};
use futures_core::future::Future;
use futures_core::stream::{Stream, FusedStream};
use futures_core::task::{Context, Poll};
#[cfg(feature = "sink")]
use futures_sink::Sink;
use pin_project_lite::pin_project;
use core::fmt;
use core::pin::Pin;
pin_project! {
/// Stream for the [`buffer_unordered`](super::StreamExt::buffer_unordered)
/// method.
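    ///
    /// A usage sketch (via the public `buffer_unordered` combinator):
    ///
    /// ```
    /// # futures::executor::block_on(async {
    /// use futures::future;
    /// use futures::stream::{self, StreamExt};
    ///
    /// let results: Vec<i32> = stream::iter(vec![future::ready(1), future::ready(2)])
    ///     .buffer_unordered(2) // poll at most two futures concurrently
    ///     .collect()
    ///     .await;
    /// assert_eq!(results.len(), 2);
    /// # });
    /// ```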
#[must_use = "streams do nothing unless polled"]
pub struct BufferUnordered<St>
where
St: Stream,
{
#[pin]
stream: Fuse<St>,
in_progress_queue: FuturesUnordered<St::Item>,
max: usize,
}
}
impl<St> fmt::Debug for BufferUnordered<St>
where
St: Stream + fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BufferUnordered")
.field("stream", &self.stream)
.field("in_progress_queue", &self.in_progress_queue)
.field("max", &self.max)
.finish()
}
}
impl<St> BufferUnordered<St>
where
St: Stream,
St::Item: Future,
{
pub(super) fn new(stream: St, n: usize) -> Self
where
St: Stream,
St::Item: Future,
{
Self {
stream: super::Fuse::new(stream),
in_progress_queue: FuturesUnordered::new(),
max: n,
}
}
delegate_access_inner!(stream, St, (.));
}
impl<St> Stream for BufferUnordered<St>
where
St: Stream,
St::Item: Future,
{
type Item = <St::Item as Future>::Output;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
let mut this = self.project();
// First up, try to spawn off as many futures as possible by filling up
// our queue of futures.
while this.in_progress_queue.len() < *this.max {
match this.stream.as_mut().poll_next(cx) {
Poll::Ready(Some(fut)) => this.in_progress_queue.push(fut),
Poll::Ready(None) | Poll::Pending => break,
}
}
// Attempt to pull the next value from the in_progress_queue
match this.in_progress_queue.poll_next_unpin(cx) {
x @ Poll::Pending | x @ Poll::Ready(Some(_)) => return x,
Poll::Ready(None) => {}
}
// If more values are still coming from the stream, we're not done yet
if this.stream.is_done() {
Poll::Ready(None)
} else {
Poll::Pending
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let queue_len = self.in_progress_queue.len();
let (lower, upper) = self.stream.size_hint();
let lower = lower.saturating_add(queue_len);
let upper = match upper {
Some(x) => x.checked_add(queue_len),
None => None,
};
(lower, upper)
}
}
impl<St> FusedStream for BufferUnordered<St>
where
St: Stream,
St::Item: Future,
{
fn is_terminated(&self) -> bool {
self.in_progress_queue.is_terminated() && self.stream.is_terminated()
}
}
// Forwarding impl of Sink from the underlying stream
#[cfg(feature = "sink")]
impl<S, Item> Sink<Item> for BufferUnordered<S>
where
S: Stream + Sink<Item>,
S::Item: Future,
{
type Error = S::Error;
delegate_sink!(stream, Item);
}
| 26.28125 | 79 | 0.580856 |
5bd746face02422f883ea0c2d2ada333e9b31c1a | 611 | //! Verification of the mask reduction API for `x86`/`x86_64`+`AVX2`
use packed_simd::*;
use stdarch_test::assert_instr;
macro_rules! verify {
($id:ident => $instr:tt) => {
verify_mask!($id["avx2"] => $instr);
}
}
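// For example, `verify!(m8x16 => vpmovmskb)` asserts (via `assert_instr`)
// that reducing an `m8x16` mask compiles down to a `vpmovmskb` instruction.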
// 128-bit wide:
verify!(m8x16 => vpmovmskb);
verify!(m16x8 => vpmovmskb);
verify!(m32x4 => vmovmskps);
verify!(m64x2 => vmovmskpd);
// FIXME: verify!(m128x1 => vmovmskpd);
// 256-bit wide:
verify!(m8x32 => vpmovmskb);
verify!(m16x16 => vpmovmskb);
verify!(m32x8 => vmovmskps);
verify!(m64x4 => vmovmskpd);
// FIXME: verify!(m128x2 => vmovmskpd);
// FIXME: 512-bit wide masks
| 22.62963 | 68 | 0.648118 |
2f114108b2a13c0d66bfbcb08a136f851ac35162 | 13,774 | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
//! This file defines transaction store APIs that are related to committed signed transactions.
use crate::{
change_set::ChangeSet,
errors::AptosDbError,
schema::{
transaction::TransactionSchema, transaction_by_account::TransactionByAccountSchema,
transaction_by_hash::TransactionByHashSchema, write_set::WriteSetSchema,
},
transaction_accumulator::TransactionAccumulatorSchema,
transaction_info::TransactionInfoSchema,
};
use anyhow::{ensure, format_err, Result};
use aptos_crypto::{hash::CryptoHash, HashValue};
use aptos_types::{
account_address::AccountAddress,
block_metadata::BlockMetadata,
proof::position::Position,
transaction::{Transaction, Version},
write_set::WriteSet,
};
use schemadb::{ReadOptions, SchemaBatch, SchemaIterator, DB};
use std::sync::Arc;
#[derive(Debug)]
pub(crate) struct TransactionStore {
db: Arc<DB>,
}
impl TransactionStore {
pub fn new(db: Arc<DB>) -> Self {
Self { db }
}
/// Gets the version of a transaction by the sender `address` and `sequence_number`.
pub fn get_account_transaction_version(
&self,
address: AccountAddress,
sequence_number: u64,
ledger_version: Version,
) -> Result<Option<Version>> {
if let Some(version) = self
.db
.get::<TransactionByAccountSchema>(&(address, sequence_number))?
{
if version <= ledger_version {
return Ok(Some(version));
}
}
Ok(None)
}
/// Gets the version of a transaction by its hash.
pub fn get_transaction_version_by_hash(
&self,
hash: &HashValue,
ledger_version: Version,
) -> Result<Option<Version>> {
Ok(match self.db.get::<TransactionByHashSchema>(hash)? {
Some(version) if version <= ledger_version => Some(version),
_ => None,
})
}
/// Gets an iterator that yields `(sequence_number, version)` for each
/// transaction sent by an account, starting at `start_seq_num`, and returning
/// at most `num_versions` results with `version <= ledger_version`.
///
/// Guarantees that the returned sequence numbers are sequential, i.e.,
/// `seq_num_{i} + 1 = seq_num_{i+1}`.
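    ///
    /// For example (a sketch), `get_account_transaction_version_iter(addr, 0, 10, ledger_version)`
    /// yields at most ten `(seq_num, version)` pairs for `addr`, starting at
    /// sequence number 0.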
pub fn get_account_transaction_version_iter(
&self,
address: AccountAddress,
start_seq_num: u64,
num_versions: u64,
ledger_version: Version,
) -> Result<AccountTransactionVersionIter> {
let mut iter = self
.db
.iter::<TransactionByAccountSchema>(ReadOptions::default())?;
iter.seek(&(address, start_seq_num))?;
Ok(AccountTransactionVersionIter {
inner: iter,
address,
expected_next_seq_num: start_seq_num,
end_seq_num: start_seq_num
.checked_add(num_versions)
.ok_or_else(|| format_err!("too many transactions requested"))?,
prev_version: None,
ledger_version,
})
}
/// Get signed transaction given `version`
pub fn get_transaction(&self, version: Version) -> Result<Transaction> {
self.db
.get::<TransactionSchema>(&version)?
.ok_or_else(|| AptosDbError::NotFound(format!("Txn {}", version)).into())
}
/// Gets an iterator that yields `num_transactions` transactions starting from `start_version`.
pub fn get_transaction_iter(
&self,
start_version: Version,
num_transactions: usize,
) -> Result<TransactionIter> {
let mut iter = self.db.iter::<TransactionSchema>(ReadOptions::default())?;
iter.seek(&start_version)?;
Ok(TransactionIter {
inner: iter,
expected_next_version: start_version,
end_version: start_version
.checked_add(num_transactions as u64)
.ok_or_else(|| format_err!("too many transactions requested"))?,
})
}
/// Get the first version that txn starts existent.
pub fn get_first_txn_version(&self) -> Result<Option<Version>> {
let mut iter = self.db.iter::<TransactionSchema>(Default::default())?;
iter.seek_to_first();
iter.next().map(|res| res.map(|(v, _)| v)).transpose()
}
/// Returns the block metadata carried on the block metadata transaction at or preceding
/// `version`, together with the version of the block metadata transaction.
/// Returns None if there's no such transaction at or preceding `version` (it's likely the genesis
/// version 0).
pub fn get_block_metadata(&self, version: Version) -> Result<Option<(Version, BlockMetadata)>> {
// Must be larger than a block size, otherwise a NotFound error will be raised wrongly.
const MAX_VERSIONS_TO_SEARCH: usize = 1000 * 100;
// Linear search via `DB::rev_iter()` here, NOT expecting performance hit, due to the fact
// that the iterator caches data block and that there are limited number of transactions in
// each block.
let mut iter = self.db.rev_iter::<TransactionSchema>(Default::default())?;
iter.seek(&version)?;
for res in iter.take(MAX_VERSIONS_TO_SEARCH) {
let (v, txn) = res?;
if let Transaction::BlockMetadata(block_meta) = txn {
return Ok(Some((v, block_meta)));
} else if v == 0 {
return Ok(None);
}
}
Err(AptosDbError::NotFound(format!("BlockMetadata preceding version {}", version)).into())
}
/// Save signed transaction at `version`
pub fn put_transaction(
&self,
version: Version,
transaction: &Transaction,
cs: &mut ChangeSet,
) -> Result<()> {
if let Transaction::UserTransaction(txn) = transaction {
cs.batch.put::<TransactionByAccountSchema>(
&(txn.sender(), txn.sequence_number()),
&version,
)?;
}
cs.batch
.put::<TransactionByHashSchema>(&transaction.hash(), &version)?;
cs.batch.put::<TransactionSchema>(&version, transaction)?;
Ok(())
}
/// Get executed transaction vm output given `version`
pub fn get_write_set(&self, version: Version) -> Result<WriteSet> {
self.db.get::<WriteSetSchema>(&version)?.ok_or_else(|| {
AptosDbError::NotFound(format!("WriteSet at version {}", version)).into()
})
}
/// Get the first version that write set starts existent.
pub fn get_first_write_set_version(&self) -> Result<Option<Version>> {
let mut iter = self.db.iter::<WriteSetSchema>(Default::default())?;
iter.seek_to_first();
iter.next().map(|res| res.map(|(v, _)| v)).transpose()
}
/// Save executed transaction vm output given `version`
pub fn put_write_set(
&self,
version: Version,
write_set: &WriteSet,
cs: &mut ChangeSet,
) -> Result<()> {
cs.batch.put::<WriteSetSchema>(&version, write_set)
}
/// Prune the transaction by hash store given a list of transaction
pub fn prune_transaction_by_hash(
&self,
transactions: &[Transaction],
db_batch: &mut SchemaBatch,
) -> anyhow::Result<()> {
for transaction in transactions {
db_batch.delete::<TransactionByHashSchema>(&transaction.hash())?;
}
Ok(())
}
/// Prune the transaction by account store given a list of transaction
pub fn prune_transaction_by_account(
&self,
transactions: &[Transaction],
db_batch: &mut SchemaBatch,
) -> anyhow::Result<()> {
for transaction in transactions {
if let Transaction::UserTransaction(txn) = transaction {
db_batch
.delete::<TransactionByAccountSchema>(&(txn.sender(), txn.sequence_number()))?;
}
}
Ok(())
}
/// Prune the transaction schema store between a range of version in [begin, end)
pub fn prune_transaction_schema(
&self,
begin: Version,
end: Version,
db_batch: &mut SchemaBatch,
) -> anyhow::Result<()> {
db_batch.delete_range::<TransactionSchema>(&begin, &end)?;
Ok(())
}
/// Prune the transaction schema store between a range of version in [begin, end)
pub fn prune_transaction_info_schema(
&self,
begin: Version,
end: Version,
db_batch: &mut SchemaBatch,
) -> anyhow::Result<()> {
db_batch.delete_range::<TransactionInfoSchema>(&begin, &end)?;
Ok(())
}
/// Prune the transaction schema store between a range of version in [begin, end).
pub fn prune_transaction_accumulator(
&self,
begin: Version,
end: Version,
db_batch: &mut SchemaBatch,
) -> anyhow::Result<()> {
let begin_position = self.get_min_proof_node(begin);
let end_position = self.get_min_proof_node(end);
db_batch.delete_range::<TransactionAccumulatorSchema>(&begin_position, &end_position)?;
Ok(())
}
    /// Returns the minimum position node needed to be included in the proof of the leaf index.
    /// This is the left child of the root if the leaf index is nonzero, and the root position
    /// of leaf 0 otherwise.
pub fn get_min_proof_node(&self, leaf_index: u64) -> Position {
if leaf_index > 0 {
Position::root_from_leaf_index(leaf_index).left_child()
} else {
// Handle this as a special case when least_readable_version is 0
Position::root_from_leaf_index(0)
}
}
}
pub struct TransactionIter<'a> {
inner: SchemaIterator<'a, TransactionSchema>,
expected_next_version: Version,
end_version: Version,
}
impl<'a> TransactionIter<'a> {
fn next_impl(&mut self) -> Result<Option<Transaction>> {
if self.expected_next_version >= self.end_version {
return Ok(None);
}
let ret = match self.inner.next().transpose()? {
Some((version, transaction)) => {
ensure!(
version == self.expected_next_version,
"Transaction versions are not consecutive.",
);
self.expected_next_version += 1;
Some(transaction)
}
None => None,
};
Ok(ret)
}
}
impl<'a> Iterator for TransactionIter<'a> {
type Item = Result<Transaction>;
fn next(&mut self) -> Option<Self::Item> {
self.next_impl().transpose()
}
}
// TODO(philiphayes): this will need to change to support CRSNs
// (Conflict-Resistant Sequence Numbers)[https://github.com/diem/dip/blob/main/dips/dip-168.md].
//
// It depends on the implementation details, but we'll probably index by _requested_
// transaction sequence number rather than committed account sequence number.
// This would mean the property: `seq_num_{i+1} == seq_num_{i} + 1` would no longer
// be guaranteed and the check should be removed.
//
// This index would also no longer iterate over an account's transactions in
// committed order, meaning the outer method would need to overread by
// `CRSN_WINDOW_SIZE`, sort by version, and take only `limit` entries to get
// at most `limit` transactions in committed order. Alternatively, add another
// index for scanning an accounts transactions in committed order, e.g.,
// `(AccountAddress, Version) -> SeqNum`.
pub struct AccountTransactionVersionIter<'a> {
inner: SchemaIterator<'a, TransactionByAccountSchema>,
address: AccountAddress,
expected_next_seq_num: u64,
end_seq_num: u64,
prev_version: Option<Version>,
ledger_version: Version,
}
impl<'a> AccountTransactionVersionIter<'a> {
fn next_impl(&mut self) -> Result<Option<(u64, Version)>> {
if self.expected_next_seq_num >= self.end_seq_num {
return Ok(None);
}
Ok(match self.inner.next().transpose()? {
Some(((address, seq_num), version)) => {
// No more transactions sent by this account.
if address != self.address {
return Ok(None);
}
// Ensure seq_num_{i+1} == seq_num_{i} + 1
ensure!(
seq_num == self.expected_next_seq_num,
"DB corruption: account transactions sequence numbers are not contiguous: \
actual: {}, expected: {}",
seq_num,
self.expected_next_seq_num,
);
// Ensure version_{i+1} > version_{i}
if let Some(prev_version) = self.prev_version {
ensure!(
prev_version < version,
"DB corruption: account transaction versions are not strictly increasing: \
previous version: {}, current version: {}",
prev_version,
version,
);
}
// No more transactions (in this view of the ledger).
if version > self.ledger_version {
return Ok(None);
}
self.expected_next_seq_num += 1;
self.prev_version = Some(version);
Some((seq_num, version))
}
None => None,
})
}
}
impl<'a> Iterator for AccountTransactionVersionIter<'a> {
type Item = Result<(u64, Version)>;
fn next(&mut self) -> Option<Self::Item> {
self.next_impl().transpose()
}
}
#[cfg(test)]
mod test;
| 35.5 | 102 | 0.601278 |
0115dd5c730207782f85203d014752c2936abe63 | 608 | use async_channel::SendError;
use fluvio_types::PartitionError;
use fluvio_storage::StorageError;
use fluvio_socket::SocketError;
#[derive(Debug, thiserror::Error)]
pub enum InternalServerError {
#[error("Storage error")]
Storage(#[from] StorageError),
#[error("Partition error")]
Partition(#[from] PartitionError),
#[error("Socket error")]
Socket(#[from] SocketError),
#[error("Channel send error")]
Send(String),
}
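/// Converts a failed channel send into `InternalServerError::Send`, keeping
/// only the error's `Display` form (the unsent payload itself is dropped).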
impl<T> From<SendError<T>> for InternalServerError {
fn from(error: SendError<T>) -> Self {
InternalServerError::Send(error.to_string())
}
}
| 26.434783 | 52 | 0.6875 |
764023977e7ad39a3ffe449f735aef28537e02e3 | 7,992 | //! This is an example of a simple application
//! which calculates the Collatz conjecture.
//!
//! The function itself is trivial on purpose,
//! so that we can focus on understanding how
//! the application can be made localizable
//! via Fluent.
//!
//! To try the app launch `cargo run --example simple-fallback NUM (LOCALES)`
//!
//! NUM is a number to be calculated, and LOCALES is an optional
//! parameter with a comma-separated list of locales requested by the user.
//!
//! Example:
//!
//! cargo run --example simple-fallback 123 de,pl
//!
//! If the second argument is omitted, `en-US` locale is used as the
//! default one.
use std::{env, fs, io, path::PathBuf, str::FromStr};
use fluent_bundle::{FluentArgs, FluentBundle, FluentResource, FluentValue};
use fluent_fallback::{
generator::{BundleGenerator, FluentBundleResult},
Localization,
};
use fluent_langneg::{negotiate_languages, NegotiationStrategy};
use unic_langid::LanguageIdentifier;
/// This helper struct holds the available locales and scheme for converting
/// resource paths into full paths. It is used to customise
/// `fluent-fallback::SyncLocalization`.
struct Bundles {
locales: Vec<LanguageIdentifier>,
res_path_scheme: PathBuf,
}
/// This helper function allows us to read the list
/// of available locales by reading the list of
/// directories in `./examples/resources`.
///
/// It is expected that every directory inside it
/// has a name that is a valid BCP47 language tag.
fn get_available_locales() -> io::Result<Vec<LanguageIdentifier>> {
let mut locales = vec![];
let mut dir = env::current_dir()?;
if dir.to_string_lossy().ends_with("fluent-rs") {
dir.push("fluent-fallback");
}
dir.push("examples");
dir.push("resources");
let res_dir = fs::read_dir(dir)?;
for entry in res_dir {
if let Ok(entry) = entry {
let path = entry.path();
if path.is_dir() {
if let Some(name) = path.file_name() {
if let Some(name) = name.to_str() {
let langid: LanguageIdentifier = name.parse().expect("Parsing failed.");
locales.push(langid);
}
}
}
}
}
Ok(locales)
}
static L10N_RESOURCES: &[&str] = &["simple.ftl"];
fn main() {
// 1. Get the command line arguments.
let args: Vec<String> = env::args().collect();
// 2. If the argument length is more than 1,
// take the second argument as a comma-separated
// list of requested locales.
let requested: Vec<LanguageIdentifier> = args.get(2).map_or(vec![], |arg| {
arg.split(",")
.map(|s| s.parse().expect("Parsing locale failed."))
.collect()
});
    // 3. Negotiate it against the available ones.
let default_locale: LanguageIdentifier = "en-US".parse().expect("Parsing failed.");
let available = get_available_locales().expect("Retrieving available locales failed.");
let resolved_locales = negotiate_languages(
&requested,
&available,
Some(&default_locale),
NegotiationStrategy::Filtering,
);
// 4. Construct the path scheme for converting `locale` and `res_id` resource
// path into full path passed to OS for loading.
// Eg. ./examples/resources/{locale}/{res_id}
let mut res_path_scheme = env::current_dir().expect("Failed to retrieve current dir.");
if res_path_scheme.to_string_lossy().ends_with("fluent-rs") {
res_path_scheme.push("fluent-bundle");
}
res_path_scheme.push("examples");
res_path_scheme.push("resources");
res_path_scheme.push("{locale}");
res_path_scheme.push("{res_id}");
// 5. Create a new Localization instance which will be used to maintain the localization
// context for this UI. `Bundles` provides the custom logic for obtaining resources.
let loc = Localization::with_generator(
L10N_RESOURCES.iter().map(|&res| res.into()).collect(),
true,
Bundles {
locales: resolved_locales.into_iter().cloned().collect(),
res_path_scheme,
},
);
let mut errors = vec![];
// 6. Check if the input is provided.
match args.get(1) {
Some(input) => {
// 7.1. Cast it to a number.
match isize::from_str(&input) {
Ok(i) => {
// 7.2. Construct a map of arguments
// to format the message.
let mut args = FluentArgs::new();
args.add("input", FluentValue::from(i));
args.add("value", FluentValue::from(collatz(i)));
// 7.3. Format the message.
let value = loc
.format_value_sync("response-msg", Some(&args), &mut errors)
.unwrap()
.unwrap();
println!("{}", value);
}
Err(err) => {
let mut args = FluentArgs::new();
args.add("input", FluentValue::from(input.as_str()));
args.add("reason", FluentValue::from(err.to_string()));
let value = loc
.format_value_sync("input-parse-error-msg", Some(&args), &mut errors)
.unwrap()
.unwrap();
println!("{}", value);
}
}
}
None => {
let value = loc
.format_value_sync("missing-arg-error", None, &mut errors)
.unwrap()
.unwrap();
println!("{}", value);
}
}
}
/// Collatz conjecture calculating function.
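/// Note: the recursion below only terminates for positive `n`; zero or
/// negative inputs would recurse forever.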
fn collatz(n: isize) -> isize {
match n {
1 => 0,
_ => match n % 2 {
0 => 1 + collatz(n / 2),
_ => 1 + collatz(n * 3 + 1),
},
}
}
/// Bundle iterator used by BundleGeneratorSync implementation for Locales.
struct BundleIter {
res_path_scheme: String,
locales: <Vec<LanguageIdentifier> as IntoIterator>::IntoIter,
res_ids: Vec<String>,
}
impl Iterator for BundleIter {
type Item = FluentBundleResult<FluentResource>;
fn next(&mut self) -> Option<Self::Item> {
let locale = self.locales.next()?;
let res_path_scheme = self
.res_path_scheme
.as_str()
.replace("{locale}", &locale.to_string());
let mut bundle = FluentBundle::new(vec![locale]);
let mut errors = vec![];
for res_id in &self.res_ids {
let res_path = res_path_scheme.as_str().replace("{res_id}", res_id);
let source = fs::read_to_string(res_path).unwrap();
let res = match FluentResource::try_new(source) {
Ok(res) => res,
Err((res, err)) => {
errors.extend(err.into_iter().map(Into::into));
res
}
};
bundle.add_resource(res).unwrap();
}
if errors.is_empty() {
Some(Ok(bundle))
} else {
Some(Err((bundle, errors)))
}
}
}
impl futures::Stream for BundleIter {
type Item = FluentBundleResult<FluentResource>;
fn poll_next(
self: std::pin::Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
todo!()
}
}
impl BundleGenerator for Bundles {
type Resource = FluentResource;
type Iter = BundleIter;
type Stream = BundleIter;
fn bundles_iter(&self, res_ids: Vec<String>) -> Self::Iter {
BundleIter {
res_path_scheme: self.res_path_scheme.to_string_lossy().to_string(),
locales: self.locales.clone().into_iter(),
res_ids,
}
}
fn bundles_stream(&self, _res_ids: Vec<String>) -> Self::Stream {
todo!()
}
}
| 32.888889 | 96 | 0.566942 |
bba8f383e1bea184abb4e9234511196f0a7d2b4c | 3,175 | // This file is part of file-descriptors. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT. No part of file-descriptors, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2019 The developers of file-descriptors. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT.
/// Miscellaneous local mode flags.
#[derive(EnumIter, Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(not(any(target_os = "ios", target_os = "macos")), repr(u32))]
#[cfg_attr(all(any(target_os = "ios", target_os = "macos"), target_pointer_width = "32"), repr(u32))]
#[cfg_attr(all(any(target_os = "ios", target_os = "macos"), target_pointer_width = "64"), repr(u64))]
pub enum MiscellaneousLocalModeFlag
{
/// If set, the extended, implementation-defined special characters (such as `WERASE`) are recognized and processed.
///
/// Equivalent to the `IEXTEN` flag.
ImplementationDefinedOutputProcessing = IEXTEN,
/// If set and if the implementation supports job control, the `SIGTTOU` signal is sent to the process group of a background process that tries to write to its controlling terminal.
///
/// By default, this signal stops all the processes in the process group.
///
/// This signal is not generated by the terminal driver if the background process that is writing to the controlling terminal is either ignoring or blocking the signal.
///
/// Equivalent to the `TOSTOP` flag.
RaiseSigTTouSignal = TOSTOP,
/// If set then any input that has not been read is reprinted by the system when the next character is input.
///
/// This action is similar to what happens when the `REPRINT` character is typed.
///
/// Equivalent to the `PENDIN` flag.
ReprintUnreadInput = PENDIN,
/// If set then this flag prevents the `STATUS` character from printing information on the foreground process group.
///
/// Regardless of this flag, however, the `STATUS` character still causes the `SIGINFO` signal to be sent to the foreground process group.
///
/// Equivalent to the `NOKERNINFO` flag.
#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "ios", target_os = "macos", target_os = "openbsd"))] PreventStatusCharacterFromPrintingInformation = NOKERNINFO,
/// If set then an alternate word-erase algorithm is used when the `WERASE` character is entered.
///
/// Instead of moving backward until the previous white space character, this flag causes the `WERASE` character to move backward until the first nonalphanumeric character is encountered.
///
/// Equivalent to the `ALTWERASE` flag.
#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "ios", target_os = "macos", target_os = "openbsd"))] AlternativeWordEraseAlgorithm = ALTWERASE,
}
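/// Example (a sketch): OR a flag's raw bit into a termios `c_lflag` field.
///
/// ```ignore
/// let bit: tcflag_t = MiscellaneousLocalModeFlag::RaiseSigTTouSignal.into();
/// termios.c_lflag |= bit;
/// ```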
impl Into<tcflag_t> for MiscellaneousLocalModeFlag
{
#[inline(always)]
fn into(self) -> tcflag_t
{
self as tcflag_t
}
}
| 56.696429 | 403 | 0.748031 |
39da1af8c91052c476028431a65c88e9aab86575 | 197,893 | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// All possible error types for this service.
#[non_exhaustive]
#[derive(std::fmt::Debug)]
pub enum Error {
/// <p>You do not have sufficient permissions to perform this action.</p>
AccessDeniedException(crate::error::AccessDeniedException),
/// <p>The contact flow has not been published.</p>
ContactFlowNotPublishedException(crate::error::ContactFlowNotPublishedException),
/// <p>The contact with the specified ID is not active or does not exist.</p>
ContactNotFoundException(crate::error::ContactNotFoundException),
/// <p>Outbound calls to the destination number are not allowed.</p>
DestinationNotAllowedException(crate::error::DestinationNotAllowedException),
/// <p>A resource with the specified name already exists.</p>
DuplicateResourceException(crate::error::DuplicateResourceException),
/// <p>An entity with the same name already exists.</p>
IdempotencyException(crate::error::IdempotencyException),
/// <p>Request processing failed because of an error or failure with the service.</p>
InternalServiceException(crate::error::InternalServiceException),
/// <p>The contact flow is not valid.</p>
InvalidContactFlowException(crate::error::InvalidContactFlowException),
/// <p>The problems with the module. Please fix before trying again.</p>
InvalidContactFlowModuleException(crate::error::InvalidContactFlowModuleException),
/// <p>One or more of the specified parameters are not valid.</p>
InvalidParameterException(crate::error::InvalidParameterException),
/// <p>The request is not valid.</p>
InvalidRequestException(crate::error::InvalidRequestException),
/// <p>The allowed limit for the resource has been exceeded.</p>
LimitExceededException(crate::error::LimitExceededException),
/// <p>The contact is not permitted.</p>
OutboundContactNotPermittedException(crate::error::OutboundContactNotPermittedException),
/// <p>A resource already has that name.</p>
ResourceConflictException(crate::error::ResourceConflictException),
/// <p>That resource is already in use. Please try another.</p>
ResourceInUseException(crate::error::ResourceInUseException),
/// <p>The specified resource was not found.</p>
ResourceNotFoundException(crate::error::ResourceNotFoundException),
/// <p>The service quota has been exceeded.</p>
ServiceQuotaExceededException(crate::error::ServiceQuotaExceededException),
/// <p>The throttling limit has been exceeded.</p>
ThrottlingException(crate::error::ThrottlingException),
/// <p>No user with the specified credentials was found in the Amazon Connect instance.</p>
UserNotFoundException(crate::error::UserNotFoundException),
/// An unhandled error occurred.
Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>),
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Error::AccessDeniedException(inner) => inner.fmt(f),
Error::ContactFlowNotPublishedException(inner) => inner.fmt(f),
Error::ContactNotFoundException(inner) => inner.fmt(f),
Error::DestinationNotAllowedException(inner) => inner.fmt(f),
Error::DuplicateResourceException(inner) => inner.fmt(f),
Error::IdempotencyException(inner) => inner.fmt(f),
Error::InternalServiceException(inner) => inner.fmt(f),
Error::InvalidContactFlowException(inner) => inner.fmt(f),
Error::InvalidContactFlowModuleException(inner) => inner.fmt(f),
Error::InvalidParameterException(inner) => inner.fmt(f),
Error::InvalidRequestException(inner) => inner.fmt(f),
Error::LimitExceededException(inner) => inner.fmt(f),
Error::OutboundContactNotPermittedException(inner) => inner.fmt(f),
Error::ResourceConflictException(inner) => inner.fmt(f),
Error::ResourceInUseException(inner) => inner.fmt(f),
Error::ResourceNotFoundException(inner) => inner.fmt(f),
Error::ServiceQuotaExceededException(inner) => inner.fmt(f),
Error::ThrottlingException(inner) => inner.fmt(f),
Error::UserNotFoundException(inner) => inner.fmt(f),
Error::Unhandled(inner) => inner.fmt(f),
}
}
}
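// A minimal sketch (not part of the generated SDK): because every `From`
// impl below funnels an operation-specific `SdkError` into this single
// `Error` enum, callers can classify failures in one place. Treating the
// variants below as retryable is an illustrative assumption, not AWS retry
// guidance.
#[allow(dead_code)]
fn is_retryable(err: &Error) -> bool {
    matches!(
        err,
        Error::ThrottlingException(_)
            | Error::InternalServiceException(_)
            | Error::LimitExceededException(_)
    )
}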
impl<R> From<aws_smithy_http::result::SdkError<crate::error::AssociateApprovedOriginError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::AssociateApprovedOriginError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::AssociateApprovedOriginErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::AssociateApprovedOriginErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::AssociateApprovedOriginErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::AssociateApprovedOriginErrorKind::ResourceConflictException(
inner,
) => Error::ResourceConflictException(inner),
crate::error::AssociateApprovedOriginErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::AssociateApprovedOriginErrorKind::ServiceQuotaExceededException(
inner,
) => Error::ServiceQuotaExceededException(inner),
crate::error::AssociateApprovedOriginErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::AssociateApprovedOriginErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
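            // Anything other than a modeled service error (construction,
            // dispatch, timeout, or response-parsing failures) surfaces as
            // `Unhandled`.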
_ => Error::Unhandled(err.into()),
}
}
}
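// A hedged usage sketch: the `?` operator below relies on the `From` impl
// above to widen `SdkError<AssociateApprovedOriginError, _>` into the
// crate-wide `Error`. The fluent `crate::Client::associate_approved_origin`
// builder is the usual smithy-rs client pattern and is assumed here; it is
// defined elsewhere in this crate, not in this file.
#[allow(dead_code)]
async fn approve_origin(
    client: &crate::Client,
    instance_id: &str,
    origin: &str,
) -> Result<(), Error> {
    client
        .associate_approved_origin()
        .instance_id(instance_id)
        .origin(origin)
        .send()
        .await?; // SdkError -> Error via the From impl above
    Ok(())
}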
impl<R> From<aws_smithy_http::result::SdkError<crate::error::AssociateBotError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::AssociateBotError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::AssociateBotErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::AssociateBotErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::AssociateBotErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::AssociateBotErrorKind::ResourceConflictException(inner) => {
Error::ResourceConflictException(inner)
}
crate::error::AssociateBotErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::AssociateBotErrorKind::ServiceQuotaExceededException(inner) => {
Error::ServiceQuotaExceededException(inner)
}
crate::error::AssociateBotErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::AssociateBotErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::AssociateInstanceStorageConfigError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::AssociateInstanceStorageConfigError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::AssociateInstanceStorageConfigErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::AssociateInstanceStorageConfigErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::AssociateInstanceStorageConfigErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::AssociateInstanceStorageConfigErrorKind::ResourceConflictException(inner) => Error::ResourceConflictException(inner),
crate::error::AssociateInstanceStorageConfigErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::AssociateInstanceStorageConfigErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::AssociateInstanceStorageConfigErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::AssociateLambdaFunctionError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::AssociateLambdaFunctionError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::AssociateLambdaFunctionErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::AssociateLambdaFunctionErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::AssociateLambdaFunctionErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::AssociateLambdaFunctionErrorKind::ResourceConflictException(
inner,
) => Error::ResourceConflictException(inner),
crate::error::AssociateLambdaFunctionErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::AssociateLambdaFunctionErrorKind::ServiceQuotaExceededException(
inner,
) => Error::ServiceQuotaExceededException(inner),
crate::error::AssociateLambdaFunctionErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::AssociateLambdaFunctionErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::AssociateLexBotError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::AssociateLexBotError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::AssociateLexBotErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::AssociateLexBotErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::AssociateLexBotErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::AssociateLexBotErrorKind::ResourceConflictException(inner) => {
Error::ResourceConflictException(inner)
}
crate::error::AssociateLexBotErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::AssociateLexBotErrorKind::ServiceQuotaExceededException(inner) => {
Error::ServiceQuotaExceededException(inner)
}
crate::error::AssociateLexBotErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::AssociateLexBotErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::AssociateQueueQuickConnectsError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::AssociateQueueQuickConnectsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::AssociateQueueQuickConnectsErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::AssociateQueueQuickConnectsErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::AssociateQueueQuickConnectsErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::AssociateQueueQuickConnectsErrorKind::LimitExceededException(
inner,
) => Error::LimitExceededException(inner),
crate::error::AssociateQueueQuickConnectsErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::AssociateQueueQuickConnectsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::AssociateQueueQuickConnectsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::AssociateRoutingProfileQueuesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::AssociateRoutingProfileQueuesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::AssociateRoutingProfileQueuesErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::AssociateRoutingProfileQueuesErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::AssociateRoutingProfileQueuesErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::AssociateRoutingProfileQueuesErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::AssociateRoutingProfileQueuesErrorKind::ThrottlingException(
inner,
) => Error::ThrottlingException(inner),
crate::error::AssociateRoutingProfileQueuesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::AssociateSecurityKeyError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::AssociateSecurityKeyError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::AssociateSecurityKeyErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::AssociateSecurityKeyErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::AssociateSecurityKeyErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::AssociateSecurityKeyErrorKind::ResourceConflictException(inner) => {
Error::ResourceConflictException(inner)
}
crate::error::AssociateSecurityKeyErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::AssociateSecurityKeyErrorKind::ServiceQuotaExceededException(
inner,
) => Error::ServiceQuotaExceededException(inner),
crate::error::AssociateSecurityKeyErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::AssociateSecurityKeyErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateAgentStatusError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::CreateAgentStatusError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateAgentStatusErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::CreateAgentStatusErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateAgentStatusErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::CreateAgentStatusErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateAgentStatusErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::CreateAgentStatusErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateAgentStatusErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateAgentStatusErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateContactFlowError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::CreateContactFlowError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateContactFlowErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::CreateContactFlowErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateContactFlowErrorKind::InvalidContactFlowException(inner) => {
Error::InvalidContactFlowException(inner)
}
crate::error::CreateContactFlowErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::CreateContactFlowErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateContactFlowErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::CreateContactFlowErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateContactFlowErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateContactFlowErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateContactFlowModuleError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::CreateContactFlowModuleError, R>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateContactFlowModuleErrorKind::AccessDeniedException(inner) => Error::AccessDeniedException(inner),
crate::error::CreateContactFlowModuleErrorKind::DuplicateResourceException(inner) => Error::DuplicateResourceException(inner),
crate::error::CreateContactFlowModuleErrorKind::IdempotencyException(inner) => Error::IdempotencyException(inner),
crate::error::CreateContactFlowModuleErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::CreateContactFlowModuleErrorKind::InvalidContactFlowModuleException(inner) => Error::InvalidContactFlowModuleException(inner),
crate::error::CreateContactFlowModuleErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::CreateContactFlowModuleErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::CreateContactFlowModuleErrorKind::LimitExceededException(inner) => Error::LimitExceededException(inner),
crate::error::CreateContactFlowModuleErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::CreateContactFlowModuleErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::CreateContactFlowModuleErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateHoursOfOperationError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::CreateHoursOfOperationError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateHoursOfOperationErrorKind::DuplicateResourceException(
inner,
) => Error::DuplicateResourceException(inner),
crate::error::CreateHoursOfOperationErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateHoursOfOperationErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::CreateHoursOfOperationErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateHoursOfOperationErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::CreateHoursOfOperationErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateHoursOfOperationErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateHoursOfOperationErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateInstanceError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::CreateInstanceError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateInstanceErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateInstanceErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateInstanceErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateInstanceErrorKind::ServiceQuotaExceededException(inner) => {
Error::ServiceQuotaExceededException(inner)
}
crate::error::CreateInstanceErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateInstanceErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateIntegrationAssociationError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::CreateIntegrationAssociationError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateIntegrationAssociationErrorKind::DuplicateResourceException(
inner,
) => Error::DuplicateResourceException(inner),
crate::error::CreateIntegrationAssociationErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::CreateIntegrationAssociationErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::CreateIntegrationAssociationErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::CreateIntegrationAssociationErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateIntegrationAssociationErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateQueueError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::CreateQueueError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateQueueErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::CreateQueueErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateQueueErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::CreateQueueErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateQueueErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::CreateQueueErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateQueueErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateQueueErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateQuickConnectError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::CreateQuickConnectError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateQuickConnectErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::CreateQuickConnectErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateQuickConnectErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::CreateQuickConnectErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateQuickConnectErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::CreateQuickConnectErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateQuickConnectErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateQuickConnectErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateRoutingProfileError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::CreateRoutingProfileError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateRoutingProfileErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::CreateRoutingProfileErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateRoutingProfileErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::CreateRoutingProfileErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateRoutingProfileErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::CreateRoutingProfileErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateRoutingProfileErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateRoutingProfileErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateSecurityProfileError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::CreateSecurityProfileError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateSecurityProfileErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::CreateSecurityProfileErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateSecurityProfileErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::CreateSecurityProfileErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateSecurityProfileErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::CreateSecurityProfileErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateSecurityProfileErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateSecurityProfileErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateUseCaseError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::CreateUseCaseError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateUseCaseErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::CreateUseCaseErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateUseCaseErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateUseCaseErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateUseCaseErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateUseCaseErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateUserError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::CreateUserError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateUserErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::CreateUserErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::CreateUserErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::CreateUserErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateUserErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::CreateUserErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::CreateUserErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateUserErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::CreateUserHierarchyGroupError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::CreateUserHierarchyGroupError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::CreateUserHierarchyGroupErrorKind::DuplicateResourceException(
inner,
) => Error::DuplicateResourceException(inner),
crate::error::CreateUserHierarchyGroupErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::CreateUserHierarchyGroupErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::CreateUserHierarchyGroupErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::CreateUserHierarchyGroupErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::CreateUserHierarchyGroupErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::CreateUserHierarchyGroupErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::CreateUserHierarchyGroupErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteContactFlowError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DeleteContactFlowError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteContactFlowErrorKind::AccessDeniedException(inner) => {
Error::AccessDeniedException(inner)
}
crate::error::DeleteContactFlowErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DeleteContactFlowErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DeleteContactFlowErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DeleteContactFlowErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DeleteContactFlowErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DeleteContactFlowErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteContactFlowModuleError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DeleteContactFlowModuleError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteContactFlowModuleErrorKind::AccessDeniedException(inner) => {
Error::AccessDeniedException(inner)
}
crate::error::DeleteContactFlowModuleErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DeleteContactFlowModuleErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DeleteContactFlowModuleErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DeleteContactFlowModuleErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DeleteContactFlowModuleErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DeleteContactFlowModuleErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteHoursOfOperationError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DeleteHoursOfOperationError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteHoursOfOperationErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DeleteHoursOfOperationErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DeleteHoursOfOperationErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DeleteHoursOfOperationErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DeleteHoursOfOperationErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DeleteHoursOfOperationErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteInstanceError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::DeleteInstanceError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteInstanceErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DeleteInstanceErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DeleteInstanceErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DeleteInstanceErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteIntegrationAssociationError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DeleteIntegrationAssociationError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteIntegrationAssociationErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::DeleteIntegrationAssociationErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::DeleteIntegrationAssociationErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DeleteIntegrationAssociationErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DeleteIntegrationAssociationErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteQuickConnectError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DeleteQuickConnectError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteQuickConnectErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DeleteQuickConnectErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DeleteQuickConnectErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DeleteQuickConnectErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DeleteQuickConnectErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DeleteQuickConnectErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteSecurityProfileError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DeleteSecurityProfileError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteSecurityProfileErrorKind::AccessDeniedException(inner) => {
Error::AccessDeniedException(inner)
}
crate::error::DeleteSecurityProfileErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DeleteSecurityProfileErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DeleteSecurityProfileErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DeleteSecurityProfileErrorKind::ResourceInUseException(inner) => {
Error::ResourceInUseException(inner)
}
crate::error::DeleteSecurityProfileErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DeleteSecurityProfileErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DeleteSecurityProfileErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteUseCaseError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::DeleteUseCaseError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteUseCaseErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DeleteUseCaseErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DeleteUseCaseErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DeleteUseCaseErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DeleteUseCaseErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteUserError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::DeleteUserError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteUserErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DeleteUserErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DeleteUserErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DeleteUserErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DeleteUserErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DeleteUserErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteUserHierarchyGroupError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DeleteUserHierarchyGroupError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DeleteUserHierarchyGroupErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::DeleteUserHierarchyGroupErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DeleteUserHierarchyGroupErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DeleteUserHierarchyGroupErrorKind::ResourceInUseException(inner) => {
Error::ResourceInUseException(inner)
}
crate::error::DeleteUserHierarchyGroupErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DeleteUserHierarchyGroupErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DeleteUserHierarchyGroupErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeAgentStatusError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeAgentStatusError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeAgentStatusErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DescribeAgentStatusErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DescribeAgentStatusErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeAgentStatusErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DescribeAgentStatusErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeAgentStatusErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeContactError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::DescribeContactError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeContactErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DescribeContactErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DescribeContactErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeContactErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DescribeContactErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeContactErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeContactFlowError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeContactFlowError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeContactFlowErrorKind::ContactFlowNotPublishedException(
inner,
) => Error::ContactFlowNotPublishedException(inner),
crate::error::DescribeContactFlowErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DescribeContactFlowErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DescribeContactFlowErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeContactFlowErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DescribeContactFlowErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeContactFlowErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeContactFlowModuleError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeContactFlowModuleError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeContactFlowModuleErrorKind::AccessDeniedException(inner) => {
Error::AccessDeniedException(inner)
}
crate::error::DescribeContactFlowModuleErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::DescribeContactFlowModuleErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DescribeContactFlowModuleErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::DescribeContactFlowModuleErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DescribeContactFlowModuleErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeContactFlowModuleErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeHoursOfOperationError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeHoursOfOperationError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeHoursOfOperationErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::DescribeHoursOfOperationErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DescribeHoursOfOperationErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeHoursOfOperationErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DescribeHoursOfOperationErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeHoursOfOperationErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeInstanceError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeInstanceError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeInstanceErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DescribeInstanceErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeInstanceErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DescribeInstanceErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeInstanceAttributeError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeInstanceAttributeError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeInstanceAttributeErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::DescribeInstanceAttributeErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DescribeInstanceAttributeErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::DescribeInstanceAttributeErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DescribeInstanceAttributeErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeInstanceAttributeErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeInstanceStorageConfigError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeInstanceStorageConfigError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeInstanceStorageConfigErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::DescribeInstanceStorageConfigErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DescribeInstanceStorageConfigErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::DescribeInstanceStorageConfigErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DescribeInstanceStorageConfigErrorKind::ThrottlingException(
inner,
) => Error::ThrottlingException(inner),
crate::error::DescribeInstanceStorageConfigErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeQueueError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::DescribeQueueError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeQueueErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DescribeQueueErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DescribeQueueErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeQueueErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DescribeQueueErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeQueueErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeQuickConnectError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeQuickConnectError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeQuickConnectErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DescribeQuickConnectErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DescribeQuickConnectErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeQuickConnectErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DescribeQuickConnectErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeQuickConnectErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeRoutingProfileError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeRoutingProfileError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeRoutingProfileErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DescribeRoutingProfileErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DescribeRoutingProfileErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeRoutingProfileErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DescribeRoutingProfileErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeRoutingProfileErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeSecurityProfileError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeSecurityProfileError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeSecurityProfileErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DescribeSecurityProfileErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DescribeSecurityProfileErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeSecurityProfileErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DescribeSecurityProfileErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeSecurityProfileErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeUserError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::DescribeUserError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeUserErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DescribeUserErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DescribeUserErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DescribeUserErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DescribeUserErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeUserErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeUserHierarchyGroupError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DescribeUserHierarchyGroupError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeUserHierarchyGroupErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::DescribeUserHierarchyGroupErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DescribeUserHierarchyGroupErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::DescribeUserHierarchyGroupErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DescribeUserHierarchyGroupErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DescribeUserHierarchyGroupErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::DescribeUserHierarchyStructureError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::DescribeUserHierarchyStructureError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DescribeUserHierarchyStructureErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::DescribeUserHierarchyStructureErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::DescribeUserHierarchyStructureErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::DescribeUserHierarchyStructureErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::DescribeUserHierarchyStructureErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::DescribeUserHierarchyStructureErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DisassociateApprovedOriginError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DisassociateApprovedOriginError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DisassociateApprovedOriginErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::DisassociateApprovedOriginErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DisassociateApprovedOriginErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::DisassociateApprovedOriginErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DisassociateApprovedOriginErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DisassociateApprovedOriginErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DisassociateBotError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::DisassociateBotError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DisassociateBotErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DisassociateBotErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DisassociateBotErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DisassociateBotErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DisassociateBotErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::DisassociateInstanceStorageConfigError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::DisassociateInstanceStorageConfigError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DisassociateInstanceStorageConfigErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::DisassociateInstanceStorageConfigErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::DisassociateInstanceStorageConfigErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::DisassociateInstanceStorageConfigErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::DisassociateInstanceStorageConfigErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::DisassociateInstanceStorageConfigErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DisassociateLambdaFunctionError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DisassociateLambdaFunctionError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DisassociateLambdaFunctionErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::DisassociateLambdaFunctionErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DisassociateLambdaFunctionErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::DisassociateLambdaFunctionErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DisassociateLambdaFunctionErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DisassociateLambdaFunctionErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DisassociateLexBotError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DisassociateLexBotError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DisassociateLexBotErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DisassociateLexBotErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::DisassociateLexBotErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DisassociateLexBotErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::DisassociateLexBotErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DisassociateLexBotErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
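// `R` here is the raw response type carried by `SdkError` (left generic by
// the SDK); the `Send + Sync + Debug + 'static` bounds appear to be required
// so the catch-all `_ => Error::Unhandled(err.into())` arm can box the whole
// `SdkError` behind the `Unhandled` variant.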
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::DisassociateQueueQuickConnectsError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::DisassociateQueueQuickConnectsError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DisassociateQueueQuickConnectsErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::DisassociateQueueQuickConnectsErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::DisassociateQueueQuickConnectsErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::DisassociateQueueQuickConnectsErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::DisassociateQueueQuickConnectsErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::DisassociateQueueQuickConnectsErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::DisassociateRoutingProfileQueuesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::DisassociateRoutingProfileQueuesError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DisassociateRoutingProfileQueuesErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::DisassociateRoutingProfileQueuesErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::DisassociateRoutingProfileQueuesErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::DisassociateRoutingProfileQueuesErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::DisassociateRoutingProfileQueuesErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::DisassociateRoutingProfileQueuesErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::DisassociateSecurityKeyError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::DisassociateSecurityKeyError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::DisassociateSecurityKeyErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::DisassociateSecurityKeyErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::DisassociateSecurityKeyErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::DisassociateSecurityKeyErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::DisassociateSecurityKeyErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::DisassociateSecurityKeyErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::GetContactAttributesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::GetContactAttributesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::GetContactAttributesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::GetContactAttributesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::GetContactAttributesErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::GetContactAttributesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::GetCurrentMetricDataError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::GetCurrentMetricDataError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::GetCurrentMetricDataErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::GetCurrentMetricDataErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::GetCurrentMetricDataErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::GetCurrentMetricDataErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::GetCurrentMetricDataErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::GetCurrentMetricDataErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::GetFederationTokenError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::GetFederationTokenError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::GetFederationTokenErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::GetFederationTokenErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::GetFederationTokenErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::GetFederationTokenErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::GetFederationTokenErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::GetFederationTokenErrorKind::UserNotFoundException(inner) => {
Error::UserNotFoundException(inner)
}
crate::error::GetFederationTokenErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::GetMetricDataError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::GetMetricDataError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::GetMetricDataErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::GetMetricDataErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::GetMetricDataErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::GetMetricDataErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::GetMetricDataErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::GetMetricDataErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListAgentStatusesError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListAgentStatusesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListAgentStatusesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListAgentStatusesErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListAgentStatusesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListAgentStatusesErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListAgentStatusesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListAgentStatusesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListApprovedOriginsError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListApprovedOriginsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListApprovedOriginsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListApprovedOriginsErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListApprovedOriginsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListApprovedOriginsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListApprovedOriginsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListApprovedOriginsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListBotsError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::ListBotsError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListBotsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListBotsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListBotsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListBotsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListBotsErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListContactFlowModulesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListContactFlowModulesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListContactFlowModulesErrorKind::AccessDeniedException(inner) => {
Error::AccessDeniedException(inner)
}
crate::error::ListContactFlowModulesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListContactFlowModulesErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListContactFlowModulesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListContactFlowModulesErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListContactFlowModulesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListContactFlowModulesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListContactFlowsError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListContactFlowsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListContactFlowsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListContactFlowsErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListContactFlowsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListContactFlowsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListContactFlowsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListContactFlowsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListContactReferencesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListContactReferencesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListContactReferencesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListContactReferencesErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListContactReferencesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListContactReferencesErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListContactReferencesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListContactReferencesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListHoursOfOperationsError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListHoursOfOperationsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListHoursOfOperationsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListHoursOfOperationsErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListHoursOfOperationsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListHoursOfOperationsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListHoursOfOperationsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListHoursOfOperationsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListInstanceAttributesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListInstanceAttributesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListInstanceAttributesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListInstanceAttributesErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListInstanceAttributesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListInstanceAttributesErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListInstanceAttributesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListInstanceAttributesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
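// Note that each impl maps only the error variants its operation can return:
// `ListInstances`, just below, converts only `InternalServiceException`,
// `InvalidRequestException`, and `Unhandled`, while most other operations
// also carry `InvalidParameterException`, `ResourceNotFoundException`, and
// `ThrottlingException` arms.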
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListInstancesError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::ListInstancesError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListInstancesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListInstancesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListInstancesErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListInstanceStorageConfigsError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListInstanceStorageConfigsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListInstanceStorageConfigsErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::ListInstanceStorageConfigsErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::ListInstanceStorageConfigsErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::ListInstanceStorageConfigsErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::ListInstanceStorageConfigsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListInstanceStorageConfigsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListIntegrationAssociationsError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListIntegrationAssociationsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListIntegrationAssociationsErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::ListIntegrationAssociationsErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::ListIntegrationAssociationsErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::ListIntegrationAssociationsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListIntegrationAssociationsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListLambdaFunctionsError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListLambdaFunctionsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListLambdaFunctionsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListLambdaFunctionsErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListLambdaFunctionsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListLambdaFunctionsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListLambdaFunctionsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListLambdaFunctionsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListLexBotsError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::ListLexBotsError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListLexBotsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListLexBotsErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListLexBotsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListLexBotsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListLexBotsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListLexBotsErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListPhoneNumbersError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListPhoneNumbersError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListPhoneNumbersErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListPhoneNumbersErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListPhoneNumbersErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListPhoneNumbersErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListPhoneNumbersErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListPhoneNumbersErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListPromptsError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::ListPromptsError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListPromptsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListPromptsErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListPromptsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListPromptsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListPromptsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListPromptsErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListQueueQuickConnectsError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListQueueQuickConnectsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListQueueQuickConnectsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListQueueQuickConnectsErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListQueueQuickConnectsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListQueueQuickConnectsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListQueueQuickConnectsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListQueueQuickConnectsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListQueuesError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::ListQueuesError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListQueuesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListQueuesErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListQueuesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListQueuesErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListQueuesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListQueuesErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListQuickConnectsError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListQuickConnectsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListQuickConnectsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListQuickConnectsErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListQuickConnectsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListQuickConnectsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListQuickConnectsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListQuickConnectsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListRoutingProfileQueuesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListRoutingProfileQueuesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListRoutingProfileQueuesErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::ListRoutingProfileQueuesErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::ListRoutingProfileQueuesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListRoutingProfileQueuesErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::ListRoutingProfileQueuesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListRoutingProfileQueuesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListRoutingProfilesError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListRoutingProfilesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListRoutingProfilesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListRoutingProfilesErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListRoutingProfilesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListRoutingProfilesErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListRoutingProfilesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListRoutingProfilesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListSecurityKeysError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListSecurityKeysError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListSecurityKeysErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListSecurityKeysErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListSecurityKeysErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListSecurityKeysErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListSecurityKeysErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListSecurityKeysErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::ListSecurityProfilePermissionsError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::ListSecurityProfilePermissionsError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListSecurityProfilePermissionsErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::ListSecurityProfilePermissionsErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::ListSecurityProfilePermissionsErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::ListSecurityProfilePermissionsErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::ListSecurityProfilePermissionsErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::ListSecurityProfilePermissionsErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListSecurityProfilesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListSecurityProfilesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListSecurityProfilesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListSecurityProfilesErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListSecurityProfilesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListSecurityProfilesErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListSecurityProfilesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListSecurityProfilesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListTagsForResourceErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListTagsForResourceErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListTagsForResourceErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListTagsForResourceErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListTagsForResourceErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListTagsForResourceErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListUseCasesError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::ListUseCasesError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListUseCasesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListUseCasesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListUseCasesErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListUseCasesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListUseCasesErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListUserHierarchyGroupsError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ListUserHierarchyGroupsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListUserHierarchyGroupsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListUserHierarchyGroupsErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::ListUserHierarchyGroupsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListUserHierarchyGroupsErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::ListUserHierarchyGroupsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListUserHierarchyGroupsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListUsersError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::ListUsersError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ListUsersErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ListUsersErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::ListUsersErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ListUsersErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ListUsersErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::ListUsersErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::ResumeContactRecordingError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::ResumeContactRecordingError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::ResumeContactRecordingErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::ResumeContactRecordingErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::ResumeContactRecordingErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::ResumeContactRecordingErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::StartChatContactError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::StartChatContactError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::StartChatContactErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::StartChatContactErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::StartChatContactErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::StartChatContactErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::StartChatContactErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::StartChatContactErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::StartContactRecordingError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::StartContactRecordingError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::StartContactRecordingErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::StartContactRecordingErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::StartContactRecordingErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::StartContactRecordingErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::StartContactRecordingErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::StartContactStreamingError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::StartContactStreamingError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::StartContactStreamingErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::StartContactStreamingErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::StartContactStreamingErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::StartContactStreamingErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::StartContactStreamingErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::StartContactStreamingErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::StartOutboundVoiceContactError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::StartOutboundVoiceContactError, R>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::StartOutboundVoiceContactErrorKind::DestinationNotAllowedException(inner) => Error::DestinationNotAllowedException(inner),
crate::error::StartOutboundVoiceContactErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::StartOutboundVoiceContactErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::StartOutboundVoiceContactErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::StartOutboundVoiceContactErrorKind::LimitExceededException(inner) => Error::LimitExceededException(inner),
crate::error::StartOutboundVoiceContactErrorKind::OutboundContactNotPermittedException(inner) => Error::OutboundContactNotPermittedException(inner),
crate::error::StartOutboundVoiceContactErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::StartOutboundVoiceContactErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::StartTaskContactError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::StartTaskContactError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::StartTaskContactErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::StartTaskContactErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::StartTaskContactErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::StartTaskContactErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::StartTaskContactErrorKind::ServiceQuotaExceededException(inner) => {
Error::ServiceQuotaExceededException(inner)
}
crate::error::StartTaskContactErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::StartTaskContactErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::StopContactError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::StopContactError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::StopContactErrorKind::ContactNotFoundException(inner) => {
Error::ContactNotFoundException(inner)
}
crate::error::StopContactErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::StopContactErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::StopContactErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::StopContactErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::StopContactErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
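// The trailing `_ => Error::Unhandled(err.into())` arm in every impl covers
// the non-service `SdkError` variants (e.g., construction, dispatch, timeout,
// or response failures), so transport-level problems surface through the same
// unified `Error` type as modeled service errors.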
impl<R> From<aws_smithy_http::result::SdkError<crate::error::StopContactRecordingError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::StopContactRecordingError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::StopContactRecordingErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::StopContactRecordingErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::StopContactRecordingErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::StopContactRecordingErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::StopContactStreamingError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::StopContactStreamingError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::StopContactStreamingErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::StopContactStreamingErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::StopContactStreamingErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::StopContactStreamingErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::StopContactStreamingErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::SuspendContactRecordingError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::SuspendContactRecordingError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::SuspendContactRecordingErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::SuspendContactRecordingErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::SuspendContactRecordingErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::SuspendContactRecordingErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::TagResourceError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::TagResourceError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::TagResourceErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::TagResourceErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::TagResourceErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::TagResourceErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::TagResourceErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::TagResourceErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UntagResourceError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::UntagResourceError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UntagResourceErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UntagResourceErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UntagResourceErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UntagResourceErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UntagResourceErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UntagResourceErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateAgentStatusError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateAgentStatusError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateAgentStatusErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::UpdateAgentStatusErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateAgentStatusErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateAgentStatusErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateAgentStatusErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::UpdateAgentStatusErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateAgentStatusErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateAgentStatusErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateContactError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::UpdateContactError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateContactErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateContactErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateContactErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateContactErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateContactErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateContactErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateContactAttributesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateContactAttributesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateContactAttributesErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateContactAttributesErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateContactAttributesErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateContactAttributesErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateContactAttributesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateContactFlowContentError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateContactFlowContentError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateContactFlowContentErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateContactFlowContentErrorKind::InvalidContactFlowException(
inner,
) => Error::InvalidContactFlowException(inner),
crate::error::UpdateContactFlowContentErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateContactFlowContentErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateContactFlowContentErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateContactFlowContentErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateContactFlowContentErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateContactFlowMetadataError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateContactFlowMetadataError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateContactFlowMetadataErrorKind::DuplicateResourceException(
inner,
) => Error::DuplicateResourceException(inner),
crate::error::UpdateContactFlowMetadataErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateContactFlowMetadataErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateContactFlowMetadataErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::UpdateContactFlowMetadataErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateContactFlowMetadataErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateContactFlowMetadataErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::UpdateContactFlowModuleContentError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::UpdateContactFlowModuleContentError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateContactFlowModuleContentErrorKind::AccessDeniedException(inner) => Error::AccessDeniedException(inner),
crate::error::UpdateContactFlowModuleContentErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::UpdateContactFlowModuleContentErrorKind::InvalidContactFlowModuleException(inner) => Error::InvalidContactFlowModuleException(inner),
crate::error::UpdateContactFlowModuleContentErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::UpdateContactFlowModuleContentErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::UpdateContactFlowModuleContentErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::UpdateContactFlowModuleContentErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::UpdateContactFlowModuleMetadataError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::UpdateContactFlowModuleMetadataError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateContactFlowModuleMetadataErrorKind::AccessDeniedException(inner) => Error::AccessDeniedException(inner),
crate::error::UpdateContactFlowModuleMetadataErrorKind::DuplicateResourceException(inner) => Error::DuplicateResourceException(inner),
crate::error::UpdateContactFlowModuleMetadataErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::UpdateContactFlowModuleMetadataErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::UpdateContactFlowModuleMetadataErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::UpdateContactFlowModuleMetadataErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::UpdateContactFlowModuleMetadataErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::UpdateContactFlowModuleMetadataErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateContactFlowNameError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateContactFlowNameError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateContactFlowNameErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::UpdateContactFlowNameErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateContactFlowNameErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateContactFlowNameErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateContactFlowNameErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateContactFlowNameErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateContactFlowNameErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateContactScheduleError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateContactScheduleError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateContactScheduleErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateContactScheduleErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateContactScheduleErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateContactScheduleErrorKind::LimitExceededException(inner) => {
Error::LimitExceededException(inner)
}
crate::error::UpdateContactScheduleErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateContactScheduleErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateContactScheduleErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateHoursOfOperationError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateHoursOfOperationError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateHoursOfOperationErrorKind::DuplicateResourceException(
inner,
) => Error::DuplicateResourceException(inner),
crate::error::UpdateHoursOfOperationErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateHoursOfOperationErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateHoursOfOperationErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateHoursOfOperationErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateHoursOfOperationErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateHoursOfOperationErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateInstanceAttributeError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateInstanceAttributeError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateInstanceAttributeErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateInstanceAttributeErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateInstanceAttributeErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateInstanceAttributeErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateInstanceAttributeErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateInstanceAttributeErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateInstanceStorageConfigError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateInstanceStorageConfigError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateInstanceStorageConfigErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateInstanceStorageConfigErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateInstanceStorageConfigErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::UpdateInstanceStorageConfigErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateInstanceStorageConfigErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateInstanceStorageConfigErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateQueueHoursOfOperationError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateQueueHoursOfOperationError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateQueueHoursOfOperationErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateQueueHoursOfOperationErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateQueueHoursOfOperationErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::UpdateQueueHoursOfOperationErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateQueueHoursOfOperationErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateQueueHoursOfOperationErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateQueueMaxContactsError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateQueueMaxContactsError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateQueueMaxContactsErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateQueueMaxContactsErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateQueueMaxContactsErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateQueueMaxContactsErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateQueueMaxContactsErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateQueueMaxContactsErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateQueueNameError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(err: aws_smithy_http::result::SdkError<crate::error::UpdateQueueNameError, R>) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateQueueNameErrorKind::DuplicateResourceException(inner) => {
Error::DuplicateResourceException(inner)
}
crate::error::UpdateQueueNameErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateQueueNameErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateQueueNameErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateQueueNameErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateQueueNameErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateQueueNameErrorKind::Unhandled(inner) => Error::Unhandled(inner),
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::UpdateQueueOutboundCallerConfigError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::UpdateQueueOutboundCallerConfigError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateQueueOutboundCallerConfigErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::UpdateQueueOutboundCallerConfigErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::UpdateQueueOutboundCallerConfigErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::UpdateQueueOutboundCallerConfigErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::UpdateQueueOutboundCallerConfigErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::UpdateQueueOutboundCallerConfigErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateQueueStatusError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateQueueStatusError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateQueueStatusErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateQueueStatusErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateQueueStatusErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateQueueStatusErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateQueueStatusErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateQueueStatusErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateQuickConnectConfigError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateQuickConnectConfigError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateQuickConnectConfigErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateQuickConnectConfigErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateQuickConnectConfigErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateQuickConnectConfigErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateQuickConnectConfigErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateQuickConnectConfigErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateQuickConnectNameError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateQuickConnectNameError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateQuickConnectNameErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateQuickConnectNameErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateQuickConnectNameErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateQuickConnectNameErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateQuickConnectNameErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateQuickConnectNameErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<aws_smithy_http::result::SdkError<crate::error::UpdateRoutingProfileConcurrencyError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::UpdateRoutingProfileConcurrencyError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateRoutingProfileConcurrencyErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::UpdateRoutingProfileConcurrencyErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::UpdateRoutingProfileConcurrencyErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::UpdateRoutingProfileConcurrencyErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::UpdateRoutingProfileConcurrencyErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::UpdateRoutingProfileConcurrencyErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R>
From<
aws_smithy_http::result::SdkError<
crate::error::UpdateRoutingProfileDefaultOutboundQueueError,
R,
>,
> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<
crate::error::UpdateRoutingProfileDefaultOutboundQueueError,
R,
>,
) -> Self {
match err {
            aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateRoutingProfileDefaultOutboundQueueErrorKind::InternalServiceException(inner) => Error::InternalServiceException(inner),
crate::error::UpdateRoutingProfileDefaultOutboundQueueErrorKind::InvalidParameterException(inner) => Error::InvalidParameterException(inner),
crate::error::UpdateRoutingProfileDefaultOutboundQueueErrorKind::InvalidRequestException(inner) => Error::InvalidRequestException(inner),
crate::error::UpdateRoutingProfileDefaultOutboundQueueErrorKind::ResourceNotFoundException(inner) => Error::ResourceNotFoundException(inner),
crate::error::UpdateRoutingProfileDefaultOutboundQueueErrorKind::ThrottlingException(inner) => Error::ThrottlingException(inner),
crate::error::UpdateRoutingProfileDefaultOutboundQueueErrorKind::Unhandled(inner) => Error::Unhandled(inner),
            },
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateRoutingProfileNameError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateRoutingProfileNameError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateRoutingProfileNameErrorKind::DuplicateResourceException(
inner,
) => Error::DuplicateResourceException(inner),
crate::error::UpdateRoutingProfileNameErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateRoutingProfileNameErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateRoutingProfileNameErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateRoutingProfileNameErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateRoutingProfileNameErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateRoutingProfileNameErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateRoutingProfileQueuesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateRoutingProfileQueuesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateRoutingProfileQueuesErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateRoutingProfileQueuesErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateRoutingProfileQueuesErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::UpdateRoutingProfileQueuesErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateRoutingProfileQueuesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateRoutingProfileQueuesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateSecurityProfileError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateSecurityProfileError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateSecurityProfileErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateSecurityProfileErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateSecurityProfileErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateSecurityProfileErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateSecurityProfileErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateSecurityProfileErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateUserHierarchyError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateUserHierarchyError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateUserHierarchyErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateUserHierarchyErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateUserHierarchyErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateUserHierarchyErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateUserHierarchyErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateUserHierarchyErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateUserHierarchyGroupNameError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateUserHierarchyGroupNameError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateUserHierarchyGroupNameErrorKind::DuplicateResourceException(
inner,
) => Error::DuplicateResourceException(inner),
crate::error::UpdateUserHierarchyGroupNameErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateUserHierarchyGroupNameErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateUserHierarchyGroupNameErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::UpdateUserHierarchyGroupNameErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateUserHierarchyGroupNameErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateUserHierarchyGroupNameErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateUserHierarchyStructureError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateUserHierarchyStructureError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateUserHierarchyStructureErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateUserHierarchyStructureErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateUserHierarchyStructureErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::UpdateUserHierarchyStructureErrorKind::ResourceInUseException(
inner,
) => Error::ResourceInUseException(inner),
crate::error::UpdateUserHierarchyStructureErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateUserHierarchyStructureErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateUserHierarchyStructureErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateUserIdentityInfoError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateUserIdentityInfoError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateUserIdentityInfoErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateUserIdentityInfoErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateUserIdentityInfoErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateUserIdentityInfoErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateUserIdentityInfoErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateUserIdentityInfoErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateUserPhoneConfigError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateUserPhoneConfigError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateUserPhoneConfigErrorKind::InternalServiceException(inner) => {
Error::InternalServiceException(inner)
}
crate::error::UpdateUserPhoneConfigErrorKind::InvalidParameterException(inner) => {
Error::InvalidParameterException(inner)
}
crate::error::UpdateUserPhoneConfigErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateUserPhoneConfigErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::UpdateUserPhoneConfigErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateUserPhoneConfigErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateUserRoutingProfileError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateUserRoutingProfileError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateUserRoutingProfileErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateUserRoutingProfileErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateUserRoutingProfileErrorKind::InvalidRequestException(inner) => {
Error::InvalidRequestException(inner)
}
crate::error::UpdateUserRoutingProfileErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateUserRoutingProfileErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateUserRoutingProfileErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::UpdateUserSecurityProfilesError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::UpdateUserSecurityProfilesError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::UpdateUserSecurityProfilesErrorKind::InternalServiceException(
inner,
) => Error::InternalServiceException(inner),
crate::error::UpdateUserSecurityProfilesErrorKind::InvalidParameterException(
inner,
) => Error::InvalidParameterException(inner),
crate::error::UpdateUserSecurityProfilesErrorKind::InvalidRequestException(
inner,
) => Error::InvalidRequestException(inner),
crate::error::UpdateUserSecurityProfilesErrorKind::ResourceNotFoundException(
inner,
) => Error::ResourceNotFoundException(inner),
crate::error::UpdateUserSecurityProfilesErrorKind::ThrottlingException(inner) => {
Error::ThrottlingException(inner)
}
crate::error::UpdateUserSecurityProfilesErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl std::error::Error for Error {}
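
// Usage sketch (illustrative, not part of the generated code): the `From`
// impls above let `?` collapse any operation-specific `SdkError<E, R>` into
// this crate-wide `Error`, so one signature can cover mixed operations. The
// fluent-builder calls below assume the generated client API for
// `UpdateQueueName` (setter names mirroring the request fields):
//
// async fn rename_queue(
//     client: &crate::Client,
//     instance_id: &str,
//     queue_id: &str,
//     new_name: &str,
// ) -> Result<(), Error> {
//     client
//         .update_queue_name()
//         .instance_id(instance_id)
//         .queue_id(queue_id)
//         .name(new_name)
//         .send()
//         .await?; // `SdkError<UpdateQueueNameError, _>` converts via `From`
//     Ok(())
// }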
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test originally exercised `@` (managed) and `~` (owned) boxes; in
// modern Rust those are `Rc` and `Box` respectively.
use std::rc::Rc;

trait Get {
    fn get(self) -> isize;
}

// Note: impl on a reference; we're checking that the pointer types below
// correctly get borrowed to `&`. (Similar to impling for `isize`, with
// `&self` instead of `self`.)
impl<'a> Get for &'a isize {
    fn get(self) -> isize {
        *self
    }
}

pub fn main() {
    let x: Rc<isize> = Rc::new(6);
    let y = x.get();
    assert_eq!(y, 6);

    let x: Rc<isize> = Rc::new(6);
    let y = x.get();
    println!("y={}", y);
    assert_eq!(y, 6);

    let x: Box<isize> = Box::new(6);
    let y = x.get();
    println!("y={}", y);
    assert_eq!(y, 6);

    let x: &isize = &6;
    let y = x.get();
    println!("y={}", y);
    assert_eq!(y, 6);
}
use crate::ast::{self, EventField, Field, GenericArg, TypeAlias, TypeDesc};
use crate::grammar::expressions::parse_expr;
use crate::grammar::functions::parse_single_word_stmt;
use crate::node::{Node, Span};
use crate::{ParseFailed, ParseResult, Parser, TokenKind};
use vec1::Vec1;
/// Parse a [`ModuleStmt::Struct`].
/// # Panics
/// Panics if the next token isn't `struct`.
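///
/// # Example
///
/// A representative snippet in the indentation-based block form that
/// `enter_block` consumes (the `:`-plus-indent layout is assumed here):
///
/// ```text
/// struct House:
///     pub price: u256
///     pub size: u256
/// ```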
pub fn parse_struct_def(par: &mut Parser) -> ParseResult<Node<ast::Struct>> {
use TokenKind::*;
let struct_tok = par.assert(Struct);
let name = par.expect_with_notes(Name, "failed to parse struct definition", || {
vec!["Note: a struct name must start with a letter or underscore, and contain letters, numbers, or underscores".into()]
})?;
let mut fields = vec![];
par.enter_block(struct_tok.span + name.span, "struct definition")?;
loop {
match par.peek() {
Some(Name) | Some(Pub) | Some(Const) => {
let pub_qual = parse_opt_qualifier(par, Pub);
let const_qual = parse_opt_qualifier(par, Const);
fields.push(parse_field(par, pub_qual, const_qual)?);
}
Some(Dedent) => {
par.next()?;
break;
}
Some(Pass) => {
parse_single_word_stmt(par)?;
}
None => break,
Some(_) => {
let tok = par.next()?;
par.unexpected_token_error(tok.span, "failed to parse struct def", vec![]);
return Err(ParseFailed);
}
}
}
let span = struct_tok.span + name.span + fields.last();
Ok(Node::new(
ast::Struct {
name: name.into(),
fields,
},
span,
))
}
/// Parse a type alias definition, e.g. `type MyMap = Map<u8, address>`.
/// # Panics
/// Panics if the next token isn't `type`.
pub fn parse_type_alias(par: &mut Parser) -> ParseResult<Node<TypeAlias>> {
let type_tok = par.assert(TokenKind::Type);
let name = par.expect(TokenKind::Name, "failed to parse type declaration")?;
par.expect_with_notes(TokenKind::Eq, "failed to parse type declaration", || {
vec![
"Note: a type alias name must be followed by an equals sign and a type description"
.into(),
format!("Example: `type {} = Map<address, u64>`", name.text),
]
})?;
let typ = parse_type_desc(par)?;
let span = type_tok.span + typ.span;
Ok(Node::new(
TypeAlias {
name: name.into(),
typ,
},
span,
))
}
/// Parse an event definition.
/// # Panics
/// Panics if the next token isn't `event`.
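///
/// # Example
///
/// A representative snippet (block form assumed, as with struct definitions):
///
/// ```text
/// event Transfer:
///     idx sender: address
///     idx receiver: address
///     value: u256
/// ```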
pub fn parse_event_def(par: &mut Parser) -> ParseResult<Node<ast::Event>> {
use TokenKind::*;
let event_tok = par.assert(Event);
let name = par.expect(Name, "failed to parse event definition")?;
let mut fields = vec![];
par.enter_block(event_tok.span + name.span, "event definition")?;
loop {
match par.peek() {
Some(Name) | Some(Idx) => {
fields.push(parse_event_field(par)?);
}
Some(Pass) => {
parse_single_word_stmt(par)?;
}
Some(Dedent) => {
par.next()?;
break;
}
None => break,
Some(_) => {
let tok = par.next()?;
par.unexpected_token_error(tok.span, "failed to parse event definition", vec![]);
return Err(ParseFailed);
}
}
}
let span = event_tok.span + name.span + fields.last();
Ok(Node::new(
ast::Event {
name: name.into(),
fields,
},
span,
))
}
/// Parse an event field, e.g. `foo: u8` or `idx from: address`.
pub fn parse_event_field(par: &mut Parser) -> ParseResult<Node<EventField>> {
let idx_qual = parse_opt_qualifier(par, TokenKind::Idx);
let name = par.expect(TokenKind::Name, "failed to parse event field")?;
par.expect_with_notes(TokenKind::Colon, "failed to parse event field", || {
vec![
"Note: event field name must be followed by a colon and a type description".into(),
format!(
"Example: `{}{}: address`",
if idx_qual.is_some() { "idx " } else { "" },
name.text
),
]
})?;
let typ = parse_type_desc(par)?;
par.expect_newline("event field")?;
let span = name.span + idx_qual + &typ;
Ok(Node::new(
EventField {
is_idx: idx_qual.is_some(),
name: Node::new(name.text.into(), name.span),
typ,
},
span,
))
}
/// Parse a field for a struct or contract. The leading optional `pub` and
/// `const` qualifiers must be parsed by the caller, and passed in.
/// Note that `event` fields are handled in [`parse_event_field`].
pub fn parse_field(
par: &mut Parser,
pub_qual: Option<Span>,
const_qual: Option<Span>,
) -> ParseResult<Node<Field>> {
let name = par.expect(TokenKind::Name, "failed to parse field definition")?;
par.expect_with_notes(TokenKind::Colon, "failed to parse field definition", || {
vec![
"Note: field name must be followed by a colon and a type description".into(),
format!(
"Example: {}{}{}: address",
if pub_qual.is_some() { "pub " } else { "" },
if const_qual.is_some() { "const " } else { "" },
name.text
),
]
})?;
let typ = parse_type_desc(par)?;
let value = if par.peek() == Some(TokenKind::Eq) {
par.next()?;
Some(parse_expr(par)?)
} else {
None
};
par.expect_newline("field definition")?;
let span = name.span + pub_qual + const_qual + &typ;
Ok(Node::new(
Field {
is_pub: pub_qual.is_some(),
is_const: const_qual.is_some(),
name: Node::new(name.text.into(), name.span),
typ,
value,
},
span,
))
}
/// Parse an optional qualifier (`pub`, `const`, or `idx`).
pub fn parse_opt_qualifier(par: &mut Parser, tk: TokenKind) -> Option<Span> {
if par.peek() == Some(tk) {
let tok = par.next().unwrap();
Some(tok.span)
} else {
None
}
}
/// Parse an angle-bracket-wrapped list of generic arguments (eg. the tail end
/// of `Map<address, u256>`).
/// # Panics
/// Panics if the first token isn't `<`.
pub fn parse_generic_args(par: &mut Parser) -> ParseResult<Node<Vec<GenericArg>>> {
use TokenKind::*;
let mut span = par.assert(Lt).span;
let mut args = vec![];
let expect_end = |par: &mut Parser| {
// If there's no comma, the next token must be `>`
match par.peek_or_err()? {
Gt => Ok(par.next()?.span),
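            // A `>>` token closes two nested generic lists at once (e.g. the
            // tail of `Map<address, Map<u8, u256>>`); split it so the
            // enclosing list can still consume the remaining `>`.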
GtGt => Ok(par.split_next()?.span),
_ => {
let tok = par.next()?;
par.unexpected_token_error(
tok.span,
"Unexpected token while parsing generic arg list",
vec![],
);
Err(ParseFailed)
}
}
};
loop {
match par.peek_or_err()? {
Gt => {
span += par.next()?.span;
break;
}
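            // As in `expect_end`: `>>` closes this list and an enclosing
            // one, so take only the first `>` here.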
GtGt => {
span += par.split_next()?.span;
break;
}
Int => {
let tok = par.next()?;
if let Ok(num) = tok.text.parse() {
args.push(GenericArg::Int(Node::new(num, tok.span)));
if par.peek() == Some(Comma) {
par.next()?;
} else {
span += expect_end(par)?;
break;
}
} else {
par.error(tok.span, "failed to parse integer literal");
return Err(ParseFailed);
}
}
Name | ParenOpen => {
let typ = parse_type_desc(par)?;
args.push(GenericArg::TypeDesc(Node::new(typ.kind, typ.span)));
if par.peek() == Some(Comma) {
par.next()?;
} else {
span += expect_end(par)?;
break;
}
}
_ => {
let tok = par.next()?;
par.unexpected_token_error(
tok.span,
"failed to parse generic type argument list",
vec![],
);
return Err(ParseFailed);
}
}
}
Ok(Node::new(args, span))
}
/// Parse a type description, e.g. `u8` or `Map<address, u256>`.
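/// Also handles the unit type `()`, tuples like `(u8, address)`, and
/// fixed-size array suffixes like `u256[10]`.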
pub fn parse_type_desc(par: &mut Parser) -> ParseResult<Node<TypeDesc>> {
use TokenKind::*;
let mut typ = match par.peek_or_err()? {
Name => {
let name = par.next()?;
match par.peek() {
Some(Lt) => {
let args = parse_generic_args(par)?;
let span = name.span + args.span;
Node::new(
TypeDesc::Generic {
base: name.into(),
args,
},
span,
)
}
_ => Node::new(
TypeDesc::Base {
base: name.text.into(),
},
name.span,
),
}
}
ParenOpen => {
let mut span = par.next()?.span;
let mut items = vec![];
loop {
match par.peek_or_err()? {
ParenClose => {
span += par.next()?.span;
break;
}
Name | ParenOpen => {
let item = parse_type_desc(par)?;
span += item.span;
items.push(item);
if par.peek_or_err()? == Comma {
par.next()?;
} else {
span += par
.expect(
ParenClose,
"Unexpected token while parsing tuple type description",
)?
.span;
break;
}
}
_ => {
let tok = par.next()?;
par.unexpected_token_error(
tok.span,
"failed to parse type description",
vec![],
);
return Err(ParseFailed);
}
}
}
if items.is_empty() {
Node::new(TypeDesc::Unit, span)
} else {
Node::new(
TypeDesc::Tuple {
items: Vec1::try_from_vec(items).expect("couldn't convert vec to vec1"),
},
span,
)
}
}
_ => {
let tok = par.next()?;
par.unexpected_token_error(tok.span, "failed to parse type description", vec![]);
return Err(ParseFailed);
}
};
while par.peek() == Some(BracketOpen) {
let ctx = "Unexpected token while parsing array type description.";
let mut span = typ.span + par.next()?.span;
let num = par.expect(Int, ctx)?;
if let Ok(dimension) = num.text.parse() {
span += par.expect(BracketClose, ctx)?.span;
typ = Node::new(
TypeDesc::Array {
typ: Box::new(typ),
dimension,
},
span,
);
} else {
par.error(num.span, "failed to parse number literal");
return Err(ParseFailed);
}
}
Ok(typ)
}
use alloc::{boxed::Box, sync::Arc, vec};
use core::ops::Drop;
pub struct Buffer {
queue: Arc<wgpu::Queue>,
pub(crate) buffer: Arc<wgpu::Buffer>,
pub(crate) offset: usize,
size: usize,
free: Box<dyn Fn() + Sync + Send + 'static>,
}
impl Buffer {
pub(crate) fn new<F>(queue: Arc<wgpu::Queue>, buffer: Arc<wgpu::Buffer>, offset: usize, size: usize, free: F) -> Self
where
F: Fn() + Sync + Send + 'static,
{
Self {
queue,
buffer,
offset,
size,
free: Box::new(free),
}
}
    pub fn write(&self, data: &[u8]) {
        // TODO: surface an error or warning instead of silently padding, and
        // guard against a padded write overrunning this allocation's `size`.
        //
        // `Queue::write_buffer` requires the data size to be a multiple of
        // `COPY_BUFFER_ALIGNMENT`, so zero-pad the tail up to the next
        // aligned length.
        let align = wgpu::COPY_BUFFER_ALIGNMENT as usize;
        let rem = data.len() % align;
        if rem != 0 {
            let mut padded = vec![0; data.len() + (align - rem)];
            padded[..data.len()].copy_from_slice(data);
            self.queue.write_buffer(&self.buffer, self.offset as u64, &padded)
        } else {
            self.queue.write_buffer(&self.buffer, self.offset as u64, data)
        }
    }
pub(crate) fn binding_resource(&self) -> wgpu::BindingResource {
wgpu::BindingResource::Buffer(wgpu::BufferBinding {
buffer: &self.buffer,
offset: self.offset as wgpu::BufferAddress,
size: wgpu::BufferSize::new(self.size as u64),
})
}
pub(crate) fn as_slice(&self) -> wgpu::BufferSlice {
self.buffer.slice(self.offset as u64..self.offset as u64 + self.size as u64)
}
}
impl Drop for Buffer {
fn drop(&mut self) {
(self.free)()
}
}
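
// Usage sketch (illustrative; the allocator below is hypothetical): `Buffer`
// is a handle to a sub-range of one large `wgpu::Buffer`, and the `free`
// callback returns that range to whoever handed it out once the handle drops:
//
// let handle = Buffer::new(queue.clone(), backing.clone(), offset, size, {
//     let allocator = allocator.clone();
//     move || allocator.lock().unwrap().release(offset)
// });
// handle.write(&bytes); // zero-padded up to COPY_BUFFER_ALIGNMENT if needed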
#![allow(unused_imports, non_camel_case_types)]
use crate::models::r4::Address::Address;
use crate::models::r4::Age::Age;
use crate::models::r4::Annotation::Annotation;
use crate::models::r4::Attachment::Attachment;
use crate::models::r4::CodeableConcept::CodeableConcept;
use crate::models::r4::Coding::Coding;
use crate::models::r4::ContactDetail::ContactDetail;
use crate::models::r4::ContactPoint::ContactPoint;
use crate::models::r4::Contributor::Contributor;
use crate::models::r4::Count::Count;
use crate::models::r4::DataRequirement::DataRequirement;
use crate::models::r4::Distance::Distance;
use crate::models::r4::Dosage::Dosage;
use crate::models::r4::Duration::Duration;
use crate::models::r4::Element::Element;
use crate::models::r4::Expression::Expression;
use crate::models::r4::Extension::Extension;
use crate::models::r4::HumanName::HumanName;
use crate::models::r4::Identifier::Identifier;
use crate::models::r4::Meta::Meta;
use crate::models::r4::Money::Money;
use crate::models::r4::ParameterDefinition::ParameterDefinition;
use crate::models::r4::Period::Period;
use crate::models::r4::Quantity::Quantity;
use crate::models::r4::Range::Range;
use crate::models::r4::Ratio::Ratio;
use crate::models::r4::Reference::Reference;
use crate::models::r4::RelatedArtifact::RelatedArtifact;
use crate::models::r4::SampledData::SampledData;
use crate::models::r4::Signature::Signature;
use crate::models::r4::Timing::Timing;
use crate::models::r4::TriggerDefinition::TriggerDefinition;
use crate::models::r4::UsageContext::UsageContext;
use serde_json::json;
use serde_json::value::Value;
use std::borrow::Cow;
/// A task to be performed.
#[derive(Debug)]
pub struct Task_Input<'a> {
pub(crate) value: Cow<'a, Value>,
}
impl Task_Input<'_> {
pub fn new(value: &Value) -> Task_Input {
Task_Input {
value: Cow::Borrowed(value),
}
}
pub fn to_json(&self) -> Value {
(*self.value).clone()
}
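
    // Reading a typed input (sketch): `Task.input` is a FHIR choice element,
    // so a well-formed JSON object carries `type` plus exactly one `value[x]`
    // key, e.g.:
    //
    // let raw = json!({ "type": { "text": "comment" }, "valueString": "hi" });
    // let input = Task_Input::new(&raw);
    // assert_eq!(input.value_string(), Some("hi"));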
/// Extensions for valueBase64Binary
pub fn _value_base_64_binary(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueBase64Binary") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueBoolean
pub fn _value_boolean(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueBoolean") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueCanonical
pub fn _value_canonical(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueCanonical") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueCode
pub fn _value_code(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueCode") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueDate
pub fn _value_date(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueDate") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueDateTime
pub fn _value_date_time(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueDateTime") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueDecimal
pub fn _value_decimal(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueDecimal") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueId
pub fn _value_id(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueId") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueInstant
pub fn _value_instant(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueInstant") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueInteger
pub fn _value_integer(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueInteger") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueMarkdown
pub fn _value_markdown(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueMarkdown") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueOid
pub fn _value_oid(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueOid") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valuePositiveInt
pub fn _value_positive_int(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valuePositiveInt") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueString
pub fn _value_string(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueString") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueTime
pub fn _value_time(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueTime") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueUnsignedInt
pub fn _value_unsigned_int(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueUnsignedInt") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueUri
pub fn _value_uri(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueUri") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueUrl
pub fn _value_url(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueUrl") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for valueUuid
pub fn _value_uuid(&self) -> Option<Element> {
if let Some(val) = self.value.get("_valueUuid") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element. To make the use of extensions safe and manageable,
/// there is a strict set of governance applied to the definition and use of
/// extensions. Though any implementer can define an extension, there is a set of
/// requirements that SHALL be met as part of the definition of the extension.
pub fn extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("extension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Unique id for the element within a resource (for internal references). This may be
/// any string value that does not contain spaces.
pub fn id(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("id") {
return Some(string);
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element and that modifies the understanding of the element
/// in which it is contained and/or the understanding of the containing element's
/// descendants. Usually modifier elements provide negation or qualification. To make
/// the use of extensions safe and manageable, there is a strict set of governance
/// applied to the definition and use of extensions. Though any implementer can define
/// an extension, there is a set of requirements that SHALL be met as part of the
/// definition of the extension. Applications processing a resource are required to
/// check for modifier extensions. Modifier extensions SHALL NOT change the meaning
/// of any elements on Resource or DomainResource (including cannot change the meaning
/// of modifierExtension itself).
pub fn modifier_extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("modifierExtension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// A code or description indicating how the input is intended to be used as part of
/// the task execution.
pub fn fhir_type(&self) -> CodeableConcept {
CodeableConcept {
value: Cow::Borrowed(&self.value["type"]),
}
}
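
    // FHIR `value[x]` choice: at most one of the `value_*` accessors below
    // returns `Some`, depending on which concrete `value*` key is present in
    // the JSON.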
/// The value of the input parameter as a basic type.
pub fn value_address(&self) -> Option<Address> {
if let Some(val) = self.value.get("valueAddress") {
return Some(Address {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_age(&self) -> Option<Age> {
if let Some(val) = self.value.get("valueAge") {
return Some(Age {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_annotation(&self) -> Option<Annotation> {
if let Some(val) = self.value.get("valueAnnotation") {
return Some(Annotation {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_attachment(&self) -> Option<Attachment> {
if let Some(val) = self.value.get("valueAttachment") {
return Some(Attachment {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_base_64_binary(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueBase64Binary") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_boolean(&self) -> Option<bool> {
if let Some(val) = self.value.get("valueBoolean") {
return Some(val.as_bool().unwrap());
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_canonical(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueCanonical") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_code(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueCode") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_codeable_concept(&self) -> Option<CodeableConcept> {
if let Some(val) = self.value.get("valueCodeableConcept") {
return Some(CodeableConcept {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_coding(&self) -> Option<Coding> {
if let Some(val) = self.value.get("valueCoding") {
return Some(Coding {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_contact_detail(&self) -> Option<ContactDetail> {
if let Some(val) = self.value.get("valueContactDetail") {
return Some(ContactDetail {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_contact_point(&self) -> Option<ContactPoint> {
if let Some(val) = self.value.get("valueContactPoint") {
return Some(ContactPoint {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_contributor(&self) -> Option<Contributor> {
if let Some(val) = self.value.get("valueContributor") {
return Some(Contributor {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_count(&self) -> Option<Count> {
if let Some(val) = self.value.get("valueCount") {
return Some(Count {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_data_requirement(&self) -> Option<DataRequirement> {
if let Some(val) = self.value.get("valueDataRequirement") {
return Some(DataRequirement {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_date(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueDate") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_date_time(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueDateTime") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_decimal(&self) -> Option<f64> {
if let Some(val) = self.value.get("valueDecimal") {
return Some(val.as_f64().unwrap());
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_distance(&self) -> Option<Distance> {
if let Some(val) = self.value.get("valueDistance") {
return Some(Distance {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_dosage(&self) -> Option<Dosage> {
if let Some(val) = self.value.get("valueDosage") {
return Some(Dosage {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_duration(&self) -> Option<Duration> {
if let Some(val) = self.value.get("valueDuration") {
return Some(Duration {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_expression(&self) -> Option<Expression> {
if let Some(val) = self.value.get("valueExpression") {
return Some(Expression {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_human_name(&self) -> Option<HumanName> {
if let Some(val) = self.value.get("valueHumanName") {
return Some(HumanName {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_id(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueId") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_identifier(&self) -> Option<Identifier> {
if let Some(val) = self.value.get("valueIdentifier") {
return Some(Identifier {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_instant(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueInstant") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_integer(&self) -> Option<f64> {
if let Some(val) = self.value.get("valueInteger") {
return Some(val.as_f64().unwrap());
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_markdown(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueMarkdown") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_meta(&self) -> Option<Meta> {
if let Some(val) = self.value.get("valueMeta") {
return Some(Meta {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_money(&self) -> Option<Money> {
if let Some(val) = self.value.get("valueMoney") {
return Some(Money {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_oid(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueOid") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_parameter_definition(&self) -> Option<ParameterDefinition> {
if let Some(val) = self.value.get("valueParameterDefinition") {
return Some(ParameterDefinition {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_period(&self) -> Option<Period> {
if let Some(val) = self.value.get("valuePeriod") {
return Some(Period {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_positive_int(&self) -> Option<f64> {
if let Some(val) = self.value.get("valuePositiveInt") {
            return val.as_f64();
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_quantity(&self) -> Option<Quantity> {
if let Some(val) = self.value.get("valueQuantity") {
return Some(Quantity {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_range(&self) -> Option<Range> {
if let Some(val) = self.value.get("valueRange") {
return Some(Range {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_ratio(&self) -> Option<Ratio> {
if let Some(val) = self.value.get("valueRatio") {
return Some(Ratio {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_reference(&self) -> Option<Reference> {
if let Some(val) = self.value.get("valueReference") {
return Some(Reference {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_related_artifact(&self) -> Option<RelatedArtifact> {
if let Some(val) = self.value.get("valueRelatedArtifact") {
return Some(RelatedArtifact {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_sampled_data(&self) -> Option<SampledData> {
if let Some(val) = self.value.get("valueSampledData") {
return Some(SampledData {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_signature(&self) -> Option<Signature> {
if let Some(val) = self.value.get("valueSignature") {
return Some(Signature {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_string(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueString") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_time(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueTime") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_timing(&self) -> Option<Timing> {
if let Some(val) = self.value.get("valueTiming") {
return Some(Timing {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_trigger_definition(&self) -> Option<TriggerDefinition> {
if let Some(val) = self.value.get("valueTriggerDefinition") {
return Some(TriggerDefinition {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_unsigned_int(&self) -> Option<f64> {
if let Some(val) = self.value.get("valueUnsignedInt") {
            return val.as_f64();
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_uri(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueUri") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_url(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueUrl") {
return Some(string);
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_usage_context(&self) -> Option<UsageContext> {
if let Some(val) = self.value.get("valueUsageContext") {
return Some(UsageContext {
value: Cow::Borrowed(val),
});
}
return None;
}
/// The value of the input parameter as a basic type.
pub fn value_uuid(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("valueUuid") {
return Some(string);
}
return None;
}
pub fn validate(&self) -> bool {
if let Some(_val) = self._value_base_64_binary() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_boolean() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_canonical() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_code() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_date() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_date_time() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_decimal() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_id() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_instant() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_integer() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_markdown() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_oid() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_positive_int() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_string() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_time() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_unsigned_int() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_uri() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_url() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._value_uuid() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.extension() {
        if !_val.into_iter().all(|e| e.validate()) {
return false;
}
}
if let Some(_val) = self.id() {}
if let Some(_val) = self.modifier_extension() {
        if !_val.into_iter().all(|e| e.validate()) {
return false;
}
}
if !self.fhir_type().validate() {
return false;
}
if let Some(_val) = self.value_address() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_age() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_annotation() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_attachment() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_base_64_binary() {}
if let Some(_val) = self.value_boolean() {}
if let Some(_val) = self.value_canonical() {}
if let Some(_val) = self.value_code() {}
if let Some(_val) = self.value_codeable_concept() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_coding() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_contact_detail() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_contact_point() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_contributor() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_count() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_data_requirement() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_date() {}
if let Some(_val) = self.value_date_time() {}
if let Some(_val) = self.value_decimal() {}
if let Some(_val) = self.value_distance() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_dosage() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_duration() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_expression() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_human_name() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_id() {}
if let Some(_val) = self.value_identifier() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_instant() {}
if let Some(_val) = self.value_integer() {}
if let Some(_val) = self.value_markdown() {}
if let Some(_val) = self.value_meta() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_money() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_oid() {}
if let Some(_val) = self.value_parameter_definition() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_period() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_positive_int() {}
if let Some(_val) = self.value_quantity() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_range() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_ratio() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_reference() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_related_artifact() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_sampled_data() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_signature() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_string() {}
if let Some(_val) = self.value_time() {}
if let Some(_val) = self.value_timing() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_trigger_definition() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_unsigned_int() {}
if let Some(_val) = self.value_uri() {}
if let Some(_val) = self.value_url() {}
if let Some(_val) = self.value_usage_context() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.value_uuid() {}
return true;
}
}
#[derive(Debug)]
pub struct Task_InputBuilder {
pub(crate) value: Value,
}
impl Task_InputBuilder {
pub fn build(&self) -> Task_Input {
Task_Input {
value: Cow::Owned(self.value.clone()),
}
}
pub fn with(existing: Task_Input) -> Task_InputBuilder {
Task_InputBuilder {
value: (*existing.value).clone(),
}
}
pub fn new(fhir_type: CodeableConcept) -> Task_InputBuilder {
let mut __value: Value = json!({});
__value["type"] = json!(fhir_type.value);
return Task_InputBuilder { value: __value };
}
pub fn _value_base_64_binary<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueBase64Binary"] = json!(val.value);
return self;
}
pub fn _value_boolean<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueBoolean"] = json!(val.value);
return self;
}
pub fn _value_canonical<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueCanonical"] = json!(val.value);
return self;
}
pub fn _value_code<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueCode"] = json!(val.value);
return self;
}
pub fn _value_date<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueDate"] = json!(val.value);
return self;
}
pub fn _value_date_time<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueDateTime"] = json!(val.value);
return self;
}
pub fn _value_decimal<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueDecimal"] = json!(val.value);
return self;
}
pub fn _value_id<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueId"] = json!(val.value);
return self;
}
pub fn _value_instant<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueInstant"] = json!(val.value);
return self;
}
pub fn _value_integer<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueInteger"] = json!(val.value);
return self;
}
pub fn _value_markdown<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueMarkdown"] = json!(val.value);
return self;
}
pub fn _value_oid<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueOid"] = json!(val.value);
return self;
}
pub fn _value_positive_int<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valuePositiveInt"] = json!(val.value);
return self;
}
pub fn _value_string<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueString"] = json!(val.value);
return self;
}
pub fn _value_time<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueTime"] = json!(val.value);
return self;
}
pub fn _value_unsigned_int<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueUnsignedInt"] = json!(val.value);
return self;
}
pub fn _value_uri<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueUri"] = json!(val.value);
return self;
}
pub fn _value_url<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueUrl"] = json!(val.value);
return self;
}
pub fn _value_uuid<'a>(&'a mut self, val: Element) -> &'a mut Task_InputBuilder {
self.value["_valueUuid"] = json!(val.value);
return self;
}
pub fn extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut Task_InputBuilder {
self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn id<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["id"] = json!(val);
return self;
}
pub fn modifier_extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut Task_InputBuilder {
self.value["modifierExtension"] =
json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn value_address<'a>(&'a mut self, val: Address) -> &'a mut Task_InputBuilder {
self.value["valueAddress"] = json!(val.value);
return self;
}
pub fn value_age<'a>(&'a mut self, val: Age) -> &'a mut Task_InputBuilder {
self.value["valueAge"] = json!(val.value);
return self;
}
pub fn value_annotation<'a>(&'a mut self, val: Annotation) -> &'a mut Task_InputBuilder {
self.value["valueAnnotation"] = json!(val.value);
return self;
}
pub fn value_attachment<'a>(&'a mut self, val: Attachment) -> &'a mut Task_InputBuilder {
self.value["valueAttachment"] = json!(val.value);
return self;
}
pub fn value_base_64_binary<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueBase64Binary"] = json!(val);
return self;
}
pub fn value_boolean<'a>(&'a mut self, val: bool) -> &'a mut Task_InputBuilder {
self.value["valueBoolean"] = json!(val);
return self;
}
pub fn value_canonical<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueCanonical"] = json!(val);
return self;
}
pub fn value_code<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueCode"] = json!(val);
return self;
}
pub fn value_codeable_concept<'a>(
&'a mut self,
val: CodeableConcept,
) -> &'a mut Task_InputBuilder {
self.value["valueCodeableConcept"] = json!(val.value);
return self;
}
pub fn value_coding<'a>(&'a mut self, val: Coding) -> &'a mut Task_InputBuilder {
self.value["valueCoding"] = json!(val.value);
return self;
}
pub fn value_contact_detail<'a>(&'a mut self, val: ContactDetail) -> &'a mut Task_InputBuilder {
self.value["valueContactDetail"] = json!(val.value);
return self;
}
pub fn value_contact_point<'a>(&'a mut self, val: ContactPoint) -> &'a mut Task_InputBuilder {
self.value["valueContactPoint"] = json!(val.value);
return self;
}
pub fn value_contributor<'a>(&'a mut self, val: Contributor) -> &'a mut Task_InputBuilder {
self.value["valueContributor"] = json!(val.value);
return self;
}
pub fn value_count<'a>(&'a mut self, val: Count) -> &'a mut Task_InputBuilder {
self.value["valueCount"] = json!(val.value);
return self;
}
pub fn value_data_requirement<'a>(
&'a mut self,
val: DataRequirement,
) -> &'a mut Task_InputBuilder {
self.value["valueDataRequirement"] = json!(val.value);
return self;
}
pub fn value_date<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueDate"] = json!(val);
return self;
}
pub fn value_date_time<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueDateTime"] = json!(val);
return self;
}
pub fn value_decimal<'a>(&'a mut self, val: f64) -> &'a mut Task_InputBuilder {
self.value["valueDecimal"] = json!(val);
return self;
}
pub fn value_distance<'a>(&'a mut self, val: Distance) -> &'a mut Task_InputBuilder {
self.value["valueDistance"] = json!(val.value);
return self;
}
pub fn value_dosage<'a>(&'a mut self, val: Dosage) -> &'a mut Task_InputBuilder {
self.value["valueDosage"] = json!(val.value);
return self;
}
pub fn value_duration<'a>(&'a mut self, val: Duration) -> &'a mut Task_InputBuilder {
self.value["valueDuration"] = json!(val.value);
return self;
}
pub fn value_expression<'a>(&'a mut self, val: Expression) -> &'a mut Task_InputBuilder {
self.value["valueExpression"] = json!(val.value);
return self;
}
pub fn value_human_name<'a>(&'a mut self, val: HumanName) -> &'a mut Task_InputBuilder {
self.value["valueHumanName"] = json!(val.value);
return self;
}
pub fn value_id<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueId"] = json!(val);
return self;
}
pub fn value_identifier<'a>(&'a mut self, val: Identifier) -> &'a mut Task_InputBuilder {
self.value["valueIdentifier"] = json!(val.value);
return self;
}
pub fn value_instant<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueInstant"] = json!(val);
return self;
}
pub fn value_integer<'a>(&'a mut self, val: f64) -> &'a mut Task_InputBuilder {
self.value["valueInteger"] = json!(val);
return self;
}
pub fn value_markdown<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueMarkdown"] = json!(val);
return self;
}
pub fn value_meta<'a>(&'a mut self, val: Meta) -> &'a mut Task_InputBuilder {
self.value["valueMeta"] = json!(val.value);
return self;
}
pub fn value_money<'a>(&'a mut self, val: Money) -> &'a mut Task_InputBuilder {
self.value["valueMoney"] = json!(val.value);
return self;
}
pub fn value_oid<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueOid"] = json!(val);
return self;
}
pub fn value_parameter_definition<'a>(
&'a mut self,
val: ParameterDefinition,
) -> &'a mut Task_InputBuilder {
self.value["valueParameterDefinition"] = json!(val.value);
return self;
}
pub fn value_period<'a>(&'a mut self, val: Period) -> &'a mut Task_InputBuilder {
self.value["valuePeriod"] = json!(val.value);
return self;
}
pub fn value_positive_int<'a>(&'a mut self, val: f64) -> &'a mut Task_InputBuilder {
self.value["valuePositiveInt"] = json!(val);
return self;
}
pub fn value_quantity<'a>(&'a mut self, val: Quantity) -> &'a mut Task_InputBuilder {
self.value["valueQuantity"] = json!(val.value);
return self;
}
pub fn value_range<'a>(&'a mut self, val: Range) -> &'a mut Task_InputBuilder {
self.value["valueRange"] = json!(val.value);
return self;
}
pub fn value_ratio<'a>(&'a mut self, val: Ratio) -> &'a mut Task_InputBuilder {
self.value["valueRatio"] = json!(val.value);
return self;
}
pub fn value_reference<'a>(&'a mut self, val: Reference) -> &'a mut Task_InputBuilder {
self.value["valueReference"] = json!(val.value);
return self;
}
pub fn value_related_artifact<'a>(
&'a mut self,
val: RelatedArtifact,
) -> &'a mut Task_InputBuilder {
self.value["valueRelatedArtifact"] = json!(val.value);
return self;
}
pub fn value_sampled_data<'a>(&'a mut self, val: SampledData) -> &'a mut Task_InputBuilder {
self.value["valueSampledData"] = json!(val.value);
return self;
}
pub fn value_signature<'a>(&'a mut self, val: Signature) -> &'a mut Task_InputBuilder {
self.value["valueSignature"] = json!(val.value);
return self;
}
pub fn value_string<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueString"] = json!(val);
return self;
}
pub fn value_time<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueTime"] = json!(val);
return self;
}
pub fn value_timing<'a>(&'a mut self, val: Timing) -> &'a mut Task_InputBuilder {
self.value["valueTiming"] = json!(val.value);
return self;
}
pub fn value_trigger_definition<'a>(
&'a mut self,
val: TriggerDefinition,
) -> &'a mut Task_InputBuilder {
self.value["valueTriggerDefinition"] = json!(val.value);
return self;
}
pub fn value_unsigned_int<'a>(&'a mut self, val: f64) -> &'a mut Task_InputBuilder {
self.value["valueUnsignedInt"] = json!(val);
return self;
}
pub fn value_uri<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueUri"] = json!(val);
return self;
}
pub fn value_url<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueUrl"] = json!(val);
return self;
}
pub fn value_usage_context<'a>(&'a mut self, val: UsageContext) -> &'a mut Task_InputBuilder {
self.value["valueUsageContext"] = json!(val.value);
return self;
}
pub fn value_uuid<'a>(&'a mut self, val: &str) -> &'a mut Task_InputBuilder {
self.value["valueUuid"] = json!(val);
return self;
}
}
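// Example (sketch): assembling a Task_Input through the builder. The
// `concept` value is illustrative; a concrete CodeableConcept would be
// produced by its own builder elsewhere in this crate.
//
// let input = Task_InputBuilder::new(concept)
//     .value_string("hello")
//     .build();
// assert!(input.validate());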
| 31.748799 | 100 | 0.540534 |
ffbfe76d08dc52094adc3f894c125270c9015015 | 6,560 | #[doc = "Writer for register PWRSET"]
pub type W = crate::W<u32, super::PWRSET>;
#[doc = "Register PWRSET `reset()`'s with value 0"]
impl crate::ResetValue for super::PWRSET {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Set Hibernate Domain Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum HIB_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: Enable Hibernate domain"]
VALUE2 = 1,
}
impl From<HIB_AW> for bool {
#[inline(always)]
fn from(variant: HIB_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `HIB`"]
pub struct HIB_W<'a> {
w: &'a mut W,
}
impl<'a> HIB_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: HIB_AW) -> &'a mut W {
        self.bit(variant.into())
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(HIB_AW::VALUE1)
}
#[doc = "Enable Hibernate domain"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(HIB_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Set USB PHY Transceiver Disable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum USBPHYPDQ_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: Active"]
VALUE2 = 1,
}
impl From<USBPHYPDQ_AW> for bool {
#[inline(always)]
fn from(variant: USBPHYPDQ_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `USBPHYPDQ`"]
pub struct USBPHYPDQ_W<'a> {
w: &'a mut W,
}
impl<'a> USBPHYPDQ_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: USBPHYPDQ_AW) -> &'a mut W {
        self.bit(variant.into())
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(USBPHYPDQ_AW::VALUE1)
}
#[doc = "Active"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(USBPHYPDQ_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Set USB On-The-Go Comparators Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum USBOTGEN_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: Active"]
VALUE2 = 1,
}
impl From<USBOTGEN_AW> for bool {
#[inline(always)]
fn from(variant: USBOTGEN_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `USBOTGEN`"]
pub struct USBOTGEN_W<'a> {
w: &'a mut W,
}
impl<'a> USBOTGEN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: USBOTGEN_AW) -> &'a mut W {
        self.bit(variant.into())
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(USBOTGEN_AW::VALUE1)
}
#[doc = "Active"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(USBOTGEN_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
self.w
}
}
#[doc = "Set USB Weak Pull-Up at PADN Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum USBPUWQ_AW {
#[doc = "0: No effect"]
VALUE1 = 0,
#[doc = "1: Pull-up not active"]
VALUE2 = 1,
}
impl From<USBPUWQ_AW> for bool {
#[inline(always)]
fn from(variant: USBPUWQ_AW) -> Self {
variant as u8 != 0
}
}
#[doc = "Write proxy for field `USBPUWQ`"]
pub struct USBPUWQ_W<'a> {
w: &'a mut W,
}
impl<'a> USBPUWQ_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: USBPUWQ_AW) -> &'a mut W {
        self.bit(variant.into())
}
#[doc = "No effect"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(USBPUWQ_AW::VALUE1)
}
#[doc = "Pull-up not active"]
#[inline(always)]
pub fn value2(self) -> &'a mut W {
self.variant(USBPUWQ_AW::VALUE2)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
impl W {
#[doc = "Bit 0 - Set Hibernate Domain Enable"]
#[inline(always)]
pub fn hib(&mut self) -> HIB_W {
HIB_W { w: self }
}
#[doc = "Bit 16 - Set USB PHY Transceiver Disable"]
#[inline(always)]
pub fn usbphypdq(&mut self) -> USBPHYPDQ_W {
USBPHYPDQ_W { w: self }
}
#[doc = "Bit 17 - Set USB On-The-Go Comparators Enable"]
#[inline(always)]
pub fn usbotgen(&mut self) -> USBOTGEN_W {
USBOTGEN_W { w: self }
}
#[doc = "Bit 18 - Set USB Weak Pull-Up at PADN Enable"]
#[inline(always)]
pub fn usbpuwq(&mut self) -> USBPUWQ_W {
USBPUWQ_W { w: self }
}
}
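// Example (sketch): writing PWRSET through the generated API. The path to
// the register depends on the peripheral access crate's `Peripherals`
// layout; `scu_power` is illustrative.
//
// scu_power.pwrset.write(|w| w.hib().value2());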
| 26.77551 | 86 | 0.539482 |
5604553780496668884840d8c9771f358ee39ad6 | 4,827 | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::i64;
use super::{EvalContext, Result, ScalarFunc};
use crate::coprocessor::codec::Datum;
impl ScalarFunc {
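    /// Evaluates MySQL's `BIT_COUNT()`: returns the number of bits set in
    /// the integer argument, or `NULL` when the argument is `NULL`.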
#[inline]
pub fn bit_count(&self, ctx: &mut EvalContext, row: &[Datum]) -> Result<Option<i64>> {
let res = self.children[0].eval_int(ctx, row);
match res {
Ok(r) => {
if let Some(v) = r {
Ok(Some(i64::from(v.count_ones())))
} else {
Ok(None)
}
}
Err(e) => {
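                // MySQL compatibility: a child cast that overflows is
                // treated as the all-ones bit pattern, so all 64 bits
                // count as set.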
if e.is_overflow() {
Ok(Some(64))
} else {
Err(e)
}
}
}
}
}
#[cfg(test)]
mod tests {
use crate::coprocessor::codec::mysql::Decimal;
use crate::coprocessor::codec::Datum;
use crate::coprocessor::dag::expr::ctx::FLAG_OVERFLOW_AS_WARNING;
use crate::coprocessor::dag::expr::tests::{datum_expr, scalar_func_expr};
use crate::coprocessor::dag::expr::{EvalConfig, EvalContext, Expression};
use std::str::FromStr;
use std::sync::Arc;
use tipb::expression::ScalarFuncSig;
#[test]
fn test_bit_count() {
let cases = vec![
(Datum::I64(8), Datum::I64(1)),
(Datum::I64(29), Datum::I64(4)),
(Datum::I64(0), Datum::I64(0)),
(Datum::I64(-1), Datum::I64(64)),
(Datum::I64(-11), Datum::I64(62)),
(Datum::I64(-1000), Datum::I64(56)),
(Datum::I64(9223372036854775807), Datum::I64(63)),
(Datum::U64(9223372036854775808), Datum::I64(1)),
(Datum::U64(9223372036854775809), Datum::I64(2)),
(Datum::U64(11111111112222222222), Datum::I64(37)),
(Datum::U64(18446744073709551615), Datum::I64(64)),
(Datum::U64(18446744073709551614), Datum::I64(63)),
(Datum::Null, Datum::Null),
];
let mut ctx = EvalContext::default();
for (input, exp) in cases {
let args = &[datum_expr(input)];
let op = scalar_func_expr(ScalarFuncSig::BitCount, args);
let op = Expression::build(&ctx, op).unwrap();
let res = op.eval(&mut ctx, &[]).unwrap();
assert_eq!(res, exp);
}
let cases = vec![
(
Datum::Bytes(
b"111111111111111111111111111111111111111111111111111111111111111".to_vec(),
),
Datum::I64(64),
),
(
Datum::Bytes(b"18446744073709551616".to_vec()),
Datum::I64(64),
),
(
Datum::Bytes(b"18446744073709551615".to_vec()),
Datum::I64(64),
),
(
Datum::Bytes(b"18446744073709551614".to_vec()),
Datum::I64(63),
),
(
Datum::Bytes(b"11111111112222222222".to_vec()),
Datum::I64(37),
),
];
let mut ctx = EvalContext::new(Arc::new(EvalConfig::from_flags(FLAG_OVERFLOW_AS_WARNING)));
for (input, exp) in cases {
let args = &[datum_expr(input)];
let child = scalar_func_expr(ScalarFuncSig::CastStringAsInt, args);
let op = scalar_func_expr(ScalarFuncSig::BitCount, &[child]);
let op = Expression::build(&ctx, op).unwrap();
let res = op.eval(&mut ctx, &[]).unwrap();
assert_eq!(res, exp);
}
let cases = vec![
(
Datum::Dec(
Decimal::from_str(
"111111111111111111111111111111111111111111111111111111111111111",
)
.unwrap(),
),
Datum::I64(63),
),
(
Datum::Dec(
Decimal::from_str(
"-111111111111111111111111111111111111111111111111111111111111111",
)
.unwrap(),
),
Datum::I64(1),
),
(
Datum::Dec(Decimal::from_str("18446744073709551616").unwrap()),
Datum::I64(63),
),
];
let mut ctx = EvalContext::new(Arc::new(EvalConfig::from_flags(FLAG_OVERFLOW_AS_WARNING)));
for (input, exp) in cases {
let args = &[datum_expr(input)];
let child = scalar_func_expr(ScalarFuncSig::CastDecimalAsInt, args);
let op = scalar_func_expr(ScalarFuncSig::BitCount, &[child]);
let op = Expression::build(&ctx, op).unwrap();
let res = op.eval(&mut ctx, &[]).unwrap();
assert_eq!(res, exp);
}
}
}
| 35.233577 | 99 | 0.484359 |
14bc19dffd5d0b6f307856c23ccdf8814b74fdf9 | 16,430 | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lint;
use metadata::cstore::CStore;
use metadata::filesearch;
use session::search_paths::PathKind;
use util::nodemap::NodeMap;
use syntax::ast::NodeId;
use syntax::codemap::Span;
use syntax::diagnostic::{self, Emitter};
use syntax::diagnostics;
use syntax::feature_gate;
use syntax::parse;
use syntax::parse::token;
use syntax::parse::ParseSess;
use syntax::{ast, codemap};
use rustc_back::target::Target;
use std::path::{Path, PathBuf};
use std::cell::{Cell, RefCell};
use std::env;
pub mod config;
pub mod search_paths;
// Represents the data associated with a compilation
// session for a single crate.
pub struct Session {
pub target: config::Config,
pub host: Target,
pub opts: config::Options,
pub cstore: CStore,
pub parse_sess: ParseSess,
// For a library crate, this is always none
pub entry_fn: RefCell<Option<(NodeId, codemap::Span)>>,
pub entry_type: Cell<Option<config::EntryFnType>>,
pub plugin_registrar_fn: Cell<Option<ast::NodeId>>,
pub default_sysroot: Option<PathBuf>,
// The name of the root source file of the crate, in the local file system. The path is always
// expected to be absolute. `None` means that there is no source file.
pub local_crate_source_file: Option<PathBuf>,
pub working_dir: PathBuf,
pub lint_store: RefCell<lint::LintStore>,
pub lints: RefCell<NodeMap<Vec<(lint::LintId, codemap::Span, String)>>>,
pub plugin_llvm_passes: RefCell<Vec<String>>,
pub crate_types: RefCell<Vec<config::CrateType>>,
pub crate_metadata: RefCell<Vec<String>>,
pub features: RefCell<feature_gate::Features>,
pub delayed_span_bug: RefCell<Option<(codemap::Span, String)>>,
/// The maximum recursion limit for potentially infinitely recursive
/// operations such as auto-dereference and monomorphization.
pub recursion_limit: Cell<usize>,
pub can_print_warnings: bool
}
impl Session {
pub fn span_fatal(&self, sp: Span, msg: &str) -> ! {
if self.opts.treat_err_as_bug {
self.span_bug(sp, msg);
}
panic!(self.diagnostic().span_fatal(sp, msg))
}
pub fn span_fatal_with_code(&self, sp: Span, msg: &str, code: &str) -> ! {
if self.opts.treat_err_as_bug {
self.span_bug(sp, msg);
}
panic!(self.diagnostic().span_fatal_with_code(sp, msg, code))
}
pub fn fatal(&self, msg: &str) -> ! {
if self.opts.treat_err_as_bug {
self.bug(msg);
}
self.diagnostic().handler().fatal(msg)
}
pub fn span_err(&self, sp: Span, msg: &str) {
if self.opts.treat_err_as_bug {
self.span_bug(sp, msg);
}
match split_msg_into_multilines(msg) {
Some(msg) => self.diagnostic().span_err(sp, &msg[..]),
None => self.diagnostic().span_err(sp, msg)
}
}
pub fn span_err_with_code(&self, sp: Span, msg: &str, code: &str) {
if self.opts.treat_err_as_bug {
self.span_bug(sp, msg);
}
match split_msg_into_multilines(msg) {
Some(msg) => self.diagnostic().span_err_with_code(sp, &msg[..], code),
None => self.diagnostic().span_err_with_code(sp, msg, code)
}
}
pub fn err(&self, msg: &str) {
if self.opts.treat_err_as_bug {
self.bug(msg);
}
self.diagnostic().handler().err(msg)
}
pub fn err_count(&self) -> usize {
self.diagnostic().handler().err_count()
}
pub fn has_errors(&self) -> bool {
self.diagnostic().handler().has_errors()
}
pub fn abort_if_errors(&self) {
self.diagnostic().handler().abort_if_errors();
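        // If a span bug was deferred via delay_span_bug and no ordinary
        // error aborted the session above, report it now.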
let delayed_bug = self.delayed_span_bug.borrow();
match *delayed_bug {
Some((span, ref errmsg)) => {
self.diagnostic().span_bug(span, errmsg);
},
_ => {}
}
}
pub fn span_warn(&self, sp: Span, msg: &str) {
if self.can_print_warnings {
self.diagnostic().span_warn(sp, msg)
}
}
pub fn span_warn_with_code(&self, sp: Span, msg: &str, code: &str) {
if self.can_print_warnings {
self.diagnostic().span_warn_with_code(sp, msg, code)
}
}
pub fn warn(&self, msg: &str) {
if self.can_print_warnings {
self.diagnostic().handler().warn(msg)
}
}
pub fn opt_span_warn(&self, opt_sp: Option<Span>, msg: &str) {
match opt_sp {
Some(sp) => self.span_warn(sp, msg),
None => self.warn(msg),
}
}
pub fn span_note(&self, sp: Span, msg: &str) {
self.diagnostic().span_note(sp, msg)
}
pub fn span_end_note(&self, sp: Span, msg: &str) {
self.diagnostic().span_end_note(sp, msg)
}
/// Prints out a message with a suggested edit of the code.
///
/// See `diagnostic::RenderSpan::Suggestion` for more information.
pub fn span_suggestion(&self, sp: Span, msg: &str, suggestion: String) {
self.diagnostic().span_suggestion(sp, msg, suggestion)
}
pub fn span_help(&self, sp: Span, msg: &str) {
self.diagnostic().span_help(sp, msg)
}
pub fn fileline_note(&self, sp: Span, msg: &str) {
self.diagnostic().fileline_note(sp, msg)
}
pub fn fileline_help(&self, sp: Span, msg: &str) {
self.diagnostic().fileline_help(sp, msg)
}
pub fn note(&self, msg: &str) {
self.diagnostic().handler().note(msg)
}
pub fn help(&self, msg: &str) {
self.diagnostic().handler().help(msg)
}
pub fn opt_span_bug(&self, opt_sp: Option<Span>, msg: &str) -> ! {
match opt_sp {
Some(sp) => self.span_bug(sp, msg),
None => self.bug(msg),
}
}
/// Delay a span_bug() call until abort_if_errors()
pub fn delay_span_bug(&self, sp: Span, msg: &str) {
let mut delayed = self.delayed_span_bug.borrow_mut();
*delayed = Some((sp, msg.to_string()));
}
pub fn span_bug(&self, sp: Span, msg: &str) -> ! {
self.diagnostic().span_bug(sp, msg)
}
pub fn bug(&self, msg: &str) -> ! {
self.diagnostic().handler().bug(msg)
}
pub fn span_unimpl(&self, sp: Span, msg: &str) -> ! {
self.diagnostic().span_unimpl(sp, msg)
}
pub fn unimpl(&self, msg: &str) -> ! {
self.diagnostic().handler().unimpl(msg)
}
pub fn add_lint(&self,
lint: &'static lint::Lint,
id: ast::NodeId,
sp: Span,
msg: String) {
let lint_id = lint::LintId::of(lint);
let mut lints = self.lints.borrow_mut();
match lints.get_mut(&id) {
Some(arr) => { arr.push((lint_id, sp, msg)); return; }
None => {}
}
lints.insert(id, vec!((lint_id, sp, msg)));
}
pub fn next_node_id(&self) -> ast::NodeId {
self.parse_sess.next_node_id()
}
pub fn reserve_node_ids(&self, count: ast::NodeId) -> ast::NodeId {
self.parse_sess.reserve_node_ids(count)
}
pub fn diagnostic<'a>(&'a self) -> &'a diagnostic::SpanHandler {
&self.parse_sess.span_diagnostic
}
pub fn codemap<'a>(&'a self) -> &'a codemap::CodeMap {
&self.parse_sess.span_diagnostic.cm
}
// This exists to help with refactoring to eliminate impossible
// cases later on
pub fn impossible_case(&self, sp: Span, msg: &str) -> ! {
self.span_bug(sp,
&format!("impossible case reached: {}", msg));
}
pub fn verbose(&self) -> bool { self.opts.debugging_opts.verbose }
pub fn time_passes(&self) -> bool { self.opts.debugging_opts.time_passes }
pub fn count_llvm_insns(&self) -> bool {
self.opts.debugging_opts.count_llvm_insns
}
pub fn count_type_sizes(&self) -> bool {
self.opts.debugging_opts.count_type_sizes
}
pub fn time_llvm_passes(&self) -> bool {
self.opts.debugging_opts.time_llvm_passes
}
pub fn trans_stats(&self) -> bool { self.opts.debugging_opts.trans_stats }
pub fn meta_stats(&self) -> bool { self.opts.debugging_opts.meta_stats }
pub fn asm_comments(&self) -> bool { self.opts.debugging_opts.asm_comments }
pub fn no_verify(&self) -> bool { self.opts.debugging_opts.no_verify }
pub fn borrowck_stats(&self) -> bool { self.opts.debugging_opts.borrowck_stats }
pub fn print_llvm_passes(&self) -> bool {
self.opts.debugging_opts.print_llvm_passes
}
pub fn lto(&self) -> bool {
self.opts.cg.lto
}
pub fn no_landing_pads(&self) -> bool {
self.opts.debugging_opts.no_landing_pads
}
pub fn unstable_options(&self) -> bool {
self.opts.debugging_opts.unstable_options
}
pub fn print_enum_sizes(&self) -> bool {
self.opts.debugging_opts.print_enum_sizes
}
pub fn sysroot<'a>(&'a self) -> &'a Path {
match self.opts.maybe_sysroot {
Some (ref sysroot) => sysroot,
None => self.default_sysroot.as_ref()
.expect("missing sysroot and default_sysroot in Session")
}
}
pub fn target_filesearch(&self, kind: PathKind) -> filesearch::FileSearch {
filesearch::FileSearch::new(self.sysroot(),
&self.opts.target_triple,
&self.opts.search_paths,
kind)
}
pub fn host_filesearch(&self, kind: PathKind) -> filesearch::FileSearch {
filesearch::FileSearch::new(
self.sysroot(),
config::host_triple(),
&self.opts.search_paths,
kind)
}
}
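/// Reformats certain type-mismatch diagnostics onto multiple lines by
/// inserting line breaks before the `expected`/`found` segments and any
/// trailing parenthesized note. Returns `None` for messages that are not
/// eligible for multi-line rendering.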
fn split_msg_into_multilines(msg: &str) -> Option<String> {
// Conditions for enabling multi-line errors:
if !msg.contains("mismatched types") &&
!msg.contains("type mismatch resolving") &&
!msg.contains("if and else have incompatible types") &&
!msg.contains("if may be missing an else clause") &&
!msg.contains("match arms have incompatible types") &&
!msg.contains("structure constructor specifies a structure of type") {
return None
}
let first = msg.match_indices("expected").filter(|s| {
s.0 > 0 && (msg.char_at_reverse(s.0) == ' ' ||
msg.char_at_reverse(s.0) == '(')
}).map(|(a, b)| (a - 1, b));
let second = msg.match_indices("found").filter(|s| {
msg.char_at_reverse(s.0) == ' '
}).map(|(a, b)| (a - 1, b));
let mut new_msg = String::new();
let mut head = 0;
// Insert `\n` before expected and found.
for (pos1, pos2) in first.zip(second) {
new_msg = new_msg +
// A `(` may be preceded by a space and it should be trimmed
msg[head..pos1.0].trim_right() + // prefix
"\n" + // insert before first
&msg[pos1.0..pos1.1] + // insert what first matched
&msg[pos1.1..pos2.0] + // between matches
"\n " + // insert before second
// 123
// `expected` is 3 char longer than `found`. To align the types,
// `found` gets 3 spaces prepended.
&msg[pos2.0..pos2.1]; // insert what second matched
head = pos2.1;
}
let mut tail = &msg[head..];
let third = tail.find("(values differ")
.or(tail.find("(lifetime"))
.or(tail.find("(cyclic type of infinite size"));
// Insert `\n` before any remaining messages which match.
if let Some(pos) = third {
// The end of the message may just be wrapped in `()` without
// `expected`/`found`. Push this also to a new line and add the
// final tail after.
new_msg = new_msg +
// `(` is usually preceded by a space and should be trimmed.
tail[..pos].trim_right() + // prefix
"\n" + // insert before paren
&tail[pos..]; // append the tail
tail = "";
}
new_msg.push_str(tail);
return Some(new_msg);
}
pub fn build_session(sopts: config::Options,
local_crate_source_file: Option<PathBuf>,
registry: diagnostics::registry::Registry)
-> Session {
// FIXME: This is not general enough to make the warning lint completely override
// normal diagnostic warnings, since the warning lint can also be denied and changed
// later via the source code.
let can_print_warnings = sopts.lint_opts
.iter()
.filter(|&&(ref key, _)| *key == "warnings")
.map(|&(_, ref level)| *level != lint::Allow)
.last()
.unwrap_or(true);
let codemap = codemap::CodeMap::new();
let diagnostic_handler =
diagnostic::default_handler(sopts.color, Some(registry), can_print_warnings);
let span_diagnostic_handler =
diagnostic::mk_span_handler(diagnostic_handler, codemap);
build_session_(sopts, local_crate_source_file, span_diagnostic_handler)
}
pub fn build_session_(sopts: config::Options,
local_crate_source_file: Option<PathBuf>,
span_diagnostic: diagnostic::SpanHandler)
-> Session {
let host = match Target::search(config::host_triple()) {
Ok(t) => t,
Err(e) => {
span_diagnostic.handler()
.fatal(&format!("Error loading host specification: {}", e));
}
};
let target_cfg = config::build_target_config(&sopts, &span_diagnostic);
let p_s = parse::new_parse_sess_special_handler(span_diagnostic);
let default_sysroot = match sopts.maybe_sysroot {
Some(_) => None,
None => Some(filesearch::get_or_default_sysroot())
};
// Make the path absolute, if necessary
let local_crate_source_file = local_crate_source_file.map(|path|
if path.is_absolute() {
path.clone()
} else {
env::current_dir().unwrap().join(&path)
}
);
let can_print_warnings = sopts.lint_opts
.iter()
.filter(|&&(ref key, _)| *key == "warnings")
.map(|&(_, ref level)| *level != lint::Allow)
.last()
.unwrap_or(true);
let sess = Session {
target: target_cfg,
host: host,
opts: sopts,
cstore: CStore::new(token::get_ident_interner()),
parse_sess: p_s,
// For a library crate, this is always none
entry_fn: RefCell::new(None),
entry_type: Cell::new(None),
plugin_registrar_fn: Cell::new(None),
default_sysroot: default_sysroot,
local_crate_source_file: local_crate_source_file,
working_dir: env::current_dir().unwrap(),
lint_store: RefCell::new(lint::LintStore::new()),
lints: RefCell::new(NodeMap()),
plugin_llvm_passes: RefCell::new(Vec::new()),
crate_types: RefCell::new(Vec::new()),
crate_metadata: RefCell::new(Vec::new()),
delayed_span_bug: RefCell::new(None),
features: RefCell::new(feature_gate::Features::new()),
recursion_limit: Cell::new(64),
can_print_warnings: can_print_warnings
};
sess
}
// Seems out of place, but it uses session, so I'm putting it here
pub fn expect<T, M>(sess: &Session, opt: Option<T>, msg: M) -> T where
M: FnOnce() -> String,
{
diagnostic::expect(sess.diagnostic(), opt, msg)
}
pub fn early_error(msg: &str) -> ! {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Fatal);
panic!(diagnostic::FatalError);
}
pub fn early_warn(msg: &str) {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Warning);
}
| 36.838565 | 98 | 0.59227 |
75de3974089556bbad1016f5d403380cb8de35f4 | 1,256 | extern crate log;
extern crate log4rs;
use log::LevelFilter;
use log4rs::append::console::ConsoleAppender;
use log4rs::append::file::FileAppender;
use log4rs::config::{Appender, Config, Logger, Root};
use log4rs::encode::pattern::PatternEncoder;
pub fn init_log4(path: &str) {
let stdout = ConsoleAppender::builder().build();
let requests = FileAppender::builder()
.encoder(Box::new(PatternEncoder::new(
"[{d(%Y-%m-%d %H:%M:%S)}] [{l}] [thread:{I}] [{f}] [{t}]- {m}{n}",
)))
.build(path)
.unwrap();
let config = Config::builder()
.appender(Appender::builder().build("stdout", Box::new(stdout)))
.appender(Appender::builder().build("requests", Box::new(requests)))
.logger(Logger::builder().build("app::backend::db", LevelFilter::Info))
.logger(
Logger::builder()
.appender("requests")
.additive(false)
.build("app::requests", LevelFilter::Info),
)
.build(
Root::builder()
.appender("stdout")
.appender("requests")
.build(LevelFilter::Info),
)
.unwrap();
    log4rs::init_config(config).unwrap();
}
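// Example (sketch): call once at startup, then use the `log` macros
// (e.g. with `#[macro_use] extern crate log;`). The path is illustrative.
//
// init_log4("log/app.log");
// info!("logger initialized");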
| 32.205128 | 79 | 0.557325 |
1c8ef3d6b88f3b159dadd7ebf6f871fc5e24e6a9 | 223 | use serde::{Deserialize, Serialize};
/// This object represents a service message about a voice chat ended in the
/// chat.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct VoiceChatEnded {}
| 31.857143 | 76 | 0.744395 |
7609554033cef5021c56421362d0eb388d039591 | 19,436 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Helper routines used for fragmenting structural paths due to moves for
//! tracking drop obligations. Please see the extensive comments in the
//! section "Structural fragments" in `doc.rs`.
use self::Fragment::*;
use borrowck::{LoanPath};
use borrowck::LoanPathKind::{LpVar, LpUpvar, LpDowncast, LpExtend};
use borrowck::LoanPathElem::{LpDeref, LpInterior};
use borrowck::move_data::{InvalidMovePathIndex};
use borrowck::move_data::{MoveData, MovePathIndex};
use rustc::session::config;
use rustc::middle::ty;
use rustc::middle::mem_categorization as mc;
use rustc::util::ppaux::{Repr, UserString};
use std::mem;
use std::rc::Rc;
use std::slice;
use syntax::ast;
use syntax::ast_map;
use syntax::attr::AttrMetaMethods;
use syntax::codemap::Span;
#[deriving(PartialEq, Eq, PartialOrd, Ord)]
enum Fragment {
// This represents the path described by the move path index
Just(MovePathIndex),
// This represents the collection of all but one of the elements
// from an array at the path described by the move path index.
// Note that attached MovePathIndex should have mem_categorization
// of InteriorElement (i.e. array dereference `[]`).
AllButOneFrom(MovePathIndex),
}
impl Fragment {
fn loan_path_repr<'tcx>(&self, move_data: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) -> String {
let repr = |mpi| move_data.path_loan_path(mpi).repr(tcx);
match *self {
Just(mpi) => repr(mpi),
AllButOneFrom(mpi) => format!("$(allbutone {})", repr(mpi)),
}
}
fn loan_path_user_string<'tcx>(&self,
move_data: &MoveData<'tcx>,
tcx: &ty::ctxt<'tcx>) -> String {
let user_string = |mpi| move_data.path_loan_path(mpi).user_string(tcx);
match *self {
Just(mpi) => user_string(mpi),
AllButOneFrom(mpi) => format!("$(allbutone {})", user_string(mpi)),
}
}
}
pub struct FragmentSets {
/// During move_data construction, `moved_leaf_paths` tracks paths
/// that have been used directly by being moved out of. When
/// move_data construction has been completed, `moved_leaf_paths`
/// tracks such paths that are *leaf fragments* (e.g. `a.j` if we
/// never move out any child like `a.j.x`); any parent paths
/// (e.g. `a` for the `a.j` example) are moved over to
/// `parents_of_fragments`.
moved_leaf_paths: Vec<MovePathIndex>,
/// `assigned_leaf_paths` tracks paths that have been used
/// directly by being overwritten, but is otherwise much like
/// `moved_leaf_paths`.
assigned_leaf_paths: Vec<MovePathIndex>,
/// `parents_of_fragments` tracks paths that are definitely
/// parents of paths that have been moved.
///
/// FIXME(pnkfelix) probably do not want/need
/// `parents_of_fragments` at all, if we can avoid it.
///
    /// Update: I do not see a way to avoid it. Maybe just remove the
    /// above fixme, or at least document why doing this may be hard.
parents_of_fragments: Vec<MovePathIndex>,
/// During move_data construction (specifically the
/// fixup_fragment_sets call), `unmoved_fragments` tracks paths
/// that have been "left behind" after a sibling has been moved or
/// assigned. When move_data construction has been completed,
/// `unmoved_fragments` tracks paths that were *only* results of
/// being left-behind, and never directly moved themselves.
unmoved_fragments: Vec<Fragment>,
}
impl FragmentSets {
pub fn new() -> FragmentSets {
FragmentSets {
unmoved_fragments: Vec::new(),
moved_leaf_paths: Vec::new(),
assigned_leaf_paths: Vec::new(),
parents_of_fragments: Vec::new(),
}
}
pub fn add_move(&mut self, path_index: MovePathIndex) {
self.moved_leaf_paths.push(path_index);
}
pub fn add_assignment(&mut self, path_index: MovePathIndex) {
self.assigned_leaf_paths.push(path_index);
}
}
pub fn instrument_move_fragments<'tcx>(this: &MoveData<'tcx>,
tcx: &ty::ctxt<'tcx>,
sp: Span,
id: ast::NodeId) {
let (span_err, print) = {
let attrs : &[ast::Attribute];
attrs = match tcx.map.find(id) {
Some(ast_map::NodeItem(ref item)) =>
item.attrs[],
Some(ast_map::NodeImplItem(&ast::MethodImplItem(ref m))) =>
m.attrs[],
Some(ast_map::NodeTraitItem(&ast::ProvidedMethod(ref m))) =>
m.attrs[],
_ => [][],
};
let span_err =
attrs.iter().any(|a| a.check_name("rustc_move_fragments"));
let print = tcx.sess.debugging_opt(config::PRINT_MOVE_FRAGMENTS);
(span_err, print)
};
if !span_err && !print { return; }
let instrument_all_paths = |kind, vec_rc: &Vec<MovePathIndex>| {
for (i, mpi) in vec_rc.iter().enumerate() {
let render = || this.path_loan_path(*mpi).user_string(tcx);
if span_err {
tcx.sess.span_err(sp, format!("{}: `{}`", kind, render())[]);
}
if print {
println!("id:{} {}[{}] `{}`", id, kind, i, render());
}
}
};
let instrument_all_fragments = |kind, vec_rc: &Vec<Fragment>| {
for (i, f) in vec_rc.iter().enumerate() {
let render = || f.loan_path_user_string(this, tcx);
if span_err {
tcx.sess.span_err(sp, format!("{}: `{}`", kind, render())[]);
}
if print {
println!("id:{} {}[{}] `{}`", id, kind, i, render());
}
}
};
let fragments = this.fragments.borrow();
instrument_all_paths("moved_leaf_path", &fragments.moved_leaf_paths);
instrument_all_fragments("unmoved_fragment", &fragments.unmoved_fragments);
instrument_all_paths("parent_of_fragments", &fragments.parents_of_fragments);
instrument_all_paths("assigned_leaf_path", &fragments.assigned_leaf_paths);
}
/// Normalizes the fragment sets in `this`; i.e., removes duplicate entries, constructs the set of
/// parents, and constructs the left-over fragments.
///
/// Note: "left-over fragments" means paths that were not directly referenced in moves nor
/// assignments, but must nonetheless be tracked as potential drop obligations.
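///
/// For example, after a move out of `s.x.j`: `s.x.j` remains a moved leaf
/// path, `s` and `s.x` become parents of fragments, and the untouched
/// siblings such as `s.x.i` and `s.x.k` are recorded as unmoved fragments.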
pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) {
let mut fragments = this.fragments.borrow_mut();
// Swap out contents of fragments so that we can modify the fields
// without borrowing the common fragments.
let mut unmoved = mem::replace(&mut fragments.unmoved_fragments, vec![]);
let mut parents = mem::replace(&mut fragments.parents_of_fragments, vec![]);
let mut moved = mem::replace(&mut fragments.moved_leaf_paths, vec![]);
let mut assigned = mem::replace(&mut fragments.assigned_leaf_paths, vec![]);
let path_lps = |mpis: &[MovePathIndex]| -> Vec<String> {
mpis.iter().map(|mpi| this.path_loan_path(*mpi).repr(tcx)).collect()
};
let frag_lps = |fs: &[Fragment]| -> Vec<String> {
fs.iter().map(|f| f.loan_path_repr(this, tcx)).collect()
};
// First, filter out duplicates
moved.sort();
moved.dedup();
debug!("fragments 1 moved: {}", path_lps(moved[]));
assigned.sort();
assigned.dedup();
debug!("fragments 1 assigned: {}", path_lps(assigned[]));
// Second, build parents from the moved and assigned.
for m in moved.iter() {
let mut p = this.path_parent(*m);
while p != InvalidMovePathIndex {
parents.push(p);
p = this.path_parent(p);
}
}
for a in assigned.iter() {
let mut p = this.path_parent(*a);
while p != InvalidMovePathIndex {
parents.push(p);
p = this.path_parent(p);
}
}
parents.sort();
parents.dedup();
debug!("fragments 2 parents: {}", path_lps(parents[]));
// Third, filter the moved and assigned fragments down to just the non-parents
moved.retain(|f| non_member(*f, parents[]));
debug!("fragments 3 moved: {}", path_lps(moved[]));
assigned.retain(|f| non_member(*f, parents[]));
debug!("fragments 3 assigned: {}", path_lps(assigned[]));
// Fourth, build the leftover from the moved, assigned, and parents.
for m in moved.iter() {
let lp = this.path_loan_path(*m);
add_fragment_siblings(this, tcx, &mut unmoved, lp, None);
}
for a in assigned.iter() {
let lp = this.path_loan_path(*a);
add_fragment_siblings(this, tcx, &mut unmoved, lp, None);
}
for p in parents.iter() {
let lp = this.path_loan_path(*p);
add_fragment_siblings(this, tcx, &mut unmoved, lp, None);
}
unmoved.sort();
unmoved.dedup();
debug!("fragments 4 unmoved: {}", frag_lps(unmoved[]));
// Fifth, filter the leftover fragments down to its core.
unmoved.retain(|f| match *f {
AllButOneFrom(_) => true,
Just(mpi) => non_member(mpi, parents[]) &&
non_member(mpi, moved[]) &&
non_member(mpi, assigned[])
});
debug!("fragments 5 unmoved: {}", frag_lps(unmoved[]));
// Swap contents back in.
fragments.unmoved_fragments = unmoved;
fragments.parents_of_fragments = parents;
fragments.moved_leaf_paths = moved;
fragments.assigned_leaf_paths = assigned;
return;
fn non_member(elem: MovePathIndex, set: &[MovePathIndex]) -> bool {
match set.binary_search_elem(&elem) {
slice::BinarySearchResult::Found(_) => false,
slice::BinarySearchResult::NotFound(_) => true,
}
}
}
/// Adds all of the precisely-tracked siblings of `lp` as potential move paths of interest. For
/// example, if `lp` represents `s.x.j`, then adds move paths for `s.x.i` and `s.x.k`, the
/// siblings of `s.x.j`.
fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>,
tcx: &ty::ctxt<'tcx>,
gathered_fragments: &mut Vec<Fragment>,
lp: Rc<LoanPath<'tcx>>,
origin_id: Option<ast::NodeId>) {
match lp.kind {
LpVar(_) | LpUpvar(..) => {} // Local variables have no siblings.
        // Consuming a downcast is like consuming the original value, so propagate inward.
LpDowncast(ref loan_parent, _) => {
add_fragment_siblings(this, tcx, gathered_fragments, loan_parent.clone(), origin_id);
}
// *LV for Unique consumes the contents of the box (at
// least when it is non-copy...), so propagate inward.
LpExtend(ref loan_parent, _, LpDeref(mc::Unique)) => {
add_fragment_siblings(this, tcx, gathered_fragments, loan_parent.clone(), origin_id);
}
// *LV for unsafe and borrowed pointers do not consume their loan path, so stop here.
LpExtend(_, _, LpDeref(mc::UnsafePtr(..))) |
LpExtend(_, _, LpDeref(mc::Implicit(..))) |
LpExtend(_, _, LpDeref(mc::BorrowedPtr(..))) => {}
// FIXME(pnkfelix): LV[j] should be tracked, at least in the
// sense of we will track the remaining drop obligation of the
// rest of the array.
//
// LV[j] is not tracked precisely
LpExtend(_, _, LpInterior(mc::InteriorElement(_))) => {
let mp = this.move_path(tcx, lp.clone());
gathered_fragments.push(AllButOneFrom(mp));
}
// field access LV.x and tuple access LV#k are the cases
// we are interested in
LpExtend(ref loan_parent, mc,
LpInterior(mc::InteriorField(ref field_name))) => {
let enum_variant_info = match loan_parent.kind {
LpDowncast(ref loan_parent_2, variant_def_id) =>
Some((variant_def_id, loan_parent_2.clone())),
LpExtend(..) | LpVar(..) | LpUpvar(..) =>
None,
};
add_fragment_siblings_for_extension(
this,
tcx,
gathered_fragments,
loan_parent, mc, field_name, &lp, origin_id, enum_variant_info);
}
}
}
/// We have determined that `origin_lp` destructures to LpExtend(parent, original_field_name).
/// Based on this, add move paths for all of the siblings of `origin_lp`.
fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
tcx: &ty::ctxt<'tcx>,
gathered_fragments: &mut Vec<Fragment>,
parent_lp: &Rc<LoanPath<'tcx>>,
mc: mc::MutabilityCategory,
origin_field_name: &mc::FieldName,
origin_lp: &Rc<LoanPath<'tcx>>,
origin_id: Option<ast::NodeId>,
enum_variant_info: Option<(ast::DefId,
Rc<LoanPath<'tcx>>)>) {
let parent_ty = parent_lp.to_type();
let add_fragment_sibling_local = |field_name, variant_did| {
add_fragment_sibling_core(
this, tcx, gathered_fragments, parent_lp.clone(), mc, field_name, origin_lp,
variant_did);
};
match (&parent_ty.sty, enum_variant_info) {
(&ty::ty_tup(ref v), None) => {
let tuple_idx = match *origin_field_name {
mc::PositionalField(tuple_idx) => tuple_idx,
mc::NamedField(_) =>
panic!("tuple type {} should not have named fields.",
parent_ty.repr(tcx)),
};
let tuple_len = v.len();
for i in range(0, tuple_len) {
if i == tuple_idx { continue }
let field_name = mc::PositionalField(i);
add_fragment_sibling_local(field_name, None);
}
}
(&ty::ty_struct(def_id, ref _substs), None) => {
let fields = ty::lookup_struct_fields(tcx, def_id);
match *origin_field_name {
mc::NamedField(ast_name) => {
for f in fields.iter() {
if f.name == ast_name {
continue;
}
let field_name = mc::NamedField(f.name);
add_fragment_sibling_local(field_name, None);
}
}
mc::PositionalField(tuple_idx) => {
for (i, _f) in fields.iter().enumerate() {
if i == tuple_idx {
continue
}
let field_name = mc::PositionalField(i);
add_fragment_sibling_local(field_name, None);
}
}
}
}
(&ty::ty_enum(enum_def_id, substs), ref enum_variant_info) => {
let variant_info = {
let mut variants = ty::substd_enum_variants(tcx, enum_def_id, substs);
match *enum_variant_info {
Some((variant_def_id, ref _lp2)) =>
variants.iter()
.find(|variant| variant.id == variant_def_id)
.expect("enum_variant_with_id(): no variant exists with that ID")
.clone(),
None => {
assert_eq!(variants.len(), 1);
variants.pop().unwrap()
}
}
};
match *origin_field_name {
mc::NamedField(ast_name) => {
let variant_arg_names = variant_info.arg_names.as_ref().unwrap();
for variant_arg_ident in variant_arg_names.iter() {
if variant_arg_ident.name == ast_name {
continue;
}
let field_name = mc::NamedField(variant_arg_ident.name);
add_fragment_sibling_local(field_name, Some(variant_info.id));
}
}
mc::PositionalField(tuple_idx) => {
let variant_arg_types = &variant_info.args;
for (i, _variant_arg_ty) in variant_arg_types.iter().enumerate() {
if tuple_idx == i {
continue;
}
let field_name = mc::PositionalField(i);
add_fragment_sibling_local(field_name, None);
}
}
}
}
ref sty_and_variant_info => {
let msg = format!("type {} ({}) is not fragmentable",
parent_ty.repr(tcx), sty_and_variant_info);
let opt_span = origin_id.and_then(|id|tcx.map.opt_span(id));
tcx.sess.opt_span_bug(opt_span, msg[])
}
}
}
/// Adds the single sibling `LpExtend(parent, new_field_name)` of `origin_lp` (the original
/// loan-path).
fn add_fragment_sibling_core<'tcx>(this: &MoveData<'tcx>,
tcx: &ty::ctxt<'tcx>,
gathered_fragments: &mut Vec<Fragment>,
parent: Rc<LoanPath<'tcx>>,
mc: mc::MutabilityCategory,
new_field_name: mc::FieldName,
origin_lp: &Rc<LoanPath<'tcx>>,
enum_variant_did: Option<ast::DefId>) -> MovePathIndex {
let opt_variant_did = match parent.kind {
LpDowncast(_, variant_did) => Some(variant_did),
LpVar(..) | LpUpvar(..) | LpExtend(..) => enum_variant_did,
};
let loan_path_elem = LpInterior(mc::InteriorField(new_field_name));
let new_lp_type = match new_field_name {
mc::NamedField(ast_name) =>
ty::named_element_ty(tcx, parent.to_type(), ast_name, opt_variant_did),
mc::PositionalField(idx) =>
ty::positional_element_ty(tcx, parent.to_type(), idx, opt_variant_did),
};
let new_lp_variant = LpExtend(parent, mc, loan_path_elem);
let new_lp = LoanPath::new(new_lp_variant, new_lp_type.unwrap());
debug!("add_fragment_sibling_core(new_lp={}, origin_lp={})",
new_lp.repr(tcx), origin_lp.repr(tcx));
let mp = this.move_path(tcx, Rc::new(new_lp));
// Do not worry about checking for duplicates here; we will sort
// and dedup after all are added.
gathered_fragments.push(Just(mp));
mp
}
| 40.831933 | 98 | 0.563851 |
39a077e8f64219830d05e2a6670b01636a709ee9 | 68 | #[allow(dead_code)]
struct Bcd;
mod b {
}
mod c {
}
mod d {
}
| 4.533333 | 19 | 0.514706 |
1e21cbabc716cb107fb652ae7ff3748f723f8154 | 40,881 | //! Helper module to submit transactions into the zkSync Network.
// Built-in uses
use std::iter::FromIterator;
use std::{
collections::{HashMap, HashSet},
fmt::Display,
str::FromStr,
};
// External uses
use bigdecimal::BigDecimal;
use chrono::{Duration, Utc};
use futures::{
channel::{mpsc, oneshot},
prelude::*,
};
use itertools::izip;
use num::{bigint::ToBigInt, BigUint, Zero};
use thiserror::Error;
// Workspace uses
use zksync_api_types::{
v02::transaction::{SubmitBatchResponse, Toggle2FA, Toggle2FAResponse, TxHashSerializeWrapper},
TxWithSignature,
};
use zksync_config::ZkSyncConfig;
use zksync_storage::{chain::account::records::EthAccountType, ConnectionPool};
use zksync_types::{
tx::{
EthBatchSignData, EthBatchSignatures, EthSignData, Order, SignedZkSyncTx, TxEthSignature,
TxEthSignatureVariant, TxHash,
},
AccountId, Address, BatchFee, Fee, PubKeyHash, Token, TokenId, TokenLike, TxFeeTypes, ZkSyncTx,
H160,
};
// Local uses
use crate::{
api_server::forced_exit_checker::{ForcedExitAccountAgeChecker, ForcedExitChecker},
core_api_client::CoreApiClient,
fee_ticker::{ResponseBatchFee, ResponseFee, TickerRequest, TokenPriceRequestType},
signature_checker::{
BatchRequest, OrderRequest, RequestData, Toggle2FARequest, TxRequest, VerifiedTx,
VerifySignatureRequest,
},
tx_error::{Toggle2FAError, TxAddError},
utils::{block_details_cache::BlockDetailsCache, token_db_cache::TokenDBCache},
};
const VALIDNESS_INTERVAL_MINUTES: i64 = 40;
#[derive(Clone)]
pub struct TxSender {
pub core_api_client: CoreApiClient,
pub sign_verify_requests: mpsc::Sender<VerifySignatureRequest>,
pub ticker_requests: mpsc::Sender<TickerRequest>,
pub pool: ConnectionPool,
pub tokens: TokenDBCache,
pub forced_exit_checker: ForcedExitChecker,
pub blocks: BlockDetailsCache,
/// List of account IDs that do not have to pay fees for operations.
pub fee_free_accounts: HashSet<AccountId>,
pub enforce_pubkey_change_fee: bool,
// Limit the number of both transactions and Ethereum signatures per batch.
pub max_number_of_transactions_per_batch: usize,
pub max_number_of_authors_per_batch: usize,
}
#[derive(Debug, Error)]
pub enum SubmitError {
#[error("Account close tx is disabled.")]
AccountCloseDisabled,
#[error("Invalid params: {0}.")]
InvalidParams(String),
#[error("Fast processing available only for 'withdraw' operation type.")]
UnsupportedFastProcessing,
#[error("Incorrect transaction: {0}.")]
IncorrectTx(String),
#[error("Transaction adding error: {0}.")]
TxAdd(TxAddError),
#[error("Chosen token is not suitable for paying fees.")]
InappropriateFeeToken,
// Not all TxAddErrors would apply to Toggle2FA, but
// it is helpful to re-use IncorrectEthSignature and DbError
#[error("Failed to toggle 2FA: {0}.")]
Toggle2FA(Toggle2FAError),
#[error("Communication error with the core server: {0}.")]
CommunicationCoreServer(String),
#[error("Internal error.")]
Internal(anyhow::Error),
#[error("{0}")]
Other(String),
}
impl SubmitError {
pub fn internal(inner: impl Into<anyhow::Error>) -> Self {
Self::Internal(inner.into())
}
pub fn other(msg: impl Display) -> Self {
Self::Other(msg.to_string())
}
pub fn communication_core_server(msg: impl Display) -> Self {
Self::CommunicationCoreServer(msg.to_string())
}
pub fn invalid_params(msg: impl Display) -> Self {
Self::InvalidParams(msg.to_string())
}
}
#[macro_export]
macro_rules! internal_error {
($err:tt, $input:tt) => {{
vlog::warn!("Internal Server error: {}, input: {:?}", $err, $input);
SubmitError::internal($err)
}};
($err:tt) => {{
internal_error!($err, "N/A")
}};
}
impl TxSender {
pub fn new(
connection_pool: ConnectionPool,
sign_verify_request_sender: mpsc::Sender<VerifySignatureRequest>,
ticker_request_sender: mpsc::Sender<TickerRequest>,
config: &ZkSyncConfig,
) -> Self {
let core_api_client = CoreApiClient::new(config.api.private.url.clone());
Self::with_client(
core_api_client,
connection_pool,
sign_verify_request_sender,
ticker_request_sender,
config,
)
}
pub(crate) fn with_client(
core_api_client: CoreApiClient,
connection_pool: ConnectionPool,
sign_verify_request_sender: mpsc::Sender<VerifySignatureRequest>,
ticker_request_sender: mpsc::Sender<TickerRequest>,
config: &ZkSyncConfig,
) -> Self {
let max_number_of_transactions_per_batch =
config.api.common.max_number_of_transactions_per_batch as usize;
let max_number_of_authors_per_batch =
config.api.common.max_number_of_authors_per_batch as usize;
Self {
core_api_client,
pool: connection_pool,
sign_verify_requests: sign_verify_request_sender,
ticker_requests: ticker_request_sender,
tokens: TokenDBCache::new(),
forced_exit_checker: ForcedExitChecker::new(config),
enforce_pubkey_change_fee: config.api.common.enforce_pubkey_change_fee,
blocks: BlockDetailsCache::new(config.api.common.caches_size),
fee_free_accounts: HashSet::from_iter(config.api.common.fee_free_accounts.clone()),
max_number_of_transactions_per_batch,
max_number_of_authors_per_batch,
}
}
/// If `ForcedExit` has Ethereum siganture (e.g. it's a part of a batch), an actual signer
/// is initiator, not the target, thus, this function will perform a database query to acquire
/// the corresponding address.
async fn get_tx_sender(&self, tx: &ZkSyncTx) -> Result<Address, anyhow::Error> {
match tx {
ZkSyncTx::ForcedExit(tx) => self.get_address_by_id(tx.initiator_account_id).await,
_ => Ok(tx.account()),
}
}
async fn get_address_by_id(&self, id: AccountId) -> Result<Address, anyhow::Error> {
self.pool
.access_storage()
.await?
.chain()
.account_schema()
.account_address_by_id(id)
.await?
.ok_or_else(|| anyhow::anyhow!("Order signer account id not found in db"))
}
async fn get_tx_sender_type(&self, tx: &ZkSyncTx) -> Result<EthAccountType, SubmitError> {
self.get_sender_type(tx.account_id().or(Err(SubmitError::AccountCloseDisabled))?)
.await
.map_err(|_| SubmitError::TxAdd(TxAddError::DbError))
}
async fn get_sender_type(&self, id: AccountId) -> Result<EthAccountType, anyhow::Error> {
Ok(self
.pool
.access_storage()
.await?
.chain()
.account_schema()
.account_type_by_id(id)
.await?
.unwrap_or(EthAccountType::Owned))
}
pub async fn toggle_2fa(
&self,
toggle_2fa: Toggle2FA,
) -> Result<Toggle2FAResponse, SubmitError> {
let account_id = toggle_2fa.account_id;
let current_type = self
.get_sender_type(toggle_2fa.account_id)
.await
.map_err(|_| SubmitError::Toggle2FA(Toggle2FAError::DbError))?;
if matches!(current_type, EthAccountType::CREATE2) {
return Err(SubmitError::Toggle2FA(Toggle2FAError::CREATE2));
}
let new_type = if toggle_2fa.enable {
EthAccountType::Owned
} else {
EthAccountType::No2FA(toggle_2fa.pub_key_hash)
};
self.verify_toggle_2fa_request_eth_signature(toggle_2fa)
.await?;
self.pool
.access_storage()
.await
.map_err(|_| SubmitError::Toggle2FA(Toggle2FAError::DbError))?
.chain()
.account_schema()
.set_account_type(account_id, new_type)
.await
.map_err(|_| SubmitError::Toggle2FA(Toggle2FAError::DbError))?;
Ok(Toggle2FAResponse { success: true })
}
async fn verify_toggle_2fa_request_eth_signature(
&self,
toggle_2fa: Toggle2FA,
) -> Result<(), SubmitError> {
let current_time = Utc::now();
let request_time = toggle_2fa.timestamp;
let validness_interval = Duration::minutes(VALIDNESS_INTERVAL_MINUTES);
if current_time - validness_interval > request_time
|| current_time + validness_interval < request_time
{
return Err(SubmitError::InvalidParams(format!(
"Timestamp differs by more than {} minutes",
VALIDNESS_INTERVAL_MINUTES
)));
}
let message = toggle_2fa.get_ethereum_sign_message().into_bytes();
let signature = toggle_2fa.signature;
let signer = self
.get_address_by_id(toggle_2fa.account_id)
.await
.or(Err(SubmitError::TxAdd(TxAddError::DbError)))?;
let eth_sign_data = EthSignData { signature, message };
let (sender, receiever) = oneshot::channel();
let request = VerifySignatureRequest {
data: RequestData::Toggle2FA(Toggle2FARequest {
sign_data: eth_sign_data,
sender: signer,
}),
response: sender,
};
send_verify_request_and_recv(request, self.sign_verify_requests.clone(), receiever).await?;
Ok(())
}
async fn verify_order_eth_signature(
&self,
order: &Order,
signature: Option<TxEthSignature>,
) -> Result<(), SubmitError> {
let signer_type = self
.get_sender_type(order.account_id)
.await
.map_err(|_| SubmitError::TxAdd(TxAddError::DbError))?;
if matches!(signer_type, EthAccountType::CREATE2) {
return if signature.is_some() {
Err(SubmitError::IncorrectTx(
"Eth signature from CREATE2 account not expected".to_string(),
))
} else {
Ok(())
};
}
if matches!(signer_type, EthAccountType::No2FA(None)) {
// We don't verify signatures for accounts with no 2FA
return Ok(());
}
if let EthAccountType::No2FA(Some(unchecked_hash)) = signer_type {
let order_pub_key_hash = PubKeyHash::from_pubkey(&order.signature.pub_key.0);
// We don't scheck the signature only if the order was signed with the same
// is the same as unchecked PubKey
if order_pub_key_hash == unchecked_hash {
return Ok(());
}
}
let signature = signature.ok_or(SubmitError::TxAdd(TxAddError::MissingEthSignature))?;
let signer = self
.get_address_by_id(order.account_id)
.await
.or(Err(SubmitError::TxAdd(TxAddError::DbError)))?;
let token_sell = self.token_info_from_id(order.token_sell).await?;
let token_buy = self.token_info_from_id(order.token_buy).await?;
let message = order
.get_ethereum_sign_message(&token_sell.symbol, &token_buy.symbol, token_sell.decimals)
.into_bytes();
let eth_sign_data = EthSignData { signature, message };
let (sender, receiever) = oneshot::channel();
let request = VerifySignatureRequest {
data: RequestData::Order(OrderRequest {
order: Box::new(order.clone()),
sign_data: eth_sign_data,
sender: signer,
}),
response: sender,
};
send_verify_request_and_recv(request, self.sign_verify_requests.clone(), receiever).await?;
Ok(())
}
// This method is left for RPC API
#[deprecated(note = "Use the submit_tx function instead")]
pub async fn submit_tx_with_separate_fp(
&self,
mut tx: ZkSyncTx,
signature: TxEthSignatureVariant,
fast_processing: Option<bool>,
) -> Result<TxHash, SubmitError> {
let fast_processing = fast_processing.unwrap_or(false);
if fast_processing && !tx.is_withdraw() {
return Err(SubmitError::UnsupportedFastProcessing);
}
if let ZkSyncTx::Withdraw(withdraw) = &mut tx {
if withdraw.fast {
// We set `fast` field ourselves, so we have to check that user did not set it themselves.
return Err(SubmitError::IncorrectTx(
"'fast' field of Withdraw transaction must not be set manually.".to_string(),
));
}
withdraw.fast = fast_processing;
}
self.submit_tx(tx, signature).await
}
pub async fn submit_tx(
&self,
tx: ZkSyncTx,
signature: TxEthSignatureVariant,
) -> Result<TxHash, SubmitError> {
if tx.is_close() {
return Err(SubmitError::AccountCloseDisabled);
}
if let ZkSyncTx::ForcedExit(forced_exit) = &tx {
self.check_forced_exit(forced_exit).await?;
}
// Resolve the token.
let token = self.token_info_from_id(tx.token_id()).await?;
let msg_to_sign = tx
.get_ethereum_sign_message(token.clone())
.map(String::into_bytes);
let is_whitelisted_initiator = tx
.account_id()
.map(|account_id| self.fee_free_accounts.contains(&account_id))
.unwrap_or(false);
let tx_fee_info = if !is_whitelisted_initiator {
tx.get_fee_info()
} else {
None
};
let sign_verify_channel = self.sign_verify_requests.clone();
let ticker_request_sender = self.ticker_requests.clone();
if let Some((tx_type, token, address, provided_fee)) = tx_fee_info {
let should_enforce_fee = !matches!(tx_type, TxFeeTypes::ChangePubKey { .. })
|| self.enforce_pubkey_change_fee;
let fee_allowed =
Self::token_allowed_for_fees(ticker_request_sender.clone(), token.clone()).await?;
if !fee_allowed {
return Err(SubmitError::InappropriateFeeToken);
}
let required_fee_data =
Self::ticker_request(ticker_request_sender, tx_type, address, token.clone())
.await?;
// Converting `BitUint` to `BigInt` is safe.
let required_fee: BigDecimal = required_fee_data
.normal_fee
.total_fee
.to_bigint()
.unwrap()
.into();
let provided_fee: BigDecimal = provided_fee.to_bigint().unwrap().into();
// Scaling the fee required since the price may change between signing the transaction and sending it to the server.
let scaled_provided_fee = scale_user_fee_up(provided_fee);
if required_fee >= scaled_provided_fee && should_enforce_fee {
return Err(SubmitError::TxAdd(TxAddError::TxFeeTooLow));
}
}
let tx_sender = self
.get_tx_sender(&tx)
.await
.or(Err(SubmitError::TxAdd(TxAddError::DbError)))?;
let verified_tx = verify_tx_info_message_signature(
&tx,
tx_sender,
token.clone(),
self.get_tx_sender_type(&tx).await?,
signature.tx_signature().clone(),
msg_to_sign,
sign_verify_channel,
)
.await?
.unwrap_tx();
if let ZkSyncTx::Swap(tx) = &tx {
if signature.is_single() {
return Err(SubmitError::TxAdd(TxAddError::MissingEthSignature));
}
let signatures = signature.orders_signatures();
self.verify_order_eth_signature(&tx.orders.0, signatures.0.clone())
.await?;
self.verify_order_eth_signature(&tx.orders.1, signatures.1.clone())
.await?;
}
// Send verified transactions to the mempool.
self.core_api_client
.send_tx(verified_tx)
.await
.map_err(SubmitError::communication_core_server)?
.map_err(SubmitError::TxAdd)?;
// if everything is OK, return the transactions hashes.
Ok(tx.hash())
}
pub async fn submit_txs_batch(
&self,
txs: Vec<TxWithSignature>,
eth_signatures: Option<EthBatchSignatures>,
) -> Result<SubmitBatchResponse, SubmitError> {
// Bring the received signatures into a vector for simplified work.
let eth_signatures = EthBatchSignatures::api_arg_to_vec(eth_signatures);
if txs.is_empty() {
return Err(SubmitError::TxAdd(TxAddError::EmptyBatch));
}
// Even though this is going to be checked on the Mempool part,
// we don't want to verify huge batches as long as this operation
// is expensive.
if txs.len() > self.max_number_of_transactions_per_batch {
return Err(SubmitError::TxAdd(TxAddError::BatchTooBig));
}
// Same check but in terms of signatures.
if eth_signatures.len() > self.max_number_of_authors_per_batch {
return Err(SubmitError::TxAdd(TxAddError::EthSignaturesLimitExceeded));
}
if txs.iter().any(|tx| tx.tx.is_close()) {
return Err(SubmitError::AccountCloseDisabled);
}
// Checking fees data
let mut provided_total_usd_fee = BigDecimal::from(0);
let mut transaction_types = vec![];
let eth_token = TokenLike::Id(TokenId(0));
let mut token_fees = HashMap::<Address, BigUint>::new();
for tx in &txs {
let tx_fee_info = tx.tx.get_fee_info();
if let Some((tx_type, token, address, provided_fee)) = tx_fee_info {
// Save the transaction type before moving on to the next one, otherwise
// the total fee won't get affected by it.
transaction_types.push((tx_type, address));
if provided_fee == BigUint::zero() {
continue;
}
let fee_allowed =
Self::token_allowed_for_fees(self.ticker_requests.clone(), token.clone())
.await?;
// In batches, transactions with non-popular token are allowed to be included, but should not
// used to pay fees. Fees must be covered by some more common token.
if !fee_allowed && provided_fee != 0u64.into() {
return Err(SubmitError::InappropriateFeeToken);
}
let check_token = if fee_allowed {
// For allowed tokens, we perform check in the transaction token (as expected).
token.clone()
} else {
// For non-popular tokens we've already checked that the provided fee is 0,
// and the USD price will be checked in ETH.
eth_token.clone()
};
let token_price_in_usd = Self::ticker_price_request(
self.ticker_requests.clone(),
check_token.clone(),
TokenPriceRequestType::USDForOneWei,
)
.await?;
let token_data = self.token_info_from_id(token).await?;
let mut token_fee = token_fees.remove(&token_data.address).unwrap_or_default();
token_fee += &provided_fee;
token_fees.insert(token_data.address, token_fee);
provided_total_usd_fee +=
BigDecimal::from(provided_fee.clone().to_bigint().unwrap())
* &token_price_in_usd;
}
}
// Only one token in batch
if token_fees.len() == 1 {
let (batch_token, fee_paid) = token_fees.into_iter().next().unwrap();
let batch_token_fee = Self::ticker_batch_fee_request(
self.ticker_requests.clone(),
transaction_types.clone(),
batch_token.into(),
)
.await?;
let user_provided_fee =
scale_user_fee_up(BigDecimal::from(fee_paid.to_bigint().unwrap()));
let required_normal_fee =
BigDecimal::from(batch_token_fee.normal_fee.total_fee.to_bigint().unwrap());
// Not enough fee
if required_normal_fee > user_provided_fee {
vlog::error!(
"User provided batch fee in token is too low, required: {}, provided (scaled): {}",
required_normal_fee.to_string(),
user_provided_fee.to_string(),
);
return Err(SubmitError::TxAdd(TxAddError::TxBatchFeeTooLow));
}
} else {
// Calculate required fee for ethereum token
let required_eth_fee = Self::ticker_batch_fee_request(
self.ticker_requests.clone(),
transaction_types,
eth_token.clone(),
)
.await?
.normal_fee;
let eth_price_in_usd = Self::ticker_price_request(
self.ticker_requests.clone(),
eth_token,
TokenPriceRequestType::USDForOneWei,
)
.await?;
let required_total_usd_fee =
BigDecimal::from(required_eth_fee.total_fee.to_bigint().unwrap())
* ð_price_in_usd;
// Scaling the fee required since the price may change between signing the transaction and sending it to the server.
let scaled_provided_fee_in_usd = scale_user_fee_up(provided_total_usd_fee.clone());
if required_total_usd_fee > scaled_provided_fee_in_usd {
vlog::error!(
"User provided batch fee is too low, required: {}, provided: {} (scaled: {}); difference {}",
&required_total_usd_fee,
provided_total_usd_fee.to_string(),
scaled_provided_fee_in_usd.to_string(),
(&required_total_usd_fee - &scaled_provided_fee_in_usd).to_string(),
);
return Err(SubmitError::TxAdd(TxAddError::TxBatchFeeTooLow));
}
}
for tx in txs.iter() {
if let ZkSyncTx::Swap(swap) = &tx.tx {
if tx.signature.is_single() {
return Err(SubmitError::TxAdd(TxAddError::MissingEthSignature));
}
let signatures = tx.signature.orders_signatures();
self.verify_order_eth_signature(&swap.orders.0, signatures.0.clone())
.await?;
self.verify_order_eth_signature(&swap.orders.1, signatures.1.clone())
.await?;
}
}
let mut verified_txs = Vec::with_capacity(txs.len());
let mut verified_signatures = Vec::new();
let mut messages_to_sign = Vec::with_capacity(txs.len());
let mut tx_senders = Vec::with_capacity(txs.len());
let mut tx_sender_types = Vec::with_capacity(txs.len());
let mut tokens = Vec::with_capacity(txs.len());
for tx in txs.iter().map(|tx| &tx.tx) {
// Resolve the token and save it for constructing the batch message.
let token = self.token_info_from_id(tx.token_id()).await?;
tokens.push(token.clone());
messages_to_sign.push(tx.get_ethereum_sign_message(token).map(String::into_bytes));
tx_senders.push(
self.get_tx_sender(tx)
.await
.or(Err(SubmitError::TxAdd(TxAddError::DbError)))?,
);
tx_sender_types.push(self.get_tx_sender_type(&tx).await?);
}
let batch_sign_data = if !eth_signatures.is_empty() {
// User provided at least one signature for the whole batch.
// In this case each sender cannot be CREATE2.
if tx_sender_types
.iter()
.any(|_type| matches!(_type, EthAccountType::CREATE2))
{
return Err(SubmitError::IncorrectTx(
"Eth signature from CREATE2 account not expected".to_string(),
));
}
let _txs = txs
.iter()
.zip(tokens.iter().cloned())
.zip(tx_senders.iter().cloned())
.map(|((tx, token), sender)| (tx.tx.clone(), token, sender))
.collect::<Vec<_>>();
// Create batch signature data.
Some(EthBatchSignData::new(_txs, eth_signatures).map_err(SubmitError::other)?)
} else {
None
};
let (verified_batch, sign_data) = verify_txs_batch_signature(
txs,
tx_senders,
tokens,
tx_sender_types,
batch_sign_data,
messages_to_sign,
self.sign_verify_requests.clone(),
)
.await?
.unwrap_batch();
if let Some(sign_data) = sign_data {
verified_signatures.extend(sign_data.signatures.into_iter());
}
verified_txs.extend(verified_batch.into_iter());
let tx_hashes: Vec<TxHash> = verified_txs.iter().map(|tx| tx.tx.hash()).collect();
// Send verified transactions to the mempool.
self.core_api_client
.send_txs_batch(verified_txs, verified_signatures)
.await
.map_err(SubmitError::communication_core_server)?
.map_err(SubmitError::TxAdd)?;
let batch_hash = TxHash::batch_hash(&tx_hashes);
Ok(SubmitBatchResponse {
transaction_hashes: tx_hashes.into_iter().map(TxHashSerializeWrapper).collect(),
batch_hash,
})
}
pub async fn get_txs_fee_in_wei(
&self,
tx_type: TxFeeTypes,
address: Address,
token: TokenLike,
) -> Result<Fee, SubmitError> {
let resp_fee = Self::ticker_request(
self.ticker_requests.clone(),
tx_type,
address,
token.clone(),
)
.await?;
Ok(resp_fee.normal_fee)
}
pub async fn get_txs_batch_fee_in_wei(
&self,
transactions: Vec<(TxFeeTypes, Address)>,
token: TokenLike,
) -> Result<BatchFee, SubmitError> {
let resp_fee = Self::ticker_batch_fee_request(
self.ticker_requests.clone(),
transactions,
token.clone(),
)
.await?;
Ok(resp_fee.normal_fee)
}
/// For forced exits, we must check that target account exists for more
/// than 24 hours in order to give new account owners give an opportunity
/// to set the signing key. While `ForcedExit` operation doesn't do anything
/// bad to the account, it's more user-friendly to only allow this operation
/// after we're somewhat sure that zkSync account is not owned by anybody.
async fn check_forced_exit(
&self,
forced_exit: &zksync_types::ForcedExit,
) -> Result<(), SubmitError> {
let mut storage = self
.pool
.access_storage()
.await
.map_err(SubmitError::internal)?;
self.forced_exit_checker
.validate_forced_exit(&mut storage, forced_exit.target)
.await
}
/// Returns a message that user has to sign to send the transaction.
/// If the transaction doesn't need a message signature, returns `None`.
/// If any error is encountered during the message generation, returns `jsonrpc_core::Error`.
#[allow(dead_code)]
async fn tx_message_to_sign(&self, tx: &ZkSyncTx) -> Result<Option<Vec<u8>>, SubmitError> {
Ok(match tx {
ZkSyncTx::Transfer(tx) => {
let token = self.token_info_from_id(tx.token).await?;
let msg = tx
.get_ethereum_sign_message(&token.symbol, token.decimals)
.into_bytes();
Some(msg)
}
ZkSyncTx::Withdraw(tx) => {
let token = self.token_info_from_id(tx.token).await?;
let msg = tx
.get_ethereum_sign_message(&token.symbol, token.decimals)
.into_bytes();
Some(msg)
}
ZkSyncTx::MintNFT(tx) => {
let token = self.token_info_from_id(tx.fee_token).await?;
let msg = tx
.get_ethereum_sign_message(&token.symbol, token.decimals)
.into_bytes();
Some(msg)
}
_ => None,
})
}
/// Resolves the token from the database.
pub(crate) async fn token_info_from_id(
&self,
token_id: impl Into<TokenLike>,
) -> Result<Token, SubmitError> {
let mut storage = self
.pool
.access_storage()
.await
.map_err(SubmitError::internal)?;
self.tokens
.get_token(&mut storage, token_id)
.await
.map_err(SubmitError::internal)?
// TODO Make error more clean
.ok_or_else(|| SubmitError::other("Token not found in the DB"))
}
async fn ticker_batch_fee_request(
mut ticker_request_sender: mpsc::Sender<TickerRequest>,
transactions: Vec<(TxFeeTypes, Address)>,
token: TokenLike,
) -> Result<ResponseBatchFee, SubmitError> {
let req = oneshot::channel();
ticker_request_sender
.send(TickerRequest::GetBatchTxFee {
transactions,
token: token.clone(),
response: req.0,
})
.await
.map_err(SubmitError::internal)?;
let resp = req.1.await.map_err(SubmitError::internal)?;
resp.map_err(|err| internal_error!(err))
}
async fn ticker_request(
mut ticker_request_sender: mpsc::Sender<TickerRequest>,
tx_type: TxFeeTypes,
address: Address,
token: TokenLike,
) -> Result<ResponseFee, SubmitError> {
let req = oneshot::channel();
ticker_request_sender
.send(TickerRequest::GetTxFee {
tx_type,
address,
token: token.clone(),
response: req.0,
})
.await
.map_err(SubmitError::internal)?;
let resp = req.1.await.map_err(SubmitError::internal)?;
resp.map_err(|err| internal_error!(err))
}
pub async fn token_allowed_for_fees(
mut ticker_request_sender: mpsc::Sender<TickerRequest>,
token: TokenLike,
) -> Result<bool, SubmitError> {
let (sender, receiver) = oneshot::channel();
ticker_request_sender
.send(TickerRequest::IsTokenAllowed {
token: token.clone(),
response: sender,
})
.await
.expect("ticker receiver dropped");
receiver
.await
.expect("ticker answer sender dropped")
.map_err(SubmitError::internal)
}
pub async fn ticker_price_request(
mut ticker_request_sender: mpsc::Sender<TickerRequest>,
token: TokenLike,
req_type: TokenPriceRequestType,
) -> Result<BigDecimal, SubmitError> {
let req = oneshot::channel();
ticker_request_sender
.send(TickerRequest::GetTokenPrice {
token: token.clone(),
response: req.0,
req_type,
})
.await
.map_err(SubmitError::internal)?;
let resp = req.1.await.map_err(SubmitError::internal)?;
resp.map_err(|err| internal_error!(err))
}
}
async fn send_verify_request_and_recv(
request: VerifySignatureRequest,
mut req_channel: mpsc::Sender<VerifySignatureRequest>,
receiver: oneshot::Receiver<Result<VerifiedTx, TxAddError>>,
) -> Result<VerifiedTx, SubmitError> {
// Send the check request.
req_channel
.send(request)
.await
.map_err(SubmitError::internal)?;
// Wait for the check result.
receiver
.await
.map_err(|err| internal_error!(err))?
.map_err(SubmitError::TxAdd)
}
/// Send a request for Ethereum signature verification and wait for the response.
/// If `msg_to_sign` is not `None`, then the signature must be present.
async fn verify_tx_info_message_signature(
tx: &ZkSyncTx,
tx_sender: Address,
token: Token,
account_type: EthAccountType,
signature: Option<TxEthSignature>,
msg_to_sign: Option<Vec<u8>>,
req_channel: mpsc::Sender<VerifySignatureRequest>,
) -> Result<VerifiedTx, SubmitError> {
if matches!(
(account_type, signature.clone(), msg_to_sign.clone()),
(EthAccountType::CREATE2, Some(_), Some(_))
) {
return Err(SubmitError::IncorrectTx(
"Eth signature from CREATE2 account not expected".to_string(),
));
}
let should_check_eth_signature = match (account_type, tx) {
(EthAccountType::CREATE2, _) => false,
(EthAccountType::No2FA(_), ZkSyncTx::ChangePubKey(_)) => true,
(EthAccountType::No2FA(hash), _) => {
if let Some(not_checked_hash) = hash {
let tx_pub_key_hash = PubKeyHash::from_pubkey(&tx.signature().pub_key.0);
tx_pub_key_hash != not_checked_hash
} else {
false
}
}
_ => true,
};
let eth_sign_data = match (msg_to_sign, should_check_eth_signature) {
(Some(message), true) => {
let signature = signature.ok_or(SubmitError::TxAdd(TxAddError::MissingEthSignature))?;
Some(EthSignData { signature, message })
}
_ => None,
};
let (sender, receiever) = oneshot::channel();
let request = VerifySignatureRequest {
data: RequestData::Tx(TxRequest {
tx: SignedZkSyncTx {
tx: tx.clone(),
eth_sign_data,
},
sender: tx_sender,
token,
}),
response: sender,
};
send_verify_request_and_recv(request, req_channel, receiever).await
}
/// Send a request for Ethereum signature verification and wait for the response.
/// Unlike in case of `verify_tx_info_message_signature`, we do not require
/// every transaction from the batch to be signed. The signature must be obtained
/// through signing a human-readable message with accordance to zkSync protocol.
async fn verify_txs_batch_signature(
batch: Vec<TxWithSignature>,
senders: Vec<Address>,
tokens: Vec<Token>,
sender_types: Vec<EthAccountType>,
batch_sign_data: Option<EthBatchSignData>,
msgs_to_sign: Vec<Option<Vec<u8>>>,
req_channel: mpsc::Sender<VerifySignatureRequest>,
) -> Result<VerifiedTx, SubmitError> {
// This hashset holds addresses that have performed a CREATE2 ChangePubKey
// within this batch, so that we don't check ETH signatures on their transactions
// from this batch. We save the account type to the db later.
let mut create2_senders = HashSet::<H160>::new();
let mut txs = Vec::with_capacity(batch.len());
for (tx, message, sender, mut sender_type) in
izip!(batch, msgs_to_sign, senders.iter(), sender_types)
{
if create2_senders.contains(sender) {
sender_type = EthAccountType::CREATE2;
}
if let ZkSyncTx::ChangePubKey(tx) = &tx.tx {
if let Some(auth_data) = &tx.eth_auth_data {
if auth_data.is_create2() {
create2_senders.insert(*sender);
}
}
}
// If we have more signatures provided than required,
// we will verify those too.
let eth_sign_data = if let Some(message) = message {
match sender_type {
EthAccountType::CREATE2 => {
if tx.signature.exists() {
return Err(SubmitError::IncorrectTx(
"Eth signature from CREATE2 account not expected".to_string(),
));
}
None
}
EthAccountType::Owned => {
if batch_sign_data.is_none() && !tx.signature.exists() {
return Err(SubmitError::TxAdd(TxAddError::MissingEthSignature));
}
tx.signature
.tx_signature()
.clone()
.map(|signature| EthSignData { signature, message })
}
EthAccountType::No2FA(Some(unchecked_hash)) => {
let tx_pub_key_hash = PubKeyHash::from_pubkey(&tx.tx.signature().pub_key.0);
if tx_pub_key_hash != unchecked_hash {
if batch_sign_data.is_none() && !tx.signature.exists() {
return Err(SubmitError::TxAdd(TxAddError::MissingEthSignature));
}
tx.signature
.tx_signature()
.clone()
.map(|signature| EthSignData { signature, message })
} else {
None
}
}
EthAccountType::No2FA(None) => None,
}
} else {
None
};
txs.push(SignedZkSyncTx {
tx: tx.tx,
eth_sign_data,
});
}
let (sender, receiver) = oneshot::channel();
let request = VerifySignatureRequest {
data: RequestData::Batch(BatchRequest {
txs,
batch_sign_data,
senders,
tokens,
}),
response: sender,
};
send_verify_request_and_recv(request, req_channel, receiver).await
}
/// Scales the fee provided by user up to check whether the provided fee is enough to cover our expenses for
/// maintaining the protocol.
///
/// We calculate both `provided_fee * 1.05` and `provided_fee + 1 cent` and choose the maximum.
/// This is required since the price may change between signing the transaction and sending it to the server.
fn scale_user_fee_up(provided_total_usd_fee: BigDecimal) -> BigDecimal {
let one_cent = BigDecimal::from_str("0.01").unwrap();
// This formula is needed when the fee is really small.
//
// We don't compare it with any of the following scaled numbers, because
// a) Scaling by two (100%) is always greater than scaling by 5%.
// b) It is intended as a smaller substitute for 1 cent scaling when
// scaling by 1 cent means scaling more than 2x.
if provided_total_usd_fee < one_cent {
let scaled_by_two_provided_fee_in_usd = provided_total_usd_fee * BigDecimal::from(2u32);
return scaled_by_two_provided_fee_in_usd;
}
// Scale by 5%.
let scaled_percent_provided_fee_in_usd =
provided_total_usd_fee.clone() * BigDecimal::from(105u32) / BigDecimal::from(100u32);
// Scale by 1 cent.
let scaled_one_cent_provided_fee_in_usd = provided_total_usd_fee + one_cent;
// Choose the maximum of these two values.
std::cmp::max(
scaled_percent_provided_fee_in_usd,
scaled_one_cent_provided_fee_in_usd,
)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_scaling_user_fee_by_two() {
let provided_fee = BigDecimal::from_str("0.005").unwrap();
let provided_fee_scaled_by_two = BigDecimal::from_str("0.01").unwrap();
let scaled_fee = scale_user_fee_up(provided_fee);
assert_eq!(provided_fee_scaled_by_two, scaled_fee);
}
#[test]
fn test_scaling_user_fee_by_one_cent() {
let provided_fee = BigDecimal::from_str("0.015").unwrap();
let provided_fee_scaled_by_cent = BigDecimal::from_str("0.025").unwrap();
let scaled_fee = scale_user_fee_up(provided_fee);
assert_eq!(provided_fee_scaled_by_cent, scaled_fee);
}
#[test]
fn test_scaling_user_fee_by_5_percent() {
let provided_fee = BigDecimal::from_str("0.30").unwrap();
let provided_fee_scaled_by_five_percent = BigDecimal::from_str("0.315").unwrap();
let scaled_fee = scale_user_fee_up(provided_fee);
assert_eq!(provided_fee_scaled_by_five_percent, scaled_fee);
}
}
| 36.370996 | 128 | 0.590715 |
bbecf447ad67923e20cd33d2f1dc0e4a2d9ea361 | 20,467 | extern crate env_logger;
extern crate gfx_corell;
#[cfg(feature = "dx12")]
extern crate gfx_device_dx12ll as back;
#[cfg(feature = "vulkan")]
extern crate gfx_device_vulkanll as back;
#[cfg(feature = "metal")]
extern crate gfx_device_metalll as back;
extern crate winit;
extern crate image;
use gfx_corell::{buffer, command, format, pass, pso, shade, state, target,
Device, CommandPool, GraphicsCommandPool, IndexType,
GraphicsCommandBuffer, ProcessingCommandBuffer, TransferCommandBuffer, PrimaryCommandBuffer,
Primitive, Instance, Adapter, Surface, Swapchain, QueueFamily, QueueSubmit, Factory, SubPass, FrameSync};
use gfx_corell::format::Formatted;
use gfx_corell::memory::{self, ImageBarrier, ImageStateSrc, ImageStateDst, ImageLayout, ImageAccess};
use gfx_corell::factory::{DescriptorHeapType, DescriptorPoolDesc, DescriptorType,
DescriptorSetLayoutBinding, DescriptorSetWrite, DescriptorWrite,
ResourceHeapType, WaitFor};
use std::io::Cursor;
use gfx_corell::image as i;
const USE_INDEX_BUFFER: bool = false;
type ColorFormat = gfx_corell::format::Srgba8;
const VS: &str = "vs_main";
const PS: &str = "ps_main";
#[derive(Debug, Clone, Copy)]
#[allow(non_snake_case)]
struct Vertex {
a_Pos: [f32; 2],
a_Uv: [f32; 2],
}
const TRIANGLE: [Vertex; 6] = [
Vertex { a_Pos: [ -0.5, 0.33 ], a_Uv: [0.0, 1.0] },
Vertex { a_Pos: [ 0.5, 0.33 ], a_Uv: [1.0, 1.0] },
Vertex { a_Pos: [ 0.5,-0.33 ], a_Uv: [1.0, 0.0] },
Vertex { a_Pos: [ -0.5, 0.33 ], a_Uv: [0.0, 1.0] },
Vertex { a_Pos: [ 0.5,-0.33 ], a_Uv: [1.0, 0.0] },
Vertex { a_Pos: [ -0.5,-0.33 ], a_Uv: [0.0, 0.0] },
];
#[cfg(any(feature = "vulkan", feature = "dx12", feature = "metal"))]
fn main() {
env_logger::init().unwrap();
let mut events_loop = winit::EventsLoop::new();
let window = winit::WindowBuilder::new()
.with_dimensions(1024, 768)
.with_title("triangle (Low Level)".to_string())
.build(&events_loop)
.unwrap();
let (pixel_width, pixel_height) = window.get_inner_size_pixels().unwrap();
let pixel_width = pixel_width as u16;
let pixel_height = pixel_height as u16;
// instantiate backend
let instance = back::Instance::create();
let adapters = instance.enumerate_adapters();
for adapter in &adapters {
println!("{:?}", adapter.get_info());
}
let adapter = &adapters[0];
let surface = instance.create_surface(&window);
let queue_descs = adapter.get_queue_families().map(|family| (family, family.num_queues()) );
// Build a new device and associated command queues
let Device { mut factory, mut general_queues, heap_types, caps, .. } = adapter.open(queue_descs);
let mut swap_chain = surface.build_swapchain::<ColorFormat>(&general_queues[0]);
// Setup renderpass and pipeline
// dx12 runtime shader compilation
#[cfg(feature = "dx12")]
let shader_lib = factory.create_shader_library_from_source(&[
(VS, shade::Stage::Vertex, include_bytes!("shader/triangle.hlsl")),
(PS, shade::Stage::Pixel, include_bytes!("shader/triangle.hlsl")),
]).expect("Error on creating shader lib");
#[cfg(feature = "vulkan")]
let shader_lib = factory.create_shader_library(&[
(VS, include_bytes!("data/vs_main.spv")),
(PS, include_bytes!("data/ps_main.spv")),
]).expect("Error on creating shader lib");
#[cfg(all(feature = "metal", feature = "metal_argument_buffer"))]
let shader_lib = factory.create_shader_library_from_source(
include_str!("shader/triangle_indirect.metal"),
back::LanguageVersion::new(2, 0),
).expect("Error on creating shader lib");
#[cfg(all(feature = "metal", not(feature = "metal_argument_buffer")))]
let shader_lib = factory.create_shader_library_from_source(
include_str!("shader/triangle.metal"),
back::LanguageVersion::new(1, 1),
).expect("Error on creating shader lib");
let shader_entries = pso::GraphicsShaderSet {
vertex_shader: VS,
hull_shader: None,
domain_shader: None,
geometry_shader: None,
pixel_shader: Some(PS),
};
let set0_layout = factory.create_descriptor_set_layout(&[
DescriptorSetLayoutBinding {
binding: 0,
ty: DescriptorType::SampledImage,
count: 1,
stage_flags: shade::STAGE_PIXEL,
}
],
);
let set1_layout = factory.create_descriptor_set_layout(&[
DescriptorSetLayoutBinding {
binding: 0,
ty: DescriptorType::Sampler,
count: 1,
stage_flags: shade::STAGE_PIXEL,
}
],
);
let pipeline_layout = factory.create_pipeline_layout(&[&set0_layout, &set1_layout]);
let render_pass = {
let attachment = pass::Attachment {
format: ColorFormat::get_format(),
load_op: pass::AttachmentLoadOp::Clear,
store_op: pass::AttachmentStoreOp::Store,
stencil_load_op: pass::AttachmentLoadOp::DontCare,
stencil_store_op: pass::AttachmentStoreOp::DontCare,
src_layout: memory::ImageLayout::Undefined, // TODO: maybe Option<_> here?
dst_layout: memory::ImageLayout::Present,
};
let subpass = pass::SubpassDesc {
color_attachments: &[(0, memory::ImageLayout::ColorAttachmentOptimal)],
};
let dependency = pass::SubpassDependency {
src_pass: pass::SubpassRef::External,
dst_pass: pass::SubpassRef::Pass(0),
src_stage: pso::COLOR_ATTACHMENT_OUTPUT,
dst_stage: pso::COLOR_ATTACHMENT_OUTPUT,
src_access: memory::ImageAccess::empty(),
dst_access: memory::COLOR_ATTACHMENT_READ | memory::COLOR_ATTACHMENT_WRITE,
};
factory.create_renderpass(&[attachment], &[subpass], &[dependency])
};
//
let mut pipeline_desc = pso::GraphicsPipelineDesc::new(
if USE_INDEX_BUFFER { Primitive::TriangleStrip } else { Primitive::TriangleList },
state::Rasterizer::new_fill(),
shader_entries);
pipeline_desc.color_targets[0] = Some((
ColorFormat::get_format(),
state::Blend {
color: state::BlendChannel {
equation: state::Equation::Add,
source: state::Factor::ZeroPlus(state::BlendValue::SourceAlpha),
destination: state::Factor::OneMinus(state::BlendValue::SourceAlpha),
},
alpha: state::BlendChannel {
equation: state::Equation::Add,
source: state::Factor::One,
destination: state::Factor::One,
},
}.into()
));
pipeline_desc.vertex_buffers.push(pso::VertexBufferDesc {
stride: std::mem::size_of::<Vertex>() as u8,
rate: 0,
});
pipeline_desc.attributes.push((0, pso::Element {
format: <format::Vec2<f32> as format::Formatted>::get_format(),
offset: 0
}));
pipeline_desc.attributes.push((0, pso::Element {
format: <format::Vec2<f32> as format::Formatted>::get_format(),
offset: 8
}));
//
let pipelines = factory.create_graphics_pipelines(&[
(&shader_lib, &pipeline_layout, SubPass { index: 0, main_pass: &render_pass }, &pipeline_desc)
]);
println!("pipelines: {:?}", pipelines);
// Descriptors
let heap_srv = factory.create_descriptor_heap(DescriptorHeapType::SrvCbvUav, 16);
let mut srv_pool = factory.create_descriptor_set_pool(
&heap_srv,
1, // sets
0, // offset
&[DescriptorPoolDesc { ty: DescriptorType::SampledImage, count: 1 }],
);
let set0 = factory.create_descriptor_sets(&mut srv_pool, &[&set0_layout]);
let heap_sampler = factory.create_descriptor_heap(DescriptorHeapType::Sampler, 16);
let mut sampler_pool = factory.create_descriptor_set_pool(
&heap_sampler,
1, // sets
0, // offset
&[DescriptorPoolDesc { ty: DescriptorType::Sampler, count: 1 }],
);
let set1 = factory.create_descriptor_sets(&mut sampler_pool, &[&set1_layout]);
// Framebuffer and render target creation
let frame_rtvs = swap_chain.get_images().iter().map(|image| {
factory.view_image_as_render_target(&image, ColorFormat::get_format()).unwrap()
}).collect::<Vec<_>>();
let framebuffers = frame_rtvs.iter().map(|frame_rtv| {
factory.create_framebuffer(&render_pass, &[&frame_rtv], &[], pixel_width as u32, pixel_height as u32, 1)
}).collect::<Vec<_>>();
let upload_heap =
heap_types.iter().find(|&&heap_type| {
heap_type.properties.contains(memory::CPU_VISIBLE | memory::COHERENT)
})
.unwrap();
// Buffer allocations
println!("Memory heaps: {:?}", heap_types);
let heap = factory.create_heap(upload_heap, ResourceHeapType::Buffers, 0x20000).unwrap();
let buffer_stride = std::mem::size_of::<Vertex>() as u64;
let buffer_len = TRIANGLE.len() as u64 * buffer_stride;
let vertex_buffer = {
let buffer = factory.create_buffer(buffer_len, buffer_stride, buffer::VERTEX).unwrap();
println!("{:?}", buffer);
factory.bind_buffer_memory(&heap, 0, buffer).unwrap()
};
type Index = u16;
let index_stride = std::mem::size_of::<Index>() as u64;
let indices = [0 as Index, 1, 2, !0, 3, 4, 5];
let index_buffer = if USE_INDEX_BUFFER {
let index_len = index_stride * indices.len() as u64;
let unbound_buffer = factory.create_buffer(index_len, index_stride, buffer::INDEX).unwrap();
println!("{:?}", unbound_buffer);
let offset = (buffer_len | 0xFFFF) + 1; //TODO: respect D3D12 alignment
let buffer = factory.bind_buffer_memory(&heap, offset, unbound_buffer).unwrap();
factory.write_mapping::<Index>(&buffer, 0, index_len)
.unwrap()
.copy_from_slice(&indices);
Some(buffer)
} else {
None
};
// TODO: check transitions: read/write mapping and vertex buffer read
factory.write_mapping::<Vertex>(&vertex_buffer, 0, buffer_len)
.unwrap()
.copy_from_slice(&TRIANGLE);
// Image
let img_data = include_bytes!("data/logo.png");
let img = image::load(Cursor::new(&img_data[..]), image::PNG).unwrap().to_rgba();
let (width, height) = img.dimensions();
let kind = i::Kind::D2(width as i::Size, height as i::Size, i::AaMode::Single);
let row_alignment_mask = caps.buffer_copy_row_pitch_alignment as u32 - 1;
let image_stride = 4usize;
let row_pitch = (width * image_stride as u32 + row_alignment_mask) & !row_alignment_mask;
let upload_size = (height * row_pitch) as u64;
println!("upload row pitch {}, total size {}", row_pitch, upload_size);
let image_upload_heap = factory.create_heap(upload_heap, ResourceHeapType::Buffers, upload_size).unwrap();
let image_upload_buffer = {
let buffer = factory.create_buffer(upload_size, image_stride as u64, buffer::TRANSFER_SRC).unwrap();
factory.bind_buffer_memory(&image_upload_heap, 0, buffer).unwrap()
};
// copy image data into staging buffer
{
let mut mapping = factory.write_mapping::<u8>(&image_upload_buffer, 0, upload_size).unwrap();
for y in 0 .. height as usize {
let row = &(*img)[y*(width as usize)*image_stride .. (y+1)*(width as usize)*image_stride];
let dest_base = y * row_pitch as usize;
mapping[dest_base .. dest_base + row.len()].copy_from_slice(row);
}
}
let image = factory.create_image(kind, 1, gfx_corell::format::Srgba8::get_format(), i::TRANSFER_DST | i::SAMPLED).unwrap(); // TODO: usage
println!("{:?}", image);
let image_req = factory.get_image_requirements(&image);
let device_heap = heap_types.iter().find(|&&heap_type| heap_type.properties.contains(memory::DEVICE_LOCAL)).unwrap();
let image_heap = factory.create_heap(device_heap, ResourceHeapType::Images, image_req.size).unwrap();
let image_logo = factory.bind_image_memory(&image_heap, 0, image).unwrap();
let image_srv = factory.view_image_as_shader_resource(&image_logo, gfx_corell::format::Srgba8::get_format()).unwrap();
let sampler = factory.create_sampler(i::SamplerInfo::new(
i::FilterMethod::Bilinear,
i::WrapMode::Clamp,
));
factory.update_descriptor_sets(&[
DescriptorSetWrite {
set: &set0[0],
binding: 0,
array_offset: 0,
write: DescriptorWrite::SampledImage(vec![(&image_srv, memory::ImageLayout::Undefined)]),
},
DescriptorSetWrite {
set: &set1[0],
binding: 0,
array_offset: 0,
write: DescriptorWrite::Sampler(vec![&sampler]),
},
]);
// Rendering setup
let viewport = target::Rect {
x: 0, y: 0,
w: pixel_width, h: pixel_height,
};
let scissor = target::Rect {
x: 0, y: 0,
w: pixel_width, h: pixel_height,
};
let mut frame_semaphore = factory.create_semaphore();
let mut frame_fence = factory.create_fence(false); // TODO: remove
let mut graphics_pool = back::GraphicsCommandPool::from_queue(&mut general_queues[0], 16);
// copy buffer to texture
{
let submit = {
let mut cmd_buffer = graphics_pool.acquire_command_buffer();
let image_barrier = ImageBarrier {
state_src: ImageStateSrc::State(ImageAccess::empty(), ImageLayout::Undefined),
state_dst: ImageStateDst::State(memory::TRANSFER_WRITE, ImageLayout::TransferDstOptimal),
image: &image_logo,
};
cmd_buffer.pipeline_barrier(&[], &[], &[image_barrier]);
cmd_buffer.copy_buffer_to_image(
&image_upload_buffer,
&image_logo,
memory::ImageLayout::TransferDstOptimal,
&[command::BufferImageCopy {
buffer_offset: 0,
buffer_row_pitch: row_pitch,
buffer_slice_pitch: row_pitch * (height as u32),
image_mip_level: 0,
image_base_layer: 0,
image_layers: 1,
image_offset: command::Offset { x: 0, y: 0, z: 0 },
}]);
let image_barrier = ImageBarrier {
state_src: ImageStateSrc::State(memory::TRANSFER_WRITE, ImageLayout::TransferDstOptimal),
state_dst: ImageStateDst::State(memory::SHADER_READ, ImageLayout::ShaderReadOnlyOptimal),
image: &image_logo,
};
cmd_buffer.pipeline_barrier(&[], &[], &[image_barrier]);
cmd_buffer.finish()
};
general_queues[0].submit_graphics(
&[
QueueSubmit {
cmd_buffers: &[submit],
wait_semaphores: &[],
signal_semaphores: &[],
}
],
Some(&mut frame_fence),
);
factory.wait_for_fences(&[&frame_fence], WaitFor::All, !0);
}
//
let mut running = true;
while running {
events_loop.poll_events(|event| {
if let winit::Event::WindowEvent { event, .. } = event {
match event {
winit::WindowEvent::KeyboardInput {
input: winit::KeyboardInput {
virtual_keycode: Some(winit::VirtualKeyCode::Escape),
.. },
..
} | winit::WindowEvent::Closed => running = false,
_ => (),
}
}
});
factory.reset_fences(&[&frame_fence]);
graphics_pool.reset();
let frame = swap_chain.acquire_frame(FrameSync::Semaphore(&mut frame_semaphore));
// Rendering
let submit = {
let mut cmd_buffer = graphics_pool.acquire_command_buffer();
let rtv = &swap_chain.get_images()[frame.id()];
let rtv_target_barrier = ImageBarrier {
state_src: ImageStateSrc::State(ImageAccess::empty(), ImageLayout::Undefined),
state_dst: ImageStateDst::State(memory::COLOR_ATTACHMENT_WRITE, ImageLayout::ColorAttachmentOptimal),
image: rtv,
};
cmd_buffer.pipeline_barrier(&[], &[], &[rtv_target_barrier]);
cmd_buffer.set_viewports(&[viewport]);
cmd_buffer.set_scissors(&[scissor]);
cmd_buffer.bind_graphics_pipeline(&pipelines[0].as_ref().unwrap());
if let Some(ref ibuf) = index_buffer {
cmd_buffer.bind_index_buffer(buffer::IndexBufferView {
buffer: ibuf,
offset: 0,
index_type: match index_stride {
2 => IndexType::U16,
4 => IndexType::U32,
_ => unreachable!()
},
});
};
cmd_buffer.bind_vertex_buffers(pso::VertexBufferSet(vec![(&vertex_buffer, 0)]));
cmd_buffer.bind_descriptor_heaps(Some(&heap_srv), Some(&heap_sampler));
cmd_buffer.bind_graphics_descriptor_sets(&pipeline_layout, 0, &[&set0[0], &set1[0]]);
{
let mut encoder = cmd_buffer.begin_render_pass_inline(
&render_pass,
&framebuffers[frame.id()],
target::Rect { x: 0, y: 0, w: pixel_width, h: pixel_height },
&[command::ClearValue::Color(command::ClearColor::Float([0.8, 0.8, 0.8, 1.0]))]);
if USE_INDEX_BUFFER {
encoder.draw_indexed(0, indices.len() as _, 0, None);
} else {
encoder.draw(0, 6, None);
}
}
let rtv_present_barrier = ImageBarrier {
state_src: ImageStateSrc::State(memory::COLOR_ATTACHMENT_WRITE, ImageLayout::ColorAttachmentOptimal),
state_dst: ImageStateDst::Present,
image: rtv,
};
cmd_buffer.pipeline_barrier(&[], &[], &[rtv_present_barrier]);
cmd_buffer.finish()
};
general_queues[0].submit_graphics(
&[
QueueSubmit {
cmd_buffers: &[submit],
wait_semaphores: &[(&mut frame_semaphore, pso::BOTTOM_OF_PIPE)],
signal_semaphores: &[],
}
],
Some(&mut frame_fence),
);
// TODO: replace with semaphore
factory.wait_for_fences(&[&frame_fence], WaitFor::All, !0);
// present frame
swap_chain.present();
}
// cleanup!
factory.destroy_descriptor_heap(heap_srv);
factory.destroy_descriptor_heap(heap_sampler);
factory.destroy_descriptor_set_pool(srv_pool);
factory.destroy_descriptor_set_pool(sampler_pool);
factory.destroy_descriptor_set_layout(set0_layout);
factory.destroy_descriptor_set_layout(set1_layout);
factory.destroy_shader_lib(shader_lib);
factory.destroy_pipeline_layout(pipeline_layout);
factory.destroy_renderpass(render_pass);
factory.destroy_heap(heap);
factory.destroy_heap(image_heap);
factory.destroy_heap(image_upload_heap);
factory.destroy_buffer(vertex_buffer);
factory.destroy_buffer(image_upload_buffer);
factory.destroy_image(image_logo);
factory.destroy_shader_resource_view(image_srv);
factory.destroy_sampler(sampler);
factory.destroy_fence(frame_fence);
factory.destroy_semaphore(frame_semaphore);
for pipeline in pipelines {
if let Ok(pipeline) = pipeline {
factory.destroy_graphics_pipeline(pipeline);
}
}
for framebuffer in framebuffers {
factory.destroy_framebuffer(framebuffer);
}
for rtv in frame_rtvs {
factory.destroy_render_target_view(rtv);
}
}
#[cfg(not(any(feature = "vulkan", feature = "dx12", feature = "metal")))]
fn main() {
println!("You need to enable the native API feature (vulkan/metal) in order to test the LL");
}
| 38.984762 | 142 | 0.605023 |
c1c85031ba6ab33b8d07643fa02713c2793b96dc | 3,297 | use proc_macro2::{Span, TokenStream};
use quote::ToTokens;
use std::collections::HashSet;
use std::iter;
use syn::{Data, DeriveInput, Field, Fields, Ident};
use utils::{
add_where_clauses_for_new_ident, field_idents, get_field_types_iter, named_to_vec,
number_idents, unnamed_to_vec,
};
pub fn expand(input: &DeriveInput, trait_name: &str) -> TokenStream {
let trait_ident = Ident::new(trait_name, Span::call_site());
let trait_path = "e!(::std::ops::#trait_ident);
let method_name = trait_name.to_lowercase();
let method_ident = &Ident::new(&method_name, Span::call_site());
let input_type = &input.ident;
let (block, fields) = match input.data {
Data::Struct(ref data_struct) => match data_struct.fields {
Fields::Unnamed(ref fields) => {
let field_vec = unnamed_to_vec(fields);
(
tuple_content(input_type, &field_vec, method_ident),
field_vec,
)
}
Fields::Named(ref fields) => {
let field_vec = named_to_vec(fields);
(
struct_content(input_type, &field_vec, method_ident),
field_vec,
)
}
_ => panic!(format!("Unit structs cannot use derive({})", trait_name)),
},
_ => panic!(format!("Only structs can use derive({})", trait_name)),
};
let scalar_ident = &Ident::new("__RhsT", Span::call_site());
let tys: &HashSet<_> = &get_field_types_iter(&fields).collect();
let tys2 = tys;
let scalar_iter = iter::repeat(scalar_ident);
let trait_path_iter = iter::repeat(trait_path);
let type_where_clauses = quote!{
where #(#tys: #trait_path_iter<#scalar_iter, Output=#tys2>),*
};
let new_generics =
add_where_clauses_for_new_ident(&input.generics, &fields, scalar_ident, type_where_clauses);
let (impl_generics, _, where_clause) = new_generics.split_for_impl();
let (_, ty_generics, _) = input.generics.split_for_impl();
quote!(
impl#impl_generics #trait_path<#scalar_ident> for #input_type#ty_generics #where_clause {
type Output = #input_type#ty_generics;
#[inline]
fn #method_ident(self, rhs: #scalar_ident) -> #input_type#ty_generics {
#block
}
}
)
}
fn tuple_content<'a, T: ToTokens>(
input_type: &T,
fields: &[&'a Field],
method_ident: &Ident,
) -> TokenStream {
let exprs = tuple_exprs(fields, method_ident);
quote!(#input_type(#(#exprs),*))
}
pub fn tuple_exprs(fields: &[&Field], method_ident: &Ident) -> Vec<TokenStream> {
number_idents(fields.len())
.iter()
.map(|i| quote!(self.#i.#method_ident(rhs)))
.collect()
}
fn struct_content<'a, T: ToTokens>(
input_type: &T,
fields: &[&'a Field],
method_ident: &Ident,
) -> TokenStream {
let exprs = struct_exprs(fields, method_ident);
let field_names = field_idents(fields);
quote!(#input_type{#(#field_names: #exprs),*})
}
pub fn struct_exprs(fields: &[&Field], method_ident: &Ident) -> Vec<TokenStream> {
field_idents(fields)
.iter()
.map(|f| quote!(self.#f.#method_ident(rhs)))
.collect()
}
| 33.642857 | 100 | 0.606915 |
897b95c6a52523cae5df8efa1f5d7767cfbc7896 | 347 | use anyhow::Result;
use aoc_2015_day_08::*;
#[test]
fn part_one_answer() -> Result<()> {
let input = include_str!("../input/input.txt");
assert_eq!(part_one(input)?, 1342);
Ok(())
}
#[test]
fn part_two_answer() -> Result<()> {
let input = include_str!("../input/input.txt");
assert_eq!(part_two(input)?, 2074);
Ok(())
}
| 19.277778 | 51 | 0.596542 |
5bf98f1f87f2e0a593e1e20803bcc53bee45e665 | 1,841 | use std::{error,
fmt,
fs::{self, File},
io,
io::{prelude::*, BufWriter, Write}};
use robin::prelude::*;
use robin::redis_queue::*;
pub fn setup() {
fs::create_dir("tests/tmp").ok();
}
pub fn teardown() {}
pub fn test_config() -> Config {
Config::default()
}
pub fn test_redis_init() -> RedisConfig {
let mut config = RedisConfig::default();
config.namespace = uuid();
config.timeout = 1;
config
}
pub fn uuid() -> String {
use uuid::Uuid;
Uuid::new_v4().hyphenated().to_string()
}
#[derive(Debug)]
pub struct TestError(pub &'static str);
impl TestError {
pub fn into_job_result(self) -> JobResult {
Err(Box::new(TestError(self.0)))
}
}
impl fmt::Display for TestError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl error::Error for TestError {
fn description(&self) -> &str {
self.0
}
}
pub fn write_tmp_test_file<S: ToString>(file: S, data: S) {
let file = file.to_string();
let file = format!("tests/tmp/{}", file);
let data = data.to_string();
let f = File::create(&file).expect(format!("Couldn't create file {}", &file).as_ref());
let mut f = BufWriter::new(f);
f.write_all(data.as_bytes())
.expect(format!("Couldn't write to {}", &file,).as_ref());
}
pub fn read_tmp_test_file<S: ToString>(file: S) -> Result<String, io::Error> {
let file = file.to_string();
let file = format!("tests/tmp/{}", file);
let mut f = File::open(&file)?;
let mut contents = String::new();
f.read_to_string(&mut contents)?;
Ok(contents)
}
#[allow(dead_code)]
pub fn delete_tmp_test_file<S: ToString>(file: S) {
let file = file.to_string();
let file = format!("tests/tmp/{}", file);
fs::remove_file(&file).ok();
}
| 23.0125 | 91 | 0.590983 |
647dcc27b89de164130dbc6f07df03f5901f396c | 2,232 | //! This crate provides ways of identifying an actor within the git repository both in shared/mutable and mutable variants.
//!
//! ## Feature Flags
#![cfg_attr(
feature = "document-features",
cfg_attr(doc, doc = ::document_features::document_features!())
)]
#![forbid(unsafe_code)]
#![deny(rust_2018_idioms, missing_docs)]
use bstr::{BStr, BString};
///
pub mod signature;
mod time;
const SPACE: &[u8; 1] = b" ";
/// A mutable signature is created by an actor at a certain time.
///
/// Note that this is not a cryptographical signature.
#[derive(Default, PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Signature {
    /// The actor's name.
pub name: BString,
/// The actor's email.
pub email: BString,
/// The time stamp at which the signature is performed.
pub time: Time,
}
/// An immutable signature is created by an actor at a certain time.
///
/// Note that this is not a cryptographical signature.
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy, Default)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct SignatureRef<'a> {
/// The actor's name.
#[cfg_attr(feature = "serde1", serde(borrow))]
pub name: &'a BStr,
/// The actor's email.
pub email: &'a BStr,
/// The time stamp at which the signature was performed.
pub time: Time,
}
/// Indicates if a number is positive or negative for use in [`Time`].
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[allow(missing_docs)]
pub enum Sign {
Plus,
Minus,
}
/// A timestamp with timezone.
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Time {
    /// Time in seconds since the Unix epoch.
    pub seconds_since_unix_epoch: u32,
    /// Time offset in seconds; may be negative to match the `sign` field.
    pub offset_in_seconds: i32,
    /// The sign of `offset_in_seconds`, used to encode `-0000`, which would otherwise lose sign information.
    pub sign: Sign,
}
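// A hedged illustration (values assumed): `-0000` and `+0000` denote the same
// zero offset, so only `sign` distinguishes them:
//
//     let negative_zero = Time {
//         seconds_since_unix_epoch: 1_650_000_000,
//         offset_in_seconds: 0,
//         sign: Sign::Minus, // round-trips as `-0000` rather than `+0000`
//     };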
| 33.313433 | 123 | 0.683244 |
d7acf3b85f3d504f8a5718a1e7abbb29095f8455 | 4,875 | use super::obj::*;
use super::sprite::{gui__col_tex_ps, gui__pos_col_tex_vs, sampler};
use crate::uses::{math::*, *};
use GL::{atlas::VTex2d, shader::*, tex::*, VaoBinding};
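/// A 9-slice ("9-patch") sprite: the four corners keep a fixed size while the
/// edges and center stretch, which is why the mesh below is a 4x4 vertex grid
/// (16 vertices, see `vert_count`).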
pub struct Sprite9<'r, S> {
pub pos: Vec2,
pub size: Vec2,
pub corner: f32,
pub color: Color,
pub tex: &'r VTex2d<S, u8>,
}
impl<S: TexSize> Sprite9<'_, S> {
#[inline(always)]
pub fn compare(&self, crop: &Crop, r: &Sprite9Impl<S>) -> State {
let &Self { pos, size, corner, color, tex } = self;
let xyzw = (State::XYZW | State::UV).or_def(geom_cmp(pos, size, crop, &r.base) || corner != r.corner);
let rgba = State::RGBA.or_def(color != r.base.color);
let _tex = State::UV.or_def(!ptr::eq(tex, r.tex));
let ord = State::MISMATCH.or_def((!_tex.is_empty() && atlas_cmp(tex, r.tex)) || (!rgba.is_empty() && ordering_cmp::<S, _>(color, r)));
ord | xyzw | rgba | _tex
}
pub fn obj(self, crop: Crop) -> Sprite9Impl<S> {
let Self { pos, size, corner, color, tex } = self;
Sprite9Impl {
base: Base { pos, size, crop, color },
corner,
tex,
}
}
}
pub struct Sprite9Impl<S> {
base: Base,
corner: f32,
tex: *const VTex2d<S, u8>,
}
impl<S: TexSize> Sprite9Impl<S> {
pub fn batchable(&self, r: &Self) -> bool {
self.ordered() == r.ordered() && atlas_cmp(self.tex, r.tex)
}
}
impl<S: TexSize> Object for Sprite9Impl<S> {
fn base(&self) -> &Base {
&self.base
}
fn write_mesh(&self, aspect: Vec2, range: BatchRange) {
let (crop, &Base { pos, size, color, .. }, (u1, v1, u2, v2)) = (self.base.bound_box(), self.base(), unsafe { &*self.tex }.region);
let c = size.x().min(size.y()) * self.corner.min(0.5).max(0.);
write_sprite9((aspect, pos, size, (c, c), crop, (u1, v2, u2, v1), color), range);
}
fn batch_draw(&self, b: &VaoBinding<u16>, (offset, num): (u16, u16)) {
let s = UnsafeOnce!(Shader, { Shader::pure((gui__pos_col_tex_vs, gui__col_tex_ps)) });
let t = unsafe { &*self.tex }.tex.Bind(sampler());
let _ = Uniforms!(s, ("src", &t));
b.Draw((num, offset, gl::TRIANGLES));
}
fn vert_count(&self) -> u32 {
16
}
fn ordered(&self) -> bool {
S::TYPE == gl::RGBA || Object::ordered(self)
}
fn gen_idxs(&self, (start, size): (u16, u16)) -> Vec<u16> {
sprite9_idxs((start, size))
}
}
type Sprite9Desc = (Vec2, Vec2, Vec2, Vec2, Crop, TexCoord, Color);
pub fn write_sprite9((aspect, pos, size, corner, (crop1, crop2), (u1, v1, u2, v2), color): Sprite9Desc, (z, state, xyzw, rgba, uv): BatchRange) {
if state.contains(State::XYZW) {
let (((x1, y1), (x2, y2), (m1x, m1y), (m2x, m2y)), (u1, v1, u2, v2), (m1u, m1v, m2u, m2v)) = <_>::to({
let (xy1, xy2) = (pos, pos.sum(size));
let (m1, m2, ms) = (xy1.sum(corner), xy2.sub(corner), corner);
let (uv, muv) = {
let wh = (u2 - u1, v2 - v1).div(ms);
let (u1m, v1m) = (u1, v1).sum(wh.mul(m1.sub(crop2)).mul(crop2.ls(m1)));
let (u2m, v2m) = (u1, v1).sum(wh.mul(crop1.sub(m2)).mul(crop1.gt(m2)));
let (u1, v1) = (u2, v2).sub(wh.mul(crop1.sub(xy1)));
let (u2, v2) = (u2, v2).sub(wh.mul(xy2.sub(crop2)));
((u1, v2, u2, v1), (u1m, v2m, u2m, v1m))
};
(
(crop1.mul(aspect), crop2.mul(aspect), m1.clmp(crop1, crop2).mul(aspect), m2.clmp(crop1, crop2).mul(aspect)),
uv,
muv,
)
});
const O: f16 = f16::ZERO;
if state.contains(State::XYZW) {
#[rustfmt::skip]
xyzw[..64].copy_from_slice(&[x1, y1, z, O, m1x, y1, z, O, m2x, y1, z, O, x2, y1, z, O,
x1, m1y, z, O, m1x, m1y, z, O, m2x, m1y, z, O, x2, m1y, z, O,
x1, m2y, z, O, m1x, m2y, z, O, m2x, m2y, z, O, x2, m2y, z, O,
x1, y2, z, O, m1x, y2, z, O, m2x, y2, z, O, x2, y2, z, O]);
}
if state.contains(State::UV) {
#[rustfmt::skip]
uv[..32].copy_from_slice(&[u1, v2, m1u, v2, m2u, v2, u2, v2,
u1, m2v, m1u, m2v, m2u, m2v, u2, m2v,
u1, m1v, m1u, m1v, m2u, m1v, u2, m1v,
u1, v1, m1u, v1, m2u, v1, u2, v1]);
}
}
if state.contains(State::RGBA) {
let (r, g, b, a) = vec4::to(color.mul(255).clmp(0, 255).round());
#[rustfmt::skip]
rgba[..64].copy_from_slice(&[r, g, b, a, r, g, b, a, r, g, b, a, r, g, b, a,
r, g, b, a, r, g, b, a, r, g, b, a, r, g, b, a,
r, g, b, a, r, g, b, a, r, g, b, a, r, g, b, a,
r, g, b, a, r, g, b, a, r, g, b, a, r, g, b, a]);
}
}
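// Index generation below emits 54 indices per 16-vertex patch
// (9 cells x 2 triangles x 3 vertices), walking the 4x4 grid
// laid out in `write_sprite9`.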
pub fn sprite9_idxs((start, size): (u16, u16)) -> Vec<u16> {
(start..(start + size))
.step_by(16)
.flat_map(|i| {
let s = |j| i + j;
#[rustfmt::skip] let s =
[s(0), s(1), s(4), s(4), s(1), s(5), s(5), s(1), s(2), s(2), s(5), s(6), s(6), s(2), s(3), s(3), s(6), s(7),
s(7), s(6), s(11), s(11), s(6), s(10), s(10), s(6), s(5), s(5), s(10), s(9), s(9), s(5), s(4), s(4), s(9), s(8),
s(8), s(9), s(12), s(12), s(9), s(13), s(13), s(9), s(10), s(10), s(13), s(14), s(14), s(10), s(11), s(11), s(14), s(15)];
s
})
.collect()
}
| 36.931818 | 145 | 0.534154 |
761ae3433e10b1f43d7cf501fbf32795da0139eb | 1,655 | use rand::XorShiftRng;
use vec::{random_in_unit_disc, Ray, Vec3};
use std::f32::consts::PI;
#[derive(Clone, Copy, Debug)]
pub struct Camera {
origin: Vec3,
u: Vec3, // unit vector in direction of x coordinates
    v: Vec3, // unit vector in direction of y coordinates
w: Vec3, // unit vector from the target toward the camera
lower_left_corner: Vec3,
horizontal: Vec3,
vertical: Vec3,
lens_radius: f32,
}
impl Camera {
pub fn new(
lookfrom: Vec3,
lookat: Vec3,
vup: Vec3,
vfov_degrees: f32,
aspect: f32,
aperture: f32,
focus_distance: f32,
) -> Camera {
let theta = vfov_degrees * PI / 180.0;
let half_height = (theta / 2.0).tan();
let half_width = aspect * half_height;
let w = (lookfrom - lookat).to_unit_vector();
let u = vup.cross(w).to_unit_vector();
let v = w.cross(u);
Camera {
origin: lookfrom,
u: u,
v: v,
w: w,
lower_left_corner: lookfrom - focus_distance * (half_width * u + half_height * v + w),
horizontal: focus_distance * 2.0 * half_width * u,
vertical: focus_distance * 2.0 * half_height * v,
lens_radius: aperture / 2.0,
}
}
pub fn get_ray(&self, u: f32, v: f32, rng: &mut XorShiftRng) -> Ray {
let Vec3(du, dv, _) = self.lens_radius * random_in_unit_disc(rng);
let origin = self.origin + du * self.u + dv * self.v;
Ray::new(
origin,
self.lower_left_corner + u * self.horizontal + v * self.vertical - origin,
)
}
}
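// A hedged construction sketch (values assumed, not from this crate's tests):
//
//     let camera = Camera::new(
//         Vec3(13.0, 2.0, 3.0), // lookfrom
//         Vec3(0.0, 0.0, 0.0),  // lookat
//         Vec3(0.0, 1.0, 0.0),  // vup
//         20.0,                 // vertical field of view in degrees
//         16.0 / 9.0,           // aspect ratio
//         0.1,                  // aperture
//         10.0,                 // distance to the focus plane
//     );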
| 30.090909 | 98 | 0.559517 |
5bdb3c84955330673f74e2cb4c9d898c771b6d45 | 3,960 | /*
Copyright 2020 Adobe
All Rights Reserved.
NOTICE: Adobe permits you to use, modify, and distribute this file in
accordance with the terms of the Adobe license agreement accompanying
it.
*/
use super::{ctrl_c_handler, serve_req};
use crate::cache::Cache;
use crate::settings::Settings;
use async_stream::stream;
use core::task::{Context, Poll};
use eyre::{Report, Result, WrapErr};
use futures_util::stream::{Stream, StreamExt};
use hyper::server::Server;
use hyper::service::{make_service_fn, service_fn};
use log::{error, info};
use std::fs::File;
use std::io::Read;
use std::pin::Pin;
use std::sync::Arc;
use tokio::io;
use tokio::net::{TcpListener, TcpStream};
use tokio_native_tls::{native_tls, TlsAcceptor, TlsStream};
pub async fn run_server(conf: &Settings, cache: Arc<Cache>) -> Result<()> {
let acceptor = {
let path = &conf.ssl.cert_path;
let password = &conf.ssl.cert_password;
let mut file =
File::open(path).wrap_err(format!("Can't open SSL certificate file: {}", path))?;
let mut identity = vec![];
file.read_to_end(&mut identity)
.wrap_err(format!("Can't read SSL cert data from file: {}", path))?;
let identity = native_tls::Identity::from_pkcs12(&identity, password)
.wrap_err("Can't decrypt SSL cert data - incorrect password?")?;
let sync_acceptor = native_tls::TlsAcceptor::new(identity)
.wrap_err("Can't create TLS socket listener - is the port free?")?;
let async_acceptor: TlsAcceptor = sync_acceptor.into();
async_acceptor
};
let full_host = format!("{}:{}", conf.proxy.host, conf.proxy.ssl_port);
let tcp = TcpListener::bind(&full_host).await?;
let incoming_tls_stream = incoming(tcp, acceptor).boxed();
let hyper_acceptor = HyperAcceptor {
acceptor: incoming_tls_stream,
};
let service = make_service_fn(move |_| {
let conf = conf.clone();
let cache = Arc::clone(&cache);
async move {
Ok::<_, Report>(service_fn(move |_req| {
let conf = conf.clone();
let cache = Arc::clone(&cache);
async move { serve_req(_req, conf, cache).await }
}))
}
});
let server = Server::builder(hyper_acceptor).serve(service);
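    // Wire Ctrl-C to a oneshot channel so the server can drain in-flight
    // requests instead of being killed mid-response.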
let (tx, rx) = tokio::sync::oneshot::channel::<()>();
ctrl_c_handler(move || tx.send(()).unwrap_or(()));
let graceful = server.with_graceful_shutdown(async {
rx.await.ok();
});
// Run the server, keep going until an error occurs.
info!("Starting to serve on https://{}", full_host);
graceful.await.wrap_err("Unexpected server shutdown")?;
Ok(())
}
fn incoming(
listener: TcpListener,
acceptor: TlsAcceptor,
) -> impl Stream<Item = TlsStream<TcpStream>> {
stream! {
loop {
// just swallow errors and wait again if necessary
match listener.accept().await {
Ok((stream, _)) => {
match acceptor.accept(stream).await {
Ok(x) => { yield x; }
Err(e) => { error!("SSL Failure with client: {}", e); }
}
}
Err(e) => { error!("Connection failure with client: {}", e); }
}
};
}
}
struct HyperAcceptor<'a> {
acceptor: Pin<Box<dyn Stream<Item = TlsStream<TcpStream>> + 'a>>,
}
impl hyper::server::accept::Accept for HyperAcceptor<'_> {
type Conn = TlsStream<TcpStream>;
type Error = io::Error;
fn poll_accept(
mut self: Pin<&mut Self>,
cx: &mut Context,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
let result = Pin::new(&mut self.acceptor).poll_next(cx);
match result {
Poll::Ready(Some(stream)) => Poll::Ready(Some(Ok(stream))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
}
}
| 34.736842 | 93 | 0.594697 |
29c2dbe950fab77b747da51f01de4e88608aec7a | 4,352 | //! Group membership API.
use std::ffi::CStr;
use std::fmt;
use std::slice;
use rdkafka_sys as rdsys;
use rdkafka_sys::types::*;
use crate::util::{KafkaDrop, NativePtr};
/// Group member information container.
pub struct GroupMemberInfo(RDKafkaGroupMemberInfo);
impl GroupMemberInfo {
/// Returns the ID of the member.
pub fn id(&self) -> &str {
unsafe {
CStr::from_ptr(self.0.member_id)
.to_str()
.expect("Member id is not a valid UTF-8 string")
}
}
/// Returns the client ID of the member.
pub fn client_id(&self) -> &str {
unsafe {
CStr::from_ptr(self.0.client_id)
.to_str()
.expect("Client id is not a valid UTF-8 string")
}
}
    /// Returns the client host of the member.
pub fn client_host(&self) -> &str {
unsafe {
CStr::from_ptr(self.0.client_host)
.to_str()
.expect("Member host is not a valid UTF-8 string")
}
}
    /// Returns the metadata of the member.
pub fn metadata(&self) -> Option<&[u8]> {
unsafe {
if self.0.member_metadata.is_null() {
None
} else {
Some(slice::from_raw_parts::<u8>(
self.0.member_metadata as *const u8,
self.0.member_metadata_size as usize,
))
}
}
}
    /// Returns the partition assignment of the member.
pub fn assignment(&self) -> Option<&[u8]> {
unsafe {
if self.0.member_assignment.is_null() {
None
} else {
Some(slice::from_raw_parts::<u8>(
self.0.member_assignment as *const u8,
self.0.member_assignment_size as usize,
))
}
}
}
}
/// Group information container.
pub struct GroupInfo(RDKafkaGroupInfo);
impl GroupInfo {
/// Returns the name of the group.
pub fn name(&self) -> &str {
unsafe {
CStr::from_ptr(self.0.group)
.to_str()
.expect("Group name is not a valid UTF-8 string")
}
}
/// Returns the members of the group.
pub fn members(&self) -> &[GroupMemberInfo] {
unsafe {
slice::from_raw_parts(
self.0.members as *const GroupMemberInfo,
self.0.member_cnt as usize,
)
}
}
/// Returns the state of the group.
pub fn state(&self) -> &str {
unsafe {
CStr::from_ptr(self.0.state)
.to_str()
.expect("State is not a valid UTF-8 string")
}
}
/// Returns the protocol of the group.
pub fn protocol(&self) -> &str {
unsafe {
CStr::from_ptr(self.0.protocol)
.to_str()
.expect("Protocol name is not a valid UTF-8 string")
}
}
/// Returns the protocol type of the group.
pub fn protocol_type(&self) -> &str {
unsafe {
CStr::from_ptr(self.0.protocol_type)
.to_str()
.expect("Protocol type is not a valid UTF-8 string")
}
}
}
impl fmt::Debug for GroupInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.name())
}
}
/// List of groups.
///
/// This structure wraps the pointer returned by rdkafka-sys, and deallocates
/// all the native resources when dropped.
pub struct GroupList(NativePtr<RDKafkaGroupList>);
unsafe impl KafkaDrop for RDKafkaGroupList {
const TYPE: &'static str = "group";
const DROP: unsafe extern "C" fn(*mut Self) = drop_group_list;
}
unsafe extern "C" fn drop_group_list(ptr: *mut RDKafkaGroupList) {
rdsys::rd_kafka_group_list_destroy(ptr as *const _)
}
impl GroupList {
/// Creates a new group list given a pointer to the native rdkafka-sys group list structure.
pub(crate) unsafe fn from_ptr(ptr: *const RDKafkaGroupList) -> GroupList {
GroupList(NativePtr::from_ptr(ptr as *mut _).unwrap())
}
/// Returns all the groups in the list.
pub fn groups(&self) -> &[GroupInfo] {
unsafe {
slice::from_raw_parts(self.0.groups as *const GroupInfo, self.0.group_cnt as usize)
}
}
}
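// A hedged usage sketch (the fetch call is an assumption about the wider crate
// API, not defined in this module): given a `GroupList` obtained from e.g.
// `consumer.fetch_group_list(None, timeout)`, inspection is plain slice access:
//
//     for group in group_list.groups() {
//         println!("{} ({}): {} members", group.name(), group.state(), group.members().len());
//     }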
| 27.719745 | 96 | 0.548024 |
b963abe4a5201bb2779cf791ea43a078a7b74411 | 3,789 | mod models;
pub mod persistence;
use std::collections::hash_map::Entry;
use chrono::{Duration, Utc};
use itertools::Itertools;
use redis::AsyncCommands;
use tokio::time::interval;
use tracing::{info, instrument};
use crate::aggregator::models::{Analytics, BattleCount, DurationWrapper};
use crate::aggregator::persistence::{store_analytics, UPDATED_AT_KEY};
use crate::battle_stream::entry::DenormalizedStreamEntry;
use crate::battle_stream::stream::Stream;
use crate::math::statistics::{ConfidenceInterval, Z};
use crate::opts::AggregateOpts;
use crate::AHashMap;
#[tracing::instrument(skip_all)]
pub async fn run(opts: AggregateOpts) -> crate::Result {
sentry::configure_scope(|scope| scope.set_tag("app", "aggregator"));
let mut redis = ::redis::Client::open(opts.redis_uri.as_str())?
.get_multiplexed_async_connection()
.await?;
let mut stream = Stream::read(redis.clone(), *opts.time_spans.iter().max().unwrap()).await?;
let mut interval = interval(opts.interval);
info!("running…");
loop {
interval.tick().await;
stream.refresh().await?;
let analytics = calculate_analytics(&stream.entries, &opts.time_spans);
store_analytics(&mut redis, &analytics).await?;
redis.set(UPDATED_AT_KEY, Utc::now().timestamp()).await?;
}
}
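// Win rates are reported as Wilson score intervals rather than raw win/battle
// ratios, which keeps the estimate well-behaved for the tiny battle counts
// that short time spans produce.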
#[instrument(level = "info", skip_all)]
fn calculate_analytics(sample: &[DenormalizedStreamEntry], time_spans: &[Duration]) -> Analytics {
let now = Utc::now();
let deadlines = time_spans
.iter()
.map(|time_span| (now - *time_span).timestamp())
.collect_vec();
let mut statistics = AHashMap::default();
for sample_entry in sample {
match statistics.entry(sample_entry.tank.tank_id) {
Entry::Vacant(entry) => {
let value = deadlines
.iter()
.map(|deadline| {
if sample_entry.tank.timestamp >= *deadline {
BattleCount {
n_battles: sample_entry.tank.n_battles,
n_wins: sample_entry.tank.n_wins,
}
} else {
BattleCount::default()
}
})
.collect_vec();
entry.insert(value);
}
Entry::Occupied(mut entry) => {
for (value, deadline) in entry.get_mut().iter_mut().zip(&deadlines) {
if sample_entry.tank.timestamp >= *deadline {
value.n_battles += sample_entry.tank.n_battles;
value.n_wins += sample_entry.tank.n_wins;
}
}
}
}
}
Analytics {
time_spans: time_spans
.iter()
.map(|time_span| DurationWrapper {
duration: *time_span,
})
.collect(),
win_rates: statistics
.into_iter()
.map(|(tank_id, counts)| {
(
tank_id,
counts
.into_iter()
.map(|count| {
if count.n_battles != 0 {
Some(ConfidenceInterval::wilson_score_interval(
count.n_battles,
count.n_wins,
Z::default(),
))
} else {
None
}
})
.collect(),
)
})
.collect(),
}
}
| 33.830357 | 98 | 0.492478 |
9c282f3dd78b6fc41a27f1285a54c94cdb225471 | 3,115 | use core::fmt::{Arguments, Debug};
use crate::{IntoWriteFn, NeverError, WriteBytes, WriteStr};
/// A writer that calls `write_str` once with a combined string.
///
/// Write function can return either `()` or `for<T, E> `[`Result`]`<T, E>`.
///
/// # Panics
///
/// Writer panics if the write function returns `Result::Err`.
///
/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct ConcatWriter<F1>(F1);
/// A helper trait used by [`ConcatWriter`]
/// to convert wrapped function result to [`Result`]`<T, NeverError>` with error unwrapping.
///
/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
pub trait ExpectConcatWriteResult {
    /// The resulting type after conversion.
type Output;
/// Performs the conversion with error unwrapping.
fn expect_concat_write_result(self) -> Self::Output;
}
impl<F1> ConcatWriter<F1>
where
F1: WriteStr,
{
/// Creates a new `ConcatWriter` from an object that implements [`WriteStr`].
pub fn new(write: F1) -> Self {
Self(write)
}
/// Creates a new `ConcatWriter` with a [`WriteStr`] wrapper
/// deduced with [`IntoWriteFn`] by the closure signature and constructed from it.
pub fn from_closure<F, Ts>(closure: F) -> Self
where
F: IntoWriteFn<Ts, WriteFn = F1>,
{
Self(closure.into_write_fn())
}
}
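// A hedged usage sketch (assumes `IntoWriteFn` covers `&str`-taking closures,
// which is not shown in this file):
//
//     let mut out = String::new();
//     let mut w = ConcatWriter::from_closure(|s: &str| out.push_str(s));
//     write!(w, "{}-{}", 1, 2)?; // routed through `write_fmt`: one `write_str` call with "1-2"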
impl<F1> ConcatWriter<F1>
where
Self: WriteStr,
{
/// Writes a formatted string into this writer.
///
/// This method is primarily used to interface with the [`format_args!`] macro,
/// but it is rare that this should explicitly be called.
/// The [`write!`] macro should be favored to invoke this method instead.
///
/// [`write!`]: https://doc.rust-lang.org/std/macro.write.html
/// [`format_args!`]: https://doc.rust-lang.org/std/macro.format_args.html
pub fn write_fmt(&mut self, args: Arguments<'_>) -> <Self as WriteStr>::Output {
if let Some(buf) = args.as_str() {
self.write_str(buf)
} else {
let buf = alloc::fmt::format(args);
self.write_str(&buf)
}
}
}
impl<F1, Output> WriteStr for ConcatWriter<F1>
where
F1: WriteStr,
F1::Output: ExpectConcatWriteResult<Output = Output>,
{
type Output = Output;
fn write_str(&mut self, buf: &str) -> Output {
self.0.write_str(buf).expect_concat_write_result()
}
}
impl<F1, Output> WriteBytes for ConcatWriter<F1>
where
F1: WriteBytes,
F1::Output: ExpectConcatWriteResult<Output = Output>,
{
type Output = Output;
fn write_bytes(&mut self, buf: &[u8]) -> Output {
self.0.write_bytes(buf).expect_concat_write_result()
}
}
impl ExpectConcatWriteResult for () {
type Output = Result<(), NeverError>;
fn expect_concat_write_result(self) -> Self::Output {
Ok(())
}
}
impl<T, E: Debug> ExpectConcatWriteResult for Result<T, E> {
type Output = Result<T, NeverError>;
fn expect_concat_write_result(self) -> Self::Output {
Ok(self.expect("failed writing"))
}
}
| 28.842593 | 92 | 0.642376 |
112de6846608446dca9fccf999e6b41a3bdcaefd | 83,202 | pub mod on_unimplemented;
pub mod suggestions;
use super::{
ConstEvalFailure, EvaluationResult, FulfillmentError, FulfillmentErrorCode,
MismatchedProjectionTypes, Obligation, ObligationCause, ObligationCauseCode,
OnUnimplementedDirective, OnUnimplementedNote, OutputTypeParameterMismatch, Overflow,
PredicateObligation, SelectionContext, SelectionError, TraitNotObjectSafe,
};
use crate::infer::error_reporting::{TyCategory, TypeAnnotationNeeded as ErrorCode};
use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use crate::infer::{self, InferCtxt, TyCtxtInferExt};
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorReported};
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::intravisit::Visitor;
use rustc_hir::Node;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::error::ExpectedFound;
use rustc_middle::ty::fold::TypeFolder;
use rustc_middle::ty::subst::GenericArgKind;
use rustc_middle::ty::{
self, fast_reject, AdtKind, SubtypePredicate, ToPolyTraitRef, ToPredicate, Ty, TyCtxt,
TypeFoldable, WithConstness,
};
use rustc_session::DiagnosticMessageId;
use rustc_span::symbol::{kw, sym};
use rustc_span::{ExpnKind, MultiSpan, Span, DUMMY_SP};
use std::fmt;
use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
use crate::traits::query::normalize::AtExt as _;
use on_unimplemented::InferCtxtExt as _;
use suggestions::InferCtxtExt as _;
pub use rustc_infer::traits::error_reporting::*;
pub trait InferCtxtExt<'tcx> {
fn report_fulfillment_errors(
&self,
errors: &[FulfillmentError<'tcx>],
body_id: Option<hir::BodyId>,
fallback_has_occurred: bool,
);
fn report_overflow_error<T>(
&self,
obligation: &Obligation<'tcx, T>,
suggest_increasing_limit: bool,
) -> !
where
T: fmt::Display + TypeFoldable<'tcx>;
fn report_overflow_error_cycle(&self, cycle: &[PredicateObligation<'tcx>]) -> !;
fn report_selection_error(
&self,
obligation: &PredicateObligation<'tcx>,
error: &SelectionError<'tcx>,
fallback_has_occurred: bool,
points_at_arg: bool,
);
/// Given some node representing a fn-like thing in the HIR map,
/// returns a span and `ArgKind` information that describes the
/// arguments it expects. This can be supplied to
/// `report_arg_count_mismatch`.
fn get_fn_like_arguments(&self, node: Node<'_>) -> Option<(Span, Vec<ArgKind>)>;
/// Reports an error when the number of arguments needed by a
/// trait match doesn't match the number that the expression
/// provides.
fn report_arg_count_mismatch(
&self,
span: Span,
found_span: Option<Span>,
expected_args: Vec<ArgKind>,
found_args: Vec<ArgKind>,
is_closure: bool,
) -> DiagnosticBuilder<'tcx>;
}
impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
fn report_fulfillment_errors(
&self,
errors: &[FulfillmentError<'tcx>],
body_id: Option<hir::BodyId>,
fallback_has_occurred: bool,
) {
#[derive(Debug)]
struct ErrorDescriptor<'tcx> {
predicate: ty::Predicate<'tcx>,
index: Option<usize>, // None if this is an old error
}
let mut error_map: FxHashMap<_, Vec<_>> = self
.reported_trait_errors
.borrow()
.iter()
.map(|(&span, predicates)| {
(
span,
predicates
.iter()
.map(|&predicate| ErrorDescriptor { predicate, index: None })
.collect(),
)
})
.collect();
for (index, error) in errors.iter().enumerate() {
// We want to ignore desugarings here: spans are equivalent even
// if one is the result of a desugaring and the other is not.
let mut span = error.obligation.cause.span;
let expn_data = span.ctxt().outer_expn_data();
if let ExpnKind::Desugaring(_) = expn_data.kind {
span = expn_data.call_site;
}
error_map.entry(span).or_default().push(ErrorDescriptor {
predicate: error.obligation.predicate,
index: Some(index),
});
self.reported_trait_errors
.borrow_mut()
.entry(span)
.or_default()
.push(error.obligation.predicate);
}
// We do this in 2 passes because we want to display errors in order, though
// maybe it *is* better to sort errors by span or something.
let mut is_suppressed = vec![false; errors.len()];
for (_, error_set) in error_map.iter() {
// We want to suppress "duplicate" errors with the same span.
for error in error_set {
if let Some(index) = error.index {
// Suppress errors that are either:
// 1) strictly implied by another error.
// 2) implied by an error with a smaller index.
for error2 in error_set {
if error2.index.map_or(false, |index2| is_suppressed[index2]) {
// Avoid errors being suppressed by already-suppressed
// errors, to prevent all errors from being suppressed
// at once.
continue;
}
if self.error_implies(error2.predicate, error.predicate)
&& !(error2.index >= error.index
&& self.error_implies(error.predicate, error2.predicate))
{
info!("skipping {:?} (implied by {:?})", error, error2);
is_suppressed[index] = true;
break;
}
}
}
}
}
for (error, suppressed) in errors.iter().zip(is_suppressed) {
if !suppressed {
self.report_fulfillment_error(error, body_id, fallback_has_occurred);
}
}
}
/// Reports that an overflow has occurred and halts compilation. We
/// halt compilation unconditionally because it is important that
/// overflows never be masked -- they basically represent computations
/// whose result could not be truly determined and thus we can't say
/// if the program type checks or not -- and they are unusual
/// occurrences in any case.
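    ///
    /// The classic trigger (the E0275 reference example) is a self-referential
    /// bound such as `impl<T> Foo for T where Bar<T>: Foo {}`, where evaluating
    /// `T: Foo` recurses past the recursion limit.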
fn report_overflow_error<T>(
&self,
obligation: &Obligation<'tcx, T>,
suggest_increasing_limit: bool,
) -> !
where
T: fmt::Display + TypeFoldable<'tcx>,
{
let predicate = self.resolve_vars_if_possible(&obligation.predicate);
let mut err = struct_span_err!(
self.tcx.sess,
obligation.cause.span,
E0275,
"overflow evaluating the requirement `{}`",
predicate
);
if suggest_increasing_limit {
self.suggest_new_overflow_limit(&mut err);
}
self.note_obligation_cause_code(
&mut err,
&obligation.predicate,
&obligation.cause.code,
&mut vec![],
);
err.emit();
self.tcx.sess.abort_if_errors();
bug!();
}
/// Reports that a cycle was detected which led to overflow and halts
/// compilation. This is equivalent to `report_overflow_error` except
/// that we can give a more helpful error message (and, in particular,
/// we do not suggest increasing the overflow limit, which is not
/// going to help).
fn report_overflow_error_cycle(&self, cycle: &[PredicateObligation<'tcx>]) -> ! {
let cycle = self.resolve_vars_if_possible(&cycle.to_owned());
assert!(!cycle.is_empty());
debug!("report_overflow_error_cycle: cycle={:?}", cycle);
self.report_overflow_error(&cycle[0], false);
}
fn report_selection_error(
&self,
obligation: &PredicateObligation<'tcx>,
error: &SelectionError<'tcx>,
fallback_has_occurred: bool,
points_at_arg: bool,
) {
let tcx = self.tcx;
let span = obligation.cause.span;
let mut err = match *error {
SelectionError::Unimplemented => {
if let ObligationCauseCode::CompareImplMethodObligation {
item_name,
impl_item_def_id,
trait_item_def_id,
}
| ObligationCauseCode::CompareImplTypeObligation {
item_name,
impl_item_def_id,
trait_item_def_id,
} = obligation.cause.code
{
self.report_extra_impl_obligation(
span,
item_name,
impl_item_def_id,
trait_item_def_id,
&format!("`{}`", obligation.predicate),
)
.emit();
return;
}
match obligation.predicate.kind() {
ty::PredicateKind::Trait(ref trait_predicate, _) => {
let trait_predicate = self.resolve_vars_if_possible(trait_predicate);
if self.tcx.sess.has_errors() && trait_predicate.references_error() {
return;
}
let trait_ref = trait_predicate.to_poly_trait_ref();
let (post_message, pre_message, type_def) = self
.get_parent_trait_ref(&obligation.cause.code)
.map(|(t, s)| {
(
format!(" in `{}`", t),
format!("within `{}`, ", t),
s.map(|s| (format!("within this `{}`", t), s)),
)
})
.unwrap_or_default();
let OnUnimplementedNote { message, label, note, enclosing_scope } =
self.on_unimplemented_note(trait_ref, obligation);
let have_alt_message = message.is_some() || label.is_some();
let is_try = self
.tcx
.sess
.source_map()
.span_to_snippet(span)
.map(|s| &s == "?")
.unwrap_or(false);
let is_from = self.tcx.get_diagnostic_item(sym::from_trait)
== Some(trait_ref.def_id());
let is_unsize =
{ Some(trait_ref.def_id()) == self.tcx.lang_items().unsize_trait() };
let (message, note) = if is_try && is_from {
(
Some(format!(
"`?` couldn't convert the error to `{}`",
trait_ref.skip_binder().self_ty(),
)),
Some(
"the question mark operation (`?`) implicitly performs a \
conversion on the error value using the `From` trait"
.to_owned(),
),
)
} else {
(message, note)
};
let mut err = struct_span_err!(
self.tcx.sess,
span,
E0277,
"{}",
message.unwrap_or_else(|| format!(
"the trait bound `{}` is not satisfied{}",
trait_ref.without_const().to_predicate(tcx),
post_message,
))
);
if is_try && is_from {
let none_error = self
.tcx
.get_diagnostic_item(sym::none_error)
.map(|def_id| tcx.type_of(def_id));
let should_convert_option_to_result =
Some(trait_ref.skip_binder().substs.type_at(1)) == none_error;
let should_convert_result_to_option =
Some(trait_ref.self_ty().skip_binder()) == none_error;
if should_convert_option_to_result {
err.span_suggestion_verbose(
span.shrink_to_lo(),
"consider converting the `Option<T>` into a `Result<T, _>` \
using `Option::ok_or` or `Option::ok_or_else`",
".ok_or_else(|| /* error value */)".to_string(),
Applicability::HasPlaceholders,
);
} else if should_convert_result_to_option {
err.span_suggestion_verbose(
span.shrink_to_lo(),
"consider converting the `Result<T, _>` into an `Option<T>` \
using `Result::ok`",
".ok()".to_string(),
Applicability::MachineApplicable,
);
}
if let Some(ret_span) = self.return_type_span(obligation) {
err.span_label(
ret_span,
&format!(
"expected `{}` because of this",
trait_ref.skip_binder().self_ty()
),
);
}
}
let explanation =
if obligation.cause.code == ObligationCauseCode::MainFunctionType {
"consider using `()`, or a `Result`".to_owned()
} else {
format!(
"{}the trait `{}` is not implemented for `{}`",
pre_message,
trait_ref.print_only_trait_path(),
trait_ref.skip_binder().self_ty(),
)
};
if self.suggest_add_reference_to_arg(
&obligation,
&mut err,
&trait_ref,
points_at_arg,
have_alt_message,
) {
self.note_obligation_cause(&mut err, obligation);
err.emit();
return;
}
if let Some(ref s) = label {
// If it has a custom `#[rustc_on_unimplemented]`
// error message, let's display it as the label!
err.span_label(span, s.as_str());
if !matches!(trait_ref.skip_binder().self_ty().kind, ty::Param(_)) {
// When the self type is a type param We don't need to "the trait
// `std::marker::Sized` is not implemented for `T`" as we will point
// at the type param with a label to suggest constraining it.
err.help(&explanation);
}
} else {
err.span_label(span, explanation);
}
if let Some((msg, span)) = type_def {
err.span_label(span, &msg);
}
if let Some(ref s) = note {
// If it has a custom `#[rustc_on_unimplemented]` note, let's display it
err.note(s.as_str());
}
if let Some(ref s) = enclosing_scope {
let enclosing_scope_span = tcx.def_span(
tcx.hir()
.opt_local_def_id(obligation.cause.body_id)
.unwrap_or_else(|| {
tcx.hir().body_owner_def_id(hir::BodyId {
hir_id: obligation.cause.body_id,
})
})
.to_def_id(),
);
err.span_label(enclosing_scope_span, s.as_str());
}
self.suggest_dereferences(&obligation, &mut err, &trait_ref, points_at_arg);
self.suggest_fn_call(&obligation, &mut err, &trait_ref, points_at_arg);
self.suggest_remove_reference(&obligation, &mut err, &trait_ref);
self.suggest_semicolon_removal(&obligation, &mut err, span, &trait_ref);
self.note_version_mismatch(&mut err, &trait_ref);
if Some(trait_ref.def_id()) == tcx.lang_items().try_trait() {
self.suggest_await_before_try(&mut err, &obligation, &trait_ref, span);
}
if self.suggest_impl_trait(&mut err, span, &obligation, &trait_ref) {
err.emit();
return;
}
if is_unsize {
// If the obligation failed due to a missing implementation of the
// `Unsize` trait, give a pointer to why that might be the case
err.note(
"all implementations of `Unsize` are provided \
automatically by the compiler, see \
<https://doc.rust-lang.org/stable/std/marker/trait.Unsize.html> \
for more information",
);
}
let is_fn_trait = [
self.tcx.lang_items().fn_trait(),
self.tcx.lang_items().fn_mut_trait(),
self.tcx.lang_items().fn_once_trait(),
]
.contains(&Some(trait_ref.def_id()));
let is_target_feature_fn =
if let ty::FnDef(def_id, _) = trait_ref.skip_binder().self_ty().kind {
!self.tcx.codegen_fn_attrs(def_id).target_features.is_empty()
} else {
false
};
if is_fn_trait && is_target_feature_fn {
err.note(
"`#[target_feature]` functions do not implement the `Fn` traits",
);
}
// Try to report a help message
if !trait_ref.has_infer_types_or_consts()
&& self.predicate_can_apply(obligation.param_env, trait_ref)
{
// If a where-clause may be useful, remind the
// user that they can add it.
//
// don't display an on-unimplemented note, as
// these notes will often be of the form
// "the type `T` can't be frobnicated"
// which is somewhat confusing.
self.suggest_restricting_param_bound(
&mut err,
trait_ref,
obligation.cause.body_id,
);
} else {
if !have_alt_message {
// Can't show anything else useful, try to find similar impls.
let impl_candidates = self.find_similar_impl_candidates(trait_ref);
self.report_similar_impl_candidates(impl_candidates, &mut err);
}
// Changing mutability doesn't make a difference to whether we have
// an `Unsize` impl (Fixes ICE in #71036)
if !is_unsize {
self.suggest_change_mut(
&obligation,
&mut err,
&trait_ref,
points_at_arg,
);
}
}
// If this error is due to `!: Trait` not implemented but `(): Trait` is
// implemented, and fallback has occurred, then it could be due to a
// variable that used to fallback to `()` now falling back to `!`. Issue a
// note informing about the change in behaviour.
if trait_predicate.skip_binder().self_ty().is_never()
&& fallback_has_occurred
{
let predicate = trait_predicate.map_bound(|mut trait_pred| {
trait_pred.trait_ref.substs = self.tcx.mk_substs_trait(
self.tcx.mk_unit(),
&trait_pred.trait_ref.substs[1..],
);
trait_pred
});
let unit_obligation = Obligation {
predicate: ty::PredicateKind::Trait(
predicate,
hir::Constness::NotConst,
)
.to_predicate(self.tcx),
..obligation.clone()
};
if self.predicate_may_hold(&unit_obligation) {
err.note(
"the trait is implemented for `()`. \
Possibly this error has been caused by changes to \
Rust's type-inference algorithm (see issue #48950 \
<https://github.com/rust-lang/rust/issues/48950> \
for more information). Consider whether you meant to use \
the type `()` here instead.",
);
}
}
err
}
ty::PredicateKind::Subtype(ref predicate) => {
// Errors for Subtype predicates show up as
// `FulfillmentErrorCode::CodeSubtypeError`,
// not selection error.
span_bug!(span, "subtype requirement gave wrong error: `{:?}`", predicate)
}
ty::PredicateKind::RegionOutlives(ref predicate) => {
let predicate = self.resolve_vars_if_possible(predicate);
let err = self
.region_outlives_predicate(&obligation.cause, predicate)
.err()
.unwrap();
struct_span_err!(
self.tcx.sess,
span,
E0279,
"the requirement `{}` is not satisfied (`{}`)",
predicate,
err,
)
}
ty::PredicateKind::Projection(..) | ty::PredicateKind::TypeOutlives(..) => {
let predicate = self.resolve_vars_if_possible(&obligation.predicate);
struct_span_err!(
self.tcx.sess,
span,
E0280,
"the requirement `{}` is not satisfied",
predicate
)
}
&ty::PredicateKind::ObjectSafe(trait_def_id) => {
let violations = self.tcx.object_safety_violations(trait_def_id);
report_object_safety_error(self.tcx, span, trait_def_id, violations)
}
&ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
let found_kind = self.closure_kind(closure_substs).unwrap();
let closure_span =
self.tcx.sess.source_map().guess_head_span(
self.tcx.hir().span_if_local(closure_def_id).unwrap(),
);
let hir_id = self.tcx.hir().as_local_hir_id(closure_def_id.expect_local());
let mut err = struct_span_err!(
self.tcx.sess,
closure_span,
E0525,
"expected a closure that implements the `{}` trait, \
but this closure only implements `{}`",
kind,
found_kind
);
err.span_label(
closure_span,
format!("this closure implements `{}`, not `{}`", found_kind, kind),
);
err.span_label(
obligation.cause.span,
format!("the requirement to implement `{}` derives from here", kind),
);
// Additional context information explaining why the closure only implements
// a particular trait.
if let Some(typeck_results) = self.in_progress_typeck_results {
let typeck_results = typeck_results.borrow();
match (found_kind, typeck_results.closure_kind_origins().get(hir_id)) {
(ty::ClosureKind::FnOnce, Some((span, name))) => {
err.span_label(
*span,
format!(
"closure is `FnOnce` because it moves the \
variable `{}` out of its environment",
name
),
);
}
(ty::ClosureKind::FnMut, Some((span, name))) => {
err.span_label(
*span,
format!(
"closure is `FnMut` because it mutates the \
variable `{}` here",
name
),
);
}
_ => {}
}
}
err.emit();
return;
}
ty::PredicateKind::WellFormed(ty) => {
if !self.tcx.sess.opts.debugging_opts.chalk {
// WF predicates cannot themselves make
// errors. They can only block due to
// ambiguity; otherwise, they always
// degenerate into other obligations
// (which may fail).
span_bug!(span, "WF predicate not satisfied for {:?}", ty);
} else {
// FIXME: we'll need a better message which takes into account
// which bounds actually failed to hold.
self.tcx.sess.struct_span_err(
span,
&format!("the type `{}` is not well-formed (chalk)", ty),
)
}
}
ty::PredicateKind::ConstEvaluatable(..) => {
// Errors for `ConstEvaluatable` predicates show up as
// `SelectionError::ConstEvalFailure`,
// not `Unimplemented`.
span_bug!(
span,
"const-evaluatable requirement gave wrong error: `{:?}`",
obligation
)
}
ty::PredicateKind::ConstEquate(..) => {
// Errors for `ConstEquate` predicates show up as
// `SelectionError::ConstEvalFailure`,
// not `Unimplemented`.
span_bug!(
span,
"const-equate requirement gave wrong error: `{:?}`",
obligation
)
}
}
}
OutputTypeParameterMismatch(ref found_trait_ref, ref expected_trait_ref, _) => {
let found_trait_ref = self.resolve_vars_if_possible(&*found_trait_ref);
let expected_trait_ref = self.resolve_vars_if_possible(&*expected_trait_ref);
if expected_trait_ref.self_ty().references_error() {
return;
}
let found_trait_ty = match found_trait_ref.self_ty().no_bound_vars() {
Some(ty) => ty,
None => return,
};
let found_did = match found_trait_ty.kind {
ty::Closure(did, _) | ty::Foreign(did) | ty::FnDef(did, _) => Some(did),
ty::Adt(def, _) => Some(def.did),
_ => None,
};
let found_span = found_did
.and_then(|did| self.tcx.hir().span_if_local(did))
.map(|sp| self.tcx.sess.source_map().guess_head_span(sp)); // the sp could be an fn def
if self.reported_closure_mismatch.borrow().contains(&(span, found_span)) {
// We check closures twice, with obligations flowing in different directions,
// but we want to complain about them only once.
return;
}
self.reported_closure_mismatch.borrow_mut().insert((span, found_span));
let found = match found_trait_ref.skip_binder().substs.type_at(1).kind {
ty::Tuple(ref tys) => vec![ArgKind::empty(); tys.len()],
_ => vec![ArgKind::empty()],
};
let expected_ty = expected_trait_ref.skip_binder().substs.type_at(1);
let expected = match expected_ty.kind {
ty::Tuple(ref tys) => tys
.iter()
.map(|t| ArgKind::from_expected_ty(t.expect_ty(), Some(span)))
.collect(),
_ => vec![ArgKind::Arg("_".to_owned(), expected_ty.to_string())],
};
if found.len() == expected.len() {
self.report_closure_arg_mismatch(
span,
found_span,
found_trait_ref,
expected_trait_ref,
)
} else {
let (closure_span, found) = found_did
.and_then(|did| {
let node = self.tcx.hir().get_if_local(did)?;
let (found_span, found) = self.get_fn_like_arguments(node)?;
Some((Some(found_span), found))
})
.unwrap_or((found_span, found));
self.report_arg_count_mismatch(
span,
closure_span,
expected,
found,
found_trait_ty.is_closure(),
)
}
}
TraitNotObjectSafe(did) => {
let violations = self.tcx.object_safety_violations(did);
report_object_safety_error(self.tcx, span, did, violations)
}
ConstEvalFailure(ErrorHandled::TooGeneric) => {
// In this instance, we have a const expression containing an unevaluated
// generic parameter. We have no idea whether this expression is valid or
// not (e.g. it might result in an error), but we don't want to just assume
// that it's okay, because that might result in post-monomorphisation time
// errors. The onus is really on the caller to provide values that it can
// prove are well-formed.
let mut err = self
.tcx
.sess
.struct_span_err(span, "constant expression depends on a generic parameter");
// FIXME(const_generics): we should suggest to the user how they can resolve this
// issue. However, this is currently not actually possible
// (see https://github.com/rust-lang/rust/issues/66962#issuecomment-575907083).
err.note("this may fail depending on what value the parameter takes");
err
}
// Already reported in the query.
ConstEvalFailure(ErrorHandled::Reported(ErrorReported)) => {
// FIXME(eddyb) remove this once `ErrorReported` becomes a proof token.
self.tcx.sess.delay_span_bug(span, "`ErrorReported` without an error");
return;
}
// Already reported in the query, but only as a lint.
// This shouldn't actually happen for constants used in types, modulo
// bugs. The `delay_span_bug` here ensures it won't be ignored.
ConstEvalFailure(ErrorHandled::Linted) => {
self.tcx.sess.delay_span_bug(span, "constant in type had error reported as lint");
return;
}
Overflow => {
bug!("overflow should be handled before the `report_selection_error` path");
}
};
self.note_obligation_cause(&mut err, obligation);
self.point_at_returns_when_relevant(&mut err, &obligation);
err.emit();
}
/// Given some node representing a fn-like thing in the HIR map,
/// returns a span and `ArgKind` information that describes the
/// arguments it expects. This can be supplied to
/// `report_arg_count_mismatch`.
fn get_fn_like_arguments(&self, node: Node<'_>) -> Option<(Span, Vec<ArgKind>)> {
let sm = self.tcx.sess.source_map();
let hir = self.tcx.hir();
Some(match node {
Node::Expr(&hir::Expr {
kind: hir::ExprKind::Closure(_, ref _decl, id, span, _),
..
}) => (
sm.guess_head_span(span),
hir.body(id)
.params
.iter()
.map(|arg| {
if let hir::Pat { kind: hir::PatKind::Tuple(ref args, _), span, .. } =
*arg.pat
{
Some(ArgKind::Tuple(
Some(span),
args.iter()
.map(|pat| {
sm.span_to_snippet(pat.span)
.ok()
.map(|snippet| (snippet, "_".to_owned()))
})
.collect::<Option<Vec<_>>>()?,
))
} else {
let name = sm.span_to_snippet(arg.pat.span).ok()?;
Some(ArgKind::Arg(name, "_".to_owned()))
}
})
.collect::<Option<Vec<ArgKind>>>()?,
),
Node::Item(&hir::Item { span, kind: hir::ItemKind::Fn(ref sig, ..), .. })
| Node::ImplItem(&hir::ImplItem {
span,
kind: hir::ImplItemKind::Fn(ref sig, _),
..
})
| Node::TraitItem(&hir::TraitItem {
span,
kind: hir::TraitItemKind::Fn(ref sig, _),
..
}) => (
sm.guess_head_span(span),
sig.decl
.inputs
.iter()
.map(|arg| match arg.clone().kind {
hir::TyKind::Tup(ref tys) => ArgKind::Tuple(
Some(arg.span),
vec![("_".to_owned(), "_".to_owned()); tys.len()],
),
_ => ArgKind::empty(),
})
.collect::<Vec<ArgKind>>(),
),
Node::Ctor(ref variant_data) => {
let span = variant_data.ctor_hir_id().map(|id| hir.span(id)).unwrap_or(DUMMY_SP);
let span = sm.guess_head_span(span);
(span, vec![ArgKind::empty(); variant_data.fields().len()])
}
_ => panic!("non-FnLike node found: {:?}", node),
})
}
/// Reports an error when the number of arguments needed by a
/// trait match doesn't match the number that the expression
/// provides.
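    ///
    /// For example (illustrative), passing `|x, y| x + y` where an
    /// `Fn(i32) -> i32` closure is expected reports E0593 with the expected
    /// and found argument counts rendered by `args_str` below.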
fn report_arg_count_mismatch(
&self,
span: Span,
found_span: Option<Span>,
expected_args: Vec<ArgKind>,
found_args: Vec<ArgKind>,
is_closure: bool,
) -> DiagnosticBuilder<'tcx> {
let kind = if is_closure { "closure" } else { "function" };
let args_str = |arguments: &[ArgKind], other: &[ArgKind]| {
let arg_length = arguments.len();
            let distinct = matches!(other, [ArgKind::Tuple(..)]);
match (arg_length, arguments.get(0)) {
(1, Some(&ArgKind::Tuple(_, ref fields))) => {
format!("a single {}-tuple as argument", fields.len())
}
_ => format!(
"{} {}argument{}",
arg_length,
if distinct && arg_length > 1 { "distinct " } else { "" },
pluralize!(arg_length)
),
}
};
let expected_str = args_str(&expected_args, &found_args);
let found_str = args_str(&found_args, &expected_args);
let mut err = struct_span_err!(
self.tcx.sess,
span,
E0593,
"{} is expected to take {}, but it takes {}",
kind,
expected_str,
found_str,
);
err.span_label(span, format!("expected {} that takes {}", kind, expected_str));
if let Some(found_span) = found_span {
err.span_label(found_span, format!("takes {}", found_str));
// move |_| { ... }
// ^^^^^^^^-- def_span
//
// move |_| { ... }
// ^^^^^-- prefix
let prefix_span = self.tcx.sess.source_map().span_until_non_whitespace(found_span);
// move |_| { ... }
// ^^^-- pipe_span
let pipe_span =
if let Some(span) = found_span.trim_start(prefix_span) { span } else { found_span };
            // Suggest taking and ignoring the arguments with `expected_args.len()` `_`s if
            // the found arguments are empty (assume the user just wants to ignore args in this case).
            // For example, if `expected_args.len()` is 2, suggest `|_, _|`.
if found_args.is_empty() && is_closure {
let underscores = vec!["_"; expected_args.len()].join(", ");
err.span_suggestion_verbose(
pipe_span,
&format!(
"consider changing the closure to take and ignore the expected argument{}",
pluralize!(expected_args.len())
),
format!("|{}|", underscores),
Applicability::MachineApplicable,
);
}
if let &[ArgKind::Tuple(_, ref fields)] = &found_args[..] {
if fields.len() == expected_args.len() {
let sugg = fields
.iter()
.map(|(name, _)| name.to_owned())
.collect::<Vec<String>>()
.join(", ");
err.span_suggestion_verbose(
found_span,
"change the closure to take multiple arguments instead of a single tuple",
format!("|{}|", sugg),
Applicability::MachineApplicable,
);
}
}
if let &[ArgKind::Tuple(_, ref fields)] = &expected_args[..] {
if fields.len() == found_args.len() && is_closure {
let sugg = format!(
"|({}){}|",
found_args
.iter()
.map(|arg| match arg {
ArgKind::Arg(name, _) => name.to_owned(),
_ => "_".to_owned(),
})
.collect::<Vec<String>>()
.join(", "),
// add type annotations if available
if found_args.iter().any(|arg| match arg {
ArgKind::Arg(_, ty) => ty != "_",
_ => false,
}) {
format!(
": ({})",
fields
.iter()
.map(|(_, ty)| ty.to_owned())
.collect::<Vec<String>>()
.join(", ")
)
} else {
String::new()
},
);
err.span_suggestion_verbose(
found_span,
"change the closure to accept a tuple instead of individual arguments",
sugg,
Applicability::MachineApplicable,
);
}
}
}
err
}
}
trait InferCtxtPrivExt<'tcx> {
// returns if `cond` not occurring implies that `error` does not occur - i.e., that
// `error` occurring implies that `cond` occurs.
fn error_implies(&self, cond: ty::Predicate<'tcx>, error: ty::Predicate<'tcx>) -> bool;
fn report_fulfillment_error(
&self,
error: &FulfillmentError<'tcx>,
body_id: Option<hir::BodyId>,
fallback_has_occurred: bool,
);
fn report_projection_error(
&self,
obligation: &PredicateObligation<'tcx>,
error: &MismatchedProjectionTypes<'tcx>,
);
fn fuzzy_match_tys(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool;
fn describe_generator(&self, body_id: hir::BodyId) -> Option<&'static str>;
fn find_similar_impl_candidates(
&self,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> Vec<ty::TraitRef<'tcx>>;
fn report_similar_impl_candidates(
&self,
impl_candidates: Vec<ty::TraitRef<'tcx>>,
err: &mut DiagnosticBuilder<'_>,
);
/// Gets the parent trait chain start
fn get_parent_trait_ref(
&self,
code: &ObligationCauseCode<'tcx>,
) -> Option<(String, Option<Span>)>;
/// If the `Self` type of the unsatisfied trait `trait_ref` implements a trait
/// with the same path as `trait_ref`, a help message about
/// a probable version mismatch is added to `err`
fn note_version_mismatch(
&self,
err: &mut DiagnosticBuilder<'_>,
trait_ref: &ty::PolyTraitRef<'tcx>,
);
/// Creates a `PredicateObligation` with `new_self_ty` replacing the existing type in the
/// `trait_ref`.
///
/// For this to work, `new_self_ty` must have no escaping bound variables.
fn mk_trait_obligation_with_new_self_ty(
&self,
param_env: ty::ParamEnv<'tcx>,
trait_ref: &ty::PolyTraitRef<'tcx>,
new_self_ty: Ty<'tcx>,
) -> PredicateObligation<'tcx>;
fn maybe_report_ambiguity(
&self,
obligation: &PredicateObligation<'tcx>,
body_id: Option<hir::BodyId>,
);
fn predicate_can_apply(
&self,
param_env: ty::ParamEnv<'tcx>,
pred: ty::PolyTraitRef<'tcx>,
) -> bool;
fn note_obligation_cause(
&self,
err: &mut DiagnosticBuilder<'tcx>,
obligation: &PredicateObligation<'tcx>,
);
fn suggest_unsized_bound_if_applicable(
&self,
err: &mut DiagnosticBuilder<'tcx>,
obligation: &PredicateObligation<'tcx>,
);
fn is_recursive_obligation(
&self,
obligated_types: &mut Vec<&ty::TyS<'tcx>>,
cause_code: &ObligationCauseCode<'tcx>,
) -> bool;
}
impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> {
// returns if `cond` not occurring implies that `error` does not occur - i.e., that
// `error` occurring implies that `cond` occurs.
fn error_implies(&self, cond: ty::Predicate<'tcx>, error: ty::Predicate<'tcx>) -> bool {
if cond == error {
return true;
}
let (cond, error) = match (cond.kind(), error.kind()) {
(ty::PredicateKind::Trait(..), ty::PredicateKind::Trait(error, _)) => (cond, error),
_ => {
// FIXME: make this work in other cases too.
return false;
}
};
for obligation in super::elaborate_predicates(self.tcx, std::iter::once(cond)) {
if let ty::PredicateKind::Trait(implication, _) = obligation.predicate.kind() {
let error = error.to_poly_trait_ref();
let implication = implication.to_poly_trait_ref();
// FIXME: I'm just not taking associated types at all here.
// Eventually I'll need to implement param-env-aware
// `Γ₁ ⊦ φ₁ => Γ₂ ⊦ φ₂` logic.
let param_env = ty::ParamEnv::empty();
if self.can_sub(param_env, error, implication).is_ok() {
debug!("error_implies: {:?} -> {:?} -> {:?}", cond, error, implication);
return true;
}
}
}
false
}
fn report_fulfillment_error(
&self,
error: &FulfillmentError<'tcx>,
body_id: Option<hir::BodyId>,
fallback_has_occurred: bool,
) {
debug!("report_fulfillment_error({:?})", error);
match error.code {
FulfillmentErrorCode::CodeSelectionError(ref selection_error) => {
self.report_selection_error(
&error.obligation,
selection_error,
fallback_has_occurred,
error.points_at_arg_span,
);
}
FulfillmentErrorCode::CodeProjectionError(ref e) => {
self.report_projection_error(&error.obligation, e);
}
FulfillmentErrorCode::CodeAmbiguity => {
self.maybe_report_ambiguity(&error.obligation, body_id);
}
FulfillmentErrorCode::CodeSubtypeError(ref expected_found, ref err) => {
self.report_mismatched_types(
&error.obligation.cause,
expected_found.expected,
expected_found.found,
err.clone(),
)
.emit();
}
FulfillmentErrorCode::CodeConstEquateError(ref expected_found, ref err) => {
self.report_mismatched_consts(
&error.obligation.cause,
expected_found.expected,
expected_found.found,
err.clone(),
)
.emit();
}
}
}
fn report_projection_error(
&self,
obligation: &PredicateObligation<'tcx>,
error: &MismatchedProjectionTypes<'tcx>,
) {
let predicate = self.resolve_vars_if_possible(&obligation.predicate);
if predicate.references_error() {
return;
}
self.probe(|_| {
let err_buf;
let mut err = &error.err;
let mut values = None;
// try to find the mismatched types to report the error with.
//
            // this can fail if the problem was higher-ranked, in which
            // case I have no idea for a good error message.
if let ty::PredicateKind::Projection(ref data) = predicate.kind() {
let mut selcx = SelectionContext::new(self);
let (data, _) = self.replace_bound_vars_with_fresh_vars(
obligation.cause.span,
infer::LateBoundRegionConversionTime::HigherRankedType,
data,
);
let mut obligations = vec![];
let normalized_ty = super::normalize_projection_type(
&mut selcx,
obligation.param_env,
data.projection_ty,
obligation.cause.clone(),
0,
&mut obligations,
);
debug!(
"report_projection_error obligation.cause={:?} obligation.param_env={:?}",
obligation.cause, obligation.param_env
);
debug!(
"report_projection_error normalized_ty={:?} data.ty={:?}",
normalized_ty, data.ty
);
let is_normalized_ty_expected = match &obligation.cause.code {
ObligationCauseCode::ItemObligation(_)
| ObligationCauseCode::BindingObligation(_, _)
| ObligationCauseCode::ObjectCastObligation(_) => false,
_ => true,
};
if let Err(error) = self.at(&obligation.cause, obligation.param_env).eq_exp(
is_normalized_ty_expected,
normalized_ty,
data.ty,
) {
values = Some(infer::ValuePairs::Types(ExpectedFound::new(
is_normalized_ty_expected,
normalized_ty,
data.ty,
)));
err_buf = error;
err = &err_buf;
}
}
let msg = format!("type mismatch resolving `{}`", predicate);
let error_id = (DiagnosticMessageId::ErrorId(271), Some(obligation.cause.span), msg);
let fresh = self.tcx.sess.one_time_diagnostics.borrow_mut().insert(error_id);
if fresh {
let mut diag = struct_span_err!(
self.tcx.sess,
obligation.cause.span,
E0271,
"type mismatch resolving `{}`",
predicate
);
self.note_type_err(&mut diag, &obligation.cause, None, values, err);
self.note_obligation_cause(&mut diag, obligation);
diag.emit();
}
});
}
fn fuzzy_match_tys(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
/// returns the fuzzy category of a given type, or None
/// if the type can be equated to any type.
fn type_category(t: Ty<'_>) -> Option<u32> {
match t.kind {
ty::Bool => Some(0),
ty::Char => Some(1),
ty::Str => Some(2),
ty::Int(..) | ty::Uint(..) | ty::Infer(ty::IntVar(..)) => Some(3),
ty::Float(..) | ty::Infer(ty::FloatVar(..)) => Some(4),
ty::Ref(..) | ty::RawPtr(..) => Some(5),
ty::Array(..) | ty::Slice(..) => Some(6),
ty::FnDef(..) | ty::FnPtr(..) => Some(7),
ty::Dynamic(..) => Some(8),
ty::Closure(..) => Some(9),
ty::Tuple(..) => Some(10),
ty::Projection(..) => Some(11),
ty::Param(..) => Some(12),
ty::Opaque(..) => Some(13),
ty::Never => Some(14),
ty::Adt(adt, ..) => match adt.adt_kind() {
AdtKind::Struct => Some(15),
AdtKind::Union => Some(16),
AdtKind::Enum => Some(17),
},
ty::Generator(..) => Some(18),
ty::Foreign(..) => Some(19),
ty::GeneratorWitness(..) => Some(20),
ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error(_) => None,
}
}
match (type_category(a), type_category(b)) {
(Some(cat_a), Some(cat_b)) => match (&a.kind, &b.kind) {
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => def_a == def_b,
_ => cat_a == cat_b,
},
// infer and error can be equated to all types
_ => true,
}
}
fn describe_generator(&self, body_id: hir::BodyId) -> Option<&'static str> {
self.tcx.hir().body(body_id).generator_kind.map(|gen_kind| match gen_kind {
hir::GeneratorKind::Gen => "a generator",
hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) => "an async block",
hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Fn) => "an async function",
hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Closure) => "an async closure",
})
}
fn find_similar_impl_candidates(
&self,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> Vec<ty::TraitRef<'tcx>> {
let simp = fast_reject::simplify_type(self.tcx, trait_ref.skip_binder().self_ty(), true);
let all_impls = self.tcx.all_impls(trait_ref.def_id());
match simp {
Some(simp) => all_impls
.filter_map(|def_id| {
let imp = self.tcx.impl_trait_ref(def_id).unwrap();
let imp_simp = fast_reject::simplify_type(self.tcx, imp.self_ty(), true);
if let Some(imp_simp) = imp_simp {
if simp != imp_simp {
return None;
}
}
Some(imp)
})
.collect(),
None => all_impls.map(|def_id| self.tcx.impl_trait_ref(def_id).unwrap()).collect(),
}
}
fn report_similar_impl_candidates(
&self,
impl_candidates: Vec<ty::TraitRef<'tcx>>,
err: &mut DiagnosticBuilder<'_>,
) {
if impl_candidates.is_empty() {
return;
}
let len = impl_candidates.len();
        let end = if len <= 5 { len } else { 4 };
let normalize = |candidate| {
self.tcx.infer_ctxt().enter(|ref infcx| {
let normalized = infcx
.at(&ObligationCause::dummy(), ty::ParamEnv::empty())
.normalize(candidate)
.ok();
match normalized {
Some(normalized) => format!("\n {:?}", normalized.value),
None => format!("\n {:?}", candidate),
}
})
};
// Sort impl candidates so that ordering is consistent for UI tests.
let mut normalized_impl_candidates =
impl_candidates.iter().map(normalize).collect::<Vec<String>>();
// Sort before taking the `..end` range,
// because the ordering of `impl_candidates` may not be deterministic:
// https://github.com/rust-lang/rust/pull/57475#issuecomment-455519507
normalized_impl_candidates.sort();
err.help(&format!(
"the following implementations were found:{}{}",
normalized_impl_candidates[..end].join(""),
if len > 5 { format!("\nand {} others", len - 4) } else { String::new() }
));
}
/// Gets the parent trait chain start
fn get_parent_trait_ref(
&self,
code: &ObligationCauseCode<'tcx>,
) -> Option<(String, Option<Span>)> {
match code {
&ObligationCauseCode::BuiltinDerivedObligation(ref data) => {
let parent_trait_ref = self.resolve_vars_if_possible(&data.parent_trait_ref);
match self.get_parent_trait_ref(&data.parent_code) {
Some(t) => Some(t),
None => {
let ty = parent_trait_ref.skip_binder().self_ty();
let span =
TyCategory::from_ty(ty).map(|(_, def_id)| self.tcx.def_span(def_id));
Some((ty.to_string(), span))
}
}
}
_ => None,
}
}
/// If the `Self` type of the unsatisfied trait `trait_ref` implements a trait
/// with the same path as `trait_ref`, a help message about
/// a probable version mismatch is added to `err`
fn note_version_mismatch(
&self,
err: &mut DiagnosticBuilder<'_>,
trait_ref: &ty::PolyTraitRef<'tcx>,
) {
let get_trait_impl = |trait_def_id| {
let mut trait_impl = None;
self.tcx.for_each_relevant_impl(
trait_def_id,
trait_ref.skip_binder().self_ty(),
|impl_def_id| {
if trait_impl.is_none() {
trait_impl = Some(impl_def_id);
}
},
);
trait_impl
};
let required_trait_path = self.tcx.def_path_str(trait_ref.def_id());
let all_traits = self.tcx.all_traits(LOCAL_CRATE);
let traits_with_same_path: std::collections::BTreeSet<_> = all_traits
.iter()
.filter(|trait_def_id| **trait_def_id != trait_ref.def_id())
.filter(|trait_def_id| self.tcx.def_path_str(**trait_def_id) == required_trait_path)
.collect();
for trait_with_same_path in traits_with_same_path {
if let Some(impl_def_id) = get_trait_impl(*trait_with_same_path) {
let impl_span = self.tcx.def_span(impl_def_id);
err.span_help(impl_span, "trait impl with same name found");
let trait_crate = self.tcx.crate_name(trait_with_same_path.krate);
let crate_msg = format!(
"perhaps two different versions of crate `{}` are being used?",
trait_crate
);
err.note(&crate_msg);
}
}
}
fn mk_trait_obligation_with_new_self_ty(
&self,
param_env: ty::ParamEnv<'tcx>,
trait_ref: &ty::PolyTraitRef<'tcx>,
new_self_ty: Ty<'tcx>,
) -> PredicateObligation<'tcx> {
assert!(!new_self_ty.has_escaping_bound_vars());
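        // Rebuild the trait ref with `new_self_ty` as the `Self` substitution, keeping the
        // remaining substitutions unchanged.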
let trait_ref = trait_ref.map_bound_ref(|tr| ty::TraitRef {
substs: self.tcx.mk_substs_trait(new_self_ty, &tr.substs[1..]),
..*tr
});
Obligation::new(
ObligationCause::dummy(),
param_env,
trait_ref.without_const().to_predicate(self.tcx),
)
}
fn maybe_report_ambiguity(
&self,
obligation: &PredicateObligation<'tcx>,
body_id: Option<hir::BodyId>,
) {
        // We were unable to resolve the predicate. This probably means
        // insufficient type information, but it could also mean
        // ambiguous impls. The latter *ought* to be a
        // coherence violation, so we don't report it here.
let predicate = self.resolve_vars_if_possible(&obligation.predicate);
let span = obligation.cause.span;
debug!(
"maybe_report_ambiguity(predicate={:?}, obligation={:?} body_id={:?}, code={:?})",
predicate, obligation, body_id, obligation.cause.code,
);
// Ambiguity errors are often caused as fallout from earlier
// errors. So just ignore them if this infcx is tainted.
if self.is_tainted_by_errors() {
return;
}
let mut err = match predicate.kind() {
ty::PredicateKind::Trait(ref data, _) => {
let trait_ref = data.to_poly_trait_ref();
let self_ty = trait_ref.skip_binder().self_ty();
debug!("self_ty {:?} {:?} trait_ref {:?}", self_ty, self_ty.kind, trait_ref);
if predicate.references_error() {
return;
}
// Typically, this ambiguity should only happen if
// there are unresolved type inference variables
// (otherwise it would suggest a coherence
// failure). But given #21974 that is not necessarily
// the case -- we can have multiple where clauses that
// are only distinguished by a region, which results
// in an ambiguity even when all types are fully
// known, since we don't dispatch based on region
// relationships.
// This is kind of a hack: it frequently happens that some earlier
// error prevents types from being fully inferred, and then we get
// a bunch of uninteresting errors saying something like "<generic
// #0> doesn't implement Sized". It may even be true that we
// could just skip over all checks where the self-ty is an
// inference variable, but I was afraid that there might be an
// inference variable created, registered as an obligation, and
// then never forced by writeback, and hence by skipping here we'd
// be ignoring the fact that we don't KNOW the type works
// out. Though even that would probably be harmless, given that
// we're only talking about builtin traits, which are known to be
// inhabited. We used to check for `self.tcx.sess.has_errors()` to
// avoid inundating the user with unnecessary errors, but we now
// check upstream for type errors and don't add the obligations to
// begin with in those cases.
if self
.tcx
.lang_items()
.sized_trait()
.map_or(false, |sized_id| sized_id == trait_ref.def_id())
{
self.need_type_info_err(body_id, span, self_ty, ErrorCode::E0282).emit();
return;
}
let mut err = self.need_type_info_err(body_id, span, self_ty, ErrorCode::E0283);
err.note(&format!("cannot satisfy `{}`", predicate));
if let ObligationCauseCode::ItemObligation(def_id) = obligation.cause.code {
self.suggest_fully_qualified_path(&mut err, def_id, span, trait_ref.def_id());
} else if let (
Ok(ref snippet),
ObligationCauseCode::BindingObligation(ref def_id, _),
) =
(self.tcx.sess.source_map().span_to_snippet(span), &obligation.cause.code)
{
let generics = self.tcx.generics_of(*def_id);
if generics.params.iter().any(|p| p.name != kw::SelfUpper)
&& !snippet.ends_with('>')
{
                        // FIXME: To avoid spurious suggestions in functions where type arguments
                        // were already supplied, we check the snippet to make sure it doesn't
// end with a turbofish. Ideally we would have access to a `PathSegment`
// instead. Otherwise we would produce the following output:
//
// error[E0283]: type annotations needed
// --> $DIR/issue-54954.rs:3:24
// |
// LL | const ARR_LEN: usize = Tt::const_val::<[i8; 123]>();
// | ^^^^^^^^^^^^^^^^^^^^^^^^^^
// | |
// | cannot infer type
// | help: consider specifying the type argument
// | in the function call:
// | `Tt::const_val::<[i8; 123]>::<T>`
// ...
// LL | const fn const_val<T: Sized>() -> usize {
// | - required by this bound in `Tt::const_val`
// |
// = note: cannot satisfy `_: Tt`
err.span_suggestion_verbose(
span.shrink_to_hi(),
&format!(
"consider specifying the type argument{} in the function call",
pluralize!(generics.params.len()),
),
format!(
"::<{}>",
generics
.params
.iter()
.map(|p| p.name.to_string())
.collect::<Vec<String>>()
.join(", ")
),
Applicability::HasPlaceholders,
);
}
}
err
}
ty::PredicateKind::WellFormed(arg) => {
                // Same hacky approach as above to avoid deluging the user
                // with error messages.
if arg.references_error() || self.tcx.sess.has_errors() {
return;
}
match arg.unpack() {
GenericArgKind::Lifetime(lt) => {
span_bug!(span, "unexpected well formed predicate: {:?}", lt)
}
GenericArgKind::Type(ty) => {
self.need_type_info_err(body_id, span, ty, ErrorCode::E0282)
}
GenericArgKind::Const(ct) => {
self.need_type_info_err_const(body_id, span, ct, ErrorCode::E0282)
}
}
}
ty::PredicateKind::Subtype(ref data) => {
if data.references_error() || self.tcx.sess.has_errors() {
                    // No need to overwhelm the user in such cases.
return;
}
let SubtypePredicate { a_is_expected: _, a, b } = data.skip_binder();
// both must be type variables, or the other would've been instantiated
assert!(a.is_ty_var() && b.is_ty_var());
self.need_type_info_err(body_id, span, a, ErrorCode::E0282)
}
ty::PredicateKind::Projection(ref data) => {
let trait_ref = data.to_poly_trait_ref(self.tcx);
let self_ty = trait_ref.skip_binder().self_ty();
let ty = data.skip_binder().ty;
if predicate.references_error() {
return;
}
if self_ty.needs_infer() && ty.needs_infer() {
// We do this for the `foo.collect()?` case to produce a suggestion.
let mut err = self.need_type_info_err(body_id, span, self_ty, ErrorCode::E0284);
err.note(&format!("cannot satisfy `{}`", predicate));
err
} else {
let mut err = struct_span_err!(
self.tcx.sess,
span,
E0284,
"type annotations needed: cannot satisfy `{}`",
predicate,
);
err.span_label(span, &format!("cannot satisfy `{}`", predicate));
err
}
}
_ => {
if self.tcx.sess.has_errors() {
return;
}
let mut err = struct_span_err!(
self.tcx.sess,
span,
E0284,
"type annotations needed: cannot satisfy `{}`",
predicate,
);
err.span_label(span, &format!("cannot satisfy `{}`", predicate));
err
}
};
self.note_obligation_cause(&mut err, obligation);
err.emit();
}
/// Returns `true` if the trait predicate may apply for *some* assignment
/// to the type parameters.
fn predicate_can_apply(
&self,
param_env: ty::ParamEnv<'tcx>,
pred: ty::PolyTraitRef<'tcx>,
) -> bool {
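        // A folder that replaces every type parameter with a fresh inference variable,
        // so we can ask whether the predicate holds for *some* instantiation.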
struct ParamToVarFolder<'a, 'tcx> {
infcx: &'a InferCtxt<'a, 'tcx>,
var_map: FxHashMap<Ty<'tcx>, Ty<'tcx>>,
}
impl<'a, 'tcx> TypeFolder<'tcx> for ParamToVarFolder<'a, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
self.infcx.tcx
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
if let ty::Param(ty::ParamTy { name, .. }) = ty.kind {
let infcx = self.infcx;
self.var_map.entry(ty).or_insert_with(|| {
infcx.next_ty_var(TypeVariableOrigin {
kind: TypeVariableOriginKind::TypeParameterDefinition(name, None),
span: DUMMY_SP,
})
})
} else {
ty.super_fold_with(self)
}
}
}
self.probe(|_| {
let mut selcx = SelectionContext::new(self);
let cleaned_pred =
pred.fold_with(&mut ParamToVarFolder { infcx: self, var_map: Default::default() });
let cleaned_pred = super::project::normalize(
&mut selcx,
param_env,
ObligationCause::dummy(),
&cleaned_pred,
)
.value;
let obligation = Obligation::new(
ObligationCause::dummy(),
param_env,
cleaned_pred.without_const().to_predicate(selcx.tcx()),
);
self.predicate_may_hold(&obligation)
})
}
fn note_obligation_cause(
&self,
err: &mut DiagnosticBuilder<'tcx>,
obligation: &PredicateObligation<'tcx>,
) {
// First, attempt to add note to this error with an async-await-specific
// message, and fall back to regular note otherwise.
if !self.maybe_note_obligation_cause_for_async_await(err, obligation) {
self.note_obligation_cause_code(
err,
&obligation.predicate,
&obligation.cause.code,
&mut vec![],
);
self.suggest_unsized_bound_if_applicable(err, obligation);
}
}
fn suggest_unsized_bound_if_applicable(
&self,
err: &mut DiagnosticBuilder<'tcx>,
obligation: &PredicateObligation<'tcx>,
) {
let (pred, item_def_id, span) =
match (obligation.predicate.kind(), &obligation.cause.code.peel_derives()) {
(
ty::PredicateKind::Trait(pred, _),
ObligationCauseCode::BindingObligation(item_def_id, span),
) => (pred, item_def_id, span),
_ => return,
};
let node = match (
self.tcx.hir().get_if_local(*item_def_id),
Some(pred.def_id()) == self.tcx.lang_items().sized_trait(),
) {
(Some(node), true) => node,
_ => return,
};
let generics = match node.generics() {
Some(generics) => generics,
None => return,
};
for param in generics.params {
if param.span != *span
|| param.bounds.iter().any(|bound| {
bound.trait_ref().and_then(|trait_ref| trait_ref.trait_def_id())
== self.tcx.lang_items().sized_trait()
})
{
continue;
}
match node {
hir::Node::Item(
item
@
hir::Item {
kind:
hir::ItemKind::Enum(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::Union(..),
..
},
) => {
// Suggesting `T: ?Sized` is only valid in an ADT if `T` is only used in a
// borrow. `struct S<'a, T: ?Sized>(&'a T);` is valid, `struct S<T: ?Sized>(T);`
// is not.
let mut visitor = FindTypeParam {
param: param.name.ident().name,
invalid_spans: vec![],
nested: false,
};
visitor.visit_item(item);
if !visitor.invalid_spans.is_empty() {
let mut multispan: MultiSpan = param.span.into();
multispan.push_span_label(
param.span,
format!("this could be changed to `{}: ?Sized`...", param.name.ident()),
);
for sp in visitor.invalid_spans {
multispan.push_span_label(
sp,
format!(
"...if indirection was used here: `Box<{}>`",
param.name.ident(),
),
);
}
err.span_help(
multispan,
&format!(
"you could relax the implicit `Sized` bound on `{T}` if it were \
used through indirection like `&{T}` or `Box<{T}>`",
T = param.name.ident(),
),
);
return;
}
}
_ => {}
}
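            // With no existing bounds, start a new bound list with `:`; otherwise append
            // to the last bound with ` +`.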
let (span, separator) = match param.bounds {
[] => (span.shrink_to_hi(), ":"),
[.., bound] => (bound.span().shrink_to_hi(), " +"),
};
err.span_suggestion_verbose(
span,
"consider relaxing the implicit `Sized` restriction",
format!("{} ?Sized", separator),
Applicability::MachineApplicable,
);
return;
}
}
fn is_recursive_obligation(
&self,
obligated_types: &mut Vec<&ty::TyS<'tcx>>,
cause_code: &ObligationCauseCode<'tcx>,
) -> bool {
if let ObligationCauseCode::BuiltinDerivedObligation(ref data) = cause_code {
let parent_trait_ref = self.resolve_vars_if_possible(&data.parent_trait_ref);
if obligated_types.iter().any(|ot| ot == &parent_trait_ref.skip_binder().self_ty()) {
return true;
}
}
false
}
}
/// Look for type `param` in an ADT being used only through a reference to confirm that suggesting
/// `param: ?Sized` would be a valid constraint.
struct FindTypeParam {
param: rustc_span::Symbol,
invalid_spans: Vec<Span>,
nested: bool,
}
impl<'v> Visitor<'v> for FindTypeParam {
type Map = rustc_hir::intravisit::ErasedMap<'v>;
fn nested_visit_map(&mut self) -> hir::intravisit::NestedVisitorMap<Self::Map> {
hir::intravisit::NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &hir::Ty<'_>) {
// We collect the spans of all uses of the "bare" type param, like in `field: T` or
// `field: (T, T)` where we could make `T: ?Sized` while skipping cases that are known to be
// valid like `field: &'a T` or `field: *mut T` and cases that *might* have further `Sized`
// obligations like `Box<T>` and `Vec<T>`, but we perform no extra analysis for those cases
// and suggest `T: ?Sized` regardless of their obligations. This is fine because the errors
// in that case should make what happened clear enough.
match ty.kind {
hir::TyKind::Ptr(_) | hir::TyKind::Rptr(..) | hir::TyKind::TraitObject(..) => {}
hir::TyKind::Path(hir::QPath::Resolved(None, path))
if path.segments.len() == 1 && path.segments[0].ident.name == self.param =>
{
if !self.nested {
self.invalid_spans.push(ty.span);
}
}
hir::TyKind::Path(_) => {
let prev = self.nested;
self.nested = true;
hir::intravisit::walk_ty(self, ty);
self.nested = prev;
}
_ => {
hir::intravisit::walk_ty(self, ty);
}
}
}
}
pub fn recursive_type_with_infinite_size_error(
tcx: TyCtxt<'tcx>,
type_def_id: DefId,
spans: Vec<Span>,
) {
assert!(type_def_id.is_local());
let span = tcx.hir().span_if_local(type_def_id).unwrap();
let span = tcx.sess.source_map().guess_head_span(span);
let path = tcx.def_path_str(type_def_id);
let mut err =
struct_span_err!(tcx.sess, span, E0072, "recursive type `{}` has infinite size", path);
err.span_label(span, "recursive type has infinite size");
for &span in &spans {
err.span_label(span, "recursive without indirection");
}
let msg = format!(
"insert some indirection (e.g., a `Box`, `Rc`, or `&`) to make `{}` representable",
path,
);
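    // With only a handful of recursive fields, suggest boxing each one in place;
    // otherwise fall back to a plain help note.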
if spans.len() <= 4 {
err.multipart_suggestion(
&msg,
spans
.iter()
.flat_map(|&span| {
vec![
(span.shrink_to_lo(), "Box<".to_string()),
(span.shrink_to_hi(), ">".to_string()),
]
.into_iter()
})
.collect(),
Applicability::HasPlaceholders,
);
} else {
err.help(&msg);
}
err.emit();
}
/// Summarizes information about an argument, for use in diagnostics about argument mismatches.
#[derive(Clone)]
pub enum ArgKind {
/// An argument of non-tuple type. Parameters are (name, ty)
Arg(String, String),
    /// An argument of tuple type. For a "found" argument, the span is
    /// the location in the source of the pattern. For an "expected"
    /// argument, it will be None. The vector is a list of (name, ty)
/// strings for the components of the tuple.
Tuple(Option<Span>, Vec<(String, String)>),
}
impl ArgKind {
fn empty() -> ArgKind {
ArgKind::Arg("_".to_owned(), "_".to_owned())
}
/// Creates an `ArgKind` from the expected type of an
/// argument. It has no name (`_`) and an optional source span.
pub fn from_expected_ty(t: Ty<'_>, span: Option<Span>) -> ArgKind {
match t.kind {
ty::Tuple(ref tys) => ArgKind::Tuple(
span,
tys.iter().map(|ty| ("_".to_owned(), ty.to_string())).collect::<Vec<_>>(),
),
_ => ArgKind::Arg("_".to_owned(), t.to_string()),
}
}
}
| 42.711499 | 107 | 0.455903 |
e2a8924f5bc83d325c64b1ff73c25ff58f716547 | 5,062 | #[cfg(not(feature = "library"))]
use cosmwasm_std::entry_point;
use cosmwasm_std::{to_binary, Binary, Deps, DepsMut, Env, MessageInfo, Response, StdResult};
use cw2::set_contract_version;
use crate::error::ContractError;
use crate::msg::{CountResponse, ExecuteMsg, InstantiateMsg, QueryMsg};
use crate::state::{State, STATE};
// version info, used for contract migration
const CONTRACT_NAME: &str = "crates.io:simple-option";
const CONTRACT_VERSION: &str = env!("CARGO_PKG_VERSION");
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn instantiate(
deps: DepsMut,
_env: Env,
info: MessageInfo,
msg: InstantiateMsg,
) -> Result<Response, ContractError> {
let state = State {
count: msg.count,
owner: info.sender.clone(),
};
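    // Record the contract name and version so future migrations can verify what they
    // are upgrading from.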
set_contract_version(deps.storage, CONTRACT_NAME, CONTRACT_VERSION)?;
STATE.save(deps.storage, &state)?;
Ok(Response::new()
.add_attribute("method", "instantiate")
.add_attribute("owner", info.sender)
.add_attribute("count", msg.count.to_string()))
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn execute(
deps: DepsMut,
_env: Env,
info: MessageInfo,
msg: ExecuteMsg,
) -> Result<Response, ContractError> {
match msg {
ExecuteMsg::Increment {} => try_increment(deps),
ExecuteMsg::Reset { count } => try_reset(deps, info, count),
}
}
pub fn try_increment(deps: DepsMut) -> Result<Response, ContractError> {
STATE.update(deps.storage, |mut state| -> Result<_, ContractError> {
state.count += 1;
Ok(state)
})?;
Ok(Response::new().add_attribute("method", "try_increment"))
}
pub fn try_reset(deps: DepsMut, info: MessageInfo, count: i32) -> Result<Response, ContractError> {
STATE.update(deps.storage, |mut state| -> Result<_, ContractError> {
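        // Only the contract owner may reset the counter.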
if info.sender != state.owner {
return Err(ContractError::Unauthorized {});
}
state.count = count;
Ok(state)
})?;
Ok(Response::new().add_attribute("method", "reset"))
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult<Binary> {
match msg {
QueryMsg::GetCount {} => to_binary(&query_count(deps)?),
}
}
fn query_count(deps: Deps) -> StdResult<CountResponse> {
let state = STATE.load(deps.storage)?;
Ok(CountResponse { count: state.count })
}
#[cfg(test)]
mod tests {
use super::*;
use cosmwasm_std::testing::{mock_dependencies, mock_env, mock_info};
use cosmwasm_std::{coins, from_binary};
#[test]
fn proper_initialization() {
let mut deps = mock_dependencies(&[]);
let msg = InstantiateMsg { count: 17 };
let info = mock_info("creator", &coins(1000, "earth"));
// we can just call .unwrap() to assert this was a success
let res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
assert_eq!(0, res.messages.len());
// it worked, let's query the state
let res = query(deps.as_ref(), mock_env(), QueryMsg::GetCount {}).unwrap();
let value: CountResponse = from_binary(&res).unwrap();
assert_eq!(17, value.count);
}
#[test]
fn increment() {
let mut deps = mock_dependencies(&coins(2, "token"));
let msg = InstantiateMsg { count: 17 };
let info = mock_info("creator", &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
// beneficiary can release it
let info = mock_info("anyone", &coins(2, "token"));
let msg = ExecuteMsg::Increment {};
let _res = execute(deps.as_mut(), mock_env(), info, msg).unwrap();
// should increase counter by 1
let res = query(deps.as_ref(), mock_env(), QueryMsg::GetCount {}).unwrap();
let value: CountResponse = from_binary(&res).unwrap();
assert_eq!(18, value.count);
}
#[test]
fn reset() {
let mut deps = mock_dependencies(&coins(2, "token"));
let msg = InstantiateMsg { count: 17 };
let info = mock_info("creator", &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
// beneficiary can release it
let unauth_info = mock_info("anyone", &coins(2, "token"));
let msg = ExecuteMsg::Reset { count: 5 };
let res = execute(deps.as_mut(), mock_env(), unauth_info, msg);
match res {
Err(ContractError::Unauthorized {}) => {}
_ => panic!("Must return unauthorized error"),
}
// only the original creator can reset the counter
let auth_info = mock_info("creator", &coins(2, "token"));
let msg = ExecuteMsg::Reset { count: 5 };
let _res = execute(deps.as_mut(), mock_env(), auth_info, msg).unwrap();
// should now be 5
let res = query(deps.as_ref(), mock_env(), QueryMsg::GetCount {}).unwrap();
let value: CountResponse = from_binary(&res).unwrap();
assert_eq!(5, value.count);
}
}
| 34.202703 | 99 | 0.617938 |
339c263120c9576e34c4e558f6cf7bfe6c3751c0 | 356 | // primitive_types3.rs
// Create an array with at least 100 elements in it where the ??? is.
// Execute `rustlings hint primitive_types3` for hints!
// I AM NOT DONE
fn main() {
    let a = [0; 100];
if a.len() >= 100 {
println!("Wow, that's a big array!");
} else {
println!("Meh, I eat arrays like that for breakfast.");
}
}
| 22.25 | 69 | 0.592697 |
1db704b65ca0f909193571c6e07b01a8d8d097c3 | 4,716 | use crate::text::parsers::numeric_support::{
digits_before_dot, exponent_digits, floating_point_number,
};
use crate::text::parsers::stop_character;
use crate::text::text_value::TextValue;
use nom::branch::alt;
use nom::bytes::streaming::tag;
use nom::character::streaming::one_of;
use nom::combinator::{map, map_res, opt, recognize};
use nom::sequence::{preceded, terminated, tuple};
use nom::{IResult, Parser};
use std::num::ParseFloatError;
use std::str::FromStr;
/// Matches the text representation of a float value and returns the resulting [f64]
/// as a [TextValue::Float].
pub(crate) fn parse_float(input: &str) -> IResult<&str, TextValue> {
terminated(
alt((float_special_value, float_numeric_value)),
stop_character,
)(input)
}
/// Matches special IEEE-754 floating point values, including +/- infinity and NaN.
fn float_special_value(input: &str) -> IResult<&str, TextValue> {
map(tag("nan"), |_| TextValue::Float(f64::NAN))
.or(map(tag("+inf"), |_| TextValue::Float(f64::INFINITY)))
.or(map(tag("-inf"), |_| TextValue::Float(f64::NEG_INFINITY)))
.parse(input)
}
/// Matches numeric floating point values (e.g. `7e0`, `7.1e0`, or `71e-1`).
fn float_numeric_value(input: &str) -> IResult<&str, TextValue> {
map_res::<_, _, _, _, ParseFloatError, _, _>(
recognize(tuple((
opt(tag("-")),
alt((floating_point_number, digits_before_dot)),
recognize(float_exponent_marker_followed_by_digits),
))),
|text| {
// TODO: Reusable buffer for sanitization
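            // Ion allows `_` as a digit separator, which Rust's float parser rejects,
            // so strip underscores before parsing.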
let mut sanitized = text.replace("_", "");
if sanitized.ends_with('e') || sanitized.ends_with('E') {
sanitized.push('0');
}
Ok(TextValue::Float(f64::from_str(&sanitized)?))
},
)(input)
}
fn float_exponent_marker_followed_by_digits(input: &str) -> IResult<&str, &str> {
preceded(one_of("eE"), exponent_digits)(input)
}
#[cfg(test)]
mod float_parsing_tests {
use crate::text::parsers::float::parse_float;
use crate::text::parsers::unit_test_support::{parse_test_err, parse_test_ok, parse_unwrap};
use crate::text::text_value::TextValue;
use std::str::FromStr;
fn parse_equals(text: &str, expected: f64) {
parse_test_ok(parse_float, text, TextValue::Float(expected))
}
fn parse_fails(text: &str) {
parse_test_err(parse_float, text)
}
#[test]
fn test_parse_float_special_values() {
parse_equals("+inf ", f64::INFINITY);
parse_equals("-inf ", f64::NEG_INFINITY);
// Can't test two NaNs for equality with assert_eq
let value = parse_unwrap(parse_float, "nan ");
if let TextValue::Float(f) = value {
assert!(f.is_nan());
} else {
panic!("Expected NaN, but got: {:?}", value);
}
}
#[test]
fn test_parse_float_numeric_values() {
parse_equals("0.0e0 ", 0.0);
parse_equals("0E0 ", 0.0);
parse_equals("0e0 ", 0e0);
parse_equals("305e1 ", 3050.0);
parse_equals("305.0e1 ", 3050.0);
parse_equals("-0.279e3 ", -279.0);
parse_equals("-279e0 ", -279.0);
parse_equals("-279.5e0 ", -279.5);
// Missing exponent (would be parsed as an integer)
parse_fails("305 ");
// Has exponent delimiter but missing exponent
parse_fails("305e ");
// No digits before the decimal point
parse_fails(".305e ");
// Fractional exponent
parse_fails("305e0.5");
// Negative fractional exponent
parse_fails("305e-0.5");
// Doesn't consume leading whitespace
parse_fails(" 305e1 ");
// Doesn't accept leading zeros
parse_fails("0305e1 ");
// Doesn't accept leading plus sign
parse_fails("+305e1 ");
// Doesn't accept multiple negative signs
parse_fails("--305e1 ");
// Doesn't accept a number if it's the last thing in the input (might be incomplete stream)
parse_fails("305e1");
}
#[test]
fn test_parse_float_numeric_values_with_underscores() {
parse_equals("111_111e222 ", 111111.0 * 10f64.powf(222f64));
parse_equals("111_111.667e222 ", 111111.667 * 10f64.powf(222f64));
parse_equals("111_111e222_222 ", 111111.0 * 10f64.powf(222222f64));
parse_equals("-999_9e9_9 ", f64::from_str("-9999e99").unwrap());
// Doesn't accept leading underscores
parse_fails("_305e1 ");
// Doesn't accept trailing underscores
parse_fails("305e1_ ");
// Doesn't accept multiple consecutive underscores
parse_fails("30__5e1 ");
}
}
| 35.727273 | 99 | 0.619169 |
29a2e3545bcf7840d00e7c95bc425e5acff40753 | 31,191 | //! A C representation of Rust's `std::option::Option` used across the FFI
//! boundary for Solana program interfaces
//!
//! This implementation mostly matches `std::option`, except for iterators, since the
//! `Iterator` trait requires returning `std::option::Option`.
use std::{
convert, mem,
ops::{Deref, DerefMut},
};
/// A C representation of Rust's `std::option::Option`
#[repr(C)]
#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
pub enum COption<T> {
/// No value
None,
/// Some value `T`
Some(T),
}
/////////////////////////////////////////////////////////////////////////////
// Type implementation
/////////////////////////////////////////////////////////////////////////////
impl<T> COption<T> {
/////////////////////////////////////////////////////////////////////////
// Querying the contained values
/////////////////////////////////////////////////////////////////////////
/// Returns `true` if the option is a [`COption::Some`] value.
///
/// # Examples
///
/// ```ignore
/// let x: COption<u32> = COption::Some(2);
/// assert_eq!(x.is_some(), true);
///
/// let x: COption<u32> = COption::None;
/// assert_eq!(x.is_some(), false);
/// ```
///
    /// [`COption::Some`]: #variant.Some
#[must_use = "if you intended to assert that this has a value, consider `.unwrap()` instead"]
#[inline]
pub fn is_some(&self) -> bool {
match *self {
COption::Some(_) => true,
COption::None => false,
}
}
/// Returns `true` if the option is a [`COption::None`] value.
///
/// # Examples
///
/// ```ignore
/// let x: COption<u32> = COption::Some(2);
/// assert_eq!(x.is_none(), false);
///
/// let x: COption<u32> = COption::None;
/// assert_eq!(x.is_none(), true);
/// ```
///
    /// [`COption::None`]: #variant.None
#[must_use = "if you intended to assert that this doesn't have a value, consider \
`.and_then(|| panic!(\"`COption` had a value when expected `COption::None`\"))` instead"]
#[inline]
pub fn is_none(&self) -> bool {
!self.is_some()
}
/// Returns `true` if the option is a [`COption::Some`] value containing the given value.
///
/// # Examples
///
/// ```ignore
/// #![feature(option_result_contains)]
///
/// let x: COption<u32> = COption::Some(2);
/// assert_eq!(x.contains(&2), true);
///
/// let x: COption<u32> = COption::Some(3);
/// assert_eq!(x.contains(&2), false);
///
/// let x: COption<u32> = COption::None;
/// assert_eq!(x.contains(&2), false);
/// ```
#[must_use]
#[inline]
pub fn contains<U>(&self, x: &U) -> bool
where
U: PartialEq<T>,
{
match self {
COption::Some(y) => x == y,
COption::None => false,
}
}
/////////////////////////////////////////////////////////////////////////
// Adapter for working with references
/////////////////////////////////////////////////////////////////////////
/// Converts from `&COption<T>` to `COption<&T>`.
///
/// # Examples
///
    /// Converts a `COption<`[`String`]`>` into a `COption<`[`usize`]`>`, preserving the original.
/// The [`map`] method takes the `self` argument by value, consuming the original,
/// so this technique uses `as_ref` to first take an `COption` to a reference
/// to the value inside the original.
///
/// [`map`]: enum.COption.html#method.map
/// [`String`]: ../../std/string/struct.String.html
/// [`usize`]: ../../std/primitive.usize.html
///
/// ```ignore
/// let text: COption<String> = COption::Some("Hello, world!".to_string());
/// // First, cast `COption<String>` to `COption<&String>` with `as_ref`,
/// // then consume *that* with `map`, leaving `text` on the stack.
/// let text_length: COption<usize> = text.as_ref().map(|s| s.len());
/// println!("still can print text: {:?}", text);
/// ```
#[inline]
pub fn as_ref(&self) -> COption<&T> {
match *self {
COption::Some(ref x) => COption::Some(x),
COption::None => COption::None,
}
}
/// Converts from `&mut COption<T>` to `COption<&mut T>`.
///
/// # Examples
///
/// ```ignore
/// let mut x = COption::Some(2);
/// match x.as_mut() {
/// COption::Some(v) => *v = 42,
/// COption::None => {},
/// }
/// assert_eq!(x, COption::Some(42));
/// ```
#[inline]
pub fn as_mut(&mut self) -> COption<&mut T> {
match *self {
COption::Some(ref mut x) => COption::Some(x),
COption::None => COption::None,
}
}
/////////////////////////////////////////////////////////////////////////
// Getting to contained values
/////////////////////////////////////////////////////////////////////////
/// Unwraps an option, yielding the content of a [`COption::Some`].
///
/// # Panics
///
/// Panics if the value is a [`COption::None`] with a custom panic message provided by
/// `msg`.
///
    /// [`COption::Some`]: #variant.Some
    /// [`COption::None`]: #variant.None
///
/// # Examples
///
/// ```ignore
/// let x = COption::Some("value");
/// assert_eq!(x.expect("the world is ending"), "value");
/// ```
///
    /// ```ignore,should_panic
/// let x: COption<&str> = COption::None;
/// x.expect("the world is ending"); // panics with `the world is ending`
/// ```
#[inline]
pub fn expect(self, msg: &str) -> T {
match self {
COption::Some(val) => val,
COption::None => expect_failed(msg),
}
}
/// Moves the value `v` out of the `COption<T>` if it is [`COption::Some(v)`].
///
/// In general, because this function may panic, its use is discouraged.
/// Instead, prefer to use pattern matching and handle the [`COption::None`]
/// case explicitly.
///
/// # Panics
///
/// Panics if the self value equals [`COption::None`].
///
    /// [`COption::Some(v)`]: #variant.Some
    /// [`COption::None`]: #variant.None
///
/// # Examples
///
/// ```ignore
/// let x = COption::Some("air");
/// assert_eq!(x.unwrap(), "air");
/// ```
///
    /// ```ignore,should_panic
/// let x: COption<&str> = COption::None;
/// assert_eq!(x.unwrap(), "air"); // fails
/// ```
#[inline]
pub fn unwrap(self) -> T {
match self {
COption::Some(val) => val,
COption::None => panic!("called `COption::unwrap()` on a `COption::None` value"),
}
}
/// Returns the contained value or a default.
///
/// Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing
/// the result of a function call, it is recommended to use [`unwrap_or_else`],
/// which is lazily evaluated.
///
/// [`unwrap_or_else`]: #method.unwrap_or_else
///
/// # Examples
///
/// ```ignore
/// assert_eq!(COption::Some("car").unwrap_or("bike"), "car");
/// assert_eq!(COption::None.unwrap_or("bike"), "bike");
/// ```
#[inline]
pub fn unwrap_or(self, def: T) -> T {
match self {
COption::Some(x) => x,
COption::None => def,
}
}
/// Returns the contained value or computes it from a closure.
///
/// # Examples
///
/// ```ignore
/// let k = 10;
/// assert_eq!(COption::Some(4).unwrap_or_else(|| 2 * k), 4);
/// assert_eq!(COption::None.unwrap_or_else(|| 2 * k), 20);
/// ```
#[inline]
pub fn unwrap_or_else<F: FnOnce() -> T>(self, f: F) -> T {
match self {
COption::Some(x) => x,
COption::None => f(),
}
}
/////////////////////////////////////////////////////////////////////////
// Transforming contained values
/////////////////////////////////////////////////////////////////////////
    /// Maps a `COption<T>` to `COption<U>` by applying a function to a contained value.
///
/// # Examples
///
    /// Converts a `COption<`[`String`]`>` into a `COption<`[`usize`]`>`, consuming the original:
///
/// [`String`]: ../../std/string/struct.String.html
/// [`usize`]: ../../std/primitive.usize.html
///
/// ```ignore
/// let maybe_some_string = COption::Some(String::from("Hello, World!"));
/// // `COption::map` takes self *by value*, consuming `maybe_some_string`
/// let maybe_some_len = maybe_some_string.map(|s| s.len());
///
/// assert_eq!(maybe_some_len, COption::Some(13));
/// ```
#[inline]
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> COption<U> {
match self {
COption::Some(x) => COption::Some(f(x)),
COption::None => COption::None,
}
}
/// Applies a function to the contained value (if any),
/// or returns the provided default (if not).
///
/// # Examples
///
/// ```ignore
/// let x = COption::Some("foo");
/// assert_eq!(x.map_or(42, |v| v.len()), 3);
///
/// let x: COption<&str> = COption::None;
/// assert_eq!(x.map_or(42, |v| v.len()), 42);
/// ```
#[inline]
pub fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U {
match self {
COption::Some(t) => f(t),
COption::None => default,
}
}
/// Applies a function to the contained value (if any),
/// or computes a default (if not).
///
/// # Examples
///
/// ```ignore
/// let k = 21;
///
/// let x = COption::Some("foo");
/// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 3);
///
/// let x: COption<&str> = COption::None;
/// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 42);
/// ```
#[inline]
pub fn map_or_else<U, D: FnOnce() -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U {
match self {
COption::Some(t) => f(t),
COption::None => default(),
}
}
/// Transforms the `COption<T>` into a [`Result<T, E>`], mapping [`COption::Some(v)`] to
/// [`Ok(v)`] and [`COption::None`] to [`Err(err)`].
///
/// Arguments passed to `ok_or` are eagerly evaluated; if you are passing the
/// result of a function call, it is recommended to use [`ok_or_else`], which is
/// lazily evaluated.
///
/// [`Result<T, E>`]: ../../std/result/enum.Result.html
/// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok
/// [`Err(err)`]: ../../std/result/enum.Result.html#variant.Err
    /// [`COption::None`]: #variant.None
    /// [`COption::Some(v)`]: #variant.Some
/// [`ok_or_else`]: #method.ok_or_else
///
/// # Examples
///
/// ```ignore
/// let x = COption::Some("foo");
/// assert_eq!(x.ok_or(0), Ok("foo"));
///
/// let x: COption<&str> = COption::None;
/// assert_eq!(x.ok_or(0), Err(0));
/// ```
#[inline]
pub fn ok_or<E>(self, err: E) -> Result<T, E> {
match self {
COption::Some(v) => Ok(v),
COption::None => Err(err),
}
}
/// Transforms the `COption<T>` into a [`Result<T, E>`], mapping [`COption::Some(v)`] to
/// [`Ok(v)`] and [`COption::None`] to [`Err(err())`].
///
/// [`Result<T, E>`]: ../../std/result/enum.Result.html
/// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok
/// [`Err(err())`]: ../../std/result/enum.Result.html#variant.Err
    /// [`COption::None`]: #variant.None
    /// [`COption::Some(v)`]: #variant.Some
///
/// # Examples
///
/// ```ignore
/// let x = COption::Some("foo");
/// assert_eq!(x.ok_or_else(|| 0), Ok("foo"));
///
/// let x: COption<&str> = COption::None;
/// assert_eq!(x.ok_or_else(|| 0), Err(0));
/// ```
#[inline]
pub fn ok_or_else<E, F: FnOnce() -> E>(self, err: F) -> Result<T, E> {
match self {
COption::Some(v) => Ok(v),
COption::None => Err(err()),
}
}
/////////////////////////////////////////////////////////////////////////
// Boolean operations on the values, eager and lazy
/////////////////////////////////////////////////////////////////////////
/// Returns [`COption::None`] if the option is [`COption::None`], otherwise returns `optb`.
///
    /// [`COption::None`]: #variant.None
///
/// # Examples
///
/// ```ignore
/// let x = COption::Some(2);
/// let y: COption<&str> = COption::None;
/// assert_eq!(x.and(y), COption::None);
///
/// let x: COption<u32> = COption::None;
/// let y = COption::Some("foo");
/// assert_eq!(x.and(y), COption::None);
///
/// let x = COption::Some(2);
/// let y = COption::Some("foo");
/// assert_eq!(x.and(y), COption::Some("foo"));
///
/// let x: COption<u32> = COption::None;
/// let y: COption<&str> = COption::None;
/// assert_eq!(x.and(y), COption::None);
/// ```
#[inline]
pub fn and<U>(self, optb: COption<U>) -> COption<U> {
match self {
COption::Some(_) => optb,
COption::None => COption::None,
}
}
/// Returns [`COption::None`] if the option is [`COption::None`], otherwise calls `f` with the
/// wrapped value and returns the result.
///
    /// Some languages call this operation flatmap.
///
    /// [`COption::None`]: #variant.None
///
/// # Examples
///
/// ```ignore
/// fn sq(x: u32) -> COption<u32> { COption::Some(x * x) }
/// fn nope(_: u32) -> COption<u32> { COption::None }
///
/// assert_eq!(COption::Some(2).and_then(sq).and_then(sq), COption::Some(16));
/// assert_eq!(COption::Some(2).and_then(sq).and_then(nope), COption::None);
/// assert_eq!(COption::Some(2).and_then(nope).and_then(sq), COption::None);
/// assert_eq!(COption::None.and_then(sq).and_then(sq), COption::None);
/// ```
#[inline]
pub fn and_then<U, F: FnOnce(T) -> COption<U>>(self, f: F) -> COption<U> {
match self {
COption::Some(x) => f(x),
COption::None => COption::None,
}
}
/// Returns [`COption::None`] if the option is [`COption::None`], otherwise calls `predicate`
/// with the wrapped value and returns:
///
/// - [`COption::Some(t)`] if `predicate` returns `true` (where `t` is the wrapped
/// value), and
/// - [`COption::None`] if `predicate` returns `false`.
///
/// This function works similar to [`Iterator::filter()`]. You can imagine
/// the `COption<T>` being an iterator over one or zero elements. `filter()`
/// lets you decide which elements to keep.
///
/// # Examples
///
/// ```ignore
/// fn is_even(n: &i32) -> bool {
/// n % 2 == 0
/// }
///
/// assert_eq!(COption::None.filter(is_even), COption::None);
/// assert_eq!(COption::Some(3).filter(is_even), COption::None);
/// assert_eq!(COption::Some(4).filter(is_even), COption::Some(4));
/// ```
///
    /// [`COption::None`]: #variant.None
    /// [`COption::Some(t)`]: #variant.Some
/// [`Iterator::filter()`]: ../../std/iter/trait.Iterator.html#method.filter
#[inline]
pub fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self {
if let COption::Some(x) = self {
if predicate(&x) {
return COption::Some(x);
}
}
COption::None
}
/// Returns the option if it contains a value, otherwise returns `optb`.
///
/// Arguments passed to `or` are eagerly evaluated; if you are passing the
/// result of a function call, it is recommended to use [`or_else`], which is
/// lazily evaluated.
///
/// [`or_else`]: #method.or_else
///
/// # Examples
///
/// ```ignore
/// let x = COption::Some(2);
/// let y = COption::None;
/// assert_eq!(x.or(y), COption::Some(2));
///
/// let x = COption::None;
/// let y = COption::Some(100);
/// assert_eq!(x.or(y), COption::Some(100));
///
/// let x = COption::Some(2);
/// let y = COption::Some(100);
/// assert_eq!(x.or(y), COption::Some(2));
///
/// let x: COption<u32> = COption::None;
/// let y = COption::None;
/// assert_eq!(x.or(y), COption::None);
/// ```
#[inline]
pub fn or(self, optb: COption<T>) -> COption<T> {
match self {
COption::Some(_) => self,
COption::None => optb,
}
}
/// Returns the option if it contains a value, otherwise calls `f` and
/// returns the result.
///
/// # Examples
///
/// ```ignore
/// fn nobody() -> COption<&'static str> { COption::None }
/// fn vikings() -> COption<&'static str> { COption::Some("vikings") }
///
/// assert_eq!(COption::Some("barbarians").or_else(vikings), COption::Some("barbarians"));
/// assert_eq!(COption::None.or_else(vikings), COption::Some("vikings"));
/// assert_eq!(COption::None.or_else(nobody), COption::None);
/// ```
#[inline]
pub fn or_else<F: FnOnce() -> COption<T>>(self, f: F) -> COption<T> {
match self {
COption::Some(_) => self,
COption::None => f(),
}
}
/// Returns [`COption::Some`] if exactly one of `self`, `optb` is [`COption::Some`], otherwise returns [`COption::None`].
///
    /// [`COption::Some`]: #variant.Some
    /// [`COption::None`]: #variant.None
///
/// # Examples
///
/// ```ignore
/// let x = COption::Some(2);
/// let y: COption<u32> = COption::None;
/// assert_eq!(x.xor(y), COption::Some(2));
///
/// let x: COption<u32> = COption::None;
/// let y = COption::Some(2);
/// assert_eq!(x.xor(y), COption::Some(2));
///
/// let x = COption::Some(2);
/// let y = COption::Some(2);
/// assert_eq!(x.xor(y), COption::None);
///
/// let x: COption<u32> = COption::None;
/// let y: COption<u32> = COption::None;
/// assert_eq!(x.xor(y), COption::None);
/// ```
#[inline]
pub fn xor(self, optb: COption<T>) -> COption<T> {
match (self, optb) {
(COption::Some(a), COption::None) => COption::Some(a),
(COption::None, COption::Some(b)) => COption::Some(b),
_ => COption::None,
}
}
/////////////////////////////////////////////////////////////////////////
// Entry-like operations to insert if COption::None and return a reference
/////////////////////////////////////////////////////////////////////////
/// Inserts `v` into the option if it is [`COption::None`], then
/// returns a mutable reference to the contained value.
///
    /// [`COption::None`]: #variant.None
///
/// # Examples
///
/// ```ignore
/// let mut x = COption::None;
///
/// {
/// let y: &mut u32 = x.get_or_insert(5);
/// assert_eq!(y, &5);
///
/// *y = 7;
/// }
///
/// assert_eq!(x, COption::Some(7));
/// ```
#[inline]
pub fn get_or_insert(&mut self, v: T) -> &mut T {
self.get_or_insert_with(|| v)
}
/// Inserts a value computed from `f` into the option if it is [`COption::None`], then
/// returns a mutable reference to the contained value.
///
    /// [`COption::None`]: #variant.None
///
/// # Examples
///
/// ```ignore
/// let mut x = COption::None;
///
/// {
/// let y: &mut u32 = x.get_or_insert_with(|| 5);
/// assert_eq!(y, &5);
///
/// *y = 7;
/// }
///
/// assert_eq!(x, COption::Some(7));
/// ```
#[inline]
pub fn get_or_insert_with<F: FnOnce() -> T>(&mut self, f: F) -> &mut T {
if let COption::None = *self {
*self = COption::Some(f())
}
match *self {
COption::Some(ref mut v) => v,
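            // `self` was just set to `COption::Some` above, so this arm is unreachable.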
COption::None => unreachable!(),
}
}
/////////////////////////////////////////////////////////////////////////
// Misc
/////////////////////////////////////////////////////////////////////////
/// Replaces the actual value in the option by the value given in parameter,
/// returning the old value if present,
/// leaving a [`COption::Some`] in its place without deinitializing either one.
///
    /// [`COption::Some`]: #variant.Some
///
/// # Examples
///
/// ```ignore
/// let mut x = COption::Some(2);
/// let old = x.replace(5);
/// assert_eq!(x, COption::Some(5));
/// assert_eq!(old, COption::Some(2));
///
/// let mut x = COption::None;
/// let old = x.replace(3);
/// assert_eq!(x, COption::Some(3));
/// assert_eq!(old, COption::None);
/// ```
#[inline]
pub fn replace(&mut self, value: T) -> COption<T> {
mem::replace(self, COption::Some(value))
}
}
impl<T: Copy> COption<&T> {
    /// Maps a `COption<&T>` to a `COption<T>` by copying the contents of the
/// option.
///
/// # Examples
///
/// ```ignore
/// let x = 12;
/// let opt_x = COption::Some(&x);
/// assert_eq!(opt_x, COption::Some(&12));
/// let copied = opt_x.copied();
/// assert_eq!(copied, COption::Some(12));
/// ```
pub fn copied(self) -> COption<T> {
self.map(|&t| t)
}
}
impl<T: Copy> COption<&mut T> {
    /// Maps a `COption<&mut T>` to a `COption<T>` by copying the contents of the
/// option.
///
/// # Examples
///
/// ```ignore
/// let mut x = 12;
/// let opt_x = COption::Some(&mut x);
/// assert_eq!(opt_x, COption::Some(&mut 12));
/// let copied = opt_x.copied();
/// assert_eq!(copied, COption::Some(12));
/// ```
pub fn copied(self) -> COption<T> {
self.map(|&mut t| t)
}
}
impl<T: Clone> COption<&T> {
    /// Maps a `COption<&T>` to a `COption<T>` by cloning the contents of the
/// option.
///
/// # Examples
///
/// ```ignore
/// let x = 12;
/// let opt_x = COption::Some(&x);
/// assert_eq!(opt_x, COption::Some(&12));
/// let cloned = opt_x.cloned();
/// assert_eq!(cloned, COption::Some(12));
/// ```
pub fn cloned(self) -> COption<T> {
self.map(|t| t.clone())
}
}
impl<T: Clone> COption<&mut T> {
    /// Maps a `COption<&mut T>` to a `COption<T>` by cloning the contents of the
/// option.
///
/// # Examples
///
/// ```ignore
/// let mut x = 12;
/// let opt_x = COption::Some(&mut x);
/// assert_eq!(opt_x, COption::Some(&mut 12));
/// let cloned = opt_x.cloned();
/// assert_eq!(cloned, COption::Some(12));
/// ```
pub fn cloned(self) -> COption<T> {
self.map(|t| t.clone())
}
}
impl<T: Default> COption<T> {
/// Returns the contained value or a default
///
/// Consumes the `self` argument then, if [`COption::Some`], returns the contained
/// value, otherwise if [`COption::None`], returns the [default value] for that
/// type.
///
/// # Examples
///
/// Converts a string to an integer, turning poorly-formed strings
/// into 0 (the default value for integers). [`parse`] converts
/// a string to any other type that implements [`FromStr`], returning
/// [`COption::None`] on error.
///
/// ```ignore
/// let good_year_from_input = "1909";
/// let bad_year_from_input = "190blarg";
/// let good_year = good_year_from_input.parse().ok().unwrap_or_default();
/// let bad_year = bad_year_from_input.parse().ok().unwrap_or_default();
///
/// assert_eq!(1909, good_year);
/// assert_eq!(0, bad_year);
/// ```
///
    /// [`COption::Some`]: #variant.Some
    /// [`COption::None`]: #variant.None
/// [default value]: ../default/trait.Default.html#tymethod.default
/// [`parse`]: ../../std/primitive.str.html#method.parse
/// [`FromStr`]: ../../std/str/trait.FromStr.html
#[inline]
pub fn unwrap_or_default(self) -> T {
match self {
COption::Some(x) => x,
COption::None => T::default(),
}
}
}
impl<T: Deref> COption<T> {
/// Converts from `COption<T>` (or `&COption<T>`) to `COption<&T::Target>`.
///
/// Leaves the original COption in-place, creating a new one with a reference
/// to the original one, additionally coercing the contents via [`Deref`].
///
/// [`Deref`]: ../../std/ops/trait.Deref.html
///
/// # Examples
///
/// ```ignore
/// #![feature(inner_deref)]
///
/// let x: COption<String> = COption::Some("hey".to_owned());
/// assert_eq!(x.as_deref(), COption::Some("hey"));
///
/// let x: COption<String> = COption::None;
/// assert_eq!(x.as_deref(), COption::None);
/// ```
pub fn as_deref(&self) -> COption<&T::Target> {
self.as_ref().map(|t| t.deref())
}
}
impl<T: DerefMut> COption<T> {
/// Converts from `COption<T>` (or `&mut COption<T>`) to `COption<&mut T::Target>`.
///
/// Leaves the original `COption` in-place, creating a new one containing a mutable reference to
/// the inner type's `Deref::Target` type.
///
/// # Examples
///
/// ```ignore
/// #![feature(inner_deref)]
///
/// let mut x: COption<String> = COption::Some("hey".to_owned());
/// assert_eq!(x.as_deref_mut().map(|x| {
/// x.make_ascii_uppercase();
/// x
/// }), COption::Some("HEY".to_owned().as_mut_str()));
/// ```
pub fn as_deref_mut(&mut self) -> COption<&mut T::Target> {
self.as_mut().map(|t| t.deref_mut())
}
}
impl<T, E> COption<Result<T, E>> {
    /// Transposes a `COption` of a [`Result`] into a [`Result`] of a `COption`.
///
/// [`COption::None`] will be mapped to [`Ok`]`(`[`COption::None`]`)`.
/// [`COption::Some`]`(`[`Ok`]`(_))` and [`COption::Some`]`(`[`Err`]`(_))` will be mapped to
/// [`Ok`]`(`[`COption::Some`]`(_))` and [`Err`]`(_)`.
///
    /// [`COption::None`]: #variant.None
    /// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
    /// [`COption::Some`]: #variant.Some
/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
///
/// # Examples
///
/// ```ignore
/// #[derive(Debug, Eq, PartialEq)]
    /// struct SomeErr;
    ///
    /// let x: Result<COption<i32>, SomeErr> = Ok(COption::Some(5));
    /// let y: COption<Result<i32, SomeErr>> = COption::Some(Ok(5));
/// assert_eq!(x, y.transpose());
/// ```
#[inline]
pub fn transpose(self) -> Result<COption<T>, E> {
match self {
COption::Some(Ok(x)) => Ok(COption::Some(x)),
COption::Some(Err(e)) => Err(e),
COption::None => Ok(COption::None),
}
}
}
// This is a separate function to reduce the code size of .expect() itself.
#[inline(never)]
#[cold]
fn expect_failed(msg: &str) -> ! {
panic!("{}", msg)
}
// // This is a separate function to reduce the code size of .expect_none() itself.
// #[inline(never)]
// #[cold]
// fn expect_none_failed(msg: &str, value: &dyn fmt::Debug) -> ! {
// panic!("{}: {:?}", msg, value)
// }
/////////////////////////////////////////////////////////////////////////////
// Trait implementations
/////////////////////////////////////////////////////////////////////////////
impl<T: Clone> Clone for COption<T> {
#[inline]
fn clone(&self) -> Self {
match self {
COption::Some(x) => COption::Some(x.clone()),
COption::None => COption::None,
}
}
#[inline]
fn clone_from(&mut self, source: &Self) {
match (self, source) {
(COption::Some(to), COption::Some(from)) => to.clone_from(from),
(to, from) => *to = from.clone(),
}
}
}
impl<T> Default for COption<T> {
    /// Returns [`COption::None`].
///
/// # Examples
///
/// ```ignore
/// let opt: COption<u32> = COption::default();
/// assert!(opt.is_none());
/// ```
#[inline]
fn default() -> COption<T> {
COption::None
}
}
impl<T> From<T> for COption<T> {
fn from(val: T) -> COption<T> {
COption::Some(val)
}
}
impl<'a, T> From<&'a COption<T>> for COption<&'a T> {
fn from(o: &'a COption<T>) -> COption<&'a T> {
o.as_ref()
}
}
impl<'a, T> From<&'a mut COption<T>> for COption<&'a mut T> {
fn from(o: &'a mut COption<T>) -> COption<&'a mut T> {
o.as_mut()
}
}
impl<T> COption<COption<T>> {
/// Converts from `COption<COption<T>>` to `COption<T>`
///
/// # Examples
/// Basic usage:
/// ```ignore
/// #![feature(option_flattening)]
/// let x: COption<COption<u32>> = COption::Some(COption::Some(6));
/// assert_eq!(COption::Some(6), x.flatten());
///
/// let x: COption<COption<u32>> = COption::Some(COption::None);
/// assert_eq!(COption::None, x.flatten());
///
/// let x: COption<COption<u32>> = COption::None;
/// assert_eq!(COption::None, x.flatten());
/// ```
/// Flattening once only removes one level of nesting:
/// ```ignore
/// #![feature(option_flattening)]
/// let x: COption<COption<COption<u32>>> = COption::Some(COption::Some(COption::Some(6)));
/// assert_eq!(COption::Some(COption::Some(6)), x.flatten());
/// assert_eq!(COption::Some(6), x.flatten().flatten());
/// ```
#[inline]
pub fn flatten(self) -> COption<T> {
self.and_then(convert::identity)
}
}
impl<T> From<Option<T>> for COption<T> {
fn from(option: Option<T>) -> Self {
match option {
Some(value) => COption::Some(value),
None => COption::None,
}
}
}
#[rustversion::since(1.49.0)]
impl<T> From<COption<T>> for Option<T> {
fn from(coption: COption<T>) -> Self {
match coption {
COption::Some(value) => Some(value),
COption::None => None,
}
}
}
#[rustversion::before(1.49.0)] // Remove `Into` once the BPF toolchain upgrades to 1.49.0 or newer
impl<T> Into<Option<T>> for COption<T> {
fn into(self) -> Option<T> {
match self {
COption::Some(value) => Some(value),
COption::None => None,
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_from_rust_option() {
let option = Some(99u64);
let c_option: COption<u64> = option.into();
assert_eq!(c_option, COption::Some(99u64));
let expected = c_option.into();
assert_eq!(option, expected);
let option = None;
let c_option: COption<u64> = option.into();
assert_eq!(c_option, COption::None);
let expected = c_option.into();
assert_eq!(option, expected);
}
}
| 31.44254 | 125 | 0.494886 |
f9bd2bf2f7a5f601d6edef5d8e66ff49ec4083ab | 17,861 | use failure::{Error, ResultExt};
use flate2::read::GzDecoder;
use git2::{BranchType, Repository, Sort};
use package::Checksum;
use reqwest::Client;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
use sha2::{Digest, Sha256};
use std::{fmt, fs, io::BufReader, path::PathBuf, str::FromStr};
use tar::Archive;
use url::Url;
use util::{
clear_dir,
errors::{ErrorKind, Res},
git::{clone, fetch, reset, update_submodules},
hexify_hash,
lock::DirLock,
};
/// The possible places from which a package can be resolved.
///
/// There are two main sources from which a package can originate: a Direct source (a local
/// path, a tarball URL, or a git repo) and an Index (an indirect source which accrues
/// metadata about Direct sources).
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Resolution {
Direct(DirectRes),
Index(IndexRes),
}
impl From<DirectRes> for Resolution {
fn from(i: DirectRes) -> Self {
Resolution::Direct(i)
}
}
impl From<IndexRes> for Resolution {
fn from(i: IndexRes) -> Self {
Resolution::Index(i)
}
}
impl FromStr for Resolution {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let direct = DirectRes::from_str(s);
if direct.is_ok() {
direct.map(Resolution::Direct)
} else {
IndexRes::from_str(s).map(Resolution::Index)
}
}
}
impl fmt::Display for Resolution {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Resolution::Direct(d) => write!(f, "{}", d),
Resolution::Index(i) => write!(f, "{}", i),
}
}
}
impl Serialize for Resolution {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> Deserialize<'de> for Resolution {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let s = String::deserialize(deserializer)?;
FromStr::from_str(&s).map_err(de::Error::custom)
}
}
impl Resolution {
pub fn direct(&self) -> Option<&DirectRes> {
if let Resolution::Direct(d) = &self {
Some(&d)
} else {
None
}
}
pub fn is_tar(&self) -> bool {
if let Resolution::Direct(d) = &self {
d.is_tar()
} else {
false
}
}
pub fn is_git(&self) -> bool {
if let Resolution::Direct(d) = &self {
d.is_git()
} else {
false
}
}
pub fn is_dir(&self) -> bool {
if let Resolution::Direct(d) = &self {
d.is_dir()
} else {
false
}
}
pub fn lowkey_eq(&self, other: &Resolution) -> bool {
match (self, other) {
(Resolution::Direct(d), Resolution::Direct(d2)) => d.lowkey_eq(d2),
(Resolution::Index(i), Resolution::Index(i2)) => i == i2,
(_, _) => false,
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum DirectRes {
/// Git: the package originated from a git repository.
Git { repo: Url, tag: String },
/// Dir: the package is on disk in a folder directory.
Dir { path: PathBuf },
/// Tar: the package is an archive stored somewhere.
///
    /// Tarballs are the only direct resolution which is allowed to have a checksum; this doesn't
    /// really make sense for DirectRes::Dir, and we leave validation of repositories to Git
    /// itself. Checksums are stored in the fragment of the resolution url, with the key being the
    /// checksum format.
Tar { url: Url, cksum: Option<Checksum> },
}
impl DirectRes {
pub fn lowkey_eq(&self, other: &DirectRes) -> bool {
match (self, other) {
(DirectRes::Git { repo: r1, .. }, DirectRes::Git { repo: r2, .. }) => r1 == r2,
(DirectRes::Dir { path: p1 }, DirectRes::Dir { path: p2 }) => p1 == p2,
(DirectRes::Tar { url: u1, cksum: c1 }, DirectRes::Tar { url: u2, cksum: c2 }) => {
u1 == u2 && c1 == c2
}
_ => false,
}
}
}
impl DirectRes {
pub fn retrieve(
&self,
client: &Client,
target: &DirLock,
eager: bool,
dl_f: impl Fn(bool) -> Res<()>,
) -> Result<Option<DirectRes>, Error> {
match self {
DirectRes::Tar { url, cksum } => match url.scheme() {
"http" | "https" => {
dl_f(true)?;
client
.get(url.clone())
.send()
.map_err(|_| Error::from(ErrorKind::CannotDownload))
.and_then(|mut r| {
let mut buf: Vec<u8> = vec![];
r.copy_to(&mut buf).context(ErrorKind::CannotDownload)?;
let hash = hexify_hash(Sha256::digest(&buf[..]).as_slice());
if let Some(cksum) = cksum {
if cksum.hash != hash {
return Err(format_err!(
"tarball checksum doesn't match real checksum"
))?;
}
}
let archive = BufReader::new(&buf[..]);
let archive = GzDecoder::new(archive);
let mut archive = Archive::new(archive);
clear_dir(target.path())?;
archive
.unpack(target.path())
.context(ErrorKind::CannotDownload)?;
Ok(None)
})
}
"file" => {
dl_f(false)?;
let mut archive =
fs::File::open(target.path()).context(ErrorKind::CannotDownload)?;
let hash = hexify_hash(
Sha256::digest_reader(&mut archive)
.context(ErrorKind::CannotDownload)?
.as_slice(),
);
if let Some(cksum) = cksum {
if cksum.hash != hash {
                            return Err(format_err!(
                                "tarball checksum mismatch: expected {}, got {}",
                                cksum.hash,
                                hash
                            ))?;
}
}
let archive = BufReader::new(archive);
let archive = GzDecoder::new(archive);
let mut archive = Archive::new(archive);
clear_dir(target.path())?;
archive
.unpack(target.path())
.context(ErrorKind::CannotDownload)?;
Ok(None)
}
                _ => unreachable!("tar url schemes are validated to http(s)/file in FromStr"),
},
DirectRes::Git { repo: url, tag } => {
                // If we find a directory which already has a repo, we just check out the correct
                // version of it. Whether or not a new dir is created isn't our job; that's for the
                // Cache. If the Cache points to a directory that already exists, the branch
                // metadata and the like are irrelevant.
let repo = Repository::open(target.path());
let repo = match repo {
Ok(r) => {
let mut repo = r;
                        // This logic handles the case where we are pointed at an existing git
                        // repository. We only skip updating it when `eager` is false, and we
                        // assume that the HEAD of the repo is at the current "locked" state.
                        //
                        // First, if the tag names a local branch:
if !eager {
if let Ok(b) = repo.find_branch(&tag, BranchType::Local) {
let head = b.into_reference().resolve()?.peel_to_commit()?;
let cur = repo.head()?.resolve()?.peel_to_commit()?;
let mut revwalk = repo.revwalk()?;
revwalk.push(head.id())?;
revwalk.set_sorting(Sort::TOPOLOGICAL);
if revwalk.any(|x| x == Ok(cur.id())) {
if &cur.id().to_string() == tag {
return Ok(None);
} else {
return Ok(Some(DirectRes::Git {
repo: url.clone(),
tag: cur.id().to_string(),
}));
}
}
}
                            // Otherwise, if the tag is an exact pointer to a commit, we try to
                            // check it out locally without fetching anything.
let target =
repo.revparse_single(&tag).and_then(|x| x.peel_to_commit());
let cur = repo
.head()
.and_then(|x| x.resolve())
.and_then(|x| x.peel_to_commit());
if let Ok(t) = target {
if let Ok(c) = cur {
if t.id() == c.id() {
if tag == &c.id().to_string() {
return Ok(None);
} else {
return Ok(Some(DirectRes::Git {
repo: url.clone(),
tag: c.id().to_string(),
}));
}
} else {
                                        // Because we know the other tag exists in our local copy
                                        // of the repo, we can just check it out and return.
let obj = t.into_object().clone();
reset(&repo, &obj).with_context(|e| {
format_err!(
"couldn't checkout commit {}: {}",
obj.id(),
e
)
})?;
if tag == &obj.id().to_string() {
return Ok(None);
} else {
return Ok(Some(DirectRes::Git {
repo: url.clone(),
tag: obj.id().to_string(),
}));
}
}
}
}
}
// Get everything!!
dl_f(true)?;
let refspec = "refs/heads/*:refs/heads/*";
fetch(&mut repo, &url, refspec).with_context(|e| {
format_err!("couldn't fetch git repo {}: {}", url, e)
})?;
repo
}
Err(_) => {
clear_dir(target.path())?;
dl_f(true)?;
                    clone(url, target.path()).with_context(|e| {
                        format_err!("couldn't clone git repo {}:\n{}", url, e)
                    })?
}
};
let obj = repo
.revparse_single(&tag)
.context(ErrorKind::CannotDownload)?;
            reset(&repo, &obj)
                .with_context(|e| format_err!("couldn't check out git repo {}:\n{}", url, e))?;
update_submodules(&repo).with_context(|e| {
format_err!("couldn't update submodules for git repo {}:\n{}", url, e)
})?;
let id = obj.peel_to_commit()?.id().to_string();
Ok(Some(DirectRes::Git {
repo: url.clone(),
tag: id,
}))
}
DirectRes::Dir { path } => {
// If this package is located on disk, we don't have to do anything...
dl_f(false)?;
if path.exists() {
Ok(None)
} else {
Err(format_err!("can't find directory {}", path.display()))?
}
}
}
}
    pub fn is_tar(&self) -> bool {
        matches!(self, DirectRes::Tar { .. })
    }
    pub fn is_git(&self) -> bool {
        matches!(self, DirectRes::Git { .. })
    }
    pub fn is_dir(&self) -> bool {
        matches!(self, DirectRes::Dir { .. })
    }
}
impl FromStr for DirectRes {
type Err = Error;
fn from_str(url: &str) -> Result<Self, Self::Err> {
let mut parts = url.splitn(2, '+');
let utype = parts.next().unwrap();
        let rest = parts.next().ok_or(ErrorKind::InvalidSourceUrl)?;
match utype {
"git" => {
let mut url = Url::parse(rest).context(ErrorKind::InvalidSourceUrl)?;
                let tag = url.fragment().unwrap_or("master").to_owned();
url.set_fragment(None);
Ok(DirectRes::Git { repo: url, tag })
}
"dir" => {
let path = PathBuf::from(rest);
Ok(DirectRes::Dir { path })
}
"tar" => {
let mut url = Url::parse(rest).context(ErrorKind::InvalidSourceUrl)?;
if url.scheme() != "http" && url.scheme() != "https" && url.scheme() != "file" {
return Err(ErrorKind::InvalidSourceUrl)?;
}
let cksum = url.fragment().and_then(|x| Checksum::from_str(x).ok());
url.set_fragment(None);
Ok(DirectRes::Tar { url, cksum })
}
_ => Err(ErrorKind::InvalidSourceUrl)?,
}
}
}
impl fmt::Display for DirectRes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DirectRes::Git { repo, tag } => write!(f, "git+{}#{}", repo, tag),
DirectRes::Dir { path } => write!(f, "dir+{}", path.display()),
            DirectRes::Tar { url, cksum } => {
                write!(f, "tar+{}", url)?;
                if let Some(cksum) = cksum {
                    write!(f, "#{}", cksum)?;
                }
                Ok(())
            }
}
}
}
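// Round-trip sketch: `Display` and `FromStr` are intended to be inverses for
// direct resolutions. The urls are hypothetical; note the git tag is spelled
// explicitly, since a fragment-less git url would gain "#master" on the way
// back out.
#[cfg(test)]
mod direct_res_roundtrip_example {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn display_and_from_str_round_trip() {
        for s in &[
            "git+https://example.com/repo.git#master",
            "dir+/tmp/some-package",
            "tar+https://example.com/pkg.tar.gz",
        ] {
            let parsed = DirectRes::from_str(s).unwrap();
            assert_eq!(parsed.to_string(), *s);
        }
    }
}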
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct IndexRes {
pub res: DirectRes,
}
impl From<DirectRes> for IndexRes {
fn from(d: DirectRes) -> Self {
IndexRes { res: d }
}
}
impl From<IndexRes> for DirectRes {
fn from(i: IndexRes) -> Self {
i.res
}
}
impl FromStr for IndexRes {
type Err = Error;
fn from_str(url: &str) -> Result<Self, Self::Err> {
let mut parts = url.splitn(2, '+');
let utype = parts.next().unwrap();
        let url = parts.next().ok_or(ErrorKind::InvalidSourceUrl)?;
match utype {
"index" => {
let res = DirectRes::from_str(url).context(ErrorKind::InvalidSourceUrl)?;
Ok(IndexRes { res })
}
_ => Err(ErrorKind::InvalidSourceUrl)?,
}
}
}
impl fmt::Display for IndexRes {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "index+{}", self.res)
    }
}
impl Serialize for DirectRes {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> Deserialize<'de> for DirectRes {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let s = String::deserialize(deserializer)?;
FromStr::from_str(&s).map_err(de::Error::custom)
}
}
impl Serialize for IndexRes {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> Deserialize<'de> for IndexRes {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let s = String::deserialize(deserializer)?;
FromStr::from_str(&s).map_err(de::Error::custom)
}
}
| 35.368317 | 108 | 0.423493 |
0e31d26b3b687a53e0cfc8dc0341e55f989360bc | 5,667 | use quicksilver::prelude::*;
use quicksilver::lifecycle::Asset;
use specs::prelude::*;
use crate::color::{Palette};
use crate::game::level::{Level};
use crate::game::level_gen;
use crate::game::fov::Fov;
use crate::game::system::{GameActor, GameActionQueue, GameAction, GameActionType, GameEventQueue, GameEvent};
use crate::game::events::{Time};
use crate::game::ecs::{Position, Rect};
use crate::game::factions::Factions;
pub enum InputMode {
Edit,
Play,
Look,
}
pub struct GameText {
pub font: Font,
pub title: String,
pub mononoki_info: String,
pub square_info: Image,
pub inventory: Image,
}
impl GameText {
pub fn load() -> Asset<GameText> {
// The Mononoki font: https://madmalik.github.io/mononoki/
// License: SIL Open Font License 1.1
let font_mononoki = "mononoki-Regular.ttf";
let font_mononoki = Font::load(font_mononoki);
Asset::new(font_mononoki.and_then(|font| {
let title = "Quicksilver Roguelike".to_string();
let mononoki_info = "Mononoki font by Matthias Tellen, terms: SIL Open Font License 1.1".to_string();
let square_info = font.render(
"Square font by Wouter Van Oortmerssen, terms: CC BY 3.0",
&FontStyle::new(20.0, Color::BLACK),
)?;
let inventory = font.render(
"Inventory:\n[A] Sword\n[B] Shield\n[C] Darts",
&FontStyle::new(20.0, Color::BLACK),
)?;
Ok(GameText {
font,
title,
mononoki_info,
square_info,
inventory,
})
}))
}
}
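// Hypothetical usage sketch (quicksilver assets load lazily; the closure
// passed to `Asset::execute` only runs once loading has finished):
//
//     let mut text = GameText::load();
//     text.execute(|t| {
//         // draw t.inventory / t.square_info, render t.title with t.font, ...
//         Ok(())
//     })?;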
pub struct Data {
pub level: Level,
pub fov: Fov,
pub player: Entity,
pub turn: Option<GameActor>,
pub player_turns: u32,
pub time: Time,
pub event_queue: GameEventQueue,
pub action_queue: GameActionQueue,
pub stop: bool,
pub cursor: Option<Position>,
pub input_mode: InputMode,
pub palette: Palette,
pub factions: Factions,
}
impl Data {
pub fn new(world: &mut World) -> Self {
let mut level = Level::empty(Rect::new_sized(40, 30));
let palette = Palette::new();
let entities = level_gen::make_map(&palette, &mut level, world);
let mut fov = Fov::new(&level);
let player = level_gen::create_player(&palette, &mut level, &mut fov, world);
let mut data = Data {
level,
fov,
player,
turn: None,
player_turns: 0,
time: Time::default(),
event_queue: GameEventQueue::default(),
action_queue: GameActionQueue::default(),
stop: false,
cursor: None,
input_mode: InputMode::Play,
            palette,
factions: Factions::new(),
};
data.new_turn(GameActor::Player(player));
for entity in entities {
data.schedule_turn(Time::new(1, 0), GameActor::NonPlayer(entity));
}
data
}
pub fn end_turn(&mut self, actor: GameActor) {
self.turn = None;
self.action_queue.clear();
debug!(
"[{:?}] end turn: {} for {:?}", self.time, self.player_turns, actor
);
}
pub fn actor_turn(&self) -> Option<GameActor> {
self.turn.clone()
}
pub fn turns(&self) -> u32 {
self.player_turns
}
pub fn new_turn(&mut self, actor: GameActor) {
        if let GameActor::Player(_) = actor {
            self.player_turns += 1;
        }
self.turn = Some(actor);
debug!(
"[{:?}] new turn: {} for: {:?}", self.time, self.player_turns, actor
);
}
pub fn schedule_turn(&mut self, delay: Time, actor: GameActor) {
debug!(
"[{:?}] schedule actor turn in: {} for: {:?}", self.time, delay, actor
);
self.event_after(delay, GameEvent::Turn(actor));
}
pub fn finish(&mut self) {
debug!("[{:?}] stop", self.time);
self.stop = true;
}
pub fn is_finished(&self) -> bool {
self.stop
}
pub fn action(&mut self, actor: GameActor, action_type: GameActionType) {
self.action_queue.push(GameAction {
            actor,
turn: self.turns(),
action: action_type,
});
}
pub fn next_action(&mut self) -> Option<GameAction> {
if self.action_queue.is_empty() {
None
} else {
Some(self.action_queue.remove(0))
}
}
pub fn event(&mut self, event: GameEvent) {
self.event_after(Time::default(), event);
}
pub fn event_after(&mut self, delay: Time, event: GameEvent) {
self.event_at(self.time + delay, event);
}
pub fn event_at(&mut self, at: Time, event: GameEvent) {
debug!(
"[{:?}] schedule at {}: {:?}", self.time, at, event
);
self.event_queue.add(at, event);
}
pub fn next_event(&mut self) -> Option<(Time, GameEvent)> {
if let Some((time, event)) = self.event_queue.next() {
self.time = time;
Some((time, event))
} else {
None
}
}
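    // Hypothetical driver sketch (the real loop presumably lives in the
    // `system` module): the scheduler is pumped by draining events and
    // translating `GameEvent::Turn` into new turns, e.g.
    //
    //     while let Some((_, event)) = data.next_event() {
    //         if let GameEvent::Turn(actor) = event {
    //             data.new_turn(actor);
    //             // ... gather and apply actions for `actor` ...
    //             data.end_turn(actor);
    //         }
    //     }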
pub fn look_mode(&mut self, cursor: Position, _path: Option<Vec<Position>>) {
debug!("[{:?}] look at: {:?}", self.time, cursor);
self.input_mode = InputMode::Look;
        self.cursor = Some(cursor);
}
pub fn play_mode(&mut self) {
self.input_mode = InputMode::Play;
self.cursor = None;
}
}
| 27.245192 | 113 | 0.541733 |
8a211048756f5149c3bf3acc33bbf0c0f1a9d6b4 | 1,259 | /*
* Copyright 2017 Sreejith Krishnan R
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::ops::{Add, Sub};
#[derive(Debug, PartialEq, Clone)]
pub struct Point {
x: f32,
y: f32
}
impl Point {
pub fn new(x: f32, y: f32) -> Point {
Point { x, y }
}
pub fn x(&self) -> f32 {
self.x
}
pub fn y(&self) -> f32 {
self.y
}
}
impl<'a, 'b> Add<&'b Point> for &'a Point {
type Output = Point;
fn add(self, rhs: &'b Point) -> Self::Output {
        Point { x: self.x + rhs.x, y: self.y + rhs.y }
}
}
impl<'a, 'b> Sub<&'b Point> for &'a Point {
type Output = Point;
fn sub(self, rhs: &'b Point) -> Self::Output {
        Point { x: self.x - rhs.x, y: self.y - rhs.y }
}
} | 22.890909 | 75 | 0.607625 |
795e8f4fdcf667b33b0009cab0c26889a002c2d1 | 4,536 | use std::fmt;
struct ANSIString {
color_code: Option<&'static str>,
s: String,
}
impl ANSIString {
pub fn new<S: Into<String>>(color_code: &'static str, s: S) -> Self {
Self {
color_code: Some(color_code),
s: s.into(),
}
}
    // Construct without setting any color code; Display emits the bare string.
pub fn from<S: Into<String>>(s: S) -> Self {
Self {
color_code: None,
s: s.into(),
}
}
}
impl fmt::Display for ANSIString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(color_code) = self.color_code {
write!(
f,
"{}{}{}",
color_code,
&self.s,
classicube_helpers::color::WHITE
)
} else {
            write!(f, "{}", self.s)
}
}
}
enum Color {
Green,
RedBold,
Yellow,
}
impl Color {
pub fn paint(&self, s: &str) -> ANSIString {
let color_code = match self {
Color::Green => classicube_helpers::color::LIME,
Color::RedBold => classicube_helpers::color::RED,
Color::Yellow => classicube_helpers::color::YELLOW,
};
ANSIString::new(color_code, s)
}
}
#[doc(hidden)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum ColorWhen {
Auto,
Always,
Never,
}
#[doc(hidden)]
pub struct ColorizerOption {
pub use_stderr: bool,
pub when: ColorWhen,
}
#[doc(hidden)]
pub struct Colorizer {
when: ColorWhen,
}
macro_rules! color {
    ($_self:ident, $c:ident, $m:expr) => {
        match $_self.when {
            // NOTE: `Auto` currently behaves exactly like `Always`; no tty
            // detection happens at this level.
            ColorWhen::Auto => Format::$c($m),
            ColorWhen::Always => Format::$c($m),
            ColorWhen::Never => Format::None($m),
        }
    };
}
impl Colorizer {
    // NOTE: `use_stderr` is currently ignored; only `when` is stored.
    pub fn new(option: ColorizerOption) -> Colorizer { Colorizer { when: option.when } }
pub fn good<T>(&self, msg: T) -> Format<T>
where
T: fmt::Display + AsRef<str>,
{
debugln!("Colorizer::good;");
color!(self, Good, msg)
}
pub fn warning<T>(&self, msg: T) -> Format<T>
where
T: fmt::Display + AsRef<str>,
{
debugln!("Colorizer::warning;");
color!(self, Warning, msg)
}
pub fn error<T>(&self, msg: T) -> Format<T>
where
T: fmt::Display + AsRef<str>,
{
debugln!("Colorizer::error;");
color!(self, Error, msg)
}
pub fn none<T>(&self, msg: T) -> Format<T>
where
T: fmt::Display + AsRef<str>,
{
debugln!("Colorizer::none;");
Format::None(msg)
}
}
impl Default for Colorizer {
fn default() -> Self {
Colorizer::new(ColorizerOption {
use_stderr: true,
when: ColorWhen::Auto,
})
}
}
/// Defines styles for different types of error messages. Defaults to Error=Red, Warning=Yellow,
/// and Good=Green
#[derive(Debug)]
#[doc(hidden)]
pub enum Format<T> {
/// Defines the style used for errors, defaults to Red
Error(T),
/// Defines the style used for warnings, defaults to Yellow
Warning(T),
/// Defines the style used for good values, defaults to Green
Good(T),
/// Defines no formatting style
None(T),
}
impl<T: AsRef<str>> Format<T> {
fn format(&self) -> ANSIString {
match *self {
Format::Error(ref e) => Color::RedBold.paint(e.as_ref()),
Format::Warning(ref e) => Color::Yellow.paint(e.as_ref()),
Format::Good(ref e) => Color::Green.paint(e.as_ref()),
Format::None(ref e) => ANSIString::from(e.as_ref()),
}
}
}
impl<T: AsRef<str>> fmt::Display for Format<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.format()) }
}
#[cfg(all(test, feature = "color"))]
mod test {
use super::*;
#[test]
fn colored_output() {
let err = Format::Error("error");
assert_eq!(
&*format!("{}", err),
&*format!("{}", Color::RedBold.paint("error"))
);
let good = Format::Good("good");
assert_eq!(
&*format!("{}", good),
&*format!("{}", Color::Green.paint("good"))
);
let warn = Format::Warning("warn");
assert_eq!(
&*format!("{}", warn),
&*format!("{}", Color::Yellow.paint("warn"))
);
let none = Format::None("none");
assert_eq!(
&*format!("{}", none),
&*format!("{}", ANSIString::from("none"))
);
}
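    // Sketch of the `ColorWhen::Never` path: the colorizer falls back to
    // `Format::None`, so output is the bare string with no color codes.
    #[test]
    fn never_colorize() {
        let c = Colorizer::new(ColorizerOption {
            use_stderr: true,
            when: ColorWhen::Never,
        });
        assert_eq!(format!("{}", c.error("boom")), "boom");
    }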
}
| 24.12766 | 96 | 0.511684 |
e4722021271b4302904a2485a3cc5d5597842be6 | 889 | use serde_with::SerializeDisplay;
use strum_macros::{Display, EnumString};
#[derive(Debug, Display, EnumString, SerializeDisplay)]
#[strum(serialize_all = "snake_case")]
pub enum OrderStatus {
Unfilled,
PartiallyFilled,
FullyFilled,
CanceledUnfilled,
CanceledPartiallyFilled,
}
#[cfg(test)]
mod tests {
use crate::OrderStatus;
use std::str::FromStr;
#[test]
fn string_to_enum() {
        assert!(matches!(
            OrderStatus::from_str("fully_filled").unwrap(),
            OrderStatus::FullyFilled
        ));
}
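    // strum's `EnumString` derive rejects strings that match no variant, so
    // parsing an unknown status is an error rather than a silent default.
    #[test]
    fn unknown_string_is_error() {
        assert!(OrderStatus::from_str("bogus").is_err());
    }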
#[test]
fn enum_to_string() {
assert_eq!(
OrderStatus::FullyFilled.to_string(),
String::from("fully_filled")
);
}
#[test]
fn display_enum() {
assert_eq!(
format!("{}", OrderStatus::FullyFilled),
String::from("fully_filled")
);
}
}
| 20.674419 | 59 | 0.583802 |
212e3b13af2ddeb7720e901670ed02c8a884cba0 | 1,356 | fn main() {
let mut v: Vec<String> = Vec::new();
v.push(String::from("Cofi"));
v.push(String::from("Draga"));
v.push(String::from("Johnny"));
    let v1 = vec![1, 2, 3, 4, 5];
    let third = &v1[2]; // panics if the index is out of bounds
    let second = v1.get(1); // returns Option<&i32> instead of panicking
    println!("third = {}, second = {:?}", third, second);
    let mut no_element = v1.get(20); // index 20 is out of range, so this is None
    println!("{}", no_element.get_or_insert(&0)); // falls back to &0 and prints 0
    let mut v2 = vec![1, 2, 3, 4, 5];
    // This wouldn't compile here: holding the immutable borrow `first` across
    // the mutable borrow in `v2.push(6)` breaks the borrowing rules, because
    // `first` would still be used by the println below.
    // let first = &v2[0];
    v2.push(6);
    // Taking the reference *after* the push is fine.
    let first = &v2[0];
    println!("The first element is: {}", first);
for i in &v2 {
print!("{} ", i);
}
println!();
for i in &mut v2 {
*i += 50;
}
println!();
for i in &v2 {
print!("{} ", i);
}
println!();
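    // An enum lets a single Vec hold values of several different types, since
    // every variant shares the one enum type.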
    enum SpreadsheetCell {
        Int(i32),
        Float(f64),
        Text(String)
    }
    let enum_vector = vec![
        SpreadsheetCell::Int(3),
        SpreadsheetCell::Float(10.33),
        SpreadsheetCell::Text(String::from("Woah !"))
    ];
    for i in enum_vector {
        match i {
            SpreadsheetCell::Int(n) => println!("Just an int: {}", n),
            SpreadsheetCell::Float(x) => println!("Just a float: {}", x),
            SpreadsheetCell::Text(s) => println!("Just a text: {}", s)
        }
    }
}
| 17.384615 | 65 | 0.493363 |