hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
c10d68d5f5886c0d7efdfd34272a8cdc65675a6c
5,990
use crate::{ ffi, AsPyPointer, FromPyObject, IntoPy, Py, PyAny, PyObject, PyResult, PyTryFrom, Python, ToPyObject, }; use std::ops::Index; use std::os::raw::c_char; use std::slice::SliceIndex; use std::str; /// Represents a Python `bytes` object. /// /// This type is immutable. #[repr(transparent)] pub struct PyBytes(PyAny); pyobject_native_type_core!(PyBytes, ffi::PyBytes_Type, #checkfunction=ffi::PyBytes_Check); impl PyBytes { /// Creates a new Python bytestring object. /// The bytestring is initialized by copying the data from the `&[u8]`. /// /// Panics if out of memory. pub fn new<'p>(py: Python<'p>, s: &[u8]) -> &'p PyBytes { let ptr = s.as_ptr() as *const c_char; let len = s.len() as ffi::Py_ssize_t; unsafe { py.from_owned_ptr(ffi::PyBytes_FromStringAndSize(ptr, len)) } } /// Creates a new Python `bytes` object with an `init` closure to write its contents. /// Before calling `init` the bytes' contents are zero-initialised. /// * If Python raises a MemoryError on the allocation, `new_with` will return /// it inside `Err`. /// * If `init` returns `Err(e)`, `new_with` will return `Err(e)`. /// * If `init` returns `Ok(())`, `new_with` will return `Ok(&PyBytes)`. 
/// /// # Examples /// ``` /// use pyo3::{prelude::*, types::PyBytes}; /// Python::with_gil(|py| -> PyResult<()> { /// let py_bytes = PyBytes::new_with(py, 10, |bytes: &mut [u8]| { /// bytes.copy_from_slice(b"Hello Rust"); /// Ok(()) /// })?; /// let bytes: &[u8] = FromPyObject::extract(py_bytes)?; /// assert_eq!(bytes, b"Hello Rust"); /// Ok(()) /// }); /// ``` pub fn new_with<F>(py: Python, len: usize, init: F) -> PyResult<&PyBytes> where F: FnOnce(&mut [u8]) -> PyResult<()>, { unsafe { let pyptr = ffi::PyBytes_FromStringAndSize(std::ptr::null(), len as ffi::Py_ssize_t); // Check for an allocation error and return it let pypybytes: Py<PyBytes> = Py::from_owned_ptr_or_err(py, pyptr)?; let buffer = ffi::PyBytes_AsString(pyptr) as *mut u8; debug_assert!(!buffer.is_null()); // Zero-initialise the uninitialised bytestring std::ptr::write_bytes(buffer, 0u8, len); // (Further) Initialise the bytestring in init // If init returns an Err, pypybytearray will automatically deallocate the buffer init(std::slice::from_raw_parts_mut(buffer, len)).map(|_| pypybytes.into_ref(py)) } } /// Creates a new Python byte string object from a raw pointer and length. /// /// Panics if out of memory. /// /// # Safety /// /// This function dereferences the raw pointer `ptr` as the /// leading pointer of a slice of length `len`. [As with /// `std::slice::from_raw_parts`, this is /// unsafe](https://doc.rust-lang.org/std/slice/fn.from_raw_parts.html#safety). pub unsafe fn from_ptr(py: Python<'_>, ptr: *const u8, len: usize) -> &PyBytes { py.from_owned_ptr(ffi::PyBytes_FromStringAndSize( ptr as *const _, len as isize, )) } /// Gets the Python string as a byte slice. #[inline] pub fn as_bytes(&self) -> &[u8] { unsafe { let buffer = ffi::PyBytes_AsString(self.as_ptr()) as *const u8; let length = ffi::PyBytes_Size(self.as_ptr()) as usize; debug_assert!(!buffer.is_null()); std::slice::from_raw_parts(buffer, length) } } } /// This is the same way [Vec] is indexed. 
impl<I: SliceIndex<[u8]>> Index<I> for PyBytes { type Output = I::Output; fn index(&self, index: I) -> &Self::Output { &self.as_bytes()[index] } } impl<'a> IntoPy<PyObject> for &'a [u8] { fn into_py(self, py: Python) -> PyObject { PyBytes::new(py, self).to_object(py) } } impl<'a> FromPyObject<'a> for &'a [u8] { fn extract(obj: &'a PyAny) -> PyResult<Self> { Ok(<PyBytes as PyTryFrom>::try_from(obj)?.as_bytes()) } } #[cfg(test)] mod tests { use super::PyBytes; use crate::FromPyObject; use crate::Python; #[test] fn test_extract_bytes() { let gil = Python::acquire_gil(); let py = gil.python(); let py_bytes = py.eval("b'Hello Python'", None, None).unwrap(); let bytes: &[u8] = FromPyObject::extract(py_bytes).unwrap(); assert_eq!(bytes, b"Hello Python"); } #[test] fn test_bytes_index() { let gil = Python::acquire_gil(); let py = gil.python(); let bytes = PyBytes::new(py, b"Hello World"); assert_eq!(bytes[1], b'e'); } #[test] fn test_bytes_new_with() -> super::PyResult<()> { let gil = Python::acquire_gil(); let py = gil.python(); let py_bytes = PyBytes::new_with(py, 10, |b: &mut [u8]| { b.copy_from_slice(b"Hello Rust"); Ok(()) })?; let bytes: &[u8] = FromPyObject::extract(py_bytes)?; assert_eq!(bytes, b"Hello Rust"); Ok(()) } #[test] fn test_bytes_new_with_zero_initialised() -> super::PyResult<()> { let gil = Python::acquire_gil(); let py = gil.python(); let py_bytes = PyBytes::new_with(py, 10, |_b: &mut [u8]| Ok(()))?; let bytes: &[u8] = FromPyObject::extract(py_bytes)?; assert_eq!(bytes, &[0; 10]); Ok(()) } #[test] fn test_bytes_new_with_error() { use crate::exceptions::PyValueError; let gil = Python::acquire_gil(); let py = gil.python(); let py_bytes_result = PyBytes::new_with(py, 10, |_b: &mut [u8]| { Err(PyValueError::new_err("Hello Crustaceans!")) }); assert!(py_bytes_result.is_err()); assert!(py_bytes_result .err() .unwrap() .is_instance::<PyValueError>(py)); } }
33.651685
97
0.570618
48cabf9a7a0fb71b7cda3a48ef91ee9e4cc058e2
2,643
#![allow(dead_code)] // Adapted from Druid data.rs // Copyright 2019 The Druid Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use quote::{quote, quote_spanned}; use syn::{spanned::Spanned, Data, DataEnum, DataStruct}; pub(crate) fn derive_data_impl( input: syn::DeriveInput, ) -> Result<proc_macro2::TokenStream, syn::Error> { match &input.data { Data::Struct(s) => derive_struct(&input, s), Data::Enum(e) => derive_enum(&input, e), Data::Union(u) => Err(syn::Error::new( u.union_token.span(), "Data implementations cannot be derived from unions", )), } } fn derive_struct( input: &syn::DeriveInput, _s: &DataStruct, ) -> Result<proc_macro2::TokenStream, syn::Error> { let ident = &input.ident; let impl_generics = generics_bounds(&input.generics); let (_, ty_generics, where_clause) = &input.generics.split_for_impl(); let res = quote! { impl<#impl_generics> tuix::Node for #ident #ty_generics #where_clause { } }; Ok(res) } fn derive_enum( input: &syn::DeriveInput, _s: &DataEnum, ) -> Result<proc_macro2::TokenStream, syn::Error> { let ident = &input.ident; let impl_generics = generics_bounds(&input.generics); let (_, ty_generics, where_clause) = &input.generics.split_for_impl(); let res = quote! 
{ impl<#impl_generics> tuix::Node for #ident #ty_generics #where_clause { } }; Ok(res) } fn generics_bounds(generics: &syn::Generics) -> proc_macro2::TokenStream { let res = generics.params.iter().map(|gp| { use syn::GenericParam::*; match gp { Type(ty) => { let ident = &ty.ident; let bounds = &ty.bounds; if bounds.is_empty() { quote_spanned!(ty.span()=> #ident : tuix::Node) } else { quote_spanned!(ty.span()=> #ident : #bounds + tuix::Node) } } Lifetime(lf) => quote!(#lf), Const(cst) => quote!(#cst), } }); quote!( #( #res, )* ) }
29.043956
79
0.605373
915a82d351b55f769f031399f949b11e247ce713
24,233
#[doc = "Reader of register SW_PAD_CTL_PAD_GPIO_EMC_02"] pub type R = crate::R<u32, super::SW_PAD_CTL_PAD_GPIO_EMC_02>; #[doc = "Writer for register SW_PAD_CTL_PAD_GPIO_EMC_02"] pub type W = crate::W<u32, super::SW_PAD_CTL_PAD_GPIO_EMC_02>; #[doc = "Register SW_PAD_CTL_PAD_GPIO_EMC_02 `reset()`'s with value 0x10b0"] impl crate::ResetValue for super::SW_PAD_CTL_PAD_GPIO_EMC_02 { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0x10b0 } } #[doc = "Slew Rate Field\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SRE_A { #[doc = "0: Slow Slew Rate"] SRE_0_SLOW_SLEW_RATE = 0, #[doc = "1: Fast Slew Rate"] SRE_1_FAST_SLEW_RATE = 1, } impl From<SRE_A> for bool { #[inline(always)] fn from(variant: SRE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `SRE`"] pub type SRE_R = crate::R<bool, SRE_A>; impl SRE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SRE_A { match self.bits { false => SRE_A::SRE_0_SLOW_SLEW_RATE, true => SRE_A::SRE_1_FAST_SLEW_RATE, } } #[doc = "Checks if the value of the field is `SRE_0_SLOW_SLEW_RATE`"] #[inline(always)] pub fn is_sre_0_slow_slew_rate(&self) -> bool { *self == SRE_A::SRE_0_SLOW_SLEW_RATE } #[doc = "Checks if the value of the field is `SRE_1_FAST_SLEW_RATE`"] #[inline(always)] pub fn is_sre_1_fast_slew_rate(&self) -> bool { *self == SRE_A::SRE_1_FAST_SLEW_RATE } } #[doc = "Write proxy for field `SRE`"] pub struct SRE_W<'a> { w: &'a mut W, } impl<'a> SRE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SRE_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Slow Slew Rate"] #[inline(always)] pub fn sre_0_slow_slew_rate(self) -> &'a mut W { self.variant(SRE_A::SRE_0_SLOW_SLEW_RATE) } #[doc = "Fast Slew Rate"] #[inline(always)] pub fn sre_1_fast_slew_rate(self) -> &'a mut W { self.variant(SRE_A::SRE_1_FAST_SLEW_RATE) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> 
&'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } #[doc = "Drive Strength Field\n\nValue on reset: 6"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum DSE_A { #[doc = "0: output driver disabled;"] DSE_0_OUTPUT_DRIVER_DISABLED = 0, #[doc = "1: R0(150 Ohm @ 3.3V, 260 [email protected])"] DSE_1_R0_150_OHM_3_3V_260_OHM_1_8V = 1, #[doc = "2: R0/2"] DSE_2_R0_2 = 2, #[doc = "3: R0/3"] DSE_3_R0_3 = 3, #[doc = "4: R0/4"] DSE_4_R0_4 = 4, #[doc = "5: R0/5"] DSE_5_R0_5 = 5, #[doc = "6: R0/6"] DSE_6_R0_6 = 6, #[doc = "7: R0/7"] DSE_7_R0_7 = 7, } impl From<DSE_A> for u8 { #[inline(always)] fn from(variant: DSE_A) -> Self { variant as _ } } #[doc = "Reader of field `DSE`"] pub type DSE_R = crate::R<u8, DSE_A>; impl DSE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DSE_A { match self.bits { 0 => DSE_A::DSE_0_OUTPUT_DRIVER_DISABLED, 1 => DSE_A::DSE_1_R0_150_OHM_3_3V_260_OHM_1_8V, 2 => DSE_A::DSE_2_R0_2, 3 => DSE_A::DSE_3_R0_3, 4 => DSE_A::DSE_4_R0_4, 5 => DSE_A::DSE_5_R0_5, 6 => DSE_A::DSE_6_R0_6, 7 => DSE_A::DSE_7_R0_7, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `DSE_0_OUTPUT_DRIVER_DISABLED`"] #[inline(always)] pub fn is_dse_0_output_driver_disabled(&self) -> bool { *self == DSE_A::DSE_0_OUTPUT_DRIVER_DISABLED } #[doc = "Checks if the value of the field is `DSE_1_R0_150_OHM_3_3V_260_OHM_1_8V`"] #[inline(always)] pub fn is_dse_1_r0_150_ohm_3_3v_260_ohm_1_8v(&self) -> bool { *self == DSE_A::DSE_1_R0_150_OHM_3_3V_260_OHM_1_8V } #[doc = "Checks if the value of the field is `DSE_2_R0_2`"] #[inline(always)] pub fn is_dse_2_r0_2(&self) -> bool { *self == DSE_A::DSE_2_R0_2 } #[doc = "Checks if the value of the field is `DSE_3_R0_3`"] 
#[inline(always)] pub fn is_dse_3_r0_3(&self) -> bool { *self == DSE_A::DSE_3_R0_3 } #[doc = "Checks if the value of the field is `DSE_4_R0_4`"] #[inline(always)] pub fn is_dse_4_r0_4(&self) -> bool { *self == DSE_A::DSE_4_R0_4 } #[doc = "Checks if the value of the field is `DSE_5_R0_5`"] #[inline(always)] pub fn is_dse_5_r0_5(&self) -> bool { *self == DSE_A::DSE_5_R0_5 } #[doc = "Checks if the value of the field is `DSE_6_R0_6`"] #[inline(always)] pub fn is_dse_6_r0_6(&self) -> bool { *self == DSE_A::DSE_6_R0_6 } #[doc = "Checks if the value of the field is `DSE_7_R0_7`"] #[inline(always)] pub fn is_dse_7_r0_7(&self) -> bool { *self == DSE_A::DSE_7_R0_7 } } #[doc = "Write proxy for field `DSE`"] pub struct DSE_W<'a> { w: &'a mut W, } impl<'a> DSE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DSE_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "output driver disabled;"] #[inline(always)] pub fn dse_0_output_driver_disabled(self) -> &'a mut W { self.variant(DSE_A::DSE_0_OUTPUT_DRIVER_DISABLED) } #[doc = "R0(150 Ohm @ 3.3V, 260 [email protected])"] #[inline(always)] pub fn dse_1_r0_150_ohm_3_3v_260_ohm_1_8v(self) -> &'a mut W { self.variant(DSE_A::DSE_1_R0_150_OHM_3_3V_260_OHM_1_8V) } #[doc = "R0/2"] #[inline(always)] pub fn dse_2_r0_2(self) -> &'a mut W { self.variant(DSE_A::DSE_2_R0_2) } #[doc = "R0/3"] #[inline(always)] pub fn dse_3_r0_3(self) -> &'a mut W { self.variant(DSE_A::DSE_3_R0_3) } #[doc = "R0/4"] #[inline(always)] pub fn dse_4_r0_4(self) -> &'a mut W { self.variant(DSE_A::DSE_4_R0_4) } #[doc = "R0/5"] #[inline(always)] pub fn dse_5_r0_5(self) -> &'a mut W { self.variant(DSE_A::DSE_5_R0_5) } #[doc = "R0/6"] #[inline(always)] pub fn dse_6_r0_6(self) -> &'a mut W { self.variant(DSE_A::DSE_6_R0_6) } #[doc = "R0/7"] #[inline(always)] pub fn dse_7_r0_7(self) -> &'a mut W { self.variant(DSE_A::DSE_7_R0_7) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> 
&'a mut W { self.w.bits = (self.w.bits & !(0x07 << 3)) | (((value as u32) & 0x07) << 3); self.w } } #[doc = "Speed Field\n\nValue on reset: 2"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum SPEED_A { #[doc = "0: low(50MHz)"] SPEED_0_LOW_50MHZ = 0, #[doc = "1: medium(100MHz)"] SPEED_1_MEDIUM_100MHZ = 1, #[doc = "2: medium(100MHz)"] SPEED_2_MEDIUM_100MHZ = 2, #[doc = "3: max(200MHz)"] SPEED_3_MAX_200MHZ = 3, } impl From<SPEED_A> for u8 { #[inline(always)] fn from(variant: SPEED_A) -> Self { variant as _ } } #[doc = "Reader of field `SPEED`"] pub type SPEED_R = crate::R<u8, SPEED_A>; impl SPEED_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SPEED_A { match self.bits { 0 => SPEED_A::SPEED_0_LOW_50MHZ, 1 => SPEED_A::SPEED_1_MEDIUM_100MHZ, 2 => SPEED_A::SPEED_2_MEDIUM_100MHZ, 3 => SPEED_A::SPEED_3_MAX_200MHZ, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `SPEED_0_LOW_50MHZ`"] #[inline(always)] pub fn is_speed_0_low_50mhz(&self) -> bool { *self == SPEED_A::SPEED_0_LOW_50MHZ } #[doc = "Checks if the value of the field is `SPEED_1_MEDIUM_100MHZ`"] #[inline(always)] pub fn is_speed_1_medium_100mhz(&self) -> bool { *self == SPEED_A::SPEED_1_MEDIUM_100MHZ } #[doc = "Checks if the value of the field is `SPEED_2_MEDIUM_100MHZ`"] #[inline(always)] pub fn is_speed_2_medium_100mhz(&self) -> bool { *self == SPEED_A::SPEED_2_MEDIUM_100MHZ } #[doc = "Checks if the value of the field is `SPEED_3_MAX_200MHZ`"] #[inline(always)] pub fn is_speed_3_max_200mhz(&self) -> bool { *self == SPEED_A::SPEED_3_MAX_200MHZ } } #[doc = "Write proxy for field `SPEED`"] pub struct SPEED_W<'a> { w: &'a mut W, } impl<'a> SPEED_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SPEED_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "low(50MHz)"] #[inline(always)] pub fn speed_0_low_50mhz(self) -> &'a mut W { self.variant(SPEED_A::SPEED_0_LOW_50MHZ) } #[doc = 
"medium(100MHz)"] #[inline(always)] pub fn speed_1_medium_100mhz(self) -> &'a mut W { self.variant(SPEED_A::SPEED_1_MEDIUM_100MHZ) } #[doc = "medium(100MHz)"] #[inline(always)] pub fn speed_2_medium_100mhz(self) -> &'a mut W { self.variant(SPEED_A::SPEED_2_MEDIUM_100MHZ) } #[doc = "max(200MHz)"] #[inline(always)] pub fn speed_3_max_200mhz(self) -> &'a mut W { self.variant(SPEED_A::SPEED_3_MAX_200MHZ) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 6)) | (((value as u32) & 0x03) << 6); self.w } } #[doc = "Open Drain Enable Field\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ODE_A { #[doc = "0: Open Drain Disabled"] ODE_0_OPEN_DRAIN_DISABLED = 0, #[doc = "1: Open Drain Enabled"] ODE_1_OPEN_DRAIN_ENABLED = 1, } impl From<ODE_A> for bool { #[inline(always)] fn from(variant: ODE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `ODE`"] pub type ODE_R = crate::R<bool, ODE_A>; impl ODE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ODE_A { match self.bits { false => ODE_A::ODE_0_OPEN_DRAIN_DISABLED, true => ODE_A::ODE_1_OPEN_DRAIN_ENABLED, } } #[doc = "Checks if the value of the field is `ODE_0_OPEN_DRAIN_DISABLED`"] #[inline(always)] pub fn is_ode_0_open_drain_disabled(&self) -> bool { *self == ODE_A::ODE_0_OPEN_DRAIN_DISABLED } #[doc = "Checks if the value of the field is `ODE_1_OPEN_DRAIN_ENABLED`"] #[inline(always)] pub fn is_ode_1_open_drain_enabled(&self) -> bool { *self == ODE_A::ODE_1_OPEN_DRAIN_ENABLED } } #[doc = "Write proxy for field `ODE`"] pub struct ODE_W<'a> { w: &'a mut W, } impl<'a> ODE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ODE_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Open Drain Disabled"] #[inline(always)] pub fn ode_0_open_drain_disabled(self) -> &'a mut W { self.variant(ODE_A::ODE_0_OPEN_DRAIN_DISABLED) 
} #[doc = "Open Drain Enabled"] #[inline(always)] pub fn ode_1_open_drain_enabled(self) -> &'a mut W { self.variant(ODE_A::ODE_1_OPEN_DRAIN_ENABLED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11); self.w } } #[doc = "Pull / Keep Enable Field\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PKE_A { #[doc = "0: Pull/Keeper Disabled"] PKE_0_PULL_KEEPER_DISABLED = 0, #[doc = "1: Pull/Keeper Enabled"] PKE_1_PULL_KEEPER_ENABLED = 1, } impl From<PKE_A> for bool { #[inline(always)] fn from(variant: PKE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `PKE`"] pub type PKE_R = crate::R<bool, PKE_A>; impl PKE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PKE_A { match self.bits { false => PKE_A::PKE_0_PULL_KEEPER_DISABLED, true => PKE_A::PKE_1_PULL_KEEPER_ENABLED, } } #[doc = "Checks if the value of the field is `PKE_0_PULL_KEEPER_DISABLED`"] #[inline(always)] pub fn is_pke_0_pull_keeper_disabled(&self) -> bool { *self == PKE_A::PKE_0_PULL_KEEPER_DISABLED } #[doc = "Checks if the value of the field is `PKE_1_PULL_KEEPER_ENABLED`"] #[inline(always)] pub fn is_pke_1_pull_keeper_enabled(&self) -> bool { *self == PKE_A::PKE_1_PULL_KEEPER_ENABLED } } #[doc = "Write proxy for field `PKE`"] pub struct PKE_W<'a> { w: &'a mut W, } impl<'a> PKE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PKE_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Pull/Keeper Disabled"] #[inline(always)] pub fn pke_0_pull_keeper_disabled(self) -> &'a mut W { self.variant(PKE_A::PKE_0_PULL_KEEPER_DISABLED) } #[doc = "Pull/Keeper 
Enabled"] #[inline(always)] pub fn pke_1_pull_keeper_enabled(self) -> &'a mut W { self.variant(PKE_A::PKE_1_PULL_KEEPER_ENABLED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12); self.w } } #[doc = "Pull / Keep Select Field\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PUE_A { #[doc = "0: Keeper"] PUE_0_KEEPER = 0, #[doc = "1: Pull"] PUE_1_PULL = 1, } impl From<PUE_A> for bool { #[inline(always)] fn from(variant: PUE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `PUE`"] pub type PUE_R = crate::R<bool, PUE_A>; impl PUE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PUE_A { match self.bits { false => PUE_A::PUE_0_KEEPER, true => PUE_A::PUE_1_PULL, } } #[doc = "Checks if the value of the field is `PUE_0_KEEPER`"] #[inline(always)] pub fn is_pue_0_keeper(&self) -> bool { *self == PUE_A::PUE_0_KEEPER } #[doc = "Checks if the value of the field is `PUE_1_PULL`"] #[inline(always)] pub fn is_pue_1_pull(&self) -> bool { *self == PUE_A::PUE_1_PULL } } #[doc = "Write proxy for field `PUE`"] pub struct PUE_W<'a> { w: &'a mut W, } impl<'a> PUE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PUE_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Keeper"] #[inline(always)] pub fn pue_0_keeper(self) -> &'a mut W { self.variant(PUE_A::PUE_0_KEEPER) } #[doc = "Pull"] #[inline(always)] pub fn pue_1_pull(self) -> &'a mut W { self.variant(PUE_A::PUE_1_PULL) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] 
pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13); self.w } } #[doc = "Pull Up / Down Config. Field\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum PUS_A { #[doc = "0: 100K Ohm Pull Down"] PUS_0_100K_OHM_PULL_DOWN = 0, #[doc = "1: 47K Ohm Pull Up"] PUS_1_47K_OHM_PULL_UP = 1, #[doc = "2: 100K Ohm Pull Up"] PUS_2_100K_OHM_PULL_UP = 2, #[doc = "3: 22K Ohm Pull Up"] PUS_3_22K_OHM_PULL_UP = 3, } impl From<PUS_A> for u8 { #[inline(always)] fn from(variant: PUS_A) -> Self { variant as _ } } #[doc = "Reader of field `PUS`"] pub type PUS_R = crate::R<u8, PUS_A>; impl PUS_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PUS_A { match self.bits { 0 => PUS_A::PUS_0_100K_OHM_PULL_DOWN, 1 => PUS_A::PUS_1_47K_OHM_PULL_UP, 2 => PUS_A::PUS_2_100K_OHM_PULL_UP, 3 => PUS_A::PUS_3_22K_OHM_PULL_UP, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `PUS_0_100K_OHM_PULL_DOWN`"] #[inline(always)] pub fn is_pus_0_100k_ohm_pull_down(&self) -> bool { *self == PUS_A::PUS_0_100K_OHM_PULL_DOWN } #[doc = "Checks if the value of the field is `PUS_1_47K_OHM_PULL_UP`"] #[inline(always)] pub fn is_pus_1_47k_ohm_pull_up(&self) -> bool { *self == PUS_A::PUS_1_47K_OHM_PULL_UP } #[doc = "Checks if the value of the field is `PUS_2_100K_OHM_PULL_UP`"] #[inline(always)] pub fn is_pus_2_100k_ohm_pull_up(&self) -> bool { *self == PUS_A::PUS_2_100K_OHM_PULL_UP } #[doc = "Checks if the value of the field is `PUS_3_22K_OHM_PULL_UP`"] #[inline(always)] pub fn is_pus_3_22k_ohm_pull_up(&self) -> bool { *self == PUS_A::PUS_3_22K_OHM_PULL_UP } } #[doc = "Write proxy for field `PUS`"] pub struct PUS_W<'a> { w: &'a mut W, } impl<'a> PUS_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PUS_A) 
-> &'a mut W { { self.bits(variant.into()) } } #[doc = "100K Ohm Pull Down"] #[inline(always)] pub fn pus_0_100k_ohm_pull_down(self) -> &'a mut W { self.variant(PUS_A::PUS_0_100K_OHM_PULL_DOWN) } #[doc = "47K Ohm Pull Up"] #[inline(always)] pub fn pus_1_47k_ohm_pull_up(self) -> &'a mut W { self.variant(PUS_A::PUS_1_47K_OHM_PULL_UP) } #[doc = "100K Ohm Pull Up"] #[inline(always)] pub fn pus_2_100k_ohm_pull_up(self) -> &'a mut W { self.variant(PUS_A::PUS_2_100K_OHM_PULL_UP) } #[doc = "22K Ohm Pull Up"] #[inline(always)] pub fn pus_3_22k_ohm_pull_up(self) -> &'a mut W { self.variant(PUS_A::PUS_3_22K_OHM_PULL_UP) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 14)) | (((value as u32) & 0x03) << 14); self.w } } #[doc = "Hyst. Enable Field\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum HYS_A { #[doc = "0: Hysteresis Disabled"] HYS_0_HYSTERESIS_DISABLED = 0, #[doc = "1: Hysteresis Enabled"] HYS_1_HYSTERESIS_ENABLED = 1, } impl From<HYS_A> for bool { #[inline(always)] fn from(variant: HYS_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `HYS`"] pub type HYS_R = crate::R<bool, HYS_A>; impl HYS_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> HYS_A { match self.bits { false => HYS_A::HYS_0_HYSTERESIS_DISABLED, true => HYS_A::HYS_1_HYSTERESIS_ENABLED, } } #[doc = "Checks if the value of the field is `HYS_0_HYSTERESIS_DISABLED`"] #[inline(always)] pub fn is_hys_0_hysteresis_disabled(&self) -> bool { *self == HYS_A::HYS_0_HYSTERESIS_DISABLED } #[doc = "Checks if the value of the field is `HYS_1_HYSTERESIS_ENABLED`"] #[inline(always)] pub fn is_hys_1_hysteresis_enabled(&self) -> bool { *self == HYS_A::HYS_1_HYSTERESIS_ENABLED } } #[doc = "Write proxy for field `HYS`"] pub struct HYS_W<'a> { w: &'a mut W, } impl<'a> HYS_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, 
variant: HYS_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Hysteresis Disabled"] #[inline(always)] pub fn hys_0_hysteresis_disabled(self) -> &'a mut W { self.variant(HYS_A::HYS_0_HYSTERESIS_DISABLED) } #[doc = "Hysteresis Enabled"] #[inline(always)] pub fn hys_1_hysteresis_enabled(self) -> &'a mut W { self.variant(HYS_A::HYS_1_HYSTERESIS_ENABLED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16); self.w } } impl R { #[doc = "Bit 0 - Slew Rate Field"] #[inline(always)] pub fn sre(&self) -> SRE_R { SRE_R::new((self.bits & 0x01) != 0) } #[doc = "Bits 3:5 - Drive Strength Field"] #[inline(always)] pub fn dse(&self) -> DSE_R { DSE_R::new(((self.bits >> 3) & 0x07) as u8) } #[doc = "Bits 6:7 - Speed Field"] #[inline(always)] pub fn speed(&self) -> SPEED_R { SPEED_R::new(((self.bits >> 6) & 0x03) as u8) } #[doc = "Bit 11 - Open Drain Enable Field"] #[inline(always)] pub fn ode(&self) -> ODE_R { ODE_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 12 - Pull / Keep Enable Field"] #[inline(always)] pub fn pke(&self) -> PKE_R { PKE_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 13 - Pull / Keep Select Field"] #[inline(always)] pub fn pue(&self) -> PUE_R { PUE_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bits 14:15 - Pull Up / Down Config. Field"] #[inline(always)] pub fn pus(&self) -> PUS_R { PUS_R::new(((self.bits >> 14) & 0x03) as u8) } #[doc = "Bit 16 - Hyst. 
Enable Field"] #[inline(always)] pub fn hys(&self) -> HYS_R { HYS_R::new(((self.bits >> 16) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Slew Rate Field"] #[inline(always)] pub fn sre(&mut self) -> SRE_W { SRE_W { w: self } } #[doc = "Bits 3:5 - Drive Strength Field"] #[inline(always)] pub fn dse(&mut self) -> DSE_W { DSE_W { w: self } } #[doc = "Bits 6:7 - Speed Field"] #[inline(always)] pub fn speed(&mut self) -> SPEED_W { SPEED_W { w: self } } #[doc = "Bit 11 - Open Drain Enable Field"] #[inline(always)] pub fn ode(&mut self) -> ODE_W { ODE_W { w: self } } #[doc = "Bit 12 - Pull / Keep Enable Field"] #[inline(always)] pub fn pke(&mut self) -> PKE_W { PKE_W { w: self } } #[doc = "Bit 13 - Pull / Keep Select Field"] #[inline(always)] pub fn pue(&mut self) -> PUE_W { PUE_W { w: self } } #[doc = "Bits 14:15 - Pull Up / Down Config. Field"] #[inline(always)] pub fn pus(&mut self) -> PUS_W { PUS_W { w: self } } #[doc = "Bit 16 - Hyst. Enable Field"] #[inline(always)] pub fn hys(&mut self) -> HYS_W { HYS_W { w: self } } }
30.178082
87
0.570132
f52b8bfcde2e9694e68f6b056db03fa94222e5b7
42,539
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::atomic_future::AtomicFuture; use crossbeam::queue::SegQueue; use fuchsia_zircon::{self as zx, AsHandleRef}; use futures::future::{self, FutureObj, LocalFutureObj}; use futures::task::{waker_ref, ArcWake, AtomicWaker, Spawn, SpawnError}; use futures::{Future, FutureExt, Poll}; use parking_lot::{Condvar, Mutex}; use pin_utils::pin_mut; use slab::Slab; use std::cell::RefCell; use std::collections::BinaryHeap; use std::marker::Unpin; use std::ops::Deref; use std::sync::atomic::{AtomicBool, AtomicI64, AtomicU32, AtomicUsize, Ordering}; use std::sync::{Arc, Weak}; use std::task::{Context, Waker}; use std::{cmp, fmt, mem, ops, thread, u64, usize}; const EMPTY_WAKEUP_ID: u64 = u64::MAX; const TASK_READY_WAKEUP_ID: u64 = u64::MAX - 1; /// Spawn a new task to be run on the global executor. /// /// Tasks spawned using this method must be threadsafe (implement the `Send` trait), /// as they may be run on either a singlethreaded or multithreaded executor. pub fn spawn<F>(future: F) where F: Future<Output = ()> + Send + 'static, { Inner::spawn(&EHandle::local().inner, FutureObj::new(Box::new(future))); } /// Spawn a new task to be run on the global executor. /// /// This is similar to the `spawn` function, but tasks spawned using this method /// do not have to be threadsafe (implement the `Send` trait). In return, this method /// requires that the current executor never be run in a multithreaded mode-- only /// `run_singlethreaded` can be used. pub fn spawn_local<F>(future: F) where F: Future<Output = ()> + 'static, { Inner::spawn_local(&EHandle::local().inner, LocalFutureObj::new(Box::new(future))); } /// A time relative to the executor's clock. 
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
// repr(transparent): same layout as the wrapped zx::Time, so conversions are free.
#[repr(transparent)]
pub struct Time(zx::Time);

impl Time {
    /// Return the current time according to the global executor.
    ///
    /// This function requires that an executor has been set up.
    pub fn now() -> Self {
        EHandle::local().inner.now()
    }

    /// Compute a deadline for the time in the future that is the
    /// given `Duration` away. Similarly to `zx::Time::after`,
    /// saturates on overflow instead of wrapping around.
    ///
    /// This function requires that an executor has been set up.
    pub fn after(duration: zx::Duration) -> Self {
        // saturating_add keeps `INFINITE` / `INFINITE_PAST` stable under arithmetic.
        Self(zx::Time::from_nanos(Self::now().0.into_nanos().saturating_add(duration.into_nanos())))
    }

    /// Convert from `zx::Time`. This only makes sense if the time is
    /// taken from the same source (for the real clock, this is
    /// `zx::ClockId::Monotonic`).
    pub fn from_zx(t: zx::Time) -> Self {
        Time(t)
    }

    /// Convert into `zx::Time`. For the real clock, this will be a
    /// monotonic time.
    pub fn into_zx(self) -> zx::Time {
        self.0
    }

    /// Convert from nanoseconds.
    pub fn from_nanos(nanos: i64) -> Self {
        Self::from_zx(zx::Time::from_nanos(nanos))
    }

    /// Convert to nanoseconds.
    pub fn into_nanos(self) -> i64 {
        self.0.into_nanos()
    }

    /// The maximum time.
    pub const INFINITE: Time = Time(zx::Time::INFINITE);

    /// The minimum time.
    pub const INFINITE_PAST: Time = Time(zx::Time::INFINITE_PAST);
}

impl From<zx::Time> for Time {
    fn from(t: zx::Time) -> Time {
        Time(t)
    }
}

impl From<Time> for zx::Time {
    fn from(t: Time) -> zx::Time {
        t.0
    }
}

// Arithmetic delegates to the wrapped zx::Time / zx::Duration operators.
impl ops::Add<zx::Duration> for Time {
    type Output = Time;
    fn add(self, d: zx::Duration) -> Time {
        Time(self.0 + d)
    }
}

impl ops::Add<Time> for zx::Duration {
    type Output = Time;
    fn add(self, t: Time) -> Time {
        Time(self + t.0)
    }
}

impl ops::Sub<zx::Duration> for Time {
    type Output = Time;
    fn sub(self, d: zx::Duration) -> Time {
        Time(self.0 - d)
    }
}

impl ops::Sub<Time> for Time {
    type Output = zx::Duration;
    fn sub(self, t: Time) -> zx::Duration {
        self.0 - t.0
    }
}

impl ops::AddAssign<zx::Duration> for Time {
    fn add_assign(&mut self, d: zx::Duration) {
        self.0.add_assign(d)
    }
}

impl ops::SubAssign<zx::Duration> for Time {
    fn sub_assign(&mut self, d: zx::Duration) {
        self.0.sub_assign(d)
    }
}

/// An extension trait to provide `after_now` on `zx::Duration`.
pub trait DurationExt {
    /// Return a `Time` which is a `Duration` after the current time.
    /// `duration.after_now()` is equivalent to `Time::after(duration)`.
    ///
    /// This method requires that an executor has been set up.
    fn after_now(self) -> Time;
}

impl DurationExt for zx::Duration {
    fn after_now(self) -> Time {
        Time::after(self)
    }
}

/// A trait for handling the arrival of a packet on a `zx::Port`.
///
/// This trait should be implemented by users who wish to write their own
/// types which receive asynchronous notifications from a `zx::Port`.
/// Implementors of this trait generally contain a `futures::task::AtomicWaker` which
/// is used to wake up the task which can make progress due to the arrival of
/// the packet.
///
/// `PacketReceiver`s should be registered with a `Core` using the
/// `register_receiver` method on `Core`, `Handle`, or `Remote`.
/// Upon registration, users will receive a `ReceiverRegistration`
/// which provides `key` and `port` methods.
/// These methods can be used to wait on
/// asynchronous signals.
///
/// Note that `PacketReceiver`s may receive false notifications intended for a
/// previous receiver, and should handle these gracefully.
pub trait PacketReceiver: Send + Sync + 'static {
    /// Receive a packet when one arrives.
    fn receive_packet(&self, packet: zx::Packet);
}

// Re-arms an async wait: registers the task's waker, clears the locally cached
// signal bits, and — if the requested signal (or peer-closed) had previously
// been observed — schedules a fresh port packet so the wait fires again.
// NOTE(review): the exact cached-bit protocol depends on the callers that own
// `atomic_signals` (e.g. on_signals) — confirm against those call sites.
pub(crate) fn need_signal(
    cx: &mut Context<'_>,
    task: &AtomicWaker,
    atomic_signals: &AtomicU32,
    signal: zx::Signals,
    clear_closed: bool,
    handle: zx::HandleRef<'_>,
    port: &zx::Port,
    key: u64,
) -> Result<(), zx::Status> {
    const OBJECT_PEER_CLOSED: zx::Signals = zx::Signals::OBJECT_PEER_CLOSED;

    // Register the waker *before* touching the signal bits so a concurrent
    // packet delivery cannot be missed.
    task.register(cx.waker());
    let mut clear_signals = signal;
    if clear_closed {
        clear_signals |= OBJECT_PEER_CLOSED;
    }
    let old = zx::Signals::from_bits_truncate(
        atomic_signals.fetch_and(!clear_signals.bits(), Ordering::SeqCst),
    );
    // We only need to schedule a new packet if one isn't already scheduled.
    // If the bits were already false, a packet was already scheduled.
    let was_signal = old.contains(signal);
    let was_closed = old.contains(OBJECT_PEER_CLOSED);
    if was_closed || was_signal {
        let mut signals_to_schedule = zx::Signals::empty();
        if was_signal {
            signals_to_schedule |= signal;
        }
        if clear_closed && was_closed {
            signals_to_schedule |= OBJECT_PEER_CLOSED
        };
        schedule_packet(handle, port, key, signals_to_schedule)?;
    }
    if was_closed && !clear_closed {
        // We just missed a channel close-- go around again.
        cx.waker().wake_by_ref();
    }
    Ok(())
}

// Queues a one-shot async wait on `port` for `signals` on `handle`, keyed by `key`.
pub(crate) fn schedule_packet(
    handle: zx::HandleRef<'_>,
    port: &zx::Port,
    key: u64,
    signals: zx::Signals,
) -> Result<(), zx::Status> {
    handle.wait_async_handle(port, key, signals, zx::WaitAsyncOpts::Once)
}

/// A registration of a `PacketReceiver`.
/// When dropped, it will automatically deregister the `PacketReceiver`.
// NOTE: purposefully does not implement `Clone`.
#[derive(Debug)]
pub struct ReceiverRegistration<T: PacketReceiver> {
    receiver: Arc<T>,
    ehandle: EHandle,
    key: u64,
}

impl<T> ReceiverRegistration<T>
where
    T: PacketReceiver,
{
    /// The key with which `Packet`s destined for this receiver should be sent on the `zx::Port`.
    pub fn key(&self) -> u64 {
        self.key
    }

    /// The internal `PacketReceiver`.
    pub fn receiver(&self) -> &T {
        &*self.receiver
    }

    /// The `zx::Port` on which packets destined for this `PacketReceiver` should be queued.
    pub fn port(&self) -> &zx::Port {
        self.ehandle.port()
    }
}

impl<T: PacketReceiver> Deref for ReceiverRegistration<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        self.receiver()
    }
}

impl<T> Drop for ReceiverRegistration<T>
where
    T: PacketReceiver,
{
    fn drop(&mut self) {
        // Automatic deregistration: a receiver key never outlives its registration.
        self.ehandle.deregister_receiver(self.key);
    }
}

/// A port-based executor for Fuchsia OS.
// NOTE: intentionally does not implement `Clone`.
pub struct Executor {
    inner: Arc<Inner>,
    // A packet that has been dequeued but not processed. This is used by `run_one_step`.
    next_packet: Option<zx::Packet>,
}

impl fmt::Debug for Executor {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Executor").field("port", &self.inner.port).finish()
    }
}

// Heap of pending timer wakeups; `TimeWaker`'s reversed `Ord` makes this a min-heap.
type TimerHeap = BinaryHeap<TimeWaker>;

thread_local!(
    static EXECUTOR: RefCell<Option<(Arc<Inner>, TimerHeap)>> = RefCell::new(None)
);

// Runs `f` against the current thread's timer heap; panics if no executor has
// been installed on this thread yet.
fn with_local_timer_heap<F, R>(f: F) -> R
where
    F: FnOnce(&mut TimerHeap) -> R,
{
    EXECUTOR.with(|e| {
        (f)(&mut e
            .borrow_mut()
            .as_mut()
            .expect("can't get timer heap before fuchsia_async::Executor is initialized")
            .1)
    })
}

impl Executor {
    // Shared constructor: builds the inner state and installs this executor
    // (with an empty timer heap) as the thread-local executor.
    fn new_with_time(time: ExecutorTime) -> Result<Self, zx::Status> {
        let executor = Executor {
            inner: Arc::new(Inner {
                port: zx::Port::create()?,
                done: AtomicBool::new(false),
                threadiness: Threadiness::default(),
                threads: Mutex::new(Vec::new()),
                receivers: Mutex::new(Slab::new()),
                ready_tasks: SegQueue::new(),
                time: time,
            }),
            next_packet: None,
        };
        executor.ehandle().set_local(TimerHeap::new());
        Ok(executor)
    }

    /// Create a new executor running with actual time.
    pub fn new() -> Result<Self, zx::Status> {
        Self::new_with_time(ExecutorTime::RealTime)
    }

    /// Create a new executor running with fake time.
    pub fn new_with_fake_time() -> Result<Self, zx::Status> {
        Self::new_with_time(ExecutorTime::FakeTime(AtomicI64::new(
            Time::INFINITE_PAST.into_nanos(),
        )))
    }

    /// Return the current time according to the executor.
    pub fn now(&self) -> Time {
        self.inner.now()
    }

    /// Set the fake time to a given value.
    pub fn set_fake_time(&self, t: Time) {
        self.inner.set_fake_time(t)
    }

    /// Return a handle to the executor.
    pub fn ehandle(&self) -> EHandle {
        EHandle { inner: self.inner.clone() }
    }

    // Builds a waker that notifies via the EMPTY_WAKEUP_ID packet; safe because
    // single-threaded mode has no other use for that id (see
    // SingleThreadedMainTaskWake below).
    fn singlethreaded_main_task_wake(&self) -> Waker {
        Arc::new(SingleThreadedMainTaskWake(self.inner.clone())).into_waker()
    }

    /// Run a single future to completion on a single thread.
    // Takes `&mut self` to ensure that only one thread-manager is running at a time.
pub fn run_singlethreaded<F>(&mut self, main_future: F) -> F::Output where F: Future, { self.inner .require_real_time() .expect("Error: called `run_singlethreaded` on an executor using fake time"); if let Some(_) = self.next_packet { panic!("Error: called `run_singlethreaded` on an executor with a packet waiting"); } pin_mut!(main_future); let waker = self.singlethreaded_main_task_wake(); let main_cx = &mut Context::from_waker(&waker); let mut res = main_future.as_mut().poll(main_cx); loop { if let Poll::Ready(res) = res { return res; } let packet = with_local_timer_heap(|timer_heap| { let deadline = next_deadline(timer_heap).map(|t| t.time).unwrap_or(Time::INFINITE); // into_zx: we are using real time, so the time is a monotonic time. match self.inner.port.wait(deadline.into_zx()) { Ok(packet) => Some(packet), Err(zx::Status::TIMED_OUT) => { let time_waker = timer_heap.pop().unwrap(); time_waker.wake(); None } Err(status) => { panic!("Error calling port wait: {:?}", status); } } }); if let Some(packet) = packet { match packet.key() { EMPTY_WAKEUP_ID => { res = main_future.as_mut().poll(main_cx); } TASK_READY_WAKEUP_ID => self.inner.poll_ready_tasks(), receiver_key => { self.inner.deliver_packet(receiver_key as usize, packet); } } } } } /// PollResult the future. If it is not ready, dispatch available packets and possibly try again. /// Timers will not fire. Never blocks. /// /// This function is for testing. DO NOT use this function in tests or applications that /// involve any interaction with other threads or processes, as those interactions /// may become stalled waiting for signals from "the outside world" which is beyond /// the knowledge of the executor. /// /// Unpin: this function requires all futures to be `Unpin`able, so any `!Unpin` /// futures must first be pinned using the `pin_mut!` macro from the `pin-utils` crate. 
    pub fn run_until_stalled<F>(&mut self, main_future: &mut F) -> Poll<F::Output>
    where
        F: Future + Unpin,
    {
        // Ensure the main future is polled at least once.
        self.wake_main_future();
        while let NextStep::NextPacket = self.next_step(/*fire_timers:*/ false) {
            // Will not fail, because NextPacket means there is a
            // packet ready to be processed.
            let res = self.consume_packet(main_future);
            if res.is_ready() {
                return res;
            }
        }
        Poll::Pending
    }

    /// Schedule the main future for being woken up. This is useful in conjunction with
    /// `run_one_step`.
    pub fn wake_main_future(&mut self) {
        self.inner.notify_empty()
    }

    /// Run one iteration of the loop: dispatch the first available packet or timer. Returns `None`
    /// if nothing has been dispatched, `Some(Poll::Pending)` if execution made progress but the
    /// main future has not completed, and `Some(Poll::Ready(_))` if the main future has completed
    /// at this step.
    ///
    /// For the main future to run, `wake_main_future` needs to have been called first.
    /// This will fire timers that are in the past, but will not advance the executor's time.
    ///
    /// Unpin: this function requires all futures to be `Unpin`able, so any `!Unpin`
    /// futures must first be pinned using the `pin_mut!` macro from the `pin-utils` crate.
    ///
    /// This function is meant to be used for reproducible integration tests: multiple async
    /// processes can be run in a controlled way, dispatching events one at a time and randomly
    /// (but reproducibly) choosing which process gets to advance at each step.
    pub fn run_one_step<F>(&mut self, main_future: &mut F) -> Option<Poll<F::Output>>
    where
        F: Future + Unpin,
    {
        match self.next_step(/*fire_timers:*/ true) {
            NextStep::WaitUntil(_) => None,
            NextStep::NextPacket => {
                // Will not fail because NextPacket means there is a
                // packet ready to be processed.
                Some(self.consume_packet(main_future))
            }
            NextStep::NextTimer => {
                let next_timer = with_local_timer_heap(|timer_heap| {
                    // unwrap: will not fail because NextTimer
                    // guarantees there is a timer in the heap.
                    timer_heap.pop().unwrap()
                });
                next_timer.wake();
                Some(Poll::Pending)
            }
        }
    }

    /// Consumes a packet that has already been dequeued from the port.
    /// This must only be called when there is a packet available.
    fn consume_packet<F>(&mut self, main_future: &mut F) -> Poll<F::Output>
    where
        F: Future + Unpin,
    {
        let packet =
            self.next_packet.take().expect("consume_packet called but no packet available");
        match packet.key() {
            // Main-future wakeup: re-poll it directly.
            EMPTY_WAKEUP_ID => self.poll_main_future(main_future),
            // A spawned task became ready: poll exactly one task.
            TASK_READY_WAKEUP_ID => {
                if let Some(task) = self.inner.ready_tasks.try_pop() {
                    let lw = waker_ref(&task);
                    task.future.try_poll(&mut Context::from_waker(&lw));
                };
                Poll::Pending
            }
            // Anything else is destined for a registered PacketReceiver.
            receiver_key => {
                self.inner.deliver_packet(receiver_key as usize, packet);
                Poll::Pending
            }
        }
    }

    // Polls the main future with the single-threaded main-task waker.
    fn poll_main_future<F>(&mut self, main_future: &mut F) -> Poll<F::Output>
    where
        F: Future + Unpin,
    {
        let waker = self.singlethreaded_main_task_wake();
        let main_cx = &mut Context::from_waker(&waker);
        main_future.poll_unpin(main_cx)
    }

    // Decides what the stepping loop should do next, buffering a dequeued
    // packet in `self.next_packet` for `consume_packet`.
    fn next_step(&mut self, fire_timers: bool) -> NextStep {
        // If a packet is queued from a previous call to next_step, it must be executed first.
        if let Some(_) = self.next_packet {
            return NextStep::NextPacket;
        }
        // If we are past a deadline, run the corresponding timer.
        let next_deadline = with_local_timer_heap(|timer_heap| {
            next_deadline(timer_heap).map(|t| t.time).unwrap_or(Time::INFINITE)
        });
        if fire_timers && next_deadline <= self.inner.now() {
            NextStep::NextTimer
        } else {
            // Try to unqueue a packet from the port.
            // INFINITE_PAST deadline => non-blocking poll of the port.
            match self.inner.port.wait(zx::Time::INFINITE_PAST) {
                Ok(packet) => {
                    self.next_packet = Some(packet);
                    NextStep::NextPacket
                }
                Err(zx::Status::TIMED_OUT) => NextStep::WaitUntil(next_deadline),
                Err(status) => {
                    panic!("Error calling port wait: {:?}", status);
                }
            }
        }
    }

    /// Return `Ready` if the executor has work to do, or `Waiting(next_deadline)` if there will be
    /// no work to do before `next_deadline` or an external event.
    ///
    /// If this returns `Ready`, `run_one_step` will return `Some(_)`. If there is no pending packet
    /// or timer, `Waiting(Time::INFINITE)` is returned.
    pub fn is_waiting(&mut self) -> WaitState {
        match self.next_step(/*fire_timers:*/ true) {
            NextStep::NextPacket | NextStep::NextTimer => WaitState::Ready,
            NextStep::WaitUntil(t) => WaitState::Waiting(t),
        }
    }

    /// Wake all tasks waiting for expired timers, and return `true` if any task was woken.
    ///
    /// This is intended for use in test code in conjunction with fake time.
    pub fn wake_expired_timers(&mut self) -> bool {
        let now = self.now();
        with_local_timer_heap(|timer_heap| {
            let mut ret = false;
            // Repeatedly wake and pop the earliest timer while it is expired.
            while let Some(waker) = next_deadline(timer_heap).filter(|waker| waker.time <= now) {
                waker.wake();
                timer_heap.pop();
                ret = true;
            }
            ret
        })
    }

    /// Wake up the next task waiting for a timer, if any, and return the time for which the
    /// timer was scheduled.
    ///
    /// This is intended for use in test code in conjunction with `run_until_stalled`.
    /// For example, here is how one could test that the Timer future fires after the given
    /// timeout:
    ///
    ///     let deadline = 5.seconds().after_now();
    ///     let mut future = Timer::<Never>::new(deadline);
    ///     assert_eq!(Poll::Pending, exec.run_until_stalled(&mut future));
    ///     assert_eq!(Some(deadline), exec.wake_next_timer());
    ///     assert_eq!(Poll::Ready(()), exec.run_until_stalled(&mut future));
    pub fn wake_next_timer(&mut self) -> Option<Time> {
        with_local_timer_heap(|timer_heap| {
            // Wake via peek first (borrow of the heap), then pop the entry.
            let deadline = next_deadline(timer_heap).map(|waker| {
                waker.wake();
                waker.time
            });
            if deadline.is_some() {
                timer_heap.pop();
            }
            deadline
        })
    }

    /// Run a single future to completion using multiple threads.
    // Takes `&mut self` to ensure that only one thread-manager is running at a time.
pub fn run<F>(&mut self, future: F, num_threads: usize) -> F::Output where F: Future + Send + 'static, F::Output: Send + 'static, { self.inner.require_real_time().expect("Error: called `run` on an executor using fake time"); self.inner.threadiness.require_multithreaded().expect( "Error: called `run` on executor after using `spawn_local`. \ Use `run_singlethreaded` instead.", ); if let Some(_) = self.next_packet { panic!("Error: called `run_singlethreaded` on an executor with a packet waiting"); } let pair = Arc::new((Mutex::new(None), Condvar::new())); let pair2 = pair.clone(); // Spawn a future which will set the result upon completion. Inner::spawn( &self.inner, FutureObj::new(Box::new(future.then(move |fut_result| { let (lock, cvar) = &*pair2; let mut result = lock.lock(); *result = Some(fut_result); cvar.notify_one(); future::ready(()) }))), ); // Start worker threads, handing off timers from the current thread. self.inner.done.store(false, Ordering::SeqCst); with_local_timer_heap(|timer_heap| { let timer_heap = mem::replace(timer_heap, TimerHeap::new()); self.create_worker_threads(num_threads, Some(timer_heap)); }); // Wait until the signal the future has completed. let (lock, cvar) = &*pair; let mut result = lock.lock(); while result.is_none() { cvar.wait(&mut result); } // Spin down worker threads self.inner.done.store(true, Ordering::SeqCst); self.join_all(); // Unwrap is fine because of the check to `is_none` above. result.take().unwrap() } /// Add `num_workers` worker threads to the executor's thread pool. /// `timers`: timers from the "master" thread which would otherwise be lost. 
    fn create_worker_threads(&self, num_workers: usize, mut timers: Option<TimerHeap>) {
        let mut threads = self.inner.threads.lock();
        for _ in 0..num_workers {
            // `timers.take()`: only the first worker inherits the handed-off heap.
            threads.push(self.new_worker(timers.take()));
        }
    }

    fn join_all(&self) {
        let mut threads = self.inner.threads.lock();

        // Send a user packet to wake up all the threads
        for _thread in threads.iter() {
            self.inner.notify_empty();
        }

        // Join the worker threads
        for thread in threads.drain(..) {
            thread.join().expect("Couldn't join worker thread.");
        }
    }

    fn new_worker(&self, timers: Option<TimerHeap>) -> thread::JoinHandle<()> {
        let inner = self.inner.clone();
        thread::spawn(move || Self::worker_lifecycle(inner, timers))
    }

    // Body of each worker thread: install a thread-local executor, then loop
    // dispatching timers and packets until `done` is set.
    fn worker_lifecycle(inner: Arc<Inner>, timers: Option<TimerHeap>) {
        let executor: EHandle = EHandle { inner: inner.clone() };
        executor.set_local(timers.unwrap_or(TimerHeap::new()));
        loop {
            if inner.done.load(Ordering::SeqCst) {
                EHandle::rm_local();
                return;
            }
            let packet = with_local_timer_heap(|timer_heap| {
                let deadline =
                    next_deadline(timer_heap).map(|t| t.time).unwrap_or(Time::INFINITE);
                // into_zx: we are using real time, so the time is a monotonic time.
                match inner.port.wait(deadline.into_zx()) {
                    Ok(packet) => Some(packet),
                    Err(zx::Status::TIMED_OUT) => {
                        let time_waker = timer_heap.pop().unwrap();
                        time_waker.wake();
                        None
                    }
                    Err(status) => {
                        panic!("Error calling port wait: {:?}", status);
                    }
                }
            });
            if let Some(packet) = packet {
                match packet.key() {
                    // EMPTY_WAKEUP_ID here is only a "check the done flag" nudge.
                    EMPTY_WAKEUP_ID => {}
                    TASK_READY_WAKEUP_ID => inner.poll_ready_tasks(),
                    receiver_key => {
                        inner.deliver_packet(receiver_key as usize, packet);
                    }
                }
            }
        }
    }
}

// What the single-threaded stepping loop should do next.
enum NextStep {
    WaitUntil(Time),
    NextPacket,
    NextTimer,
}

/// Indicates whether the executor can run, or is stuck waiting.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub enum WaitState {
    /// The executor can run immediately.
    Ready,
    /// The executor will wait for the given time or an external event.
    Waiting(Time),
}

// Returns the earliest live timer, discarding entries whose owner was dropped.
fn next_deadline(heap: &mut TimerHeap) -> Option<&TimeWaker> {
    while is_defunct_timer(heap.peek()) {
        heap.pop();
    }
    heap.peek()
}

fn is_defunct_timer(timer: Option<&TimeWaker>) -> bool {
    match timer {
        None => false,
        // Defunct when the weak reference can no longer be upgraded.
        Some(timer) => timer.waker_and_bool.upgrade().is_none(),
    }
}

// Since there are no other threads running, we don't have to use the EMPTY_WAKEUP_ID,
// so instead we save it for use as the main task wakeup id.
struct SingleThreadedMainTaskWake(Arc<Inner>);
impl ArcWake for SingleThreadedMainTaskWake {
    fn wake_by_ref(arc_self: &Arc<Self>) {
        arc_self.0.notify_empty();
    }
}

impl Drop for Executor {
    fn drop(&mut self) {
        // Done flag must be set before dropping packet receivers
        // so that future receivers that attempt to deregister themselves
        // know that it's okay if their entries are already missing.
        self.inner.done.store(true, Ordering::SeqCst);

        // Wake the threads so they can kill themselves.
        self.join_all();

        // Drop all of the packet receivers
        self.inner.receivers.lock().clear();

        // Drop all of the uncompleted tasks
        while let Some(_) = self.inner.ready_tasks.try_pop() {}

        // Remove the thread-local executor set in `new`.
        EHandle::rm_local();
    }
}

/// A handle to an executor.
#[derive(Clone)]
pub struct EHandle {
    inner: Arc<Inner>,
}

impl fmt::Debug for EHandle {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EHandle").field("port", &self.inner.port).finish()
    }
}

impl Spawn for EHandle {
    fn spawn_obj(&mut self, f: FutureObj<'static, ()>) -> Result<(), SpawnError> {
        // Delegate to the by-reference impl below.
        <&EHandle>::spawn_obj(&mut &*self, f)
    }
}

impl<'a> Spawn for &'a EHandle {
    fn spawn_obj(&mut self, f: FutureObj<'static, ()>) -> Result<(), SpawnError> {
        Inner::spawn(&self.inner, f);
        Ok(())
    }
}

impl EHandle {
    /// Returns the thread-local executor.
    pub fn local() -> Self {
        let inner = EXECUTOR
            .with(|e| e.borrow().as_ref().map(|x| x.0.clone()))
            .expect("Fuchsia Executor must be created first");
        EHandle { inner }
    }

    // Installs this executor (and its timer heap) as the thread-local executor.
    fn set_local(self, timers: TimerHeap) {
        let inner = self.inner.clone();
        EXECUTOR.with(|e| {
            let mut e = e.borrow_mut();
            assert!(e.is_none(), "Cannot create multiple Fuchsia Executors");
            *e = Some((inner, timers));
        });
    }

    fn rm_local() {
        EXECUTOR.with(|e| *e.borrow_mut() = None);
    }

    /// Get a reference to the Fuchsia `zx::Port` being used to listen for events.
    pub fn port(&self) -> &zx::Port {
        &self.inner.port
    }

    /// Registers a `PacketReceiver` with the executor and returns a registration.
    /// The `PacketReceiver` will be deregistered when the `Registration` is dropped.
    pub fn register_receiver<T>(&self, receiver: Arc<T>) -> ReceiverRegistration<T>
    where
        T: PacketReceiver,
    {
        // The slab index doubles as the port-packet key for this receiver.
        let key = self.inner.receivers.lock().insert(receiver.clone()) as u64;
        ReceiverRegistration { ehandle: self.clone(), key, receiver }
    }

    fn deregister_receiver(&self, key: u64) {
        let key = key as usize;
        let mut lock = self.inner.receivers.lock();
        if lock.contains(key) {
            lock.remove(key);
        } else {
            // The executor is shutting down and already removed the entry.
            assert!(self.inner.done.load(Ordering::SeqCst), "Missing receiver to deregister");
        }
    }

    // Adds a timer entry (held weakly, so a dropped timer becomes defunct)
    // to this thread's timer heap.
    pub(crate) fn register_timer(
        &self,
        time: Time,
        waker_and_bool: &Arc<(AtomicWaker, AtomicBool)>,
    ) {
        with_local_timer_heap(|timer_heap| {
            let waker_and_bool = Arc::downgrade(waker_and_bool);
            timer_heap.push(TimeWaker { time, waker_and_bool })
        })
    }
}

/// The executor has not been run in multithreaded mode and no thread-unsafe
/// futures have been spawned.
const THREADINESS_ANY: usize = 0;
/// The executor has not been run in multithreaded mode, but thread-unsafe
/// futures have been spawned, so it cannot ever be run in multithreaded mode.
const THREADINESS_SINGLE: usize = 1;
/// The executor has been run in multithreaded mode.
/// No thread-unsafe futures can be spawned.
const THREADINESS_MULTI: usize = 2;

/// Tracks the multithreaded-compatibility state of the executor.
struct Threadiness(AtomicUsize);

impl Default for Threadiness {
    fn default() -> Self {
        Threadiness(AtomicUsize::new(THREADINESS_ANY))
    }
}

impl Threadiness {
    // One-way transition from ANY to `target`; succeeds if already at `target`.
    fn try_become(&self, target: usize) -> Result<(), ()> {
        match self.0.compare_exchange(
            /* current */ THREADINESS_ANY,
            /* new */ target,
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            Ok(_) => Ok(()),
            // Already in the requested state: idempotent success.
            Err(x) if x == target => Ok(()),
            Err(_) => Err(()),
        }
    }

    /// Attempts to switch the threadiness to singlethreaded-only mode.
    /// Will fail iff a prior call to `require_multithreaded` was made.
    fn require_singlethreaded(&self) -> Result<(), ()> {
        self.try_become(THREADINESS_SINGLE)
    }

    /// Attempts to switch the threadiness to multithreaded mode.
    /// Will fail iff a prior call to `require_singlethreaded` was made.
    fn require_multithreaded(&self) -> Result<(), ()> {
        self.try_become(THREADINESS_MULTI)
    }
}

// Real monotonic clock vs. test-controlled fake clock (nanoseconds).
enum ExecutorTime {
    RealTime,
    FakeTime(AtomicI64),
}

// State shared between the Executor, its EHandles, and worker threads.
struct Inner {
    port: zx::Port,
    done: AtomicBool,
    threadiness: Threadiness,
    threads: Mutex<Vec<thread::JoinHandle<()>>>,
    receivers: Mutex<Slab<Arc<dyn PacketReceiver>>>,
    ready_tasks: SegQueue<Arc<Task>>,
    time: ExecutorTime,
}

// A scheduled timer; holds its owner weakly so dropped timers become defunct.
struct TimeWaker {
    time: Time,
    waker_and_bool: Weak<(AtomicWaker, AtomicBool)>,
}

impl TimeWaker {
    fn wake(&self) {
        if let Some(wb) = self.waker_and_bool.upgrade() {
            // Set the "fired" flag before waking so the woken task observes it.
            wb.1.store(true, Ordering::SeqCst);
            wb.0.wake();
        }
    }
}

impl Ord for TimeWaker {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.time.cmp(&other.time).reverse() // Reverse to get min-heap rather than max
    }
}

impl PartialOrd for TimeWaker {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Eq for TimeWaker {}

// N.B.: two TimerWakers can be equal even if they don't have the same
// waker_and_bool. This is fine since BinaryHeap doesn't deduplicate.
impl PartialEq for TimeWaker { fn eq(&self, other: &Self) -> bool { self.time == other.time } } impl Inner { fn poll_ready_tasks(&self) { // TODO: loop but don't starve if let Some(task) = self.ready_tasks.try_pop() { let w = waker_ref(&task); task.future.try_poll(&mut Context::from_waker(&w)); } } fn spawn(arc_self: &Arc<Self>, future: FutureObj<'static, ()>) { let task = Arc::new(Task { future: AtomicFuture::new(future), executor: arc_self.clone() }); arc_self.ready_tasks.push(task); arc_self.notify_task_ready(); } fn spawn_local(arc_self: &Arc<Self>, future: LocalFutureObj<'static, ()>) { arc_self.threadiness.require_singlethreaded().expect( "Error: called `spawn_local` after calling `run` on executor. \ Use `spawn` or `run_singlethreaded` instead.", ); Inner::spawn( arc_self, // Unsafety: we've confirmed that the boxed futures here will never be used // across multiple threads, so we can safely convert from a non-`Send`able // future to a `Send`able one. unsafe { future.into_future_obj() }, ) } fn notify_task_ready(&self) { // TODO: optimize so that this function doesn't push new items onto // the queue if all worker threads are already awake self.notify_id(TASK_READY_WAKEUP_ID); } fn notify_empty(&self) { self.notify_id(EMPTY_WAKEUP_ID); } fn notify_id(&self, id: u64) { let up = zx::UserPacket::from_u8_array([0; 32]); let packet = zx::Packet::from_user_packet(id, 0 /* status??? */, up); if let Err(e) = self.port.queue(&packet) { // TODO: logging eprintln!("Failed to queue notify in port: {:?}", e); } } fn deliver_packet(&self, key: usize, packet: zx::Packet) { let receiver = match self.receivers.lock().get(key) { // Clone the `Arc` so that we don't hold the lock // any longer than absolutely necessary. // The `receive_packet` impl may be arbitrarily complex. 
Some(receiver) => receiver.clone(), None => return, }; receiver.receive_packet(packet); } fn now(&self) -> Time { match &self.time { ExecutorTime::RealTime => Time::from_zx(zx::Time::get(zx::ClockId::Monotonic)), ExecutorTime::FakeTime(t) => Time::from_nanos(t.load(Ordering::Relaxed)), } } fn set_fake_time(&self, new: Time) { match &self.time { ExecutorTime::RealTime => { panic!("Error: called `advance_fake_time` on an executor using actual time.") } ExecutorTime::FakeTime(t) => t.store(new.into_nanos(), Ordering::Relaxed), } } fn require_real_time(&self) -> Result<(), ()> { match self.time { ExecutorTime::RealTime => Ok(()), ExecutorTime::FakeTime(_) => Err(()), } } } struct Task { future: AtomicFuture, executor: Arc<Inner>, } impl ArcWake for Task { fn wake_by_ref(arc_self: &Arc<Self>) { arc_self.executor.ready_tasks.push(arc_self.clone()); arc_self.executor.notify_task_ready(); } } #[cfg(test)] mod tests { use core::task::{Context, Waker}; use fuchsia_zircon::{self as zx, AsHandleRef, DurationNum}; use futures::{future::poll_fn, Future, Poll}; use std::cell::{Cell, RefCell}; use std::rc::Rc; use super::*; use crate::{on_signals::OnSignals, timer::Timer}; fn time_operations_param(zxt1: zx::Time, zxt2: zx::Time, d: zx::Duration) { let t1 = Time::from_zx(zxt1); let t2 = Time::from_zx(zxt2); assert_eq!(t1.into_zx(), zxt1); assert_eq!(Time::from_zx(zx::Time::INFINITE), Time::INFINITE); assert_eq!(Time::from_zx(zx::Time::INFINITE_PAST), Time::INFINITE_PAST); assert_eq!(zxt1 - zxt2, t1 - t2); assert_eq!(zxt1 + d, (t1 + d).into_zx()); assert_eq!(d + zxt1, (d + t1).into_zx()); assert_eq!(zxt1 - d, (t1 - d).into_zx()); let mut zxt = zxt1; let mut t = t1; t += d; zxt += d; assert_eq!(zxt, t.into_zx()); t -= d; zxt -= d; assert_eq!(zxt, t.into_zx()); } #[test] fn time_operations() { time_operations_param(zx::Time::from_nanos(0), zx::Time::from_nanos(1000), 12.seconds()); time_operations_param( zx::Time::from_nanos(-100000), zx::Time::from_nanos(65324), (-785).hours(), ); } 
#[test] fn time_now_real_time() { let _executor = Executor::new().unwrap(); let t1 = zx::Time::after(0.seconds()); let t2 = Time::now().into_zx(); let t3 = zx::Time::after(0.seconds()); assert!(t1 <= t2); assert!(t2 <= t3); } #[test] fn time_now_fake_time() { let executor = Executor::new_with_fake_time().unwrap(); let t1 = Time::from_zx(zx::Time::from_nanos(0)); executor.set_fake_time(t1); assert_eq!(Time::now(), t1); let t2 = Time::from_zx(zx::Time::from_nanos(1000)); executor.set_fake_time(t2); assert_eq!(Time::now(), t2); } #[test] fn time_after_overflow() { let executor = Executor::new_with_fake_time().unwrap(); executor.set_fake_time(Time::INFINITE - 100.nanos()); assert_eq!(Time::after(200.seconds()), Time::INFINITE); executor.set_fake_time(Time::INFINITE_PAST + 100.nanos()); assert_eq!(Time::after((-200).seconds()), Time::INFINITE_PAST); } fn run_until_stalled<F>(executor: &mut Executor, fut: &mut F) where F: Future + Unpin, { loop { match executor.run_one_step(fut) { None => return, Some(Poll::Pending) => { /* continue */ } Some(Poll::Ready(_)) => panic!("executor stopped"), } } } fn run_until_done<F>(executor: &mut Executor, fut: &mut F) -> F::Output where F: Future + Unpin, { loop { match executor.run_one_step(fut) { None => panic!("executor stalled"), Some(Poll::Pending) => { /* continue */ } Some(Poll::Ready(res)) => return res, } } } // Runs a future that suspends and returns after being resumed. 
#[test] fn stepwise_two_steps() { let fut_step = Cell::new(0); let fut_waker: Rc<RefCell<Option<Waker>>> = Rc::new(RefCell::new(None)); let fut_fn = |cx: &mut Context| { fut_waker.borrow_mut().replace(cx.waker().clone()); match fut_step.get() { 0 => { fut_step.set(1); Poll::Pending } 1 => { fut_step.set(2); Poll::Ready(()) } _ => panic!("future called after done"), } }; let fut = poll_fn(fut_fn); pin_mut!(fut); let mut executor = Executor::new_with_fake_time().unwrap(); executor.wake_main_future(); assert_eq!(executor.is_waiting(), WaitState::Ready); assert_eq!(fut_step.get(), 0); assert_eq!(executor.run_one_step(&mut fut), Some(Poll::Pending)); assert_eq!(executor.is_waiting(), WaitState::Waiting(Time::INFINITE)); assert_eq!(executor.run_one_step(&mut fut), None); assert_eq!(fut_step.get(), 1); fut_waker.borrow_mut().take().unwrap().wake(); assert_eq!(executor.is_waiting(), WaitState::Ready); assert_eq!(executor.run_one_step(&mut fut), Some(Poll::Ready(()))); assert_eq!(fut_step.get(), 2); } #[test] // Runs a future that waits on a timer. fn stepwise_timer() { let mut executor = Executor::new_with_fake_time().unwrap(); executor.set_fake_time(Time::from_nanos(0)); let fut = Timer::new(Time::after(1000.nanos())); pin_mut!(fut); executor.wake_main_future(); run_until_stalled(&mut executor, &mut fut); assert_eq!(Time::now(), Time::from_nanos(0)); assert_eq!(executor.is_waiting(), WaitState::Waiting(Time::from_nanos(1000))); executor.set_fake_time(Time::from_nanos(1000)); assert_eq!(Time::now(), Time::from_nanos(1000)); assert_eq!(executor.is_waiting(), WaitState::Ready); assert_eq!(run_until_done(&mut executor, &mut fut), ()); } // Runs a future that waits on an event. 
#[test] fn stepwise_event() { let mut executor = Executor::new_with_fake_time().unwrap(); let event = zx::Event::create().unwrap(); let fut = OnSignals::new(&event, zx::Signals::USER_0); pin_mut!(fut); executor.wake_main_future(); run_until_stalled(&mut executor, &mut fut); assert_eq!(executor.is_waiting(), WaitState::Waiting(Time::INFINITE)); event.signal_handle(zx::Signals::NONE, zx::Signals::USER_0).unwrap(); assert!(run_until_done(&mut executor, &mut fut).is_ok()); } // Using `run_until_stalled` does not modify the order of events // compared to normal execution. #[test] fn run_until_stalled_preserves_order() { let mut executor = Executor::new_with_fake_time().unwrap(); let spawned_fut_completed = Arc::new(AtomicBool::new(false)); let spawned_fut_completed_writer = spawned_fut_completed.clone(); let spawned_fut = Box::pin(async move { await!(Timer::new(Time::after(5.seconds()))); spawned_fut_completed_writer.store(true, Ordering::SeqCst); }); let main_fut = async { await!(Timer::new(Time::after(10.seconds()))); }; pin_mut!(main_fut); spawn(spawned_fut); assert_eq!(executor.run_until_stalled(&mut main_fut), Poll::Pending); executor.set_fake_time(Time::after(15.seconds())); executor.wake_expired_timers(); // The timer in `spawned_fut` should fire first, then the // timer in `main_fut`. assert_eq!(executor.run_until_stalled(&mut main_fut), Poll::Ready(())); assert_eq!(spawned_fut_completed.load(Ordering::SeqCst), true); } }
33.761111
101
0.588824
567945b99f267571ed3b39dd4096c29b06c050a4
5,666
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use gio_sys; use glib::object::Cast; use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::GString; use glib_sys; use std::boxed::Box as Box_; use std::fmt; use std::mem::transmute; use TlsPasswordFlags; glib_wrapper! { pub struct TlsPassword(Object<gio_sys::GTlsPassword, gio_sys::GTlsPasswordClass, TlsPasswordClass>); match fn { get_type => || gio_sys::g_tls_password_get_type(), } } impl TlsPassword { pub fn new(flags: TlsPasswordFlags, description: &str) -> TlsPassword { unsafe { from_glib_full(gio_sys::g_tls_password_new( flags.to_glib(), description.to_glib_none().0, )) } } } pub const NONE_TLS_PASSWORD: Option<&TlsPassword> = None; pub trait TlsPasswordExt: 'static { fn get_description(&self) -> Option<GString>; fn get_flags(&self) -> TlsPasswordFlags; fn get_warning(&self) -> Option<GString>; fn set_description(&self, description: &str); fn set_flags(&self, flags: TlsPasswordFlags); //fn set_value_full(&self, value: &[u8]); fn set_warning(&self, warning: &str); fn connect_property_description_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_flags_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_warning_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; } impl<O: IsA<TlsPassword>> TlsPasswordExt for O { fn get_description(&self) -> Option<GString> { unsafe { from_glib_none(gio_sys::g_tls_password_get_description( self.as_ref().to_glib_none().0, )) } } fn get_flags(&self) -> TlsPasswordFlags { unsafe { from_glib(gio_sys::g_tls_password_get_flags( self.as_ref().to_glib_none().0, )) } } fn get_warning(&self) -> Option<GString> { unsafe { from_glib_none(gio_sys::g_tls_password_get_warning( self.as_ref().to_glib_none().0, )) } } fn set_description(&self, description: &str) { unsafe 
{ gio_sys::g_tls_password_set_description( self.as_ref().to_glib_none().0, description.to_glib_none().0, ); } } fn set_flags(&self, flags: TlsPasswordFlags) { unsafe { gio_sys::g_tls_password_set_flags(self.as_ref().to_glib_none().0, flags.to_glib()); } } //fn set_value_full(&self, value: &[u8]) { // unsafe { TODO: call gio_sys:g_tls_password_set_value_full() } //} fn set_warning(&self, warning: &str) { unsafe { gio_sys::g_tls_password_set_warning( self.as_ref().to_glib_none().0, warning.to_glib_none().0, ); } } fn connect_property_description_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_description_trampoline<P, F: Fn(&P) + 'static>( this: *mut gio_sys::GTlsPassword, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<TlsPassword>, { let f: &F = &*(f as *const F); f(&TlsPassword::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::description\0".as_ptr() as *const _, Some(transmute(notify_description_trampoline::<Self, F> as usize)), Box_::into_raw(f), ) } } fn connect_property_flags_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_flags_trampoline<P, F: Fn(&P) + 'static>( this: *mut gio_sys::GTlsPassword, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<TlsPassword>, { let f: &F = &*(f as *const F); f(&TlsPassword::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::flags\0".as_ptr() as *const _, Some(transmute(notify_flags_trampoline::<Self, F> as usize)), Box_::into_raw(f), ) } } fn connect_property_warning_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_warning_trampoline<P, F: Fn(&P) + 'static>( this: *mut gio_sys::GTlsPassword, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<TlsPassword>, { let f: &F = &*(f as 
*const F); f(&TlsPassword::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::warning\0".as_ptr() as *const _, Some(transmute(notify_warning_trampoline::<Self, F> as usize)), Box_::into_raw(f), ) } } } impl fmt::Display for TlsPassword { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "TlsPassword") } }
30.462366
104
0.559478
4bcda3d91e9589e917c98557a682e473c5323651
6,107
#[doc = "Reader of register ATESTCTL"] pub type R = crate::R<u32, super::ATESTCTL>; #[doc = "Writer for register ATESTCTL"] pub type W = crate::W<u32, super::ATESTCTL>; #[doc = "Register ATESTCTL `reset()`'s with value 0"] impl crate::ResetValue for super::ATESTCTL { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `SCLK_LF_AUX_EN`"] pub type SCLK_LF_AUX_EN_R = crate::R<bool, bool>; #[doc = "Write proxy for field `SCLK_LF_AUX_EN`"] pub struct SCLK_LF_AUX_EN_W<'a> { w: &'a mut W, } impl<'a> SCLK_LF_AUX_EN_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31); self.w } } #[doc = "Reader of field `RESERVED16`"] pub type RESERVED16_R = crate::R<u16, u16>; #[doc = "Write proxy for field `RESERVED16`"] pub struct RESERVED16_W<'a> { w: &'a mut W, } impl<'a> RESERVED16_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !(0x7fff << 16)) | (((value as u32) & 0x7fff) << 16); self.w } } #[doc = "Reader of field `TEST_RCOSCMF`"] pub type TEST_RCOSCMF_R = crate::R<u8, u8>; #[doc = "Write proxy for field `TEST_RCOSCMF`"] pub struct TEST_RCOSCMF_W<'a> { w: &'a mut W, } impl<'a> TEST_RCOSCMF_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 14)) | (((value as u32) & 0x03) << 14); self.w } } #[doc = "Reader of field `ATEST_RCOSCMF`"] pub type ATEST_RCOSCMF_R = crate::R<u8, u8>; #[doc = "Write proxy for field `ATEST_RCOSCMF`"] pub struct ATEST_RCOSCMF_W<'a> { w: &'a mut W, } impl<'a> 
ATEST_RCOSCMF_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 12)) | (((value as u32) & 0x03) << 12); self.w } } #[doc = "Reader of field `RESERVED0`"] pub type RESERVED0_R = crate::R<u16, u16>; #[doc = "Write proxy for field `RESERVED0`"] pub struct RESERVED0_W<'a> { w: &'a mut W, } impl<'a> RESERVED0_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !0x0fff) | ((value as u32) & 0x0fff); self.w } } impl R { #[doc = "Bit 31 - 31:31\\] Enable 32 kHz clock to AUX_COMPB."] #[inline(always)] pub fn sclk_lf_aux_en(&self) -> SCLK_LF_AUX_EN_R { SCLK_LF_AUX_EN_R::new(((self.bits >> 31) & 0x01) != 0) } #[doc = "Bits 16:30 - 30:16\\] Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."] #[inline(always)] pub fn reserved16(&self) -> RESERVED16_R { RESERVED16_R::new(((self.bits >> 16) & 0x7fff) as u16) } #[doc = "Bits 14:15 - 15:14\\] Test mode control for RCOSC_MF 0x0: test modes disabled 0x1: boosted bias current into self biased inverter 0x2: clock qualification disabled 0x3: boosted bias current into self biased inverter + clock qualification disabled"] #[inline(always)] pub fn test_rcoscmf(&self) -> TEST_RCOSCMF_R { TEST_RCOSCMF_R::new(((self.bits >> 14) & 0x03) as u8) } #[doc = "Bits 12:13 - 13:12\\] ATEST control for RCOSC_MF 0x0: ATEST disabled 0x1: ATEST enabled, VDD_LOCAL connected, ATEST internal to **RCOSC_MF* enabled to send out 2MHz clock. 0x2: ATEST disabled 0x3: ATEST enabled, bias current connected, ATEST internal to **RCOSC_MF* enabled to send out 2MHz clock."] #[inline(always)] pub fn atest_rcoscmf(&self) -> ATEST_RCOSCMF_R { ATEST_RCOSCMF_R::new(((self.bits >> 12) & 0x03) as u8) } #[doc = "Bits 0:11 - 11:0\\] Software should not rely on the value of a reserved. 
Writing any other value than the reset value may result in undefined behavior."] #[inline(always)] pub fn reserved0(&self) -> RESERVED0_R { RESERVED0_R::new((self.bits & 0x0fff) as u16) } } impl W { #[doc = "Bit 31 - 31:31\\] Enable 32 kHz clock to AUX_COMPB."] #[inline(always)] pub fn sclk_lf_aux_en(&mut self) -> SCLK_LF_AUX_EN_W { SCLK_LF_AUX_EN_W { w: self } } #[doc = "Bits 16:30 - 30:16\\] Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."] #[inline(always)] pub fn reserved16(&mut self) -> RESERVED16_W { RESERVED16_W { w: self } } #[doc = "Bits 14:15 - 15:14\\] Test mode control for RCOSC_MF 0x0: test modes disabled 0x1: boosted bias current into self biased inverter 0x2: clock qualification disabled 0x3: boosted bias current into self biased inverter + clock qualification disabled"] #[inline(always)] pub fn test_rcoscmf(&mut self) -> TEST_RCOSCMF_W { TEST_RCOSCMF_W { w: self } } #[doc = "Bits 12:13 - 13:12\\] ATEST control for RCOSC_MF 0x0: ATEST disabled 0x1: ATEST enabled, VDD_LOCAL connected, ATEST internal to **RCOSC_MF* enabled to send out 2MHz clock. 0x2: ATEST disabled 0x3: ATEST enabled, bias current connected, ATEST internal to **RCOSC_MF* enabled to send out 2MHz clock."] #[inline(always)] pub fn atest_rcoscmf(&mut self) -> ATEST_RCOSCMF_W { ATEST_RCOSCMF_W { w: self } } #[doc = "Bits 0:11 - 11:0\\] Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."] #[inline(always)] pub fn reserved0(&mut self) -> RESERVED0_W { RESERVED0_W { w: self } } }
38.898089
277
0.62764
03e67fdba78075921ac5d9906f9dca2000770e89
600
use pulldown_cmark::{Parser, Options, html}; use wasm_bindgen::prelude::*; // Markdown -> HTML #[wasm_bindgen] pub fn render(markdown_input: &str) -> String { // Set up options and parser. Strikethroughs are not part of the CommonMark standard // and we therefore must enable it explicitly. let mut options = Options::empty(); options.insert(Options::ENABLE_STRIKETHROUGH); let parser = Parser::new_ext(markdown_input, options); // Write to String buffer. let mut html_output = String::new(); html::push_html(&mut html_output, parser); return html_output; }
33.333333
88
0.701667
e45595cd97c79c4738c92a905c4f330a6336d54b
14,952
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use anyhow::Error; use argh::FromArgs; use carnelian::{ color::Color, drawing::{ load_font, path_for_corner_knockouts, path_for_rectangle, DisplayRotation, FontFace, GlyphMap, Paint, Text, }, input::{self}, make_app_assistant, make_message, render::{ BlendMode, Composition, Context as RenderContext, Fill, FillRule, Layer, PreClear, Raster, RenderExt, Style, }, App, AppAssistant, Coord, Message, Point, Rect, Size, ViewAssistant, ViewAssistantContext, ViewAssistantPtr, ViewKey, }; use euclid::default::Vector2D; use fuchsia_zircon::{AsHandleRef, Event, Signals, Time}; use std::path::PathBuf; fn display_rotation_from_str(s: &str) -> Result<DisplayRotation, String> { match s { "0" => Ok(DisplayRotation::Deg0), "90" => Ok(DisplayRotation::Deg90), "180" => Ok(DisplayRotation::Deg180), "270" => Ok(DisplayRotation::Deg270), _ => Err(format!("Invalid DisplayRotation {}", s)), } } /// Button Sample #[derive(Debug, FromArgs)] #[argh(name = "recovery")] struct Args { /// rotate #[argh(option, from_str_fn(display_rotation_from_str))] rotation: Option<DisplayRotation>, } /// enum that defines all messages sent with `App::queue_message` that /// the button view assistant will understand and process. 
pub enum ButtonMessages { Pressed(Time), } #[derive(Default)] struct ButtonAppAssistant { display_rotation: DisplayRotation, } impl AppAssistant for ButtonAppAssistant { fn setup(&mut self) -> Result<(), Error> { let args: Args = argh::from_env(); self.display_rotation = args.rotation.unwrap_or(DisplayRotation::Deg0); Ok(()) } fn create_view_assistant(&mut self, _: ViewKey) -> Result<ViewAssistantPtr, Error> { Ok(Box::new(ButtonViewAssistant::new()?)) } fn get_display_rotation(&self) -> DisplayRotation { self.display_rotation } } fn raster_for_rectangle(bounds: &Rect, render_context: &mut RenderContext) -> Raster { let mut raster_builder = render_context.raster_builder().expect("raster_builder"); raster_builder.add(&path_for_rectangle(bounds, render_context), None); raster_builder.build() } fn raster_for_corner_knockouts( bounds: &Rect, corner_radius: Coord, render_context: &mut RenderContext, ) -> Raster { let path = path_for_corner_knockouts(bounds, corner_radius, render_context); let mut raster_builder = render_context.raster_builder().expect("raster_builder"); raster_builder.add(&path, None); raster_builder.build() } struct RasterAndStyle { location: Point, raster: Raster, style: Style, } struct Button { pub font_size: u32, pub padding: f32, bounds: Rect, bg_color: Color, bg_color_active: Color, bg_color_disabled: Color, fg_color: Color, fg_color_disabled: Color, tracking_pointer: Option<input::pointer::PointerId>, active: bool, focused: bool, glyphs: GlyphMap, label_text: String, face: FontFace, label: Option<Text>, } impl Button { pub fn new(text: &str) -> Result<Button, Error> { let face = load_font(PathBuf::from("/pkg/data/fonts/RobotoSlab-Regular.ttf"))?; let button = Button { font_size: 20, padding: 5.0, bounds: Rect::zero(), fg_color: Color::white(), bg_color: Color::from_hash_code("#B7410E")?, bg_color_active: Color::from_hash_code("#f0703c")?, fg_color_disabled: Color::from_hash_code("#A0A0A0")?, bg_color_disabled: Color::from_hash_code("#C0C0C0")?, 
tracking_pointer: None, active: false, focused: false, glyphs: GlyphMap::new(), label_text: text.to_string(), face, label: None, }; Ok(button) } pub fn set_focused(&mut self, focused: bool) { self.focused = focused; if !focused { self.active = false; self.tracking_pointer = None; } } fn create_rasters_and_styles( &mut self, render_context: &mut RenderContext, ) -> Result<(RasterAndStyle, RasterAndStyle), Error> { // set up paint with different backgrounds depending on whether the button // is active. The active state is true when a pointer has gone down in the // button's bounds and the pointer has not moved outside the bounds since. let paint = if self.focused { Paint { fg: self.fg_color, bg: if self.active { self.bg_color_active } else { self.bg_color }, } } else { Paint { fg: self.fg_color_disabled, bg: self.bg_color_disabled } }; self.label = Some(Text::new( render_context, &self.label_text, self.font_size as f32, 100, &self.face, &mut self.glyphs, )); let label = self.label.as_ref().expect("label"); // calculate button size based on label's text size // plus padding. let bounding_box_size = label.bounding_box.size; let button_label_size = Size::new(bounding_box_size.width, self.font_size as f32); let double_padding = 2.0 * self.padding; let button_size = button_label_size + Size::new(double_padding, double_padding); let half_size = Size::new(button_size.width * 0.5, button_size.height * 0.5); let button_origin = Point::zero() - half_size.to_vector(); let button_bounds = Rect::new(button_origin, button_size).round_out(); // record bounds for hit testing self.bounds = button_bounds; // Calculate the label offset in display aligned coordinates, since the label, // as a raster, is pre-rotated and we just need to translate it to align with the buttons // bounding box. 
let center = self.bounds.center(); let label_center = label.bounding_box.center().to_vector(); let label_offset = center - label_center; let raster = raster_for_rectangle(&self.bounds, render_context); let button_raster_and_style = RasterAndStyle { location: Point::zero(), raster, style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(paint.bg), blend_mode: BlendMode::Over, }, }; let label_raster_and_style = RasterAndStyle { location: label_offset, raster: label.raster.clone(), style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(paint.fg), blend_mode: BlendMode::Over, }, }; Ok((button_raster_and_style, label_raster_and_style)) } pub fn handle_pointer_event( &mut self, context: &mut ViewAssistantContext, pointer_event: &input::pointer::Event, ) { if !self.focused { return; } let bounds = self .bounds .translate(Vector2D::new(context.size.width * 0.5, context.size.height * 0.7)); if self.tracking_pointer.is_none() { match pointer_event.phase { input::pointer::Phase::Down(location) => { self.active = bounds.contains(location.to_f32()); if self.active { self.tracking_pointer = Some(pointer_event.pointer_id.clone()); } } _ => (), } } else { let tracking_pointer = self.tracking_pointer.as_ref().expect("tracking_pointer"); if tracking_pointer == &pointer_event.pointer_id { match pointer_event.phase { input::pointer::Phase::Moved(location) => { self.active = bounds.contains(location.to_f32()); } input::pointer::Phase::Up => { if self.active { context.queue_message(make_message(ButtonMessages::Pressed( Time::get_monotonic(), ))); } self.tracking_pointer = None; self.active = false; } input::pointer::Phase::Remove => { self.active = false; self.tracking_pointer = None; } input::pointer::Phase::Cancel => { self.active = false; self.tracking_pointer = None; } _ => (), } } } } } struct ButtonViewAssistant { focused: bool, bg_color: Color, button: Button, red_light: bool, composition: Composition, } const BUTTON_LABEL: &'static str = "Depress Me"; impl 
ButtonViewAssistant { fn new() -> Result<ButtonViewAssistant, Error> { let bg_color = Color::from_hash_code("#EBD5B3")?; let composition = Composition::new(bg_color); Ok(ButtonViewAssistant { focused: false, bg_color, button: Button::new(BUTTON_LABEL)?, red_light: false, composition, }) } fn target_size(&self, size: Size) -> Size { size } fn button_center(&self, size: Size) -> Point { Point::new(size.width * 0.5, size.height * 0.7) } } impl ViewAssistant for ButtonViewAssistant { fn render( &mut self, render_context: &mut RenderContext, ready_event: Event, context: &ViewAssistantContext, ) -> Result<(), Error> { // Emulate the size that Carnelian passes when the display is rotated let target_size = self.target_size(context.size); // Calculate all locations in the presentation-aligned coordinate space let center_x = target_size.width * 0.5; let min_dimension = target_size.width.min(target_size.height); let font_size = (min_dimension / 5.0).ceil().min(64.0) as u32; let padding = (min_dimension / 20.0).ceil().max(8.0); self.button.padding = padding; self.button.font_size = font_size; let corner_knockouts = raster_for_corner_knockouts(&Rect::from_size(target_size), 10.0, render_context); let corner_knockouts_layer = Layer { raster: corner_knockouts, style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(Color::new()), blend_mode: BlendMode::Over, }, }; // Position and size the indicator in presentation space let indicator_y = target_size.height / 5.0; let indicator_len = target_size.height.min(target_size.width) / 8.0; let indicator_size = Size::new(indicator_len * 2.0, indicator_len); let indicator_pos = Point::new(center_x - indicator_len, indicator_y - indicator_len / 2.0); let indicator_raster = raster_for_rectangle(&Rect::new(Point::zero(), indicator_size), render_context) .translate(indicator_pos.to_vector().to_i32()); let indicator_color = if self.red_light { Color::from_hash_code("#ff0000")? } else { Color::from_hash_code("#00ff00")? 
}; // Create a layer for the indicator using its pre-transformed raster and // transformed position. let indicator_layer = Layer { raster: indicator_raster, style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(indicator_color), blend_mode: BlendMode::Over, }, }; let button_center = self.button_center(target_size); self.button.set_focused(self.focused); // Let the button render itself, returning rasters, styles and zero-relative // positions. let (button_raster_and_style, label_raster_and_style) = self.button.create_rasters_and_styles(render_context)?; // Calculate the button location in presentation space let button_location = button_center + button_raster_and_style.location.to_vector(); // Calculate the label location in presentation space let label_location = button_center + label_raster_and_style.location.to_vector(); // Create layers from the rasters, styles and transformed locations. let button_layer = Layer { raster: button_raster_and_style.raster.translate(button_location.to_vector().to_i32()), style: button_raster_and_style.style, }; let label_layer = Layer { raster: label_raster_and_style.raster.translate(label_location.to_vector().to_i32()), style: label_raster_and_style.style, }; self.composition.replace( .., std::iter::once(corner_knockouts_layer) .chain(std::iter::once(label_layer)) .chain(std::iter::once(button_layer)) .chain(std::iter::once(indicator_layer)), ); let image = render_context.get_current_image(context); let ext = RenderExt { pre_clear: Some(PreClear { color: self.bg_color }), ..Default::default() }; render_context.render(&self.composition, None, image, &ext); ready_event.as_handle_ref().signal(Signals::NONE, Signals::EVENT_SIGNALED)?; Ok(()) } fn handle_message(&mut self, message: Message) { if let Some(button_message) = message.downcast_ref::<ButtonMessages>() { match button_message { ButtonMessages::Pressed(value) => { println!("value = {:#?}", value); self.red_light = !self.red_light } } } } fn handle_pointer_event( &mut 
self, context: &mut ViewAssistantContext, _event: &input::Event, pointer_event: &input::pointer::Event, ) -> Result<(), Error> { self.button.handle_pointer_event(context, &pointer_event); context.request_render(); Ok(()) } fn handle_focus_event( &mut self, context: &mut ViewAssistantContext, focused: bool, ) -> Result<(), Error> { self.focused = focused; context.request_render(); Ok(()) } } fn main() -> Result<(), Error> { fuchsia_trace_provider::trace_provider_create_with_fdio(); App::run(make_app_assistant::<ButtonAppAssistant>()) }
34.215103
100
0.590222
e822444b6a3c24532019d335832f82e78d4433a4
20,214
use ::libc; pub use crate::jmorecfg_h::boolean; pub use crate::jmorecfg_h::FALSE; pub use crate::jmorecfg_h::JCOEF; pub use crate::jmorecfg_h::JDIMENSION; pub use crate::jmorecfg_h::JOCTET; pub use crate::jmorecfg_h::JSAMPLE; pub use crate::jmorecfg_h::UINT16; pub use crate::jmorecfg_h::UINT8; pub use crate::jpegint_h::jpeg_c_coef_controller; pub use crate::jpegint_h::jpeg_c_main_controller; pub use crate::jpegint_h::jpeg_c_prep_controller; pub use crate::jpegint_h::jpeg_color_converter; pub use crate::jpegint_h::jpeg_comp_master; pub use crate::jpegint_h::jpeg_downsampler; pub use crate::jpegint_h::jpeg_entropy_encoder; pub use crate::jpegint_h::jpeg_forward_dct; pub use crate::jpegint_h::jpeg_marker_writer; pub use crate::jpegint_h::CSTATE_RAW_OK; pub use crate::jpegint_h::CSTATE_SCANNING; pub use crate::jpegint_h::CSTATE_START; pub use crate::jpegint_h::JBUF_CRANK_DEST; pub use crate::jpegint_h::JBUF_PASS_THRU; pub use crate::jpegint_h::JBUF_REQUANT; pub use crate::jpegint_h::JBUF_SAVE_AND_PASS; pub use crate::jpegint_h::JBUF_SAVE_SOURCE; pub use crate::jpegint_h::J_BUF_MODE; pub use crate::jpeglib_h::j_common_ptr; pub use crate::jpeglib_h::j_compress_ptr; pub use crate::jpeglib_h::jpeg_common_struct; pub use crate::jpeglib_h::jpeg_component_info; pub use crate::jpeglib_h::jpeg_compress_struct; pub use crate::jpeglib_h::jpeg_destination_mgr; pub use crate::jpeglib_h::jpeg_error_mgr; pub use crate::jpeglib_h::jpeg_memory_mgr; pub use crate::jpeglib_h::jpeg_progress_mgr; pub use crate::jpeglib_h::jpeg_scan_info; pub use crate::jpeglib_h::jvirt_barray_control; pub use crate::jpeglib_h::jvirt_barray_ptr; pub use crate::jpeglib_h::jvirt_sarray_control; pub use crate::jpeglib_h::jvirt_sarray_ptr; pub use crate::jpeglib_h::C2RustUnnamed_2; pub use crate::jpeglib_h::JCS_YCbCr; pub use crate::jpeglib_h::DCTSIZE; pub use crate::jpeglib_h::JBLOCK; pub use crate::jpeglib_h::JBLOCKARRAY; pub use crate::jpeglib_h::JBLOCKROW; pub use crate::jpeglib_h::JCS_CMYK; pub use 
crate::jpeglib_h::JCS_EXT_ABGR; pub use crate::jpeglib_h::JCS_EXT_ARGB; pub use crate::jpeglib_h::JCS_EXT_BGR; pub use crate::jpeglib_h::JCS_EXT_BGRA; pub use crate::jpeglib_h::JCS_EXT_BGRX; pub use crate::jpeglib_h::JCS_EXT_RGB; pub use crate::jpeglib_h::JCS_EXT_RGBA; pub use crate::jpeglib_h::JCS_EXT_RGBX; pub use crate::jpeglib_h::JCS_EXT_XBGR; pub use crate::jpeglib_h::JCS_EXT_XRGB; pub use crate::jpeglib_h::JCS_GRAYSCALE; pub use crate::jpeglib_h::JCS_RGB; pub use crate::jpeglib_h::JCS_RGB565; pub use crate::jpeglib_h::JCS_UNKNOWN; pub use crate::jpeglib_h::JCS_YCCK; pub use crate::jpeglib_h::JDCT_FLOAT; pub use crate::jpeglib_h::JDCT_IFAST; pub use crate::jpeglib_h::JDCT_ISLOW; pub use crate::jpeglib_h::JHUFF_TBL; pub use crate::jpeglib_h::JQUANT_TBL; pub use crate::jpeglib_h::JSAMPARRAY; pub use crate::jpeglib_h::JSAMPIMAGE; pub use crate::jpeglib_h::JSAMPROW; pub use crate::jpeglib_h::J_COLOR_SPACE; pub use crate::jpeglib_h::J_DCT_METHOD; pub use crate::src::jcapimin::jpeg_suppress_tables; pub use crate::src::jcinit::jinit_compress_master; pub use crate::src::jerror::JERR_ARITH_NOTIMPL; pub use crate::src::jerror::JERR_BAD_ALIGN_TYPE; pub use crate::src::jerror::JERR_BAD_ALLOC_CHUNK; pub use crate::src::jerror::JERR_BAD_BUFFER_MODE; pub use crate::src::jerror::JERR_BAD_COMPONENT_ID; pub use crate::src::jerror::JERR_BAD_CROP_SPEC; pub use crate::src::jerror::JERR_BAD_DCTSIZE; pub use crate::src::jerror::JERR_BAD_DCT_COEF; pub use crate::src::jerror::JERR_BAD_HUFF_TABLE; pub use crate::src::jerror::JERR_BAD_IN_COLORSPACE; pub use crate::src::jerror::JERR_BAD_J_COLORSPACE; pub use crate::src::jerror::JERR_BAD_LENGTH; pub use crate::src::jerror::JERR_BAD_LIB_VERSION; pub use crate::src::jerror::JERR_BAD_MCU_SIZE; pub use crate::src::jerror::JERR_BAD_PARAM; pub use crate::src::jerror::JERR_BAD_PARAM_VALUE; pub use crate::src::jerror::JERR_BAD_POOL_ID; pub use crate::src::jerror::JERR_BAD_PRECISION; pub use crate::src::jerror::JERR_BAD_PROGRESSION; pub use 
crate::src::jerror::JERR_BAD_PROG_SCRIPT; pub use crate::src::jerror::JERR_BAD_SAMPLING; pub use crate::src::jerror::JERR_BAD_SCAN_SCRIPT; pub use crate::src::jerror::JERR_BAD_STATE; pub use crate::src::jerror::JERR_BAD_STRUCT_SIZE; pub use crate::src::jerror::JERR_BAD_VIRTUAL_ACCESS; pub use crate::src::jerror::JERR_BUFFER_SIZE; pub use crate::src::jerror::JERR_CANT_SUSPEND; pub use crate::src::jerror::JERR_CCIR601_NOTIMPL; pub use crate::src::jerror::JERR_COMPONENT_COUNT; pub use crate::src::jerror::JERR_CONVERSION_NOTIMPL; pub use crate::src::jerror::JERR_DAC_INDEX; pub use crate::src::jerror::JERR_DAC_VALUE; pub use crate::src::jerror::JERR_DHT_INDEX; pub use crate::src::jerror::JERR_DQT_INDEX; pub use crate::src::jerror::JERR_EMPTY_IMAGE; pub use crate::src::jerror::JERR_EMS_READ; pub use crate::src::jerror::JERR_EMS_WRITE; pub use crate::src::jerror::JERR_EOI_EXPECTED; pub use crate::src::jerror::JERR_FILE_READ; pub use crate::src::jerror::JERR_FILE_WRITE; pub use crate::src::jerror::JERR_FRACT_SAMPLE_NOTIMPL; pub use crate::src::jerror::JERR_HUFF_CLEN_OVERFLOW; pub use crate::src::jerror::JERR_HUFF_MISSING_CODE; pub use crate::src::jerror::JERR_IMAGE_TOO_BIG; pub use crate::src::jerror::JERR_INPUT_EMPTY; pub use crate::src::jerror::JERR_INPUT_EOF; pub use crate::src::jerror::JERR_MISMATCHED_QUANT_TABLE; pub use crate::src::jerror::JERR_MISSING_DATA; pub use crate::src::jerror::JERR_MODE_CHANGE; pub use crate::src::jerror::JERR_NOTIMPL; pub use crate::src::jerror::JERR_NOT_COMPILED; pub use crate::src::jerror::JERR_NO_BACKING_STORE; pub use crate::src::jerror::JERR_NO_HUFF_TABLE; pub use crate::src::jerror::JERR_NO_IMAGE; pub use crate::src::jerror::JERR_NO_QUANT_TABLE; pub use crate::src::jerror::JERR_NO_SOI; pub use crate::src::jerror::JERR_OUT_OF_MEMORY; pub use crate::src::jerror::JERR_QUANT_COMPONENTS; pub use crate::src::jerror::JERR_QUANT_FEW_COLORS; pub use crate::src::jerror::JERR_QUANT_MANY_COLORS; pub use crate::src::jerror::JERR_SOF_DUPLICATE; pub 
use crate::src::jerror::JERR_SOF_NO_SOS; pub use crate::src::jerror::JERR_SOF_UNSUPPORTED; pub use crate::src::jerror::JERR_SOI_DUPLICATE; pub use crate::src::jerror::JERR_SOS_NO_SOF; pub use crate::src::jerror::JERR_TFILE_CREATE; pub use crate::src::jerror::JERR_TFILE_READ; pub use crate::src::jerror::JERR_TFILE_SEEK; pub use crate::src::jerror::JERR_TFILE_WRITE; pub use crate::src::jerror::JERR_TOO_LITTLE_DATA; pub use crate::src::jerror::JERR_UNKNOWN_MARKER; pub use crate::src::jerror::JERR_UNSUPPORTED_SUSPEND; pub use crate::src::jerror::JERR_VIRTUAL_BUG; pub use crate::src::jerror::JERR_WIDTH_OVERFLOW; pub use crate::src::jerror::JERR_XMS_READ; pub use crate::src::jerror::JERR_XMS_WRITE; pub use crate::src::jerror::JMSG_COPYRIGHT; pub use crate::src::jerror::JMSG_LASTMSGCODE; pub use crate::src::jerror::JMSG_NOMESSAGE; pub use crate::src::jerror::JMSG_VERSION; pub use crate::src::jerror::JTRC_16BIT_TABLES; pub use crate::src::jerror::JTRC_ADOBE; pub use crate::src::jerror::JTRC_APP0; pub use crate::src::jerror::JTRC_APP14; pub use crate::src::jerror::JTRC_DAC; pub use crate::src::jerror::JTRC_DHT; pub use crate::src::jerror::JTRC_DQT; pub use crate::src::jerror::JTRC_DRI; pub use crate::src::jerror::JTRC_EMS_CLOSE; pub use crate::src::jerror::JTRC_EMS_OPEN; pub use crate::src::jerror::JTRC_EOI; pub use crate::src::jerror::JTRC_HUFFBITS; pub use crate::src::jerror::JTRC_JFIF; pub use crate::src::jerror::JTRC_JFIF_BADTHUMBNAILSIZE; pub use crate::src::jerror::JTRC_JFIF_EXTENSION; pub use crate::src::jerror::JTRC_JFIF_THUMBNAIL; pub use crate::src::jerror::JTRC_MISC_MARKER; pub use crate::src::jerror::JTRC_PARMLESS_MARKER; pub use crate::src::jerror::JTRC_QUANTVALS; pub use crate::src::jerror::JTRC_QUANT_3_NCOLORS; pub use crate::src::jerror::JTRC_QUANT_NCOLORS; pub use crate::src::jerror::JTRC_QUANT_SELECTED; pub use crate::src::jerror::JTRC_RECOVERY_ACTION; pub use crate::src::jerror::JTRC_RST; pub use crate::src::jerror::JTRC_SMOOTH_NOTIMPL; pub use 
crate::src::jerror::JTRC_SOF; pub use crate::src::jerror::JTRC_SOF_COMPONENT; pub use crate::src::jerror::JTRC_SOI; pub use crate::src::jerror::JTRC_SOS; pub use crate::src::jerror::JTRC_SOS_COMPONENT; pub use crate::src::jerror::JTRC_SOS_PARAMS; pub use crate::src::jerror::JTRC_TFILE_CLOSE; pub use crate::src::jerror::JTRC_TFILE_OPEN; pub use crate::src::jerror::JTRC_THUMB_JPEG; pub use crate::src::jerror::JTRC_THUMB_PALETTE; pub use crate::src::jerror::JTRC_THUMB_RGB; pub use crate::src::jerror::JTRC_UNKNOWN_IDS; pub use crate::src::jerror::JTRC_XMS_CLOSE; pub use crate::src::jerror::JTRC_XMS_OPEN; pub use crate::src::jerror::JWRN_ADOBE_XFORM; pub use crate::src::jerror::JWRN_BOGUS_ICC; pub use crate::src::jerror::JWRN_BOGUS_PROGRESSION; pub use crate::src::jerror::JWRN_EXTRANEOUS_DATA; pub use crate::src::jerror::JWRN_HIT_MARKER; pub use crate::src::jerror::JWRN_HUFF_BAD_CODE; pub use crate::src::jerror::JWRN_JFIF_MAJOR; pub use crate::src::jerror::JWRN_JPEG_EOF; pub use crate::src::jerror::JWRN_MUST_RESYNC; pub use crate::src::jerror::JWRN_NOT_SEQUENTIAL; pub use crate::src::jerror::JWRN_TOO_MUCH_DATA; pub use crate::stddef_h::size_t; pub use crate::stddef_h::NULL; pub use crate::stdlib::C2RustUnnamed_0; /* Main entry points for compression */ /* * jcapistd.c * * Copyright (C) 1994-1996, Thomas G. Lane. * This file is part of the Independent JPEG Group's software. * mozjpeg Modifications: * Copyright (C) 2014, Mozilla Corporation. * For conditions of distribution and use, see the accompanying README file. * * This file contains application interface code for the compression half * of the JPEG library. These are the "standard" API routines that are * used in the normal full-compression case. They are not used by a * transcoding-only application. Note that if an application links in * jpeg_start_compress, it will end up linking in the entire compressor. 
* We thus must separate this file from jcapimin.c to avoid linking the * whole compression library into a transcoder. */ /* * Compression initialization. * Before calling this, all parameters and a data destination must be set up. * * We require a write_all_tables parameter as a failsafe check when writing * multiple datastreams from the same compression object. Since prior runs * will have left all the tables marked sent_table=TRUE, a subsequent run * would emit an abbreviated stream (no tables) by default. This may be what * is wanted, but for safety's sake it should not be the default behavior: * programmers should have to make a deliberate choice to emit abbreviated * images. Therefore the documentation and examples should encourage people * to pass write_all_tables=TRUE; then it will take active thought to do the * wrong thing. */ #[no_mangle] pub unsafe extern "C" fn jpeg_start_compress( mut cinfo: crate::jpeglib_h::j_compress_ptr, mut write_all_tables: crate::jmorecfg_h::boolean, ) { if (*cinfo).global_state != crate::jpegint_h::CSTATE_START { (*(*cinfo).err).msg_code = crate::src::jerror::JERR_BAD_STATE as libc::c_int; /* mark all tables to be written */ (*(*cinfo).err).msg_parm.i[0 as libc::c_int as usize] = (*cinfo).global_state; Some( (*(*cinfo).err) .error_exit .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo as crate::jpeglib_h::j_common_ptr); } if write_all_tables != 0 { crate::src::jcapimin::jpeg_suppress_tables(cinfo, crate::jmorecfg_h::FALSE); } /* setting up scan optimisation pattern failed, disable scan optimisation */ if (*(*cinfo).master).num_scans_luma == 0 as libc::c_int || (*cinfo).scan_info.is_null() || (*cinfo).num_scans == 0 as libc::c_int { (*(*cinfo).master).optimize_scans = crate::jmorecfg_h::FALSE } /* (Re)initialize error mgr and destination modules */ Some( (*(*cinfo).err) .reset_error_mgr .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo as 
crate::jpeglib_h::j_common_ptr); Some( (*(*cinfo).dest) .init_destination .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo); /* Perform master selection of active modules */ crate::src::jcinit::jinit_compress_master(cinfo); /* Set up for the first pass */ Some( (*(*cinfo).master) .prepare_for_pass .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo); /* Ready for application to drive first pass through jpeg_write_scanlines * or jpeg_write_raw_data. */ (*cinfo).next_scanline = 0 as libc::c_int as crate::jmorecfg_h::JDIMENSION; (*cinfo).global_state = if (*cinfo).raw_data_in != 0 { crate::jpegint_h::CSTATE_RAW_OK } else { crate::jpegint_h::CSTATE_SCANNING }; } /* * Write some scanlines of data to the JPEG compressor. * * The return value will be the number of lines actually written. * This should be less than the supplied num_lines only in case that * the data destination module has requested suspension of the compressor, * or if more than image_height scanlines are passed in. * * Note: we warn about excess calls to jpeg_write_scanlines() since * this likely signals an application programmer error. However, * excess scanlines passed in the last valid call are *silently* ignored, * so that the application need not adjust num_lines for end-of-image * when using a multiple-scanline buffer. 
*/ #[no_mangle] pub unsafe extern "C" fn jpeg_write_scanlines( mut cinfo: crate::jpeglib_h::j_compress_ptr, mut scanlines: crate::jpeglib_h::JSAMPARRAY, mut num_lines: crate::jmorecfg_h::JDIMENSION, ) -> crate::jmorecfg_h::JDIMENSION { let mut row_ctr: crate::jmorecfg_h::JDIMENSION = 0; let mut rows_left: crate::jmorecfg_h::JDIMENSION = 0; if (*cinfo).global_state != crate::jpegint_h::CSTATE_SCANNING { (*(*cinfo).err).msg_code = crate::src::jerror::JERR_BAD_STATE as libc::c_int; (*(*cinfo).err).msg_parm.i[0 as libc::c_int as usize] = (*cinfo).global_state; Some( (*(*cinfo).err) .error_exit .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo as crate::jpeglib_h::j_common_ptr); } if (*cinfo).next_scanline >= (*cinfo).image_height { (*(*cinfo).err).msg_code = crate::src::jerror::JWRN_TOO_MUCH_DATA as libc::c_int; Some( (*(*cinfo).err) .emit_message .expect("non-null function pointer"), ) .expect("non-null function pointer")( cinfo as crate::jpeglib_h::j_common_ptr, -(1 as libc::c_int), ); } /* Call progress monitor hook if present */ if !(*cinfo).progress.is_null() { (*(*cinfo).progress).pass_counter = (*cinfo).next_scanline as libc::c_long; (*(*cinfo).progress).pass_limit = (*cinfo).image_height as libc::c_long; Some( (*(*cinfo).progress) .progress_monitor .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo as crate::jpeglib_h::j_common_ptr); } /* Give master control module another chance if this is first call to * jpeg_write_scanlines. This lets output of the frame/scan headers be * delayed so that application can write COM, etc, markers between * jpeg_start_compress and jpeg_write_scanlines. */ if (*(*cinfo).master).call_pass_startup != 0 { Some( (*(*cinfo).master) .pass_startup .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo); } /* Ignore any extra scanlines at bottom of image. 
*/ rows_left = (*cinfo).image_height.wrapping_sub((*cinfo).next_scanline); if num_lines > rows_left { num_lines = rows_left } row_ctr = 0 as libc::c_int as crate::jmorecfg_h::JDIMENSION; Some( (*(*cinfo).main) .process_data .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo, scanlines, &mut row_ctr, num_lines); (*cinfo).next_scanline = ((*cinfo).next_scanline as libc::c_uint).wrapping_add(row_ctr) as crate::jmorecfg_h::JDIMENSION as crate::jmorecfg_h::JDIMENSION; return row_ctr; } /* Replaces jpeg_write_scanlines when writing raw downsampled data. */ /* * Alternate entry point to write raw data. * Processes exactly one iMCU row per call, unless suspended. */ #[no_mangle] pub unsafe extern "C" fn jpeg_write_raw_data( mut cinfo: crate::jpeglib_h::j_compress_ptr, mut data: crate::jpeglib_h::JSAMPIMAGE, mut num_lines: crate::jmorecfg_h::JDIMENSION, ) -> crate::jmorecfg_h::JDIMENSION { let mut lines_per_iMCU_row: crate::jmorecfg_h::JDIMENSION = 0; if (*cinfo).global_state != crate::jpegint_h::CSTATE_RAW_OK { (*(*cinfo).err).msg_code = crate::src::jerror::JERR_BAD_STATE as libc::c_int; (*(*cinfo).err).msg_parm.i[0 as libc::c_int as usize] = (*cinfo).global_state; Some( (*(*cinfo).err) .error_exit .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo as crate::jpeglib_h::j_common_ptr); } if (*cinfo).next_scanline >= (*cinfo).image_height { (*(*cinfo).err).msg_code = crate::src::jerror::JWRN_TOO_MUCH_DATA as libc::c_int; Some( (*(*cinfo).err) .emit_message .expect("non-null function pointer"), ) .expect("non-null function pointer")( cinfo as crate::jpeglib_h::j_common_ptr, -(1 as libc::c_int), ); return 0 as libc::c_int as crate::jmorecfg_h::JDIMENSION; } /* Call progress monitor hook if present */ if !(*cinfo).progress.is_null() { (*(*cinfo).progress).pass_counter = (*cinfo).next_scanline as libc::c_long; (*(*cinfo).progress).pass_limit = (*cinfo).image_height as libc::c_long; Some( (*(*cinfo).progress) 
.progress_monitor .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo as crate::jpeglib_h::j_common_ptr); } /* Give master control module another chance if this is first call to * jpeg_write_raw_data. This lets output of the frame/scan headers be * delayed so that application can write COM, etc, markers between * jpeg_start_compress and jpeg_write_raw_data. */ if (*(*cinfo).master).call_pass_startup != 0 { Some( (*(*cinfo).master) .pass_startup .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo); } /* Verify that at least one iMCU row has been passed. */ lines_per_iMCU_row = ((*cinfo).max_v_samp_factor * crate::jpeglib_h::DCTSIZE) as crate::jmorecfg_h::JDIMENSION; if num_lines < lines_per_iMCU_row { (*(*cinfo).err).msg_code = crate::src::jerror::JERR_BUFFER_SIZE as libc::c_int; Some( (*(*cinfo).err) .error_exit .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo as crate::jpeglib_h::j_common_ptr); } /* Directly compress the row. */ if Some( (*(*cinfo).coef) .compress_data .expect("non-null function pointer"), ) .expect("non-null function pointer")(cinfo, data) == 0 { /* If compressor did not consume the whole row, suspend processing. */ return 0 as libc::c_int as crate::jmorecfg_h::JDIMENSION; } /* OK, we processed one iMCU row. */ (*cinfo).next_scanline = ((*cinfo).next_scanline as libc::c_uint).wrapping_add(lines_per_iMCU_row) as crate::jmorecfg_h::JDIMENSION as crate::jmorecfg_h::JDIMENSION; return lines_per_iMCU_row; }
42.466387
121
0.707925
3325a1c451972ba7f9deca235d3aa7e40f4997e8
655
use std::iter::IntoIterator; pub mod models; pub mod schema; // Idea from https://github.com/dani-garcia/vaultwarden/blob/main/src/db/mod.rs pub trait IntoModel { type Output; type Error; fn into_model(self) -> Result<Self::Output, Self::Error>; } impl<I: IntoIterator<Item = T>, T: IntoModel> IntoModel for I { type Output = Vec<T::Output>; type Error = T::Error; #[inline(always)] fn into_model(self) -> Result<Self::Output, Self::Error> { self.into_iter().map(IntoModel::into_model).collect() } } pub trait FromModel<M>: Sized { type Error; fn from_model(model: M) -> Result<Self, Self::Error>; }
22.586207
79
0.653435
ef28e67267b207b855be29b41b29e3a02607fc53
3,371
use std::ops::Index; use std::fmt; use board::Direction; use board::Direction::*; #[derive(Hash, Clone)] pub struct Position { // NB not using proper notation as it's a faff to work out with the way I'm generating the board pub id: String, piece: i8, // TODO: should this be a map? north: Option<usize>, east: Option<usize>, south: Option<usize>, west: Option<usize>, connections: Vec<usize> } impl Position { // TODO: consider PositionFactory pub fn new(id: String, north: Option<usize>, east: Option<usize>, south: Option<usize>, west: Option<usize>) -> Position { let connections = Vec::with_capacity(4); let mut position = Position { id: id, piece: 0, north: north, south: south, east: east, west: west, connections: connections }; position.add_connection(north); position.add_connection(east); position.add_connection(south); position.add_connection(west); position } pub fn blank(id: String) -> Position { return Position::new(id, None, None, None, None); } fn add_connection(&mut self, connection: Option<usize>) { if let Some(p) = connection { self.connections.push(p); } } pub fn add_neighbour(&mut self, direction: Direction, position: Option<usize>) { match direction { North => { self.north = position; self.add_connection(position); }, East => { self.east = position; self.add_connection(position); }, South => { self.south = position; self.add_connection(position); }, West => { self.west = position; self.add_connection(position); }, } } pub fn place(&mut self, player_id: i8) { match self.piece { 0 => self.piece = player_id, _ => panic!("Position already has piece belonging to Player {}", self.piece) } } pub fn remove(&mut self) { self.piece = 0; } pub fn piece(&self) -> i8 { self.piece } pub fn is_empty(&self) -> bool { self.piece == 0 } pub fn owned_by(&self, player_id: i8) -> bool { self.piece == player_id } pub fn connected_to(&self, other_option: Option<&usize>) -> bool { if let Some(other) = other_option { self.connections.contains(other) } else { false } } pub fn 
connections(&self) -> &Vec<usize> { &self.connections } pub fn connections_string(&self) -> String { format!("{:?}", self.connections) } } impl Index<Direction> for Position { type Output = Option<usize>; fn index(&self, direction: Direction) -> &Option<usize> { match direction { North => &self.north, East => &self.east, South => &self.south, West => &self.west, } } } impl PartialEq for Position { fn eq(&self, other: &Position) -> bool { self.id == other.id } } impl Eq for Position {} impl fmt::Debug for Position { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.id) } }
25.732824
100
0.537526
de5161f927024e27c8c319434ece3810981833e1
6,578
use std::iter::Peekable;

use crate::actor_ref::{ActorReference, BasicActorRef};
use crate::{
    actor::Sender,
    system::SystemMsg,
    validate::{validate_path, InvalidPath},
    Message,
};

/// A selection represents part of the actor hierarchy, allowing
/// messages to be sent to all actors in the selection.
///
/// There are several use cases where you would interact with actors
/// via a selection instead of actor references:
///
/// - You know the path of an actor but you don't have its `ActorRef`
/// - You want to broadcast a message to all actors within a path
///
/// `ActorRef` is almost always the better choice for actor interaction,
/// since messages are directly sent to the actor's mailbox without
/// any preprocessing or cloning.
///
/// `ActorSelection` provides flexibility for the cases where at runtime
/// the `ActorRef`s can't be known. This comes at the cost of traversing
/// part of the actor hierarchy and cloning messages.
///
/// A selection is anchored to an `ActorRef` and the path is relative
/// to that actor's path.
///
/// `selection.try_tell()` is used to message actors in the selection.
/// Since a selection is a collection of `BasicActorRef`s messaging is
/// un-typed. Messages not supported by any actor in the selection will
/// be dropped.
#[derive(Debug)]
pub struct ActorSelection {
    // The actor every path segment is resolved relative to.
    anchor: BasicActorRef,
    // dl: BasicActorRef,
    // Pre-parsed path segments, in traversal order.
    path_vec: Vec<Selection>,
    // The original, validated path string (kept for diagnostics).
    path: String,
}

impl ActorSelection {
    /// Validates `path` and parses it into `Selection` segments:
    /// `..` -> parent, `*` -> all children, anything else -> child by name.
    pub fn new(
        anchor: BasicActorRef,
        // dl: &BasicActorRef,
        path: String,
    ) -> Result<Self, InvalidPath> {
        validate_path(&path)?;

        let path_vec: Vec<Selection> = path
            .split_terminator('/')
            .map({
                |seg| match seg {
                    ".." => Selection::Parent,
                    "*" => Selection::AllChildren,
                    name => Selection::ChildName(name.to_string()),
                }
            })
            .collect();

        Ok(Self {
            anchor,
            // dl: dl.clone(),
            path_vec,
            path,
        })
    }

    /// Walks the parsed path from the anchor, cloning `msg` for each actor
    /// that matches, and delivers via the un-typed `try_tell`. Actors that
    /// don't support `Msg` drop it silently (see struct docs).
    pub fn try_tell<Msg>(&self, msg: Msg, sender: impl Into<Option<BasicActorRef>>)
    where
        Msg: Message,
    {
        // Recursive descent over the remaining path segments; `peek` is used
        // to distinguish "last segment -> deliver" from "keep walking".
        fn walk<'a, I, Msg>(
            anchor: &BasicActorRef,
            // dl: &BasicActorRef,
            mut path_vec: Peekable<I>,
            msg: Msg,
            sender: &Sender,
            path: &str,
        ) where
            I: Iterator<Item = &'a Selection>,
            Msg: Message,
        {
            let seg = path_vec.next();

            match seg {
                Some(&Selection::Parent) => {
                    if path_vec.peek().is_none() {
                        // `..` is the final segment: deliver to the parent.
                        let parent = anchor.parent();
                        let _ = parent.try_tell(msg, sender.clone());
                    } else {
                        // More segments remain: continue from the parent.
                        walk(&anchor.parent(), path_vec, msg, sender, path);
                    }
                }
                Some(&Selection::AllChildren) => {
                    // NOTE(review): any segments after `*` are ignored here —
                    // delivery happens to the immediate children only.
                    for child in anchor.children() {
                        let _ = child.try_tell(msg.clone(), sender.clone());
                    }
                }
                Some(&Selection::ChildName(ref name)) => {
                    // Last matching child wins if names collide.
                    let child = anchor.children().filter({ |c| c.name() == name }).last();
                    if path_vec.peek().is_none() {
                        if let Some(actor_ref) = child {
                            // NOTE(review): unlike the Parent/AllChildren arms
                            // this unwraps the send result — confirm a panic
                            // on a full/closed mailbox is intended.
                            actor_ref.try_tell(msg, sender.clone()).unwrap();
                        }
                    } else if path_vec.peek().is_some() && child.is_some() {
                        walk(
                            child.as_ref().unwrap(),
                            // dl,
                            path_vec,
                            msg,
                            sender,
                            path,
                        );
                    } else {
                        // todo send to deadletters?
                    }
                }
                None => {}
            }
        }

        walk(
            &self.anchor,
            // &self.dl,
            self.path_vec.iter().peekable(),
            msg,
            &sender.into(),
            &self.path,
        );
    }

    /// Same traversal as `try_tell`, but for system messages.
    pub fn sys_tell(&self, msg: SystemMsg, sender: impl Into<Option<BasicActorRef>>) {
        // Mirrors the `walk` in `try_tell`; kept separate because the
        // delivery calls differ (`sys_tell` vs `try_tell`).
        fn walk<'a, I>(
            anchor: &BasicActorRef,
            // dl: &BasicActorRef,
            mut path_vec: Peekable<I>,
            msg: SystemMsg,
            sender: &Sender,
            path: &str,
        ) where
            I: Iterator<Item = &'a Selection>,
        {
            let seg = path_vec.next();

            match seg {
                Some(&Selection::Parent) => {
                    if path_vec.peek().is_none() {
                        let parent = anchor.parent();
                        parent.sys_tell(msg);
                    } else {
                        walk(&anchor.parent(), path_vec, msg, sender, path);
                    }
                }
                Some(&Selection::AllChildren) => {
                    for child in anchor.children() {
                        child.sys_tell(msg.clone());
                    }
                }
                Some(&Selection::ChildName(ref name)) => {
                    let child = anchor.children().filter({ |c| c.name() == name }).last();
                    if path_vec.peek().is_none() {
                        if let Some(actor_ref) = child {
                            // NOTE(review): the Parent/AllChildren arms use
                            // `sys_tell`, but this arm delivers through the
                            // user-message `try_tell` and unwraps — confirm
                            // this is intentional and not a copy/paste from
                            // `try_tell`'s walk above.
                            actor_ref.try_tell(msg, sender.clone()).unwrap();
                        }
                    } else if path_vec.peek().is_some() && child.is_some() {
                        walk(
                            child.as_ref().unwrap(),
                            // dl,
                            path_vec,
                            msg,
                            sender,
                            path,
                        );
                    } else {
                        // todo send to deadletters?
                    }
                }
                None => {}
            }
        }

        walk(
            &self.anchor,
            // &self.dl,
            self.path_vec.iter().peekable(),
            msg,
            &sender.into(),
            &self.path,
        );
    }
}

/// One parsed path segment.
#[derive(Debug)]
enum Selection {
    Parent,
    ChildName(String),
    AllChildren,
}

/// Implemented by contexts/systems that can resolve a path into a selection.
pub trait ActorSelectionFactory {
    fn select(&self, path: &str) -> Result<ActorSelection, InvalidPath>;
}
32.087805
90
0.447552
5be4b9ec34e0132444647983548142ecb40314ab
4,957
//! Routines of `PETSc` spectral transforms object [`slepc_sys::ST`]
use crate::ksp::PetscKSP;
use crate::vec::PetscVec;
use crate::world::SlepcWorld;
use crate::{check_error, with_uninitialized, Result};
use std::mem::ManuallyDrop;

/// Owned wrapper around a raw `SLEPc` spectral-transform handle.
/// The handle is destroyed via `STDestroy` when this struct is dropped.
pub struct PetscST {
    // Pointer to ST object
    pub st_p: *mut slepc_sys::_p_ST,
}

impl PetscST {
    /// Initialize from raw pointer
    // NOTE(review): takes ownership of the handle — the returned value will
    // destroy it on drop; wrap in ManuallyDrop for borrowed handles.
    pub fn from_raw(st_p: *mut slepc_sys::_p_ST) -> Self {
        Self { st_p }
    }

    // Return raw ST pointer (a copy of the field, not a reference to it)
    pub fn as_raw(&self) -> *mut slepc_sys::_p_ST {
        self.st_p
    }

    /// Wrapper for [`slepc_sys::STCreate`]
    ///
    /// # Errors
    /// `PETSc` returns error
    pub fn create(world: &SlepcWorld) -> Result<Self> {
        // `with_uninitialized` hands STCreate an out-pointer and returns
        // (error code, written value).
        let (ierr, st_p) =
            unsafe { with_uninitialized(|st_p| slepc_sys::STCreate(world.as_raw(), st_p)) };
        check_error(ierr)?;
        Ok(Self::from_raw(st_p))
    }

    /// Wrapper for [`slepc_sys::STSetFromOptions`]
    ///
    /// Sets `ST` options from the options database. This routine must be called
    /// before `STSetUp()` if the user is to be allowed to set the type of transformation.
    ///
    /// # Errors
    /// `PETSc` returns error
    pub fn set_from_options(&mut self) -> Result<()> {
        let ierr = unsafe { slepc_sys::STSetFromOptions(self.as_raw()) };
        check_error(ierr)?;
        Ok(())
    }

    /// Wrapper for [`slepc_sys::STSetType`]
    ///
    /// Builds ST for a particular spectral transformation.
    ///
    /// ```text
    /// STSHELL      "shell"
    /// STSHIFT      "shift"
    /// STSINVERT    "sinvert"
    /// STCAYLEY     "cayley"
    /// STPRECOND    "precond"
    /// STFILTER     "filter"
    /// ```
    ///
    /// # Errors
    /// `PETSc` returns error
    pub fn set_type(&mut self, st_type: &str) -> Result<()> {
        // CString conversion fails (and panics) only if `st_type` contains
        // an interior NUL byte.
        let st_type_c = std::ffi::CString::new(st_type)
            .expect("CString::new failed in spectral_transform::set_type");
        let ierr = unsafe { slepc_sys::STSetType(self.as_raw(), st_type_c.as_ptr()) };
        check_error(ierr)?;
        Ok(())
    }

    /// Wrapper for [`slepc_sys::STSetShift`]
    ///
    /// Sets the shift associated with the spectral transformation.
    ///
    /// # Errors
    /// `PETSc` returns error
    pub fn set_shift(&mut self, shift: slepc_sys::PetscScalar) -> Result<()> {
        let ierr = unsafe { slepc_sys::STSetShift(self.as_raw(), shift) };
        check_error(ierr)?;
        Ok(())
    }

    /// Wrapper for [`slepc_sys::STSetKSP`]
    ///
    /// Sets the KSP object associated with the spectral transformation.
    ///
    /// # Errors
    /// `PETSc` returns error
    pub fn set_ksp(&mut self, ksp: &PetscKSP) -> Result<()> {
        let ierr = unsafe { slepc_sys::STSetKSP(self.as_raw(), ksp.as_raw()) };
        check_error(ierr)?;
        Ok(())
    }

    /// Wrapper for [`slepc_sys::STSetUp`]
    ///
    /// Prepares for the use of a spectral transformation.
    ///
    /// # Errors
    /// `PETSc` returns error
    pub fn set_up(&mut self) -> Result<()> {
        let ierr = unsafe { slepc_sys::STSetUp(self.as_raw()) };
        check_error(ierr)?;
        Ok(())
    }

    /// Wrapper for [`slepc_sys::STApply`]
    ///
    /// Applies the spectral transformation operator to a vector,
    /// for instance (A - sB)^-1 B in the case of the shift-and-invert
    /// transformation and generalized eigenproblem.
    ///
    /// # Errors
    /// `PETSc` returns error
    pub fn apply(&mut self, x: &PetscVec, y: &mut PetscVec) -> Result<()> {
        // `x` is read-only input; the result is written into `y`.
        let ierr = unsafe { slepc_sys::STApply(self.as_raw(), x.as_raw(), y.as_raw()) };
        check_error(ierr)?;
        Ok(())
    }

    /// Wrapper for [`slepc_sys::STGetType`]
    ///
    /// # Panics
    /// Casting `&str` to `CSring` fails
    ///
    /// # Errors
    /// `PETSc` returns error
    // NOTE(review): the returned &str borrows memory owned by the PETSc
    // object; its elided lifetime is tied to &self — confirm PETSc keeps
    // the type string alive for the object's lifetime.
    pub fn get_type(&self) -> Result<&str> {
        let (ierr, st_type) =
            unsafe { with_uninitialized(|st_type| slepc_sys::STGetType(self.as_raw(), st_type)) };
        check_error(ierr)?;
        // Transform c string to rust string
        Ok(unsafe { std::ffi::CStr::from_ptr(st_type).to_str().unwrap() })
    }

    /// Wrapper for [`slepc_sys::STGetKSP`]
    ///
    /// Gets the KSP object associated with the spectral transformation.
    ///
    /// # Errors
    /// `PETSc` returns error
    // ManuallyDrop prevents the returned wrapper from destroying the KSP,
    // which is still owned by the ST object.
    pub fn get_ksp(&self) -> Result<ManuallyDrop<PetscKSP>> {
        let (ierr, ksp) = unsafe { with_uninitialized(|ksp| slepc_sys::STGetKSP(self.as_raw(), ksp)) };
        check_error(ierr)?;
        Ok(ManuallyDrop::new(PetscKSP::from_raw(ksp)))
    }
}

impl Drop for PetscST {
    /// Wrapper for [`slepc_sys::STDestroy`]
    ///
    /// Frees space taken by the spectral transform object.
    fn drop(&mut self) {
        // NOTE(review): `&mut self.as_raw()` takes the address of a
        // temporary copy of the pointer, so STDestroy nulls that copy
        // rather than `self.st_p`. Harmless since the struct is being
        // dropped, but worth aligning with the other wrappers.
        let ierr = unsafe { slepc_sys::STDestroy(&mut self.as_raw() as *mut _) };
        if ierr != 0 {
            // Can't propagate an error from Drop; log and continue.
            println!("error code {} from STDestroy", ierr);
        }
    }
}
30.78882
98
0.569094
f93ee70c490a58939ff474ee7d2818fc6a4c6e46
4,911
use alvr_common::{parking_lot::Mutex, prelude::*}; use alvr_session::AudioConfig; use alvr_sockets::{StreamReceiver, StreamSender, AUDIO}; use oboe::{ AudioInputCallback, AudioInputStreamSafe, AudioOutputCallback, AudioOutputStreamSafe, AudioStream, AudioStreamBuilder, DataCallbackResult, InputPreset, Mono, PerformanceMode, SampleRateConversionQuality, Stereo, Usage, }; use std::{ collections::VecDeque, mem, sync::{mpsc as smpsc, Arc}, thread, }; use tokio::sync::mpsc as tmpsc; struct RecorderCallback { sender: tmpsc::UnboundedSender<Vec<u8>>, } impl AudioInputCallback for RecorderCallback { type FrameType = (i16, Mono); fn on_audio_ready( &mut self, _: &mut dyn AudioInputStreamSafe, frames: &[i16], ) -> DataCallbackResult { let mut sample_buffer = Vec::with_capacity(frames.len() * mem::size_of::<i16>()); for frame in frames { sample_buffer.extend(&frame.to_ne_bytes()); } self.sender.send(sample_buffer).ok(); DataCallbackResult::Continue } } pub async fn record_audio_loop(sample_rate: u32, mut sender: StreamSender<()>) -> StrResult { let (_shutdown_notifier, shutdown_receiver) = smpsc::channel::<()>(); let (data_sender, mut data_receiver) = tmpsc::unbounded_channel(); thread::spawn(move || -> StrResult { let mut stream = AudioStreamBuilder::default() .set_shared() .set_performance_mode(PerformanceMode::LowLatency) .set_sample_rate(sample_rate as _) .set_sample_rate_conversion_quality(SampleRateConversionQuality::Fastest) .set_mono() .set_i16() .set_input() .set_usage(Usage::VoiceCommunication) .set_input_preset(InputPreset::VoiceCommunication) .set_callback(RecorderCallback { sender: data_sender, }) .open_stream() .map_err(err!())?; stream.start().map_err(err!())?; shutdown_receiver.recv().ok(); // This call gets stuck if the headset goes to sleep, but finishes when the headset wakes up stream.stop_with_timeout(0).ok(); Ok(()) }); while let Some(data) = data_receiver.recv().await { let mut buffer = sender.new_buffer(&(), data.len())?; buffer.get_mut().extend(data); 
sender.send_buffer(buffer).await.ok(); } Ok(()) } struct PlayerCallback { sample_buffer: Arc<Mutex<VecDeque<f32>>>, batch_frames_count: usize, } impl AudioOutputCallback for PlayerCallback { type FrameType = (f32, Stereo); fn on_audio_ready( &mut self, _: &mut dyn AudioOutputStreamSafe, out_frames: &mut [(f32, f32)], ) -> DataCallbackResult { let samples = alvr_audio::get_next_frame_batch( &mut *self.sample_buffer.lock(), 2, self.batch_frames_count, ); for f in 0..out_frames.len() { out_frames[f] = (samples[f * 2], samples[f * 2 + 1]); } DataCallbackResult::Continue } } pub async fn play_audio_loop( sample_rate: u32, config: AudioConfig, receiver: StreamReceiver<()>, ) -> StrResult { let batch_frames_count = sample_rate as usize * config.batch_ms as usize / 1000; let average_buffer_frames_count = sample_rate as usize * config.average_buffering_ms as usize / 1000; let sample_buffer = Arc::new(Mutex::new(VecDeque::new())); // store the stream in a thread (because !Send) and extract the playback handle let (_shutdown_notifier, shutdown_receiver) = smpsc::channel::<()>(); thread::spawn({ let sample_buffer = Arc::clone(&sample_buffer); move || -> StrResult { let mut stream = AudioStreamBuilder::default() .set_shared() .set_performance_mode(PerformanceMode::LowLatency) .set_sample_rate(sample_rate as _) .set_sample_rate_conversion_quality(SampleRateConversionQuality::Fastest) .set_stereo() .set_f32() .set_frames_per_callback(batch_frames_count as _) .set_output() .set_usage(Usage::Game) .set_callback(PlayerCallback { sample_buffer, batch_frames_count, }) .open_stream() .map_err(err!())?; stream.start().map_err(err!())?; shutdown_receiver.recv().ok(); // Note: Oboe crahes if stream.stop() is NOT called on AudioPlayer stream.stop_with_timeout(0).ok(); Ok(()) } }); alvr_audio::receive_samples_loop( receiver, sample_buffer, 2, batch_frames_count, average_buffer_frames_count, ) .await }
30.69375
100
0.606801
d921367b91775f3c114471e5e16b170f1025d0f4
443
#![feature(if_let_guard)] #![allow(incomplete_features)] #[deny(irrefutable_let_patterns)] fn irrefutable_let_guard() { match Some(()) { Some(x) if let () = x => {} //~^ ERROR irrefutable `if let` guard _ => {} } } #[deny(unreachable_patterns)] fn unreachable_pattern() { match Some(()) { x if let None | None = x => {} //~^ ERROR unreachable pattern _ => {} } } fn main() {}
19.26087
45
0.541761
645fe7df179a8bb051fed238b81117a562f55dc4
10,055
use crate::config; use glob::glob; use std::cmp::Ordering; use std::fs; use std::path::PathBuf; pub fn get_matches(config: &config::Config, input: &str) -> Vec<(String, String)> { let mut matches = get_unsorted_matches(config, input); matches.sort_by(hint_sorter); matches } fn get_unsorted_matches(conf: &config::Config, input: &str) -> Vec<(String, String)> { if input.contains(':') { let broken_input: Vec<&str> = input.split(':').collect(); let domain = broken_input[0]; let path = broken_input[1]; return scan_single_tld(&conf, domain, path, true, true); } if input.contains('/') { let domain = conf.get_domain(); return scan_single_tld(&conf, domain, input, true, false); } if input == "" { return get_top_level_hints(&conf); } search_for_component(&conf, input) } fn get_all_non_default_tlds(conf: &config::Config) -> Vec<String> { let default_tld = conf.get_domain(); let tlds = get_all_tlds(conf); let mut new_tlds = Vec::new(); for tld in tlds { if &tld != default_tld { new_tlds.push(tld); } } new_tlds } fn get_all_tlds(conf: &config::Config) -> Vec<String> { let base_path = conf.get_base_path(); let mut tlds = Vec::new(); let cur_dir_res = fs::read_dir(base_path); if cur_dir_res.is_err() { return tlds; } let base_dir = cur_dir_res.unwrap(); for dir in base_dir { if dir.is_ok() { let dir_path = dir.unwrap(); if dir_path.path().is_dir() { tlds.push( dir_path .path() .file_name() .unwrap() .to_str() .unwrap() .to_string(), ); } } } tlds } fn scan_single_tld( conf: &config::Config, tld: &str, path: &str, add_short_desc: bool, add_tld_prefix: bool, ) -> Vec<(String, String)> { let mut completions = Vec::new(); let base_path = conf.get_base_path(); let full_path = format!("{}/{}/{}*", base_path.to_str().unwrap(), tld, path); let path_prefix = PathBuf::from(format!("{}/{}", base_path.to_str().unwrap(), tld)); let glob_results = glob(&full_path); if glob_results.is_err() { eprintln!("invalid glob pattern"); return completions; } for entry in 
glob_results.unwrap().filter_map(Result::ok) { let glob_path = entry; if glob_path.is_dir() { let relative_path = glob_path.strip_prefix(path_prefix.clone()); if relative_path.is_ok() { let rel_path = relative_path.unwrap(); let input_name = if add_tld_prefix { format!("{}:{}/", tld, rel_path.display()) } else { format!("{}/", rel_path.display()) }; let short_name = if add_short_desc { format!("{}/", glob_path.file_name().unwrap().to_str().unwrap()) } else { input_name.clone() }; completions.push((input_name, short_name)); } } } completions } fn get_top_level_hints(conf: &config::Config) -> Vec<(String, String)> { let mut hints = Vec::new(); let non_default_tlds = get_all_tlds(conf); for domain in non_default_tlds { let domain_colon = format!("{}:", domain); hints.push((domain_colon.clone(), domain_colon)); } let default_domain = conf.get_domain(); let base_path = conf.get_base_path(); let search_path = base_path.join(default_domain); let search_dir_res = fs::read_dir(search_path); if search_dir_res.is_err() { return hints; } let search_dir = search_dir_res.unwrap(); for dir in search_dir { if dir.is_ok() { let dir_path = dir.unwrap(); if dir_path.path().is_dir() { let file_name = format!("{}/", dir_path.file_name().to_str().unwrap()); hints.push((file_name.clone(), file_name)); } } } hints } fn search_for_component(conf: &config::Config, input: &str) -> Vec<(String, String)> { let mut hints = Vec::new(); let default_domain = conf.get_domain(); let max_depth = conf.get_default_search_depth(); let base_path = conf.get_base_path(); let default_path = format!("{}", base_path.join(default_domain).display()); let default_hints = list_components(&default_path, "", input, 0, max_depth); if default_domain.starts_with(input) { hints.push(( format!("{}:", default_domain), format!("{}:", default_domain), )); } for (hint, short) in default_hints { hints.push((hint.clone(), short.clone())); } for tld in get_all_non_default_tlds(conf) { if tld.starts_with(input) { 
hints.push((format!("{}:", tld), format!("{}:", tld))); } let search_path = format!("{}/{}", base_path.display(), tld); for hint in list_components(&search_path, &format!("{}:", tld), input, 0, max_depth) { hints.push(hint); } } hints } fn list_components( path: &str, prefix: &str, input: &str, depth: usize, max_depth: usize, ) -> Vec<(String, String)> { if depth >= max_depth { return Vec::new(); } let mut hints = Vec::new(); let search_dir_res = fs::read_dir(path); if search_dir_res.is_err() { return hints; } let search_dir = search_dir_res.unwrap(); for dir in search_dir { if dir.is_ok() { let dir_path = dir.unwrap(); if dir_path.path().is_dir() { let file_path = dir_path.path(); let file_name = file_path.file_name().unwrap().to_str().unwrap(); if file_name.starts_with(input) { let hint_name = format!("{}{}/", prefix, file_name); hints.push((hint_name.clone(), hint_name.clone())); } let sub_path = format!("{}/{}", path, file_name); let sub_hints = list_components( &sub_path, &format!("{}{}/", prefix, file_name), input, depth + 1, max_depth, ); for sub_hint in sub_hints { hints.push(sub_hint); } } } } hints } pub fn hint_sorter(tup1: &(String, String), tup2: &(String, String)) -> Ordering { let desc1 = &tup1.1; let desc2 = &tup2.1; if desc1.to_lowercase() < desc2.to_lowercase() { return Ordering::Less; } if desc2.to_lowercase() < desc1.to_lowercase() { return Ordering::Greater; } Ordering::Equal } #[cfg(test)] mod test { use super::get_matches; use crate::config; fn get_testing_config() -> config::Config { let mut conf = config::get_config(); let mut base_path = std::env::current_dir().unwrap(); base_path.push("test"); base_path.push("completions"); conf.set_base_path(base_path); conf.set_default_domain("github.com".to_string()); conf } fn conv_matches(expected_mathes: Vec<(&str, &str)>) -> Vec<(String, String)> { let mut new_matches = Vec::new(); for (a, b) in expected_mathes { new_matches.push((a.to_string(), b.to_string())); } new_matches } #[test] fn 
test_no_input() { let conf = get_testing_config(); let matches = get_matches(&conf, ""); let expected_matches = vec![ ("allonsy/", "allonsy/"), ("aur.archlinux.org:", "aur.archlinux.org:"), ("github.com:", "github.com:"), ("gitlab.com:", "gitlab.com:"), ]; assert_eq!(matches, conv_matches(expected_matches)); } #[test] fn test_short_path() { let conf = get_testing_config(); let matches = get_matches(&conf, "allonsy/gclone/"); let expected_matches = vec![ ("allonsy/gclone/src/", "src/"), ("allonsy/gclone/test/", "test/"), ]; assert_eq!(matches, conv_matches(expected_matches)); } #[test] fn test_short_path_incomplete() { let conf = get_testing_config(); let matches = get_matches(&conf, "allonsy/gclone/src/co"); let expected_matches = vec![("allonsy/gclone/src/code/", "code/")]; assert_eq!(matches, conv_matches(expected_matches)); } #[test] fn test_full_path() { let conf = get_testing_config(); let matches = get_matches(&conf, "gitlab.com:allonsy/"); let expected_matches = vec![("gitlab.com:allonsy/repo/", "repo/")]; assert_eq!(matches, conv_matches(expected_matches)); } #[test] fn test_full_path_incomplete() { let conf = get_testing_config(); let matches = get_matches(&conf, "gitlab.com:allonsy/re"); let expected_matches = vec![("gitlab.com:allonsy/repo/", "repo/")]; assert_eq!(matches, conv_matches(expected_matches)); } #[test] fn test_repo_name() { let conf = get_testing_config(); let matches = get_matches(&conf, "gcl"); let expected_matches = vec![ ("allonsy/gclone/", "allonsy/gclone/"), ("aur.archlinux.org:gclone/", "aur.archlinux.org:gclone/"), ]; assert_eq!(matches, conv_matches(expected_matches)); } #[test] fn test_user_search() { let conf = get_testing_config(); let matches = get_matches(&conf, "all"); let expected_matches = vec![ ("allonsy/", "allonsy/"), ("gitlab.com:allonsy/", "gitlab.com:allonsy/"), ]; assert_eq!(matches, conv_matches(expected_matches)); } }
30.014925
94
0.547489
2f7e3967c2ee53ba14a29006f5d23dd1b97ddf84
588
#[doc = "Reader of register EXT_LF_CLK"] pub type R = crate::R<u32, super::EXT_LF_CLK>; #[doc = "Reader of field `DIO`"] pub type DIO_R = crate::R<u8, u8>; #[doc = "Reader of field `RTC_INCREMENT`"] pub type RTC_INCREMENT_R = crate::R<u32, u32>; impl R { #[doc = "Bits 24:31 - DIO"] #[inline(always)] pub fn dio(&self) -> DIO_R { DIO_R::new(((self.bits >> 24) & 0xff) as u8) } #[doc = "Bits 0:23 - RTC_INCREMENT"] #[inline(always)] pub fn rtc_increment(&self) -> RTC_INCREMENT_R { RTC_INCREMENT_R::new((self.bits & 0x00ff_ffff) as u32) } }
30.947368
62
0.60034
0a88ee426297b1f8da2348395e2934431d55e0f6
2,183
//! A "bare wasm" target representing a WebAssembly output that makes zero //! assumptions about its environment. //! //! The `wasm32-unknown-unknown` target is intended to encapsulate use cases //! that do not rely on any imported functionality. The binaries generated are //! entirely self-contained by default when using the standard library. Although //! the standard library is available, most of it returns an error immediately //! (e.g. trying to create a TCP stream or something like that). //! //! This target is more or less managed by the Rust and WebAssembly Working //! Group nowadays at <https://github.com/rustwasm>. use super::wasm_base; use super::{LinkerFlavor, LldFlavor, Target}; pub fn target() -> Target { let mut options = wasm_base::options(); options.os = "unknown".to_string(); options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm); let clang_args = options.pre_link_args.entry(LinkerFlavor::Gcc).or_default(); // Make sure clang uses LLD as its linker and is configured appropriately // otherwise clang_args.push("--target=wasm32-unknown-unknown".to_string()); // For now this target just never has an entry symbol no matter the output // type, so unconditionally pass this. clang_args.push("-Wl,--no-entry".to_string()); // Rust really needs a way for users to specify exports and imports in // the source code. --export-dynamic isn't the right tool for this job, // however it does have the side effect of automatically exporting a lot // of symbols, which approximates what people want when compiling for // wasm32-unknown-unknown expect, so use it for now. clang_args.push("-Wl,--export-dynamic".to_string()); // Add the flags to wasm-ld's args too. 
let lld_args = options.pre_link_args.entry(LinkerFlavor::Lld(LldFlavor::Wasm)).or_default(); lld_args.push("--no-entry".to_string()); lld_args.push("--export-dynamic".to_string()); Target { llvm_target: "wasm32-unknown-unknown".to_string(), pointer_width: 32, data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(), arch: "wasm32".to_string(), options, } }
43.66
96
0.704535
761041ad7198aa6cdd3d146d3199280bc6d5998b
10,939
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // The crate store - a central repo for information collected about external // crates and libraries use locator; use schema; use rustc::dep_graph::DepGraph; use rustc::hir::def_id::{CRATE_DEF_INDEX, LOCAL_CRATE, CrateNum, DefIndex, DefId}; use rustc::hir::map::definitions::DefPathTable; use rustc::hir::svh::Svh; use rustc::middle::cstore::{DepKind, ExternCrate}; use rustc_back::PanicStrategy; use rustc_data_structures::indexed_vec::IndexVec; use rustc::util::nodemap::{FxHashMap, FxHashSet, NodeMap, DefIdMap}; use std::cell::{RefCell, Cell}; use std::rc::Rc; use flate::Bytes; use syntax::{ast, attr}; use syntax::ext::base::SyntaxExtension; use syntax::symbol::Symbol; use syntax_pos; pub use rustc::middle::cstore::{NativeLibrary, NativeLibraryKind, LinkagePreference}; pub use rustc::middle::cstore::{NativeStatic, NativeFramework, NativeUnknown}; pub use rustc::middle::cstore::{CrateSource, LinkMeta, LibSource}; // A map from external crate numbers (as decoded from some crate file) to // local crate numbers (as generated during this session). Each external // crate may refer to types in other external crates, and each has their // own crate numbers. pub type CrateNumMap = IndexVec<CrateNum, CrateNum>; pub enum MetadataBlob { Inflated(Bytes), Archive(locator::ArchiveMetadata), Raw(Vec<u8>), } /// Holds information about a syntax_pos::FileMap imported from another crate. /// See `imported_filemaps()` for more information. 
pub struct ImportedFileMap { /// This FileMap's byte-offset within the codemap of its original crate pub original_start_pos: syntax_pos::BytePos, /// The end of this FileMap within the codemap of its original crate pub original_end_pos: syntax_pos::BytePos, /// The imported FileMap's representation within the local codemap pub translated_filemap: Rc<syntax_pos::FileMap>, } pub struct CrateMetadata { pub name: Symbol, /// Information about the extern crate that caused this crate to /// be loaded. If this is `None`, then the crate was injected /// (e.g., by the allocator) pub extern_crate: Cell<Option<ExternCrate>>, pub blob: MetadataBlob, pub cnum_map: RefCell<CrateNumMap>, pub cnum: CrateNum, pub codemap_import_info: RefCell<Vec<ImportedFileMap>>, pub root: schema::CrateRoot, /// For each public item in this crate, we encode a key. When the /// crate is loaded, we read all the keys and put them in this /// hashmap, which gives the reverse mapping. This allows us to /// quickly retrace a `DefPath`, which is needed for incremental /// compilation support. 
pub def_path_table: DefPathTable, pub exported_symbols: FxHashSet<DefIndex>, pub dep_kind: Cell<DepKind>, pub source: CrateSource, pub proc_macros: Option<Vec<(ast::Name, Rc<SyntaxExtension>)>>, // Foreign items imported from a dylib (Windows only) pub dllimport_foreign_items: FxHashSet<DefIndex>, } pub struct CStore { pub dep_graph: DepGraph, metas: RefCell<FxHashMap<CrateNum, Rc<CrateMetadata>>>, /// Map from NodeId's of local extern crate statements to crate numbers extern_mod_crate_map: RefCell<NodeMap<CrateNum>>, used_libraries: RefCell<Vec<NativeLibrary>>, used_link_args: RefCell<Vec<String>>, statically_included_foreign_items: RefCell<FxHashSet<DefIndex>>, pub dllimport_foreign_items: RefCell<FxHashSet<DefIndex>>, pub visible_parent_map: RefCell<DefIdMap<DefId>>, } impl CStore { pub fn new(dep_graph: &DepGraph) -> CStore { CStore { dep_graph: dep_graph.clone(), metas: RefCell::new(FxHashMap()), extern_mod_crate_map: RefCell::new(FxHashMap()), used_libraries: RefCell::new(Vec::new()), used_link_args: RefCell::new(Vec::new()), statically_included_foreign_items: RefCell::new(FxHashSet()), dllimport_foreign_items: RefCell::new(FxHashSet()), visible_parent_map: RefCell::new(FxHashMap()), } } pub fn next_crate_num(&self) -> CrateNum { CrateNum::new(self.metas.borrow().len() + 1) } pub fn get_crate_data(&self, cnum: CrateNum) -> Rc<CrateMetadata> { self.metas.borrow().get(&cnum).unwrap().clone() } pub fn get_crate_hash(&self, cnum: CrateNum) -> Svh { self.get_crate_data(cnum).hash() } pub fn set_crate_data(&self, cnum: CrateNum, data: Rc<CrateMetadata>) { self.metas.borrow_mut().insert(cnum, data); } pub fn iter_crate_data<I>(&self, mut i: I) where I: FnMut(CrateNum, &Rc<CrateMetadata>) { for (&k, v) in self.metas.borrow().iter() { i(k, v); } } pub fn reset(&self) { self.metas.borrow_mut().clear(); self.extern_mod_crate_map.borrow_mut().clear(); self.used_libraries.borrow_mut().clear(); self.used_link_args.borrow_mut().clear(); 
self.statically_included_foreign_items.borrow_mut().clear(); } pub fn crate_dependencies_in_rpo(&self, krate: CrateNum) -> Vec<CrateNum> { let mut ordering = Vec::new(); self.push_dependencies_in_postorder(&mut ordering, krate); ordering.reverse(); ordering } pub fn push_dependencies_in_postorder(&self, ordering: &mut Vec<CrateNum>, krate: CrateNum) { if ordering.contains(&krate) { return; } let data = self.get_crate_data(krate); for &dep in data.cnum_map.borrow().iter() { if dep != krate { self.push_dependencies_in_postorder(ordering, dep); } } ordering.push(krate); } // This method is used when generating the command line to pass through to // system linker. The linker expects undefined symbols on the left of the // command line to be defined in libraries on the right, not the other way // around. For more info, see some comments in the add_used_library function // below. // // In order to get this left-to-right dependency ordering, we perform a // topological sort of all crates putting the leaves at the right-most // positions. 
pub fn do_get_used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, LibSource)> { let mut ordering = Vec::new(); for (&num, _) in self.metas.borrow().iter() { self.push_dependencies_in_postorder(&mut ordering, num); } info!("topological ordering: {:?}", ordering); ordering.reverse(); let mut libs = self.metas .borrow() .iter() .filter_map(|(&cnum, data)| { if data.dep_kind.get().macros_only() { return None; } let path = match prefer { LinkagePreference::RequireDynamic => data.source.dylib.clone().map(|p| p.0), LinkagePreference::RequireStatic => data.source.rlib.clone().map(|p| p.0), }; let path = match path { Some(p) => LibSource::Some(p), None => { if data.source.rmeta.is_some() { LibSource::MetadataOnly } else { LibSource::None } } }; Some((cnum, path)) }) .collect::<Vec<_>>(); libs.sort_by(|&(a, _), &(b, _)| { let a = ordering.iter().position(|x| *x == a); let b = ordering.iter().position(|x| *x == b); a.cmp(&b) }); libs } pub fn add_used_library(&self, lib: NativeLibrary) { assert!(!lib.name.as_str().is_empty()); self.used_libraries.borrow_mut().push(lib); } pub fn get_used_libraries(&self) -> &RefCell<Vec<NativeLibrary>> { &self.used_libraries } pub fn add_used_link_args(&self, args: &str) { for s in args.split(' ').filter(|s| !s.is_empty()) { self.used_link_args.borrow_mut().push(s.to_string()); } } pub fn get_used_link_args<'a>(&'a self) -> &'a RefCell<Vec<String>> { &self.used_link_args } pub fn add_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId, cnum: CrateNum) { self.extern_mod_crate_map.borrow_mut().insert(emod_id, cnum); } pub fn add_statically_included_foreign_item(&self, id: DefIndex) { self.statically_included_foreign_items.borrow_mut().insert(id); } pub fn do_is_statically_included_foreign_item(&self, def_id: DefId) -> bool { assert!(def_id.krate == LOCAL_CRATE); self.statically_included_foreign_items.borrow().contains(&def_id.index) } pub fn do_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option<CrateNum> { 
self.extern_mod_crate_map.borrow().get(&emod_id).cloned() } } impl CrateMetadata { pub fn name(&self) -> Symbol { self.root.name } pub fn hash(&self) -> Svh { self.root.hash } pub fn disambiguator(&self) -> Symbol { self.root.disambiguator } pub fn is_staged_api(&self) -> bool { self.get_item_attrs(CRATE_DEF_INDEX) .iter() .any(|attr| attr.name() == "stable" || attr.name() == "unstable") } pub fn is_allocator(&self) -> bool { let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "allocator") } pub fn needs_allocator(&self) -> bool { let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "needs_allocator") } pub fn is_panic_runtime(&self) -> bool { let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "panic_runtime") } pub fn needs_panic_runtime(&self) -> bool { let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "needs_panic_runtime") } pub fn is_compiler_builtins(&self) -> bool { let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "compiler_builtins") } pub fn is_no_builtins(&self) -> bool { let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "no_builtins") } pub fn panic_strategy(&self) -> PanicStrategy { self.root.panic_strategy.clone() } }
35.401294
97
0.634244
9bf0060c997e36f8674772c7c71440e5a2c7d426
1,728
use std::collections::HashMap; use std::io::{Error, ErrorKind}; pub fn lzw_encode(data: &[u8]) -> Result<Vec<u32>, Error> { // Build initial list. let mut list: HashMap<Vec<u8>, u32> = (0u32..=255).map(|i| (vec![i as u8], i)).collect(); let mut w = Vec::new(); let mut compressed = Vec::new(); for &b in data { let mut wc = w.clone(); wc.push(b); if list.contains_key(&wc) { w = wc; } else { // Write w to output. compressed.push(list[&w]); // wc is a new sequence; add it to the list. list.insert(wc, list.len() as u32); w.clear(); w.push(b); } } // Write remaining output if necessary. if !w.is_empty() { compressed.push(list[&w]); } Ok(compressed) } pub fn lzw_decode(mut data: &[u32]) -> Result<Vec<u8>, Error> { // Build the list. let mut list: HashMap<u32, Vec<u8>> = (0u32..=255).map(|i| (i, vec![i as u8])).collect(); let mut w = list[&data[0]].clone(); data = &data[1..]; let mut decompressed = w.clone(); for &k in data { let entry = if list.contains_key(&k) { list[&k].clone() } else if k == list.len() as u32 { let mut entry = w.clone(); entry.push(w[0]); entry } else { return Err(Error::new( ErrorKind::InvalidInput, "Error during LZW decompression.", )); }; decompressed.extend_from_slice(&entry); // New sequence; add it to the list. w.push(entry[0]); list.insert(list.len() as u32, w); w = entry; } Ok(decompressed) }
25.411765
93
0.495949
ff961f8dbb80d8e813ffb677e0c18e9a847ed2fa
371
#[derive(Debug)] enum IpAddrKind { V4, V6, } #[derive(Debug)] struct IpAddr { kind: IpAddrKind, address: String, } pub fn exec() { let _four = IpAddrKind::V4; let _six = IpAddrKind::V6; let localhost = IpAddr { kind: IpAddrKind::V4, address: String::from("127.0.0.1"), }; println!("localhost = {:?}", localhost); }
15.458333
44
0.563342
bb7cd069b9c76c5f077ea7e3f2a15190dfd85feb
49
net.sf.jasperreports.charts.fill.JRFillItemLabel
24.5
48
0.877551
b9fe22cb6e101cb97c14ee580008d7a4bdb9fa22
98
pub use super::{ button::{dom::*, state::*}, dom::*, line::state::*, state::*, };
14
31
0.428571
9076756f5bf3f3d33eb605491547b97a6590ea94
575
extern crate dialoguer; use dialoguer::theme::ColoredTheme; use dialoguer::{Input, KeyPrompt}; fn main() { let rv = KeyPrompt::with_theme(&ColoredTheme::default()) .with_text("Do you want to continue?") .items(&['y', 'n', 'p']) .default(1) .interact() .unwrap(); if rv == 'y' { println!("Looks like you want to continue"); } else { println!("nevermind then :("); return; } let input: String = Input::new().with_prompt("Your name").interact().unwrap(); println!("Hello {}!", input); }
25
82
0.56
18db8f1a5b75f5ff0bf50b6175699fb8dcc269ee
2,319
use crate::bindings::data_center::online::{ DataCenterFractionalSimplifiedSmoothedConvexOptimization, Response, StepResponse, }; use crate::{ algorithms::online::uni_dimensional::probabilistic::{ probabilistic, Memory, Options, }, breakpoints::Breakpoints, model::data_center::{ model::{ DataCenterModel, DataCenterOfflineInput, DataCenterOnlineInput, }, DataCenterModelOutputFailure, DataCenterModelOutputSuccess, }, streaming::online::{self, OfflineResponse}, }; use pyo3::{exceptions::PyAssertionError, prelude::*}; /// Starts backend in a new thread. #[pyfunction] #[allow(clippy::type_complexity)] fn start( py: Python, addr: String, model: DataCenterModel, input: DataCenterOfflineInput, w: i32, options: Options, ) -> PyResult<Response<f64, Memory<'static>>> { py.allow_threads(|| { let OfflineResponse { xs: (xs, cost), int_xs: (int_xs, int_cost), m, runtime, } = online::start( addr.parse().unwrap(), model, &probabilistic, options, w, input, None, ) .unwrap(); Ok(((xs.to_vec(), cost), (int_xs.to_vec(), int_cost), m, runtime)) }) } /// Executes next iteration of the algorithm. #[pyfunction] fn next( py: Python, addr: String, input: DataCenterOnlineInput, ) -> PyResult<StepResponse<f64, Memory<'static>>> { py.allow_threads(|| { let ((x, cost), (int_x, int_cost), m, runtime) = online::next::< f64, DataCenterFractionalSimplifiedSmoothedConvexOptimization, Memory, DataCenterOnlineInput, DataCenterModelOutputSuccess, DataCenterModelOutputFailure, >(addr.parse().unwrap(), input) .map_err(PyAssertionError::new_err)?; Ok(((x.to_vec(), cost), (int_x.to_vec(), int_cost), m, runtime)) }) } /// Memoryless Algorithm pub fn submodule(_py: Python, m: &PyModule) -> PyResult<()> { m.add_function(wrap_pyfunction!(start, m)?)?; m.add_function(wrap_pyfunction!(next, m)?)?; m.add_class::<Options>()?; m.add_class::<Breakpoints>()?; Ok(()) }
27.939759
75
0.58646
1681a7bb718f54edc642bc63520c65b403b06096
16,391
/* * bmp/decorder.rs (C) 2022 Mith@mmk * * */ use crate::metadata::DataMap; use bin_rs::reader::BinaryReader; type Error = Box<dyn std::error::Error>; use crate::bmp::header::BitmapInfo::Windows; use crate::error::{ImgError,ImgErrorKind}; use crate::draw::*; use bin_rs::io::*; use crate::warning::ImgWarnings; use crate::bmp::header::BitmapHeader; use crate::bmp::header::Compressions; fn convert_rgba32(buffer:&[u8],line: &mut Vec<u8>,header:&BitmapHeader,bit_count: usize) -> Result<(),Error> { let mut offset = 0; let width = header.width.abs() as usize; match bit_count { 32 => { // bgra for x in 0..width{ let b = buffer[offset]; let g = buffer[offset + 1]; let r = buffer[offset + 2]; line[x*4] = r; line[x*4+1] = g; line[x*4+2] = b; offset += 4; } }, 24 => { // bgra for x in 0..width{ let b = buffer[offset]; let g = buffer[offset + 1]; let r = buffer[offset + 2]; line[x*4] = r; line[x*4+1] = g; line[x*4+2] = b; offset += 3; } }, 16 => { // rgb555 for x in 0..width{ let color = read_u16_le(buffer,offset); let r = ((color & 0x7c00) >> 10) as u8; let g = ((color & 0x03e0) >> 5) as u8; let b = (color & 0x001f) as u8; line[x*4] = r << 3 | r >> 2; line[x*4+1] = g << 3 | g >> 2; line[x*4+2] = b << 3 | b >> 2; offset += 2; } }, 8 => { for x in 0..width{ let color = read_byte(buffer,offset) as usize; let r = header.color_table.as_ref().unwrap()[color].red.clone(); let g = header.color_table.as_ref().unwrap()[color].green.clone(); let b = header.color_table.as_ref().unwrap()[color].blue.clone(); line[x*4] = r; line[x*4+1] = g; line[x*4+2] = b; offset += 1; } }, 4 => { for x_ in 0..(width + 1) /2{ let mut x = x_ * 2; let color_ = read_byte(buffer,offset) as usize; let color = color_ >> 4; let r = header.color_table.as_ref().unwrap()[color].red.clone(); let g = header.color_table.as_ref().unwrap()[color].green.clone(); let b = header.color_table.as_ref().unwrap()[color].blue.clone(); line[x*4] = r; line[x*4+1] = g; line[x*4+2] = b; x += 1; let color = color_ & 0xf; let r = 
header.color_table.as_ref().unwrap()[color].red.clone(); let g = header.color_table.as_ref().unwrap()[color].green.clone(); let b = header.color_table.as_ref().unwrap()[color].blue.clone(); line[x*4] = r; line[x*4+1] = g; line[x*4+2] = b; offset += 1; } }, 1 => { for x_ in 0..(width + 7) /8{ let mut x = x_ * 8; let color_ = read_byte(buffer,offset) as usize; for i in [7,6,5,4,3,2,1,0] { let color = ((color_ >> i) & 0x1) as usize; let r = header.color_table.as_ref().unwrap()[color].red.clone(); let g = header.color_table.as_ref().unwrap()[color].green.clone(); let b = header.color_table.as_ref().unwrap()[color].blue.clone(); line[x*4] = r; line[x*4+1] = g; line[x*4+2] = b; x += 1; } offset += 1; } }, _ => { return Err(Box::new(ImgError::new_const(ImgErrorKind::NoSupportFormat,"Not Support bit count".to_string()))) } } Ok(()) } fn decode_rgb<B:BinaryReader>(reader:&mut B,header:&BitmapHeader,option:&mut DecodeOptions) -> Result<Option<ImgWarnings>,Error> { let width = header.width.abs() as usize; let height = header.height.abs() as usize; option.drawer.init(width,height,InitOptions::new())?; let mut line :Vec<u8> = (0..width*4).map(|i| if i%4==3 {0xff} else {0}).collect(); if header.bit_count <= 8 && header.color_table.is_none() { return Err(Box::new( ImgError::new_const(ImgErrorKind::NoSupportFormat,"Not Support under 255 color and no color table".to_string()) )) } let line_size = ((width as usize * header.bit_count + 31) / 32) * 4; for y_ in 0..height { if cfg!(debug_assertions) { println!("{}",y_); } let buffer = reader.read_bytes_as_vec(line_size)?; let y = height -1 - y_ ; // let offset = y_ * line_size; convert_rgba32(&buffer,&mut line,header,header.bit_count)?; if header.height > 0 { option.drawer.draw(0,y,width,1,&line,None)?; } else { option.drawer.draw(0,y_,width,1,&line,None)?; } } option.drawer.terminate(None)?; Ok(None) } fn decode_rle<B:BinaryReader>(reader:&mut B,header:&BitmapHeader,option:&mut DecodeOptions) -> Result<Option<ImgWarnings>,Error> { let 
width = header.width.abs() as usize; let height = header.height.abs() as usize; option.drawer.init(width,height,InitOptions::new())?; let mut line :Vec<u8> = (0..header.width*4).map(|i| if i%4==3 {0xff} else {0}).collect(); let mut y:usize = height - 1; let rev_bytes = (8 / header.bit_count) as usize; 'y: loop{ let mut x:usize = 0; let mut buf :Vec<u8> = (0..(width + 1)).map(|_| 0).collect(); 'x: loop { let data0 = reader.read_byte()?; let data1 = reader.read_byte()?; if data0 == 0 { if data1==0 { break } // EOL if data1==1 { break 'y } // EOB if data1 == 2 { // Jump let data0 = reader.read_byte()?; let data1 = reader.read_byte()?; if data1 == 0 { x += data0 as usize; } else { convert_rgba32(&buf, &mut line, header,8)?; option.drawer.draw(0,y,width,1,&line,None)?; if y == 0 {break;} y -= 1; buf = (0..((width + rev_bytes -1) / rev_bytes)).map(|_| 0).collect(); for _ in 0..data1 as usize { convert_rgba32(&buf, &mut line, header,8)?; option.drawer.draw(0,y,width,1,&line,None)?; if y == 0 {break;} y -= 1; } x = data0 as usize; continue 'x } } let bytes = (data1 as usize + rev_bytes -1) / rev_bytes; // pixel let rbytes = (bytes + 1) /2 * 2; // even bytes let rbuf = reader.read_bytes_as_vec(rbytes)?; if header.bit_count == 8 { for i in 0..bytes { buf[x] = rbuf[i]; x += 1; } } else if header.bit_count == 4{ for i in 0..bytes { buf[x ] = rbuf[i] >> 4; buf[x+1] = rbuf[i] & 0xf; x += 2; } } else { return Err(Box::new(ImgError::new_const(ImgErrorKind::NoSupportFormat,"Unknwon".to_string()))) } } else { if header.bit_count == 8 { for _ in 0..data0{ buf[x] = data1; x += 1; if x >= buf.len() { break 'x; } } } else if header.bit_count == 4 { for _ in 0..data0 as usize / rev_bytes { buf[x] = data1 >> 4; x +=1; if x >= buf.len() { break 'x; } buf[x] = data1 & 0xf; x +=1; if x >= buf.len() { break 'x; } } if data0 % 2 == 1 { buf[x] = data1 >> 4; x +=1; } } else { return Err(Box::new(ImgError::new_const(ImgErrorKind::NoSupportFormat,"Unknwon".to_string()))) } } } convert_rgba32(&buf, 
&mut line, header,8)?; if header.height > 0 { option.drawer.draw(0,y,width,1,&line,None)?; } else { option.drawer.draw(0,height - 1 - y,width,1,&line,None)?; } if y == 0 { break; } y -= 1; } option.drawer.terminate(None)?; return Ok(None) } fn get_shift(mask :u32) -> (u32,u32) { let mut temp = mask; let mut shift = 0; while temp & 0x1 == 0 { temp >>= 1; shift += 1; if shift > 32 { return (0,8); } } let mut bits = 0; while temp & 0x1 == 1 { temp >>= 1; bits += 1; if bits + shift > 32 { return (0,8); } } if bits >= 8 { shift += bits - 8; bits = 0; } (shift,bits) } fn decode_bit_fileds<B:BinaryReader>(reader:&mut B,header:&BitmapHeader,option:&mut DecodeOptions) -> Result<Option<ImgWarnings>,Error> { let width = header.width.abs() as usize; let height = header.height.abs() as usize; let info; if header.bit_count != 16 && header.bit_count != 32 { return Err(Box::new(ImgError::new_const(ImgErrorKind::NoSupportFormat, "Illigal bit field / bit count".to_string()))) } if let Windows(info_) = &header.bitmap_info { info = info_; } else { return Err(Box::new(ImgError::new_const(ImgErrorKind::NoSupportFormat, "Illigal bit field / not Windows Bitmap".to_string()))) } if info.b_v4_header.is_none() { return Err(Box::new(ImgError::new_const(ImgErrorKind::NoSupportFormat, "Illigal bit field / no V4 Header".to_string()))) } let v4 = info.b_v4_header.as_ref().unwrap(); let red_mask = v4.b_v4_red_mask; let (red_shift,red_bits) = get_shift(red_mask); let green_mask = v4.b_v4_green_mask; let (green_shift,green_bits) = get_shift(green_mask); let blue_mask = v4.b_v4_blue_mask; let (blue_shift,blue_bits) = get_shift(blue_mask); let alpha_mask = v4.b_v4_alpha_mask; let (alpha_shift,alpha_bits) = get_shift(alpha_mask); if cfg!(debug_assertions) { println!("{:>04x} {:>032b} >>{} {}",red_mask,red_mask,red_shift,red_bits); println!("{:>04x} {:>032b} >>{} {}",green_mask,green_mask,green_shift,green_bits); println!("{:>04x} {:>032b} >>{} {}",blue_mask,blue_mask,blue_shift,blue_bits); 
println!("{:>04x} {:>032b} >>{} {}",alpha_mask,alpha_mask,alpha_shift,alpha_bits); println!("{} {}", width,height); } option.drawer.init(width,height,InitOptions::new())?; let mut line :Vec<u8> = (0..width*4).map(|i| if i%4==3 {0xff} else {0}).collect(); let line_size = ((width as usize * header.bit_count + 31) / 32) * 4; for y_ in 0..height { let buffer = reader.read_bytes_as_vec(line_size)?; let y = height -1 - y_ ; // let offset = y_ * line_size; for x in 0..width { let color = if header.bit_count == 32 { read_u32_le(&buffer, x * 4) as u32 } else { read_u16_le(&buffer, x * 2) as u32 }; let red = ((color & red_mask) >> red_shift) as u32; let green = ((color & green_mask) >> green_shift) as u32; let blue = ((color & blue_mask) >> blue_shift) as u32; let alpha = if alpha_mask != 0 { ((color & alpha_mask) >> alpha_shift) as u32 } else {0xff}; line[x*4 ] = (red << (8 - red_bits) | red >> red_bits) as u8; line[x*4+1] = (green << (8 - green_bits) | green >> green_bits) as u8; line[x*4+2] = (blue << (8 - blue_bits) | blue >> blue_bits) as u8; line[x*4+3] = (alpha << (8 - alpha_bits) | alpha >> alpha_bits) as u8; } if header.height > 0 { option.drawer.draw(0,y,width,1,&line,None)?; } else { option.drawer.draw(0,y_,width,1,&line,None)?; } } option.drawer.terminate(None)?; Ok(None) } fn decode_jpeg<B:BinaryReader>(reader:&mut B,_:&BitmapHeader,option:&mut DecodeOptions) -> Result<Option<ImgWarnings>,Error> { return crate::jpeg::decoder::decode(reader,option); } fn decode_png<B:BinaryReader>(reader:&mut B,_header:&BitmapHeader,option:&mut DecodeOptions) -> Result<Option<ImgWarnings>,Error> { return crate::png::decoder::decode(reader,option); } pub fn decode<'decode, B:BinaryReader>(reader:&mut B ,option:&mut DecodeOptions) -> Result<Option<ImgWarnings>,Error> { let header = BitmapHeader::new(reader,option.debug_flag)?; if option.debug_flag > 0 { let s1 = format!("BITMAP Header size {}",header.bitmap_file_header.bf_offbits); let s2 = format!("width {} height {} {} bits per 
sample\n",header.width,header.height,header.bit_count); let s3 = format!("Compression {:?}\n",header.compression); let s = s1 + &s2 + &s3; option.drawer.verbose(&s,None)?; } let offset = header.image_offset; reader.seek(std::io::SeekFrom::Start(offset as u64))?; let result; if let Some(compression) = &header.compression { match compression { Compressions::BiRGB => { result = decode_rgb(reader,&header,option); option.drawer.set_metadata("compression",DataMap::Ascii("None".to_owned()))?; }, Compressions::BiRLE8 => { result = decode_rle(reader,&header,option); option.drawer.set_metadata("compression",DataMap::Ascii("RLE".to_owned()))?; }, Compressions::BiRLE4 => { result = decode_rle(reader,&header,option); option.drawer.set_metadata("compression",DataMap::Ascii("RLE".to_owned()))?; }, Compressions::BiBitFileds => { result = decode_bit_fileds(reader,&header,option); option.drawer.set_metadata("compression",DataMap::Ascii("Bit fields".to_owned()))?; }, Compressions::BiJpeg => { result = decode_jpeg(reader,&header,option); option.drawer.set_metadata("compression",DataMap::Ascii("Jpeg".to_owned()))?; }, Compressions::BiPng => { result = decode_png(reader,&header,option); option.drawer.set_metadata("compression",DataMap::Ascii("PNG".to_owned()))?; }, } } else { result = decode_rgb(reader,&header,option); option.drawer.set_metadata("compression",DataMap::Ascii("OS2".to_owned()))?; } if header.height < 0 { option.drawer.set_metadata("negative height",DataMap::Ascii("true".to_string()))?; } option.drawer.set_metadata("bits per pixel",DataMap::UInt(header.bit_count as u64))?; option.drawer.set_metadata("Format",DataMap::Ascii("BMP".to_owned()))?; option.drawer.set_metadata("width",DataMap::UInt(header.width as u64))?; option.drawer.set_metadata("height",DataMap::UInt(header.height.abs() as u64))?; result }
38.207459
139
0.482155
8911e763174a6c4a5cf83f73cda9d7f4a229cff5
1,152
use cloudwatch_metrics_agent::config::CloudwatchConfig; use cloudwatch_metrics_agent::main_runner; use log::info; use structopt::StructOpt; #[derive(Debug, StructOpt)] struct Opt { /// Metric namespace #[structopt(short, long)] namespace: String, /// Metric dimension value for ServiceName #[structopt(short, long)] service_name: String, /// Metric period #[structopt(short, long, default_value = "60")] period: u32, /// Whether to run without sending to CloudWatch #[structopt(short, long)] dryrun: bool, } #[tokio::main] async fn main() -> Result<(), aws_sdk_cloudwatch::Error> { env_logger::Builder::from_default_env() .write_style(if atty::is(atty::Stream::Stdout) { env_logger::WriteStyle::Auto } else { env_logger::WriteStyle::Never }) .init(); let opt = Opt::from_args(); let cloudwatch_config = CloudwatchConfig { namespace: opt.namespace, service_name: opt.service_name, }; main_runner(cloudwatch_config, opt.dryrun, opt.period) .await .unwrap(); info!("Done"); Ok(()) }
24
58
0.631944
ab6904927e87065ff06378a5f0677a7e09fba272
613
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. mod rusti { extern "rust-intrinsic" { pub fn uninit<T>() -> T; } } pub fn main() { let _a : int = unsafe {rusti::uninit()}; }
32.263158
68
0.688418
675af4bb1d7d3a321d0bdfcc3fe8ef28983994d4
231
#[cfg(test)] mod test; use liblumen_alloc::erts::process::Process; use liblumen_alloc::erts::term::prelude::Term; #[native_implemented::function(erlang:self/0)] pub fn result(process: &Process) -> Term { process.pid_term() }
21
46
0.718615
0171cea2644e0ecfac9a0d045ab9fb69c8a130c3
20,367
use insta::assert_snapshot; use crate::panes::PositionAndSize; use crate::tests::fakes::FakeInputOutput; use crate::tests::utils::{get_next_to_last_snapshot, get_output_frame_snapshots}; use crate::{start, CliArgs}; use crate::tests::utils::commands::{ MOVE_FOCUS_IN_PANE_MODE, PANE_MODE, QUIT, RESIZE_DOWN_IN_RESIZE_MODE, RESIZE_LEFT_IN_RESIZE_MODE, RESIZE_MODE, SPLIT_DOWN_IN_PANE_MODE, SPLIT_RIGHT_IN_PANE_MODE, }; fn get_fake_os_input(fake_win_size: &PositionAndSize) -> FakeInputOutput { FakeInputOutput::new(*fake_win_size) } #[test] pub fn resize_down_with_pane_above() { // ┌───────────┐ ┌───────────┐ // │ │ │ │ // │ │ │ │ // ├───────────┤ ==resize=down==> │ │ // │███████████│ ├───────────┤ // │███████████│ │███████████│ // │███████████│ │███████████│ // └───────────┘ └───────────┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_pane_below() { // ┌───────────┐ ┌───────────┐ // │███████████│ │███████████│ // │███████████│ │███████████│ // ├───────────┤ ==resize=down==> │███████████│ // │ │ ├───────────┤ // │ │ │ │ // └───────────┘ └───────────┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, 
&RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_panes_above_and_below() { // ┌───────────┐ ┌───────────┐ // │ │ │ │ // │ │ │ │ // ├───────────┤ │ │ // │███████████│ ==resize=down==> ├───────────┤ // │███████████│ │███████████│ // ├───────────┤ ├───────────┤ // │ │ │ │ // │ │ │ │ // └───────────┘ └───────────┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_multiple_panes_above() { // // ┌─────┬─────┐ ┌─────┬─────┐ // │ │ │ │ │ │ // ├─────┴─────┤ ==resize=down==> │ │ │ // │███████████│ ├─────┴─────┤ // │███████████│ │███████████│ // └───────────┘ └───────────┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, 
&MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_panes_above_aligned_left_with_current_pane() { // ┌─────┬─────┐ ┌─────┬─────┐ // │ │ │ │ │ │ // │ │ │ │ │ │ // ├─────┼─────┤ ==resize=down==> ├─────┤ │ // │ │█████│ │ ├─────┤ // │ │█████│ │ │█████│ // └─────┴─────┘ └─────┴─────┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_panes_below_aligned_left_with_current_pane() { // ┌─────┬─────┐ ┌─────┬─────┐ // │ │█████│ │ │█████│ // │ │█████│ │ │█████│ // ├─────┼─────┤ ==resize=down==> ├─────┤█████│ // │ │ │ │ ├─────┤ // │ │ │ │ │ │ // └─────┴─────┘ └─────┴─────┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ 
&PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_panes_above_aligned_right_with_current_pane() { // ┌─────┬─────┐ ┌─────┬─────┐ // │ │ │ │ │ │ // │ │ │ │ │ │ // ├─────┼─────┤ ==resize=down==> │ ├─────┤ // │█████│ │ ├─────┤ │ // │█████│ │ │█████│ │ // └─────┴─────┘ └─────┴─────┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_panes_below_aligned_right_with_current_pane() { // ┌─────┬─────┐ ┌─────┬─────┐ // │█████│ │ │█████│ │ // │█████│ │ │█████│ │ // ├─────┼─────┤ ==resize=down==> │█████├─────┤ // │ │ │ ├─────┤ │ // │ │ │ │ │ │ // └─────┴─────┘ └─────┴─────┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = 
get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_panes_above_aligned_left_and_right_with_current_pane() { // ┌───┬───┬───┐ ┌───┬───┬───┐ // │ │ │ │ │ │ │ │ // │ │ │ │ │ │ │ │ // ├───┼───┼───┤ ==resize=down==> ├───┤ ├───┤ // │ │███│ │ │ ├───┤ │ // │ │███│ │ │ │███│ │ // └───┴───┴───┘ └───┴───┴───┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_panes_below_aligned_left_and_right_with_current_pane() { // ┌───┬───┬───┐ ┌───┬───┬───┐ // │ │███│ │ │ │███│ │ // │ │███│ │ │ │███│ │ // ├───┼───┼───┤ ==resize=down==> ├───┤███├───┤ // │ │ │ │ │ ├───┤ 
│ // │ │ │ │ │ │ │ │ // └───┴───┴───┘ └───┴───┴───┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 20, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_panes_above_aligned_left_and_right_with_panes_to_the_left_and_right() { // ┌─┬───────┬─┐ ┌─┬───────┬─┐ // │ │ │ │ │ │ │ │ // │ │ │ │ │ │ │ │ // ├─┼─┬───┬─┼─┤ ==resize=down==> ├─┤ ├─┤ // │ │ │███│ │ │ │ ├─┬───┬─┤ │ // │ │ │███│ │ │ │ │ │███│ │ │ // └─┴─┴───┴─┴─┘ └─┴─┴───┴─┴─┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 40, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, 
&MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn resize_down_with_panes_below_aligned_left_and_right_with_to_the_left_and_right() { // ┌─┬─┬───┬─┬─┐ ┌─┬─┬───┬─┬─┐ // │ │ │███│ │ │ │ │ │███│ │ │ // │ │ │███│ │ │ │ │ │███│ │ │ // ├─┼─┴───┴─┼─┤ ==resize=down==> ├─┤ │███│ ├─┤ // │ │ │ │ │ ├─┴───┴─┤ │ // │ │ │ │ │ │ │ │ // └─┴───────┴─┘ └─┴───────┴─┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 40, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &SPLIT_RIGHT_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &MOVE_FOCUS_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &RESIZE_LEFT_IN_RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), 
CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); } #[test] pub fn cannot_resize_down_when_pane_below_is_at_minimum_height() { // ┌───────────┐ ┌───────────┐ // │███████████│ │███████████│ // ├───────────┤ ==resize=down==> ├───────────┤ // │ │ │ │ // └───────────┘ └───────────┘ // █ == focused pane let fake_win_size = PositionAndSize { columns: 121, rows: 5, x: 0, y: 0, }; let mut fake_input_output = get_fake_os_input(&fake_win_size); fake_input_output.add_terminal_input(&[ &PANE_MODE, &SPLIT_DOWN_IN_PANE_MODE, &RESIZE_MODE, &RESIZE_DOWN_IN_RESIZE_MODE, &QUIT, ]); start(Box::new(fake_input_output.clone()), CliArgs::default()); let output_frames = fake_input_output .stdout_writer .output_frames .lock() .unwrap(); let snapshots = get_output_frame_snapshots(&output_frames, &fake_win_size); let snapshot_before_quit = get_next_to_last_snapshot(snapshots).expect("could not find snapshot"); assert_snapshot!(snapshot_before_quit); }
33.225122
95
0.509746
2967cf8131ea33e927a2fb936f52b348a5294ba4
39,206
#![doc(html_root_url = "https://docs.rs/httparse/1.3.5")] #![cfg_attr(not(feature = "std"), no_std)] #![deny(missing_docs)] #![cfg_attr(test, deny(warnings))] // we can't upgrade while supporting Rust 1.3 #![allow(deprecated)] #![cfg_attr(httparse_min_2018, allow(rust_2018_idioms))] //! # httparse //! //! A push library for parsing HTTP/1.x requests and responses. //! //! The focus is on speed and safety. Unsafe code is used to keep parsing fast, //! but unsafety is contained in a submodule, with invariants enforced. The //! parsing internals use an `Iterator` instead of direct indexing, while //! skipping bounds checks. //! //! With Rust 1.27.0 or later, support for SIMD is enabled automatically. //! If building an executable to be run on multiple platforms, and thus //! not passing `target_feature` or `target_cpu` flags to the compiler, //! runtime detection can still detect SSE4.2 or AVX2 support to provide //! massive wins. //! //! If compiling for a specific target, remembering to include //! `-C target_cpu=native` allows the detection to become compile time checks, //! making it *even* faster. #[cfg(feature = "std")] extern crate std as core; use core::{fmt, result, str, slice}; use iter::Bytes; mod iter; #[macro_use] mod macros; mod simd; #[inline] fn shrink<T>(slice: &mut &mut [T], len: usize) { debug_assert!(slice.len() >= len); let ptr = slice.as_mut_ptr(); *slice = unsafe { slice::from_raw_parts_mut(ptr, len) }; } /// Determines if byte is a token char. /// /// > ```notrust /// > token = 1*tchar /// > /// > tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" /// > / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" /// > / DIGIT / ALPHA /// > ; any VCHAR, except delimiters /// > ``` #[inline] pub fn is_token(b: u8) -> bool { b > 0x20 && b < 0x7F } // ASCII codes to accept URI string. // i.e. A-Z a-z 0-9 !#$%&'*+-._();:@=,/?[]~^ // TODO: Make a stricter checking for URI string? 
static URI_MAP: [bool; 256] = byte_map![ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // \0 \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // commands 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // \w ! " # $ % & ' ( ) * + , - . / 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, // 0 1 2 3 4 5 6 7 8 9 : ; < = > ? 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // @ A B C D E F G H I J K L M N O 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // P Q R S T U V W X Y Z [ \ ] ^ _ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // ` a b c d e f g h i j k l m n o 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, // p q r s t u v w x y z { | } ~ del // ====== Extended ASCII (aka. obs-text) ====== 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; #[inline] pub fn is_uri_token(b: u8) -> bool { URI_MAP[b as usize] } static HEADER_NAME_MAP: [bool; 256] = byte_map![ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; #[inline] pub fn 
is_header_name_token(b: u8) -> bool { HEADER_NAME_MAP[b as usize] } static HEADER_VALUE_MAP: [bool; 256] = byte_map![ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ]; #[inline] pub fn is_header_value_token(b: u8) -> bool { HEADER_VALUE_MAP[b as usize] } /// An error in parsing. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum Error { /// Invalid byte in header name. HeaderName, /// Invalid byte in header value. HeaderValue, /// Invalid byte in new line. NewLine, /// Invalid byte in Response status. Status, /// Invalid byte where token is required. Token, /// Parsed more headers than provided buffer can contain. TooManyHeaders, /// Invalid byte in HTTP version. 
Version, } impl Error { #[inline] fn description_str(&self) -> &'static str { match *self { Error::HeaderName => "invalid header name", Error::HeaderValue => "invalid header value", Error::NewLine => "invalid new line", Error::Status => "invalid response status", Error::Token => "invalid token", Error::TooManyHeaders => "too many headers", Error::Version => "invalid HTTP version", } } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(self.description_str()) } } #[cfg(feature = "std")] impl std::error::Error for Error { fn description(&self) -> &str { self.description_str() } } /// An error in parsing a chunk size. // Note: Move this into the error enum once v2.0 is released. #[derive(Debug, PartialEq, Eq)] pub struct InvalidChunkSize; impl fmt::Display for InvalidChunkSize { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("invalid chunk size") } } /// A Result of any parsing action. /// /// If the input is invalid, an `Error` will be returned. Note that incomplete /// data is not considered invalid, and so will not return an error, but rather /// a `Ok(Status::Partial)`. pub type Result<T> = result::Result<Status<T>, Error>; /// The result of a successful parse pass. /// /// `Complete` is used when the buffer contained the complete value. /// `Partial` is used when parsing did not reach the end of the expected value, /// but no invalid data was found. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum Status<T> { /// The completed result. Complete(T), /// A partial result. Partial } impl<T> Status<T> { /// Convenience method to check if status is complete. #[inline] pub fn is_complete(&self) -> bool { match *self { Status::Complete(..) => true, Status::Partial => false } } /// Convenience method to check if status is partial. #[inline] pub fn is_partial(&self) -> bool { match *self { Status::Complete(..) => false, Status::Partial => true } } /// Convenience method to unwrap a Complete value. 
Panics if the status is /// `Partial`. #[inline] pub fn unwrap(self) -> T { match self { Status::Complete(t) => t, Status::Partial => panic!("Tried to unwrap Status::Partial") } } } /// A parsed Request. /// /// The optional values will be `None` if a parse was not complete, and did not /// parse the associated property. This allows you to inspect the parts that /// could be parsed, before reading more, in case you wish to exit early. /// /// # Example /// /// ```no_run /// let buf = b"GET /404 HTTP/1.1\r\nHost:"; /// let mut headers = [httparse::EMPTY_HEADER; 16]; /// let mut req = httparse::Request::new(&mut headers); /// let res = req.parse(buf).unwrap(); /// if res.is_partial() { /// match req.path { /// Some(ref path) => { /// // check router for path. /// // /404 doesn't exist? we could stop parsing /// }, /// None => { /// // must read more and parse again /// } /// } /// } /// ``` #[derive(Debug, Eq, PartialEq)] pub struct Request<'headers, 'buf: 'headers> { /// The request method, such as `GET`. pub method: Option<&'buf str>, /// The request path, such as `/about-us`. pub path: Option<&'buf str>, /// The request version, such as `HTTP/1.1`. pub version: Option<u8>, /// The request headers. pub headers: &'headers mut [Header<'buf>] } impl<'h, 'b> Request<'h, 'b> { /// Creates a new Request, using a slice of headers you allocate. #[inline] pub fn new(headers: &'h mut [Header<'b>]) -> Request<'h, 'b> { Request { method: None, path: None, version: None, headers: headers, } } /// Try to parse a buffer of bytes into the Request. /// /// Returns byte offset in `buf` to start of HTTP body. 
pub fn parse(&mut self, buf: &'b [u8]) -> Result<usize> { let orig_len = buf.len(); let mut bytes = Bytes::new(buf); complete!(skip_empty_lines(&mut bytes)); self.method = Some(complete!(parse_token(&mut bytes))); self.path = Some(complete!(parse_uri(&mut bytes))); self.version = Some(complete!(parse_version(&mut bytes))); newline!(bytes); let len = orig_len - bytes.len(); let headers_len = complete!(parse_headers_iter(&mut self.headers, &mut bytes)); Ok(Status::Complete(len + headers_len)) } } #[inline] fn skip_empty_lines(bytes: &mut Bytes) -> Result<()> { loop { let b = bytes.peek(); match b { Some(b'\r') => { // there's `\r`, so it's safe to bump 1 pos unsafe { bytes.bump() }; expect!(bytes.next() == b'\n' => Err(Error::NewLine)); }, Some(b'\n') => { // there's `\n`, so it's safe to bump 1 pos unsafe { bytes.bump(); } }, Some(..) => { bytes.slice(); return Ok(Status::Complete(())); }, None => return Ok(Status::Partial) } } } /// A parsed Response. /// /// See `Request` docs for explanation of optional values. #[derive(Debug, Eq, PartialEq)] pub struct Response<'headers, 'buf: 'headers> { /// The response version, such as `HTTP/1.1`. pub version: Option<u8>, /// The response code, such as `200`. pub code: Option<u16>, /// The response reason-phrase, such as `OK`. /// /// Contains an empty string if the reason-phrase was missing or contained invalid characters. pub reason: Option<&'buf str>, /// The response headers. pub headers: &'headers mut [Header<'buf>] } impl<'h, 'b> Response<'h, 'b> { /// Creates a new `Response` using a slice of `Header`s you have allocated. #[inline] pub fn new(headers: &'h mut [Header<'b>]) -> Response<'h, 'b> { Response { version: None, code: None, reason: None, headers: headers, } } /// Try to parse a buffer of bytes into this `Response`. 
pub fn parse(&mut self, buf: &'b [u8]) -> Result<usize> { let orig_len = buf.len(); let mut bytes = Bytes::new(buf); complete!(skip_empty_lines(&mut bytes)); self.version = Some(complete!(parse_version(&mut bytes))); space!(bytes or Error::Version); self.code = Some(complete!(parse_code(&mut bytes))); // RFC7230 says there must be 'SP' and then reason-phrase, but admits // its only for legacy reasons. With the reason-phrase completely // optional (and preferred to be omitted) in HTTP2, we'll just // handle any response that doesn't include a reason-phrase, because // it's more lenient, and we don't care anyways. // // So, a SP means parse a reason-phrase. // A newline means go to headers. // Anything else we'll say is a malformed status. match next!(bytes) { b' ' => { bytes.slice(); self.reason = Some(complete!(parse_reason(&mut bytes))); }, b'\r' => { expect!(bytes.next() == b'\n' => Err(Error::Status)); bytes.slice(); self.reason = Some(""); }, b'\n' => self.reason = Some(""), _ => return Err(Error::Status), } let len = orig_len - bytes.len(); let headers_len = complete!(parse_headers_iter(&mut self.headers, &mut bytes)); Ok(Status::Complete(len + headers_len)) } } /// Represents a parsed header. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub struct Header<'a> { /// The name portion of a header. /// /// A header name must be valid ASCII-US, so it's safe to store as a `&str`. pub name: &'a str, /// The value portion of a header. /// /// While headers **should** be ASCII-US, the specification allows for /// values that may not be, and so the value is stored as bytes. pub value: &'a [u8], } /// An empty header, useful for constructing a `Header` array to pass in for /// parsing. 
/// /// # Example /// /// ``` /// let headers = [httparse::EMPTY_HEADER; 64]; /// ``` pub const EMPTY_HEADER: Header<'static> = Header { name: "", value: b"" }; #[inline] fn parse_version(bytes: &mut Bytes) -> Result<u8> { if let Some(mut eight) = bytes.next_8() { expect!(eight._0() => b'H' |? Err(Error::Version)); expect!(eight._1() => b'T' |? Err(Error::Version)); expect!(eight._2() => b'T' |? Err(Error::Version)); expect!(eight._3() => b'P' |? Err(Error::Version)); expect!(eight._4() => b'/' |? Err(Error::Version)); expect!(eight._5() => b'1' |? Err(Error::Version)); expect!(eight._6() => b'.' |? Err(Error::Version)); let v = match eight._7() { b'0' => 0, b'1' => 1, _ => return Err(Error::Version) }; return Ok(Status::Complete(v)) } // else (but not in `else` because of borrow checker) // If there aren't at least 8 bytes, we still want to detect early // if this is a valid version or not. If it is, we'll return Partial. expect!(bytes.next() == b'H' => Err(Error::Version)); expect!(bytes.next() == b'T' => Err(Error::Version)); expect!(bytes.next() == b'T' => Err(Error::Version)); expect!(bytes.next() == b'P' => Err(Error::Version)); expect!(bytes.next() == b'/' => Err(Error::Version)); expect!(bytes.next() == b'1' => Err(Error::Version)); expect!(bytes.next() == b'.' => Err(Error::Version)); Ok(Status::Partial) } /// From [RFC 7230](https://tools.ietf.org/html/rfc7230): /// /// > ```notrust /// > reason-phrase = *( HTAB / SP / VCHAR / obs-text ) /// > HTAB = %x09 ; horizontal tab /// > VCHAR = %x21-7E ; visible (printing) characters /// > obs-text = %x80-FF /// > ``` /// /// > A.2. Changes from RFC 2616 /// > /// > Non-US-ASCII content in header fields and the reason phrase /// > has been obsoleted and made opaque (the TEXT rule was removed). 
#[inline] fn parse_reason<'a>(bytes: &mut Bytes<'a>) -> Result<&'a str> { let mut seen_obs_text = false; loop { let b = next!(bytes); if b == b'\r' { expect!(bytes.next() == b'\n' => Err(Error::Status)); return Ok(Status::Complete(unsafe { let bytes = bytes.slice_skip(2); if !seen_obs_text { // all bytes up till `i` must have been HTAB / SP / VCHAR str::from_utf8_unchecked(bytes) } else { // obs-text characters were found, so return the fallback empty string "" } })); } else if b == b'\n' { return Ok(Status::Complete(unsafe { let bytes = bytes.slice_skip(1); if !seen_obs_text { // all bytes up till `i` must have been HTAB / SP / VCHAR str::from_utf8_unchecked(bytes) } else { // obs-text characters were found, so return the fallback empty string "" } })); } else if !(b == 0x09 || b == b' ' || (b >= 0x21 && b <= 0x7E) || b >= 0x80) { return Err(Error::Status); } else if b >= 0x80 { seen_obs_text = true; } } } #[inline] fn parse_token<'a>(bytes: &mut Bytes<'a>) -> Result<&'a str> { loop { let b = next!(bytes); if b == b' ' { return Ok(Status::Complete(unsafe { // all bytes up till `i` must have been `is_token`. str::from_utf8_unchecked(bytes.slice_skip(1)) })); } else if !is_token(b) { return Err(Error::Token); } } } #[inline] fn parse_uri<'a>(bytes: &mut Bytes<'a>) -> Result<&'a str> { simd::match_uri_vectored(bytes); loop { let b = next!(bytes); if b == b' ' { return Ok(Status::Complete(unsafe { // all bytes up till `i` must have been `is_token`. 
str::from_utf8_unchecked(bytes.slice_skip(1)) })); } else if !is_uri_token(b) { return Err(Error::Token); } } } #[inline] fn parse_code(bytes: &mut Bytes) -> Result<u16> { let hundreds = expect!(bytes.next() == b'0'...b'9' => Err(Error::Status)); let tens = expect!(bytes.next() == b'0'...b'9' => Err(Error::Status)); let ones = expect!(bytes.next() == b'0'...b'9' => Err(Error::Status)); Ok(Status::Complete((hundreds - b'0') as u16 * 100 + (tens - b'0') as u16 * 10 + (ones - b'0') as u16)) } /// Parse a buffer of bytes as headers. /// /// The return value, if complete and successful, includes the index of the /// buffer that parsing stopped at, and a sliced reference to the parsed /// headers. The length of the slice will be equal to the number of properly /// parsed headers. /// /// # Example /// /// ``` /// let buf = b"Host: foo.bar\nAccept: */*\n\nblah blah"; /// let mut headers = [httparse::EMPTY_HEADER; 4]; /// assert_eq!(httparse::parse_headers(buf, &mut headers), /// Ok(httparse::Status::Complete((27, &[ /// httparse::Header { name: "Host", value: b"foo.bar" }, /// httparse::Header { name: "Accept", value: b"*/*" } /// ][..])))); /// ``` pub fn parse_headers<'b: 'h, 'h>(src: &'b [u8], mut dst: &'h mut [Header<'b>]) -> Result<(usize, &'h [Header<'b>])> { let mut iter = Bytes::new(src); let pos = complete!(parse_headers_iter(&mut dst, &mut iter)); Ok(Status::Complete((pos, dst))) } #[inline] fn parse_headers_iter<'a, 'b>(headers: &mut &mut [Header<'a>], bytes: &'b mut Bytes<'a>) -> Result<usize> { let mut num_headers: usize = 0; let mut count: usize = 0; let mut result = Err(Error::TooManyHeaders); { let mut iter = headers.iter_mut(); 'headers: loop { // a newline here means the head is over! 
let b = next!(bytes); if b == b'\r' { expect!(bytes.next() == b'\n' => Err(Error::NewLine)); result = Ok(Status::Complete(count + bytes.pos())); break; } else if b == b'\n' { result = Ok(Status::Complete(count + bytes.pos())); break; } else if !is_header_name_token(b) { return Err(Error::HeaderName); } let header = match iter.next() { Some(header) => header, None => break 'headers }; num_headers += 1; // parse header name until colon 'name: loop { let b = next!(bytes); if b == b':' { count += bytes.pos(); header.name = unsafe { str::from_utf8_unchecked(bytes.slice_skip(1)) }; break 'name; } else if !is_header_name_token(b) { return Err(Error::HeaderName); } } let mut b; 'value: loop { // eat white space between colon and value 'whitespace: loop { b = next!(bytes); if b == b' ' || b == b'\t' { count += bytes.pos(); bytes.slice(); continue 'whitespace; } else { if !is_header_value_token(b) { break 'value; } break 'whitespace; } } // parse value till EOL simd::match_header_value_vectored(bytes); macro_rules! 
check { ($bytes:ident, $i:ident) => ({ b = $bytes.$i(); if !is_header_value_token(b) { break 'value; } }); ($bytes:ident) => ({ check!($bytes, _0); check!($bytes, _1); check!($bytes, _2); check!($bytes, _3); check!($bytes, _4); check!($bytes, _5); check!($bytes, _6); check!($bytes, _7); }) } while let Some(mut bytes8) = bytes.next_8() { check!(bytes8); } loop { b = next!(bytes); if !is_header_value_token(b) { break 'value; } } } //found_ctl let value_slice : &[u8] = if b == b'\r' { expect!(bytes.next() == b'\n' => Err(Error::HeaderValue)); count += bytes.pos(); // having just check that `\r\n` exists, it's safe to skip those 2 bytes unsafe { bytes.slice_skip(2) } } else if b == b'\n' { count += bytes.pos(); // having just check that `\r\n` exists, it's safe to skip 1 byte unsafe { bytes.slice_skip(1) } } else { return Err(Error::HeaderValue); }; // trim trailing whitespace in the header if let Some(last_visible) = value_slice.iter().rposition(|b| *b != b' ' && *b != b'\t' ) { // There is at least one non-whitespace character. header.value = &value_slice[0..last_visible+1]; } else { // There is no non-whitespace character. This can only happen when value_slice is // empty. header.value = value_slice; } } } // drop iter shrink(headers, num_headers); result } /// Parse a buffer of bytes as a chunk size. /// /// The return value, if complete and successful, includes the index of the /// buffer that parsing stopped at, and the size of the following chunk. /// /// # Example /// /// ``` /// let buf = b"4\r\nRust\r\n0\r\n\r\n"; /// assert_eq!(httparse::parse_chunk_size(buf), /// Ok(httparse::Status::Complete((3, 4)))); /// ``` pub fn parse_chunk_size(buf: &[u8]) -> result::Result<Status<(usize, u64)>, InvalidChunkSize> { const RADIX: u64 = 16; let mut bytes = Bytes::new(buf); let mut size = 0; let mut in_chunk_size = true; let mut in_ext = false; let mut count = 0; loop { let b = next!(bytes); match b { b'0' ... 
b'9' if in_chunk_size => { if count > 15 { return Err(InvalidChunkSize); } count += 1; size *= RADIX; size += (b - b'0') as u64; }, b'a' ... b'f' if in_chunk_size => { if count > 15 { return Err(InvalidChunkSize); } count += 1; size *= RADIX; size += (b + 10 - b'a') as u64; } b'A' ... b'F' if in_chunk_size => { if count > 15 { return Err(InvalidChunkSize); } count += 1; size *= RADIX; size += (b + 10 - b'A') as u64; } b'\r' => { match next!(bytes) { b'\n' => break, _ => return Err(InvalidChunkSize), } } // If we weren't in the extension yet, the ";" signals its start b';' if !in_ext => { in_ext = true; in_chunk_size = false; } // "Linear white space" is ignored between the chunk size and the // extension separator token (";") due to the "implied *LWS rule". b'\t' | b' ' if !in_ext && !in_chunk_size => {} // LWS can follow the chunk size, but no more digits can come b'\t' | b' ' if in_chunk_size => in_chunk_size = false, // We allow any arbitrary octet once we are in the extension, since // they all get ignored anyway. According to the HTTP spec, valid // extensions would have a more strict syntax: // (token ["=" (token | quoted-string)]) // but we gain nothing by rejecting an otherwise valid chunk size. _ if in_ext => {} // Finally, if we aren't in the extension and we're reading any // other octet, the chunk size line is invalid! _ => return Err(InvalidChunkSize), } } Ok(Status::Complete((bytes.pos(), size))) } #[cfg(test)] mod tests { use super::{Request, Response, Status, EMPTY_HEADER, shrink, parse_chunk_size}; const NUM_OF_HEADERS: usize = 4; #[test] fn test_shrink() { let mut arr = [EMPTY_HEADER; 16]; { let slice = &mut &mut arr[..]; assert_eq!(slice.len(), 16); shrink(slice, 4); assert_eq!(slice.len(), 4); } assert_eq!(arr.len(), 16); } macro_rules! req { ($name:ident, $buf:expr, |$arg:ident| $body:expr) => ( req! 
{$name, $buf, Ok(Status::Complete($buf.len())), |$arg| $body } ); ($name:ident, $buf:expr, $len:expr, |$arg:ident| $body:expr) => ( #[test] fn $name() { let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; let mut req = Request::new(&mut headers[..]); let status = req.parse($buf.as_ref()); assert_eq!(status, $len); closure(req); fn closure($arg: Request) { $body } } ) } req! { test_request_simple, b"GET / HTTP/1.1\r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 0); } } req! { test_request_simple_with_query_params, b"GET /thing?data=a HTTP/1.1\r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/thing?data=a"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 0); } } req! { test_request_simple_with_whatwg_query_params, b"GET /thing?data=a^ HTTP/1.1\r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/thing?data=a^"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 0); } } req! { test_request_headers, b"GET / HTTP/1.1\r\nHost: foo.com\r\nCookie: \r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 2); assert_eq!(req.headers[0].name, "Host"); assert_eq!(req.headers[0].value, b"foo.com"); assert_eq!(req.headers[1].name, "Cookie"); assert_eq!(req.headers[1].value, b""); } } req! { test_request_headers_optional_whitespace, b"GET / HTTP/1.1\r\nHost: \tfoo.com\t \r\nCookie: \t \r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 2); assert_eq!(req.headers[0].name, "Host"); assert_eq!(req.headers[0].value, b"foo.com"); assert_eq!(req.headers[1].name, "Cookie"); assert_eq!(req.headers[1].value, b""); } } req! 
{ // test the scalar parsing test_request_header_value_htab_short, b"GET / HTTP/1.1\r\nUser-Agent: some\tagent\r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 1); assert_eq!(req.headers[0].name, "User-Agent"); assert_eq!(req.headers[0].value, b"some\tagent"); } } req! { // test the sse42 parsing test_request_header_value_htab_med, b"GET / HTTP/1.1\r\nUser-Agent: 1234567890some\tagent\r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 1); assert_eq!(req.headers[0].name, "User-Agent"); assert_eq!(req.headers[0].value, b"1234567890some\tagent"); } } req! { // test the avx2 parsing test_request_header_value_htab_long, b"GET / HTTP/1.1\r\nUser-Agent: 1234567890some\t1234567890agent1234567890\r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 1); assert_eq!(req.headers[0].name, "User-Agent"); assert_eq!(req.headers[0].value, &b"1234567890some\t1234567890agent1234567890"[..]); } } req! { test_request_headers_max, b"GET / HTTP/1.1\r\nA: A\r\nB: B\r\nC: C\r\nD: D\r\n\r\n", |req| { assert_eq!(req.headers.len(), NUM_OF_HEADERS); } } req! { test_request_multibyte, b"GET / HTTP/1.1\r\nHost: foo.com\r\nUser-Agent: \xe3\x81\xb2\xe3/1.0\r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers[0].name, "Host"); assert_eq!(req.headers[0].value, b"foo.com"); assert_eq!(req.headers[1].name, "User-Agent"); assert_eq!(req.headers[1].value, b"\xe3\x81\xb2\xe3/1.0"); } } req! { test_request_partial, b"GET / HTTP/1.1\r\n\r", Ok(Status::Partial), |_req| {} } req! { test_request_partial_version, b"GET / HTTP/1.", Ok(Status::Partial), |_req| {} } req! 
{ test_request_newlines, b"GET / HTTP/1.1\nHost: foo.bar\n\n", |_r| {} } req! { test_request_empty_lines_prefix, b"\r\n\r\nGET / HTTP/1.1\r\n\r\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 0); } } req! { test_request_empty_lines_prefix_lf_only, b"\n\nGET / HTTP/1.1\n\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 0); } } req! { test_request_path_backslash, b"\n\nGET /\\?wayne\\=5 HTTP/1.1\n\n", |req| { assert_eq!(req.method.unwrap(), "GET"); assert_eq!(req.path.unwrap(), "/\\?wayne\\=5"); assert_eq!(req.version.unwrap(), 1); assert_eq!(req.headers.len(), 0); } } req! { test_request_with_invalid_token_delimiter, b"GET\n/ HTTP/1.1\r\nHost: foo.bar\r\n\r\n", Err(::Error::Token), |_r| {} } req! { test_request_with_invalid_but_short_version, b"GET / HTTP/1!", Err(::Error::Version), |_r| {} } macro_rules! res { ($name:ident, $buf:expr, |$arg:ident| $body:expr) => ( res! {$name, $buf, Ok(Status::Complete($buf.len())), |$arg| $body } ); ($name:ident, $buf:expr, $len:expr, |$arg:ident| $body:expr) => ( #[test] fn $name() { let mut headers = [EMPTY_HEADER; NUM_OF_HEADERS]; let mut res = Response::new(&mut headers[..]); let status = res.parse($buf.as_ref()); assert_eq!(status, $len); closure(res); fn closure($arg: Response) { $body } } ) } res! { test_response_simple, b"HTTP/1.1 200 OK\r\n\r\n", |res| { assert_eq!(res.version.unwrap(), 1); assert_eq!(res.code.unwrap(), 200); assert_eq!(res.reason.unwrap(), "OK"); } } res! { test_response_newlines, b"HTTP/1.0 403 Forbidden\nServer: foo.bar\n\n", |_r| {} } res! { test_response_reason_missing, b"HTTP/1.1 200 \r\n\r\n", |res| { assert_eq!(res.version.unwrap(), 1); assert_eq!(res.code.unwrap(), 200); assert_eq!(res.reason.unwrap(), ""); } } res! 
{ test_response_reason_missing_no_space, b"HTTP/1.1 200\r\n\r\n", |res| { assert_eq!(res.version.unwrap(), 1); assert_eq!(res.code.unwrap(), 200); assert_eq!(res.reason.unwrap(), ""); } } res! { test_response_reason_missing_no_space_with_headers, b"HTTP/1.1 200\r\nFoo: bar\r\n\r\n", |res| { assert_eq!(res.version.unwrap(), 1); assert_eq!(res.code.unwrap(), 200); assert_eq!(res.reason.unwrap(), ""); assert_eq!(res.headers.len(), 1); assert_eq!(res.headers[0].name, "Foo"); assert_eq!(res.headers[0].value, b"bar"); } } res! { test_response_reason_with_space_and_tab, b"HTTP/1.1 101 Switching Protocols\t\r\n\r\n", |res| { assert_eq!(res.version.unwrap(), 1); assert_eq!(res.code.unwrap(), 101); assert_eq!(res.reason.unwrap(), "Switching Protocols\t"); } } static RESPONSE_REASON_WITH_OBS_TEXT_BYTE: &'static [u8] = b"HTTP/1.1 200 X\xFFZ\r\n\r\n"; res! { test_response_reason_with_obsolete_text_byte, RESPONSE_REASON_WITH_OBS_TEXT_BYTE, |res| { assert_eq!(res.version.unwrap(), 1); assert_eq!(res.code.unwrap(), 200); // Empty string fallback in case of obs-text assert_eq!(res.reason.unwrap(), ""); } } res! { test_response_reason_with_nul_byte, b"HTTP/1.1 200 \x00\r\n\r\n", Err(::Error::Status), |_res| {} } res! { test_response_version_missing_space, b"HTTP/1.1", Ok(Status::Partial), |_res| {} } res! { test_response_code_missing_space, b"HTTP/1.1 200", Ok(Status::Partial), |_res| {} } res! 
{ test_response_empty_lines_prefix_lf_only, b"\n\nHTTP/1.1 200 OK\n\n", |_res| {} } #[test] fn test_chunk_size() { assert_eq!(parse_chunk_size(b"0\r\n"), Ok(Status::Complete((3, 0)))); assert_eq!(parse_chunk_size(b"12\r\nchunk"), Ok(Status::Complete((4, 18)))); assert_eq!(parse_chunk_size(b"3086d\r\n"), Ok(Status::Complete((7, 198765)))); assert_eq!(parse_chunk_size(b"3735AB1;foo bar*\r\n"), Ok(Status::Complete((18, 57891505)))); assert_eq!(parse_chunk_size(b"3735ab1 ; baz \r\n"), Ok(Status::Complete((16, 57891505)))); assert_eq!(parse_chunk_size(b"77a65\r"), Ok(Status::Partial)); assert_eq!(parse_chunk_size(b"ab"), Ok(Status::Partial)); assert_eq!(parse_chunk_size(b"567f8a\rfoo"), Err(::InvalidChunkSize)); assert_eq!(parse_chunk_size(b"567f8a\rfoo"), Err(::InvalidChunkSize)); assert_eq!(parse_chunk_size(b"567xf8a\r\n"), Err(::InvalidChunkSize)); assert_eq!(parse_chunk_size(b"ffffffffffffffff\r\n"), Ok(Status::Complete((18, ::core::u64::MAX)))); assert_eq!(parse_chunk_size(b"1ffffffffffffffff\r\n"), Err(::InvalidChunkSize)); assert_eq!(parse_chunk_size(b"Affffffffffffffff\r\n"), Err(::InvalidChunkSize)); assert_eq!(parse_chunk_size(b"fffffffffffffffff\r\n"), Err(::InvalidChunkSize)); } #[cfg(feature = "std")] #[test] fn test_std_error() { use super::Error; use std::error::Error as StdError; let err = Error::HeaderName; assert_eq!(err.to_string(), err.description()); } }
33.480786
108
0.499413
b9d297519bee69beb791c6bd69ddadf8097f172d
1,202
use css_color_parser::Color; const BLACK_COLOR: Color = Color { r: 0, g: 0, b: 0, a: 1.0 }; const WHITE_COLOR: Color = Color { r: 255, g: 255, b: 255, a: 1.0 }; pub fn generate_qr(bytes: &[u8], width: u32, height: u32, color: &str, background_color: &str) -> Vec<u8> { let qr = qrcode::QrCode::new(bytes); let color = color.parse::<css_color_parser::Color>().unwrap_or( BLACK_COLOR ); let background_color = background_color.parse::<css_color_parser::Color>().unwrap_or( WHITE_COLOR ); let dark_color = image::Rgba::from([ color.r, color.g, color.b, (color.a * 255.) as u8 ]); let light_color = image::Rgba::from([ background_color.r, background_color.g, background_color.b, (background_color.a * 255.) as u8 ]); let result = qr.unwrap() .render::<image::Rgba<u8>>() .dark_color( dark_color ) .light_color( light_color ) .build(); let image = image::DynamicImage::ImageRgba8(result).resize( width, height, image::imageops::Nearest ); image.to_bytes() }
26.711111
101
0.55574
3376c6e17caf769cabfa39fd93e5c988fb002331
1,365
use super::atomic::{Atomic, IntCast}; use crate::types::ValueType; use std::{cell::Cell, marker::PhantomData, ops::Deref, slice}; pub trait Atomicity {} pub struct Atomically; impl Atomicity for Atomically {} pub struct NonAtomically; impl Atomicity for NonAtomically {} pub struct MemoryView<'a, T: 'a, A = NonAtomically> { ptr: *mut T, length: usize, _phantom: PhantomData<(&'a [Cell<T>], A)>, } impl<'a, T> MemoryView<'a, T, NonAtomically> where T: ValueType, { pub(super) unsafe fn new(ptr: *mut T, length: u32) -> Self { Self { ptr, length: length as usize, _phantom: PhantomData, } } } impl<'a, T: IntCast> MemoryView<'a, T, NonAtomically> { pub fn atomically(&self) -> MemoryView<'a, T, Atomically> { MemoryView { ptr: self.ptr, length: self.length, _phantom: PhantomData, } } } impl<'a, T> Deref for MemoryView<'a, T, NonAtomically> { type Target = [Cell<T>]; fn deref(&self) -> &[Cell<T>] { unsafe { slice::from_raw_parts(self.ptr as *const Cell<T>, self.length) } } } impl<'a, T: IntCast> Deref for MemoryView<'a, T, Atomically> { type Target = [Atomic<T>]; fn deref(&self) -> &[Atomic<T>] { unsafe { slice::from_raw_parts(self.ptr as *const Atomic<T>, self.length) } } }
25.277778
83
0.591941
61afae269d696f1f992973c4a8903e40064b80a3
569
//! Query validation related methods and data structures mod context; mod input_value; mod multi_visitor; mod rules; mod traits; mod visitor; #[cfg(test)] pub(crate) mod test_harness; pub(crate) use self::rules::visit_all_rules; pub use self::{ context::{RuleError, ValidatorContext}, input_value::validate_input_values, multi_visitor::MultiVisitorNil, traits::Visitor, visitor::visit, }; #[cfg(test)] pub use self::test_harness::{ expect_fails_rule, expect_fails_rule_with_schema, expect_passes_rule, expect_passes_rule_with_schema, };
21.074074
73
0.752197
de0658dc5c6f02d68768a8e2e368221f529d68d6
8,679
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::validators::{new_validator_context_by_name, Validator}, anyhow::Error, fidl::endpoints::DiscoverableService, fuchsia_syslog as syslog, serde::Deserialize, serde_json::{self, value::Value}, std::{collections::HashMap, default::Default, io}, }; const FACTORY_DEVICE_CONFIG: &'static str = "/config/data/factory.config"; /// Type that maps a file to a group of arguments passed to a validator. pub type ValidatorFileArgsMap = HashMap<String, Value>; #[derive(Debug, Deserialize)] pub struct FactoryFileSpec { pub dest: Option<String>, pub path: String, #[serde(default)] validators: Vec<ValidatorSpec>, } #[derive(Debug, Deserialize)] pub struct ValidatorSpec { pub name: String, #[serde(default)] pub args: Value, } #[derive(Debug)] pub struct ValidatorContext { pub name: String, pub paths_to_validate: Vec<String>, pub validator: Box<dyn Validator>, } #[derive(Debug)] pub struct ConfigContext { pub file_path_map: HashMap<String, String>, pub validator_contexts: Vec<ValidatorContext>, } #[derive(Debug, Default, Deserialize)] pub struct Config { files: Vec<FactoryFileSpec>, } impl Config { fn load_file(path: &str) -> Result<Self, Error> { Ok(serde_json::from_reader(io::BufReader::new(std::fs::File::open(path)?))?) } pub fn load<T>() -> Result<Self, Error> where T: DiscoverableService, { let config_data_file = format!("/config/data/{}.config", &T::SERVICE_NAME); syslog::fx_log_info!("Loading {}", &config_data_file); Config::load_file(&config_data_file) } pub fn into_context(self) -> Result<ConfigContext, Error> { let mut file_path_map = HashMap::new(); let mut validator_config_map: HashMap<String, ValidatorFileArgsMap> = HashMap::new(); // De-dupe validator configurations over the collection of file specs. 
for file in self.files.into_iter() { if file.validators.is_empty() { syslog::fx_log_warn!( "Entry {:?} must have at least one validator to be processed, skipping", &file.path ); continue; } for validator in file.validators.into_iter() { match validator_config_map.get_mut(&validator.name) { Some(args_map) => { args_map.insert(file.path.clone(), validator.args); } None => { let mut args_map = ValidatorFileArgsMap::new(); args_map.insert(file.path.clone(), validator.args); validator_config_map.insert(validator.name, args_map); } }; } let dest = file.dest.unwrap_or(file.path.clone()); match file_path_map.get(&file.path) { Some(old_dest) => { syslog::fx_log_warn!( "Entry {:?} already mapped to destination {:?}, ignoring mapping to {:?}", &file.path, old_dest, dest ); } None => { file_path_map.insert(file.path.clone(), dest); } }; } // Now that validation configurations have been de-duped, the validators can actually be // constructed. let mut validator_contexts = Vec::new(); for (name, args_map) in validator_config_map.into_iter() { validator_contexts.push(new_validator_context_by_name(&name, args_map)?); } Ok(ConfigContext { file_path_map, validator_contexts }) } } #[derive(Debug, Deserialize)] #[serde(rename_all = "snake_case")] pub enum FactoryConfig { FactoryItems, Ext4(String), } impl FactoryConfig { pub fn load() -> Result<Self, Error> { Ok(serde_json::from_reader(io::BufReader::new(std::fs::File::open( FACTORY_DEVICE_CONFIG, )?))?) 
} } impl Default for FactoryConfig { fn default() -> Self { FactoryConfig::FactoryItems } } #[cfg(test)] mod tests { use {super::*, serde_json::json}; #[test] fn test_simple_configs() { { let config_json = json!({ "files": [ { "path": "file1", "validators": [ { "name": "pass" } ] } ] }); let config: Config = serde_json::from_value(config_json).unwrap(); assert_eq!("file1", config.files[0].path); assert_eq!(None, config.files[0].dest); let context = config.into_context().unwrap(); assert_eq!("file1", context.file_path_map.get("file1").unwrap()); assert_eq!("pass", context.validator_contexts[0].name); assert_eq!(vec!["file1"], context.validator_contexts[0].paths_to_validate); } { let config_json = json!({ "files": [ { "path": "file2", "dest": "destfile2", "validators": [ { "name": "pass" } ] } ] }); let config: Config = serde_json::from_value(config_json).unwrap(); assert_eq!("file2", config.files[0].path); assert_eq!(Some("destfile2".to_string()), config.files[0].dest); let context = config.into_context().unwrap(); assert_eq!("destfile2", context.file_path_map.get("file2").unwrap()); assert_eq!("pass", context.validator_contexts[0].name); assert_eq!(vec!["file2"], context.validator_contexts[0].paths_to_validate); } } #[test] fn test_into_validation_contexts_dedupes_validators() { let config_json = json!({ "files": [ { "path": "file1", "validators": [ { "name": "pass" } ] }, { "path": "file2", "validators": [ { "name": "pass", } ] }, { "path": "file3", "validators": [ { "name": "text" } ] } ] }); let config: Config = serde_json::from_value(config_json).unwrap(); let context = config.into_context().unwrap(); assert_eq!(2, context.validator_contexts.len()); for mut validator_context in context.validator_contexts { match validator_context.name.as_ref() { "pass" => { validator_context.paths_to_validate.sort_unstable(); assert_eq!(vec!["file1", "file2"], validator_context.paths_to_validate) } "text" => assert_eq!(vec!["file3"], validator_context.paths_to_validate), _ => 
panic!("Unexpected validator: {}", validator_context.name), } } } #[test] fn test_config_context_skips_unvalidated_files() { let config_json = json!({ "files": [ { "path": "file1", "validators": [ { "name": "pass" } ] }, { "path": "file2", } ], }); let config: Config = serde_json::from_value(config_json).unwrap(); let context = config.into_context().unwrap(); assert_eq!(1, context.file_path_map.len()); assert_eq!(1, context.validator_contexts.len()); assert_eq!("pass", context.validator_contexts[0].name); assert_eq!(vec!["file1"], context.validator_contexts[0].paths_to_validate); } }
32.62782
98
0.494642
697823712bf05e4ef411709915b1100fea7672cc
8,846
#![feature(test)] // compiletest_rs requires this attribute use compiletest_rs as compiletest; use compiletest_rs::common::Mode as TestMode; use std::env::{self, set_var, var}; use std::ffi::OsStr; use std::fs; use std::io; use std::path::{Path, PathBuf}; mod cargo; fn host_lib() -> PathBuf { option_env!("HOST_LIBS").map_or(cargo::CARGO_TARGET_DIR.join(env!("PROFILE")), PathBuf::from) } fn clippy_driver_path() -> PathBuf { option_env!("CLIPPY_DRIVER_PATH").map_or(cargo::TARGET_LIB.join("clippy-driver"), PathBuf::from) } // When we'll want to use `extern crate ..` for a dependency that is used // both by the crate and the compiler itself, we can't simply pass -L flags // as we'll get a duplicate matching versions. Instead, disambiguate with // `--extern dep=path`. // See https://github.com/rust-lang/rust-clippy/issues/4015. // // FIXME: We cannot use `cargo build --message-format=json` to resolve to dependency files. // Because it would force-rebuild if the options passed to `build` command is not the same // as what we manually pass to `cargo` invocation fn third_party_crates() -> String { use std::collections::HashMap; static CRATES: &[&str] = &["serde", "serde_derive", "regex", "clippy_lints", "syn", "quote"]; let dep_dir = cargo::TARGET_LIB.join("deps"); let mut crates: HashMap<&str, PathBuf> = HashMap::with_capacity(CRATES.len()); for entry in fs::read_dir(dep_dir).unwrap() { let path = match entry { Ok(entry) => entry.path(), Err(_) => continue, }; if let Some(name) = path.file_name().and_then(OsStr::to_str) { for dep in CRATES { if name.starts_with(&format!("lib{}-", dep)) && name.ends_with(".rlib") { if let Some(old) = crates.insert(dep, path.clone()) { panic!("Found multiple rlibs for crate `{}`: `{:?}` and `{:?}", dep, old, path); } break; } } } } let v: Vec<_> = crates .into_iter() .map(|(dep, path)| format!("--extern {}={}", dep, path.display())) .collect(); v.join(" ") } fn default_config() -> compiletest::Config { let mut config = 
compiletest::Config::default(); if let Ok(name) = env::var("TESTNAME") { config.filter = Some(name); } if let Some(path) = option_env!("RUSTC_LIB_PATH") { let path = PathBuf::from(path); config.run_lib_path = path.clone(); config.compile_lib_path = path; } config.target_rustcflags = Some(format!( "-L {0} -L {1} -Dwarnings -Zui-testing {2}", host_lib().join("deps").display(), cargo::TARGET_LIB.join("deps").display(), third_party_crates(), )); config.build_base = if cargo::is_rustc_test_suite() { // This make the stderr files go to clippy OUT_DIR on rustc repo build dir let mut path = PathBuf::from(env!("OUT_DIR")); path.push("test_build_base"); path } else { host_lib().join("test_build_base") }; config.rustc_path = clippy_driver_path(); config } fn run_mode(cfg: &mut compiletest::Config) { cfg.mode = TestMode::Ui; cfg.src_base = Path::new("tests").join("ui"); compiletest::run_tests(&cfg); } fn run_ui_toml(config: &mut compiletest::Config) { fn run_tests(config: &compiletest::Config, mut tests: Vec<tester::TestDescAndFn>) -> Result<bool, io::Error> { let mut result = true; let opts = compiletest::test_opts(config); for dir in fs::read_dir(&config.src_base)? { let dir = dir?; if !dir.file_type()?.is_dir() { continue; } let dir_path = dir.path(); set_var("CARGO_MANIFEST_DIR", &dir_path); for file in fs::read_dir(&dir_path)? 
{ let file = file?; let file_path = file.path(); if file.file_type()?.is_dir() { continue; } if file_path.extension() != Some(OsStr::new("rs")) { continue; } let paths = compiletest::common::TestPaths { file: file_path, base: config.src_base.clone(), relative_dir: dir_path.file_name().unwrap().into(), }; let test_name = compiletest::make_test_name(&config, &paths); let index = tests .iter() .position(|test| test.desc.name == test_name) .expect("The test should be in there"); result &= tester::run_tests_console(&opts, vec![tests.swap_remove(index)])?; } } Ok(result) } config.mode = TestMode::Ui; config.src_base = Path::new("tests").join("ui-toml").canonicalize().unwrap(); let tests = compiletest::make_tests(&config); let manifest_dir = var("CARGO_MANIFEST_DIR").unwrap_or_default(); let res = run_tests(&config, tests); set_var("CARGO_MANIFEST_DIR", &manifest_dir); match res { Ok(true) => {}, Ok(false) => panic!("Some tests failed"), Err(e) => { panic!("I/O failure during tests: {:?}", e); }, } } fn run_ui_cargo(config: &mut compiletest::Config) { fn run_tests( config: &compiletest::Config, filter: &Option<String>, mut tests: Vec<tester::TestDescAndFn>, ) -> Result<bool, io::Error> { let mut result = true; let opts = compiletest::test_opts(config); for dir in fs::read_dir(&config.src_base)? { let dir = dir?; if !dir.file_type()?.is_dir() { continue; } // Use the filter if provided let dir_path = dir.path(); match &filter { Some(name) if !dir_path.ends_with(name) => continue, _ => {}, } for case in fs::read_dir(&dir_path)? { let case = case?; if !case.file_type()?.is_dir() { continue; } let src_path = case.path().join("src"); // When switching between branches, if the previous branch had a test // that the current branch does not have, the directory is not removed // because an ignored Cargo.lock file exists. if !src_path.exists() { continue; } env::set_current_dir(&src_path)?; for file in fs::read_dir(&src_path)? 
{ let file = file?; if file.file_type()?.is_dir() { continue; } // Search for the main file to avoid running a test for each file in the project let file_path = file.path(); match file_path.file_name().and_then(OsStr::to_str) { Some("main.rs") => {}, _ => continue, } let paths = compiletest::common::TestPaths { file: file_path, base: config.src_base.clone(), relative_dir: src_path.strip_prefix(&config.src_base).unwrap().into(), }; let test_name = compiletest::make_test_name(&config, &paths); let index = tests .iter() .position(|test| test.desc.name == test_name) .expect("The test should be in there"); result &= tester::run_tests_console(&opts, vec![tests.swap_remove(index)])?; } } } Ok(result) } if cargo::is_rustc_test_suite() { return; } config.mode = TestMode::Ui; config.src_base = Path::new("tests").join("ui-cargo").canonicalize().unwrap(); let tests = compiletest::make_tests(&config); let current_dir = env::current_dir().unwrap(); let filter = env::var("TESTNAME").ok(); let res = run_tests(&config, &filter, tests); env::set_current_dir(current_dir).unwrap(); match res { Ok(true) => {}, Ok(false) => panic!("Some tests failed"), Err(e) => { panic!("I/O failure during tests: {:?}", e); }, } } fn prepare_env() { set_var("CLIPPY_DISABLE_DOCS_LINKS", "true"); set_var("CLIPPY_TESTS", "true"); //set_var("RUST_BACKTRACE", "0"); } #[test] fn compile_test() { prepare_env(); let mut config = default_config(); run_mode(&mut config); run_ui_toml(&mut config); run_ui_cargo(&mut config); }
34.554688
114
0.534818
edd0446d6f7ca2e42c81f5a6cff613f20f456d5d
5,907
#![recursion_limit = "512"] //! Actori-web codegen module //! //! Generators for routes and scopes //! //! ## Route //! //! Macros: //! //! - [get](attr.get.html) //! - [post](attr.post.html) //! - [put](attr.put.html) //! - [delete](attr.delete.html) //! - [head](attr.head.html) //! - [connect](attr.connect.html) //! - [options](attr.options.html) //! - [trace](attr.trace.html) //! - [patch](attr.patch.html) //! //! ### Attributes: //! //! - `"path"` - Raw literal string with path for which to register handle. Mandatory. //! - `guard="function_name"` - Registers function as guard using `actori_web::guard::fn_guard` //! //! ## Notes //! //! Function name can be specified as any expression that is going to be accessible to the generate //! code (e.g `my_guard` or `my_module::my_guard`) //! //! ## Example: //! //! ```rust //! use actori_web::HttpResponse; //! use actori_web_codegen::get; //! use futures::{future, Future}; //! //! #[get("/test")] //! async fn async_test() -> Result<HttpResponse, actori_web::Error> { //! Ok(HttpResponse::Ok().finish()) //! } //! ``` extern crate proc_macro; mod route; use proc_macro::TokenStream; use syn::parse_macro_input; /// Creates route handler with `GET` method guard. /// /// Syntax: `#[get("path"[, attributes])]` /// /// ## Attributes: /// /// - `"path"` - Raw literal string with path for which to register handler. Mandatory. /// - `guard="function_name"` - Registers function as guard using `actori_web::guard::fn_guard` #[proc_macro_attribute] pub fn get(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as syn::AttributeArgs); let gen = match route::Route::new(args, input, route::GuardType::Get) { Ok(gen) => gen, Err(err) => return err.to_compile_error().into(), }; gen.generate() } /// Creates route handler with `POST` method guard. 
/// /// Syntax: `#[post("path"[, attributes])]` /// /// Attributes are the same as in [get](attr.get.html) #[proc_macro_attribute] pub fn post(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as syn::AttributeArgs); let gen = match route::Route::new(args, input, route::GuardType::Post) { Ok(gen) => gen, Err(err) => return err.to_compile_error().into(), }; gen.generate() } /// Creates route handler with `PUT` method guard. /// /// Syntax: `#[put("path"[, attributes])]` /// /// Attributes are the same as in [get](attr.get.html) #[proc_macro_attribute] pub fn put(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as syn::AttributeArgs); let gen = match route::Route::new(args, input, route::GuardType::Put) { Ok(gen) => gen, Err(err) => return err.to_compile_error().into(), }; gen.generate() } /// Creates route handler with `DELETE` method guard. /// /// Syntax: `#[delete("path"[, attributes])]` /// /// Attributes are the same as in [get](attr.get.html) #[proc_macro_attribute] pub fn delete(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as syn::AttributeArgs); let gen = match route::Route::new(args, input, route::GuardType::Delete) { Ok(gen) => gen, Err(err) => return err.to_compile_error().into(), }; gen.generate() } /// Creates route handler with `HEAD` method guard. /// /// Syntax: `#[head("path"[, attributes])]` /// /// Attributes are the same as in [head](attr.head.html) #[proc_macro_attribute] pub fn head(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as syn::AttributeArgs); let gen = match route::Route::new(args, input, route::GuardType::Head) { Ok(gen) => gen, Err(err) => return err.to_compile_error().into(), }; gen.generate() } /// Creates route handler with `CONNECT` method guard. 
/// /// Syntax: `#[connect("path"[, attributes])]` /// /// Attributes are the same as in [connect](attr.connect.html) #[proc_macro_attribute] pub fn connect(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as syn::AttributeArgs); let gen = match route::Route::new(args, input, route::GuardType::Connect) { Ok(gen) => gen, Err(err) => return err.to_compile_error().into(), }; gen.generate() } /// Creates route handler with `OPTIONS` method guard. /// /// Syntax: `#[options("path"[, attributes])]` /// /// Attributes are the same as in [options](attr.options.html) #[proc_macro_attribute] pub fn options(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as syn::AttributeArgs); let gen = match route::Route::new(args, input, route::GuardType::Options) { Ok(gen) => gen, Err(err) => return err.to_compile_error().into(), }; gen.generate() } /// Creates route handler with `TRACE` method guard. /// /// Syntax: `#[trace("path"[, attributes])]` /// /// Attributes are the same as in [trace](attr.trace.html) #[proc_macro_attribute] pub fn trace(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as syn::AttributeArgs); let gen = match route::Route::new(args, input, route::GuardType::Trace) { Ok(gen) => gen, Err(err) => return err.to_compile_error().into(), }; gen.generate() } /// Creates route handler with `PATCH` method guard. /// /// Syntax: `#[patch("path"[, attributes])]` /// /// Attributes are the same as in [patch](attr.patch.html) #[proc_macro_attribute] pub fn patch(args: TokenStream, input: TokenStream) -> TokenStream { let args = parse_macro_input!(args as syn::AttributeArgs); let gen = match route::Route::new(args, input, route::GuardType::Patch) { Ok(gen) => gen, Err(err) => return err.to_compile_error().into(), }; gen.generate() }
31.588235
99
0.642289
750765fb49b19dc9b5037904b9c422b2dc13677f
20,101
use crate::sync::batch_semaphore::Semaphore; use std::cell::UnsafeCell; use std::fmt; use std::marker; use std::mem; use std::ops; #[cfg(not(loom))] const MAX_READS: usize = 32; #[cfg(loom)] const MAX_READS: usize = 10; /// An asynchronous reader-writer lock. /// /// This type of lock allows a number of readers or at most one writer at any /// point in time. The write portion of this lock typically allows modification /// of the underlying data (exclusive access) and the read portion of this lock /// typically allows for read-only access (shared access). /// /// In comparison, a [`Mutex`] does not distinguish between readers or writers /// that acquire the lock, therefore causing any tasks waiting for the lock to /// become available to yield. An `RwLock` will allow any number of readers to /// acquire the lock as long as a writer is not holding the lock. /// /// The priority policy of Tokio's read-write lock is _fair_ (or /// [_write-preferring_]), in order to ensure that readers cannot starve /// writers. Fairness is ensured using a first-in, first-out queue for the tasks /// awaiting the lock; if a task that wishes to acquire the write lock is at the /// head of the queue, read locks will not be given out until the write lock has /// been released. This is in contrast to the Rust standard library's /// `std::sync::RwLock`, where the priority policy is dependent on the /// operating system's implementation. /// /// The type parameter `T` represents the data that this lock protects. It is /// required that `T` satisfies [`Send`] to be shared across threads. The RAII guards /// returned from the locking methods implement [`Deref`](trait@std::ops::Deref) /// (and [`DerefMut`](trait@std::ops::DerefMut) /// for the `write` methods) to allow access to the content of the lock. 
/// /// # Examples /// /// ``` /// use tokio::sync::RwLock; /// /// #[tokio::main] /// async fn main() { /// let lock = RwLock::new(5); /// /// // many reader locks can be held at once /// { /// let r1 = lock.read().await; /// let r2 = lock.read().await; /// assert_eq!(*r1, 5); /// assert_eq!(*r2, 5); /// } // read locks are dropped at this point /// /// // only one write lock may be held, however /// { /// let mut w = lock.write().await; /// *w += 1; /// assert_eq!(*w, 6); /// } // write lock is dropped here /// } /// ``` /// /// [`Mutex`]: struct@super::Mutex /// [`RwLock`]: struct@RwLock /// [`RwLockReadGuard`]: struct@RwLockReadGuard /// [`RwLockWriteGuard`]: struct@RwLockWriteGuard /// [`Send`]: trait@std::marker::Send /// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies #[derive(Debug)] pub struct RwLock<T: ?Sized> { //semaphore to coordinate read and write access to T s: Semaphore, //inner data T c: UnsafeCell<T>, } /// RAII structure used to release the shared read access of a lock when /// dropped. /// /// This structure is created by the [`read`] method on /// [`RwLock`]. /// /// [`read`]: method@RwLock::read /// [`RwLock`]: struct@RwLock pub struct RwLockReadGuard<'a, T: ?Sized> { s: &'a Semaphore, data: *const T, marker: marker::PhantomData<&'a T>, } impl<'a, T> RwLockReadGuard<'a, T> { /// Make a new `RwLockReadGuard` for a component of the locked data. /// /// This operation cannot fail as the `RwLockReadGuard` passed in already /// locked the data. /// /// This is an associated function that needs to be /// used as `RwLockReadGuard::map(...)`. A method would interfere with /// methods of the same name on the contents of the locked data. /// /// This is an asynchronous version of [`RwLockReadGuard::map`] from the /// [`parking_lot` crate]. 
/// /// [`RwLockReadGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.map /// [`parking_lot` crate]: https://crates.io/crates/parking_lot /// /// # Examples /// /// ``` /// use tokio::sync::{RwLock, RwLockReadGuard}; /// /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] /// struct Foo(u32); /// /// # #[tokio::main] /// # async fn main() { /// let lock = RwLock::new(Foo(1)); /// /// let guard = lock.read().await; /// let guard = RwLockReadGuard::map(guard, |f| &f.0); /// /// assert_eq!(1, *guard); /// # } /// ``` #[inline] pub fn map<F, U: ?Sized>(this: Self, f: F) -> RwLockReadGuard<'a, U> where F: FnOnce(&T) -> &U, { let data = f(&*this) as *const U; let s = this.s; // NB: Forget to avoid drop impl from being called. mem::forget(this); RwLockReadGuard { s, data, marker: marker::PhantomData, } } /// Attempts to make a new [`RwLockReadGuard`] for a component of the /// locked data. The original guard is returned if the closure returns /// `None`. /// /// This operation cannot fail as the `RwLockReadGuard` passed in already /// locked the data. /// /// This is an associated function that needs to be used as /// `RwLockReadGuard::try_map(..)`. A method would interfere with methods of the /// same name on the contents of the locked data. /// /// This is an asynchronous version of [`RwLockReadGuard::try_map`] from the /// [`parking_lot` crate]. 
/// /// [`RwLockReadGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.try_map /// [`parking_lot` crate]: https://crates.io/crates/parking_lot /// /// # Examples /// /// ``` /// use tokio::sync::{RwLock, RwLockReadGuard}; /// /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] /// struct Foo(u32); /// /// # #[tokio::main] /// # async fn main() { /// let lock = RwLock::new(Foo(1)); /// /// let guard = lock.read().await; /// let guard = RwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail"); /// /// assert_eq!(1, *guard); /// # } /// ``` #[inline] pub fn try_map<F, U: ?Sized>(this: Self, f: F) -> Result<RwLockReadGuard<'a, U>, Self> where F: FnOnce(&T) -> Option<&U>, { let data = match f(&*this) { Some(data) => data as *const U, None => return Err(this), }; let s = this.s; // NB: Forget to avoid drop impl from being called. mem::forget(this); Ok(RwLockReadGuard { s, data, marker: marker::PhantomData, }) } } impl<'a, T: ?Sized> fmt::Debug for RwLockReadGuard<'a, T> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<'a, T: ?Sized> fmt::Display for RwLockReadGuard<'a, T> where T: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&**self, f) } } impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> { fn drop(&mut self) { self.s.release(1); } } /// RAII structure used to release the exclusive write access of a lock when /// dropped. /// /// This structure is created by the [`write`] and method /// on [`RwLock`]. /// /// [`write`]: method@RwLock::write /// [`RwLock`]: struct@RwLock pub struct RwLockWriteGuard<'a, T: ?Sized> { s: &'a Semaphore, data: *mut T, marker: marker::PhantomData<&'a mut T>, } impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> { /// Make a new `RwLockWriteGuard` for a component of the locked data. 
/// /// This operation cannot fail as the `RwLockWriteGuard` passed in already /// locked the data. /// /// This is an associated function that needs to be used as /// `RwLockWriteGuard::map(..)`. A method would interfere with methods of /// the same name on the contents of the locked data. /// /// This is an asynchronous version of [`RwLockWriteGuard::map`] from the /// [`parking_lot` crate]. /// /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map /// [`parking_lot` crate]: https://crates.io/crates/parking_lot /// /// # Examples /// /// ``` /// use tokio::sync::{RwLock, RwLockWriteGuard}; /// /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] /// struct Foo(u32); /// /// # #[tokio::main] /// # async fn main() { /// let lock = RwLock::new(Foo(1)); /// /// { /// let mut mapped = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0); /// *mapped = 2; /// } /// /// assert_eq!(Foo(2), *lock.read().await); /// # } /// ``` #[inline] pub fn map<F, U: ?Sized>(mut this: Self, f: F) -> RwLockWriteGuard<'a, U> where F: FnOnce(&mut T) -> &mut U, { let data = f(&mut *this) as *mut U; let s = this.s; // NB: Forget to avoid drop impl from being called. mem::forget(this); RwLockWriteGuard { s, data, marker: marker::PhantomData, } } /// Attempts to make a new [`RwLockWriteGuard`] for a component of /// the locked data. The original guard is returned if the closure returns /// `None`. /// /// This operation cannot fail as the `RwLockWriteGuard` passed in already /// locked the data. /// /// This is an associated function that needs to be /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with /// methods of the same name on the contents of the locked data. /// /// This is an asynchronous version of [`RwLockWriteGuard::try_map`] from /// the [`parking_lot` crate]. 
/// /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map /// [`parking_lot` crate]: https://crates.io/crates/parking_lot /// /// # Examples /// /// ``` /// use tokio::sync::{RwLock, RwLockWriteGuard}; /// /// #[derive(Debug, Clone, Copy, PartialEq, Eq)] /// struct Foo(u32); /// /// # #[tokio::main] /// # async fn main() { /// let lock = RwLock::new(Foo(1)); /// /// { /// let guard = lock.write().await; /// let mut guard = RwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail"); /// *guard = 2; /// } /// /// assert_eq!(Foo(2), *lock.read().await); /// # } /// ``` #[inline] pub fn try_map<F, U: ?Sized>(mut this: Self, f: F) -> Result<RwLockWriteGuard<'a, U>, Self> where F: FnOnce(&mut T) -> Option<&mut U>, { let data = match f(&mut *this) { Some(data) => data as *mut U, None => return Err(this), }; let s = this.s; // NB: Forget to avoid drop impl from being called. mem::forget(this); Ok(RwLockWriteGuard { s, data, marker: marker::PhantomData, }) } /// Atomically downgrades a write lock into a read lock without allowing /// any writers to take exclusive access of the lock in the meantime. /// /// **Note:** This won't *necessarily* allow any additional readers to acquire /// locks, since [`RwLock`] is fair and it is possible that a writer is next /// in line. /// /// Returns an RAII guard which will drop the read access of this rwlock /// when dropped. 
/// /// # Examples /// /// ``` /// # use tokio::sync::RwLock; /// # use std::sync::Arc; /// # /// # #[tokio::main] /// # async fn main() { /// let lock = Arc::new(RwLock::new(1)); /// /// let n = lock.write().await; /// /// let cloned_lock = lock.clone(); /// let handle = tokio::spawn(async move { /// *cloned_lock.write().await = 2; /// }); /// /// let n = n.downgrade(); /// assert_eq!(*n, 1, "downgrade is atomic"); /// /// drop(n); /// handle.await.unwrap(); /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock"); /// # } /// ``` /// /// [`RwLock`]: struct@RwLock pub fn downgrade(self) -> RwLockReadGuard<'a, T> { let RwLockWriteGuard { s, data, .. } = self; // Release all but one of the permits held by the write guard s.release(MAX_READS - 1); // NB: Forget to avoid drop impl from being called. mem::forget(self); RwLockReadGuard { s, data, marker: marker::PhantomData, } } } impl<'a, T: ?Sized> fmt::Debug for RwLockWriteGuard<'a, T> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<'a, T: ?Sized> fmt::Display for RwLockWriteGuard<'a, T> where T: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&**self, f) } } impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> { fn drop(&mut self) { self.s.release(MAX_READS); } } #[test] #[cfg(not(loom))] fn bounds() { fn check_send<T: Send>() {} fn check_sync<T: Sync>() {} fn check_unpin<T: Unpin>() {} // This has to take a value, since the async fn's return type is unnameable. 
fn check_send_sync_val<T: Send + Sync>(_t: T) {} check_send::<RwLock<u32>>(); check_sync::<RwLock<u32>>(); check_unpin::<RwLock<u32>>(); check_send::<RwLockReadGuard<'_, u32>>(); check_sync::<RwLockReadGuard<'_, u32>>(); check_unpin::<RwLockReadGuard<'_, u32>>(); check_send::<RwLockWriteGuard<'_, u32>>(); check_sync::<RwLockWriteGuard<'_, u32>>(); check_unpin::<RwLockWriteGuard<'_, u32>>(); let rwlock = RwLock::new(0); check_send_sync_val(rwlock.read()); check_send_sync_val(rwlock.write()); } // As long as T: Send + Sync, it's fine to send and share RwLock<T> between threads. // If T were not Send, sending and sharing a RwLock<T> would be bad, since you can access T through // RwLock<T>. unsafe impl<T> Send for RwLock<T> where T: ?Sized + Send {} unsafe impl<T> Sync for RwLock<T> where T: ?Sized + Send + Sync {} // NB: These impls need to be explicit since we're storing a raw pointer. // Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over // `T` is `Send`. unsafe impl<T> Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {} unsafe impl<T> Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {} unsafe impl<T> Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {} // Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over // `T` is `Send` - but since this is also provides mutable access, we need to // make sure that `T` is `Send` since its value can be sent across thread // boundaries. unsafe impl<T> Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {} impl<T: ?Sized> RwLock<T> { /// Creates a new instance of an `RwLock<T>` which is unlocked. /// /// # Examples /// /// ``` /// use tokio::sync::RwLock; /// /// let lock = RwLock::new(5); /// ``` pub fn new(value: T) -> RwLock<T> where T: Sized, { RwLock { c: UnsafeCell::new(value), s: Semaphore::new(MAX_READS), } } /// Creates a new instance of an `RwLock<T>` which is unlocked. 
/// /// # Examples /// /// ``` /// use tokio::sync::RwLock; /// /// static LOCK: RwLock<i32> = RwLock::const_new(5); /// ``` #[cfg(all(feature = "parking_lot", not(all(loom, test))))] #[cfg_attr(docsrs, doc(cfg(feature = "parking_lot")))] pub const fn const_new(value: T) -> RwLock<T> where T: Sized, { RwLock { c: UnsafeCell::new(value), s: Semaphore::const_new(MAX_READS), } } /// Locks this rwlock with shared read access, causing the current task /// to yield until the lock has been acquired. /// /// The calling task will yield until there are no more writers which /// hold the lock. There may be other readers currently inside the lock when /// this method returns. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// use tokio::sync::RwLock; /// /// #[tokio::main] /// async fn main() { /// let lock = Arc::new(RwLock::new(1)); /// let c_lock = lock.clone(); /// /// let n = lock.read().await; /// assert_eq!(*n, 1); /// /// tokio::spawn(async move { /// // While main has an active read lock, we acquire one too. /// let r = c_lock.read().await; /// assert_eq!(*r, 1); /// }).await.expect("The spawned task has paniced"); /// /// // Drop the guard after the spawned task finishes. /// drop(n); ///} /// ``` pub async fn read(&self) -> RwLockReadGuard<'_, T> { self.s.acquire(1).await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. unreachable!() }); RwLockReadGuard { s: &self.s, data: self.c.get(), marker: marker::PhantomData, } } /// Locks this rwlock with exclusive write access, causing the current task /// to yield until the lock has been acquired. /// /// This function will not return while other writers or other readers /// currently have access to the lock. /// /// Returns an RAII guard which will drop the write access of this rwlock /// when dropped. 
/// /// # Examples /// /// ``` /// use tokio::sync::RwLock; /// /// #[tokio::main] /// async fn main() { /// let lock = RwLock::new(1); /// /// let mut n = lock.write().await; /// *n = 2; ///} /// ``` pub async fn write(&self) -> RwLockWriteGuard<'_, T> { self.s.acquire(MAX_READS as u32).await.unwrap_or_else(|_| { // The semaphore was closed. but, we never explicitly close it, and we have a // handle to it through the Arc, which means that this can never happen. unreachable!() }); RwLockWriteGuard { s: &self.s, data: self.c.get(), marker: marker::PhantomData, } } /// Returns a mutable reference to the underlying data. /// /// Since this call borrows the `RwLock` mutably, no actual locking needs to /// take place -- the mutable borrow statically guarantees no locks exist. /// /// # Examples /// /// ``` /// use tokio::sync::RwLock; /// /// fn main() { /// let mut lock = RwLock::new(1); /// /// let n = lock.get_mut(); /// *n = 2; /// } /// ``` pub fn get_mut(&mut self) -> &mut T { unsafe { // Safety: This is https://github.com/rust-lang/rust/pull/76936 &mut *self.c.get() } } /// Consumes the lock, returning the underlying data. pub fn into_inner(self) -> T where T: Sized, { self.c.into_inner() } } impl<T: ?Sized> ops::Deref for RwLockReadGuard<'_, T> { type Target = T; fn deref(&self) -> &T { unsafe { &*self.data } } } impl<T: ?Sized> ops::Deref for RwLockWriteGuard<'_, T> { type Target = T; fn deref(&self) -> &T { unsafe { &*self.data } } } impl<T: ?Sized> ops::DerefMut for RwLockWriteGuard<'_, T> { fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.data } } } impl<T> From<T> for RwLock<T> { fn from(s: T) -> Self { Self::new(s) } } impl<T: ?Sized> Default for RwLock<T> where T: Default, { fn default() -> Self { Self::new(T::default()) } }
30.641768
123
0.563057
64db5fbf4e0767cebbafffa2815abe0e1955e025
714
use criterion::{criterion_group, criterion_main, Criterion}; extern crate goscript_engine as engine; fn run(path: &str, trace: bool) -> usize { let cfg = engine::Config { work_dir: Some("./".to_string()), base_path: Some("./std/".to_string()), trace_parser: trace, trace_checker: trace, trace_vm: true, }; let engine = engine::Engine::new(cfg); engine.run(path) } fn leetcode5() { let err_cnt = run("./tests/demo/leetcode5.gos", false); assert!(err_cnt == 0); } pub fn criterion_benchmark(c: &mut Criterion) { c.bench_function("leet5", |b| b.iter(|| leetcode5())); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches);
25.5
60
0.642857
2f38089928bb75c542d047daf52853be482be780
768
#![feature(proc_macro)] // <- IMPORTANT! Feature gate for procedural macros #![feature(const_unsafe_cell_new)] #![no_std] extern crate f3; extern crate cortex_m_rtfm as rtfm; // <- this rename is required extern crate cortex_m_semihosting as semihosting; use core::fmt::Write; use rtfm::app; // <- this is a procedural macro use semihosting::hio; // This macro expands into the `main` function app! { // this is a path to a _device_ crate, a crate generated using svd2rust device: f3::stm32f30x, } // INITIALIZATION fn init(_p: init::Peripherals) { // Nothing to initialize in this example ... } // IDLE LOOP fn idle() -> ! { writeln!(hio::hstdout().unwrap(), "Hello, world!").unwrap(); // Go to sleep loop { rtfm::wfi(); } }
24
75
0.667969
648e89be037ff49c9f37d8d249b7d493919a267d
1,311
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test what happens we save incremental compilation state that makes // use of foreign items. This used to ICE (#34991). // revisions: rpass1 #![feature(rustc_private)] extern crate libc; use std::ffi::CString; mod mlibc { use libc::{c_char, c_long, c_longlong}; extern { pub fn atol(x: *const c_char) -> c_long; pub fn atoll(x: *const c_char) -> c_longlong; } } fn atol(s: String) -> isize { let c = CString::new(s).unwrap(); unsafe { mlibc::atol(c.as_ptr()) as isize } } fn atoll(s: String) -> i64 { let c = CString::new(s).unwrap(); unsafe { mlibc::atoll(c.as_ptr()) as i64 } } pub fn main() { assert_eq!(atol("1024".to_string()) * 10, atol("10240".to_string())); assert_eq!((atoll("11111111111111111".to_string()) * 10), atoll("111111111111111110".to_string())); }
28.5
73
0.664378
14754b04df09f0e6917271615d37b4f4754579b6
24,298
use std::{cell::RefCell, ops::Deref as _, rc::Rc}; use wayland_server::{ protocol::{ wl_pointer::{self, Axis, AxisSource, ButtonState, Request, WlPointer}, wl_surface::WlSurface, }, Filter, Main, }; use crate::wayland::compositor::{roles::Role, CompositorToken}; use crate::wayland::Serial; /// The role representing a surface set as the pointer cursor #[derive(Default, Copy, Clone)] pub struct CursorImageRole { /// Location of the hotspot of the pointer in the surface pub hotspot: (i32, i32), } /// Possible status of a cursor as requested by clients #[derive(Clone, PartialEq)] pub enum CursorImageStatus { /// The cursor should be hidden Hidden, /// The compositor should draw its cursor Default, /// The cursor should be drawn using this surface as an image Image(WlSurface), } enum GrabStatus { None, Active(Serial, Box<dyn PointerGrab>), Borrowed, } struct PointerInternal { known_pointers: Vec<WlPointer>, focus: Option<(WlSurface, (f64, f64))>, pending_focus: Option<(WlSurface, (f64, f64))>, location: (f64, f64), grab: GrabStatus, pressed_buttons: Vec<u32>, image_callback: Box<dyn FnMut(CursorImageStatus)>, } impl PointerInternal { fn new<F, R>(token: CompositorToken<R>, mut cb: F) -> PointerInternal where R: Role<CursorImageRole> + 'static, F: FnMut(CursorImageStatus) + 'static, { let mut old_status = CursorImageStatus::Default; let wrapper = move |new_status: CursorImageStatus| { if let CursorImageStatus::Image(surface) = ::std::mem::replace(&mut old_status, new_status.clone()) { match new_status { CursorImageStatus::Image(ref new_surface) if new_surface == &surface => { // don't remove the role, we are just re-binding the same surface } _ => { if surface.as_ref().is_alive() { token.remove_role::<CursorImageRole>(&surface).unwrap(); } } } } cb(new_status) }; PointerInternal { known_pointers: Vec::new(), focus: None, pending_focus: None, location: (0.0, 0.0), grab: GrabStatus::None, pressed_buttons: Vec::new(), image_callback: Box::new(wrapper) as Box<_>, } } fn 
with_focused_pointers<F>(&self, mut f: F) where F: FnMut(&WlPointer, &WlSurface), { if let Some((ref focus, _)) = self.focus { for ptr in &self.known_pointers { if ptr.as_ref().same_client_as(focus.as_ref()) { f(ptr, focus) } } } } fn with_grab<F>(&mut self, f: F) where F: FnOnce(PointerInnerHandle<'_>, &mut dyn PointerGrab), { let mut grab = ::std::mem::replace(&mut self.grab, GrabStatus::Borrowed); match grab { GrabStatus::Borrowed => panic!("Accessed a pointer grab from within a pointer grab access."), GrabStatus::Active(_, ref mut handler) => { f(PointerInnerHandle { inner: self }, &mut **handler); } GrabStatus::None => { f(PointerInnerHandle { inner: self }, &mut DefaultGrab); } } if let GrabStatus::Borrowed = self.grab { // the grab has not been ended nor replaced, put it back in place self.grab = grab; } } } /// An handle to a pointer handler /// /// It can be cloned and all clones manipulate the same internal state. /// /// This handle gives you access to an interface to send pointer events to your /// clients. /// /// When sending events using this handle, they will be intercepted by a pointer /// grab if any is active. See the [`PointerGrab`] trait for details. #[derive(Clone)] pub struct PointerHandle { inner: Rc<RefCell<PointerInternal>>, } impl PointerHandle { pub(crate) fn new_pointer(&self, pointer: WlPointer) { let mut guard = self.inner.borrow_mut(); guard.known_pointers.push(pointer); } /// Change the current grab on this pointer to the provided grab /// /// Overwrites any current grab. 
pub fn set_grab<G: PointerGrab + 'static>(&self, grab: G, serial: Serial) { self.inner.borrow_mut().grab = GrabStatus::Active(serial, Box::new(grab)); } /// Remove any current grab on this pointer, reseting it to the default behavior pub fn unset_grab(&self) { self.inner.borrow_mut().grab = GrabStatus::None; } /// Check if this pointer is currently grabbed with this serial pub fn has_grab(&self, serial: Serial) -> bool { let guard = self.inner.borrow_mut(); match guard.grab { GrabStatus::Active(s, _) => s == serial, _ => false, } } /// Check if this pointer is currently being grabbed pub fn is_grabbed(&self) -> bool { let guard = self.inner.borrow_mut(); !matches!(guard.grab, GrabStatus::None) } /// Returns the start data for the grab, if any. pub fn grab_start_data(&self) -> Option<GrabStartData> { let guard = self.inner.borrow(); match &guard.grab { GrabStatus::Active(_, g) => Some(g.start_data().clone()), _ => None, } } /// Notify that the pointer moved /// /// You provide the new location of the pointer, in the form of: /// /// - The coordinates of the pointer in the global compositor space /// - The surface on top of which the cursor is, and the coordinates of its /// origin in the global compositor space (or `None` of the pointer is not /// on top of a client surface). /// /// This will internally take care of notifying the appropriate client objects /// of enter/motion/leave events. pub fn motion( &self, location: (f64, f64), focus: Option<(WlSurface, (f64, f64))>, serial: Serial, time: u32, ) { let mut inner = self.inner.borrow_mut(); inner.pending_focus = focus.clone(); inner.with_grab(move |mut handle, grab| { grab.motion(&mut handle, location, focus, serial, time); }); } /// Notify that a button was pressed /// /// This will internally send the appropriate button event to the client /// objects matching with the currently focused surface. 
pub fn button(&self, button: u32, state: ButtonState, serial: Serial, time: u32) { let mut inner = self.inner.borrow_mut(); match state { ButtonState::Pressed => { inner.pressed_buttons.push(button); } ButtonState::Released => { inner.pressed_buttons.retain(|b| *b != button); } _ => unreachable!(), } inner.with_grab(|mut handle, grab| { grab.button(&mut handle, button, state, serial, time); }); } /// Start an axis frame /// /// A single frame will group multiple scroll events as if they happened in the same instance. pub fn axis(&self, details: AxisFrame) { self.inner.borrow_mut().with_grab(|mut handle, grab| { grab.axis(&mut handle, details); }); } /// Access the current location of this pointer in the global space pub fn current_location(&self) -> (f64, f64) { self.inner.borrow().location } } /// Data about the event that started the grab. #[derive(Clone)] pub struct GrabStartData { /// The focused surface and its location, if any, at the start of the grab. /// /// The location coordinates are in the global compositor space. pub focus: Option<(WlSurface, (f64, f64))>, /// The button that initiated the grab. pub button: u32, /// The location of the click that initiated the grab, in the global compositor space. pub location: (f64, f64), } /// A trait to implement a pointer grab /// /// In some context, it is necessary to temporarily change the behavior of the pointer. This is /// typically known as a pointer grab. A typical example would be, during a drag'n'drop operation, /// the underlying surfaces will no longer receive classic pointer event, but rather special events. /// /// This trait is the interface to intercept regular pointer events and change them as needed, its /// interface mimics the [`PointerHandle`] interface. /// /// If your logic decides that the grab should end, both [`PointerInnerHandle`] and [`PointerHandle`] have /// a method to change it. 
/// /// When your grab ends (either as you requested it or if it was forcefully cancelled by the server), /// the struct implementing this trait will be dropped. As such you should put clean-up logic in the destructor, /// rather than trying to guess when the grab will end. pub trait PointerGrab { /// A motion was reported fn motion( &mut self, handle: &mut PointerInnerHandle<'_>, location: (f64, f64), focus: Option<(WlSurface, (f64, f64))>, serial: Serial, time: u32, ); /// A button press was reported fn button( &mut self, handle: &mut PointerInnerHandle<'_>, button: u32, state: ButtonState, serial: Serial, time: u32, ); /// An axis scroll was reported fn axis(&mut self, handle: &mut PointerInnerHandle<'_>, details: AxisFrame); /// The data about the event that started the grab. fn start_data(&self) -> &GrabStartData; } /// This inner handle is accessed from inside a pointer grab logic, and directly /// sends event to the client pub struct PointerInnerHandle<'a> { inner: &'a mut PointerInternal, } impl<'a> PointerInnerHandle<'a> { /// Change the current grab on this pointer to the provided grab /// /// Overwrites any current grab. 
pub fn set_grab<G: PointerGrab + 'static>(&mut self, serial: Serial, grab: G) { self.inner.grab = GrabStatus::Active(serial, Box::new(grab)); } /// Remove any current grab on this pointer, resetting it to the default behavior /// /// This will also restore the focus of the underlying pointer pub fn unset_grab(&mut self, serial: Serial, time: u32) { self.inner.grab = GrabStatus::None; // restore the focus let location = self.current_location(); let focus = self.inner.pending_focus.take(); self.motion(location, focus, serial, time); } /// Access the current focus of this pointer pub fn current_focus(&self) -> Option<&(WlSurface, (f64, f64))> { self.inner.focus.as_ref() } /// Access the current location of this pointer in the global space pub fn current_location(&self) -> (f64, f64) { self.inner.location } /// A list of the currently physically pressed buttons /// /// This still includes buttons that your grab have intercepted and not sent /// to the client. pub fn current_pressed(&self) -> &[u32] { &self.inner.pressed_buttons } /// Notify that the pointer moved /// /// You provide the new location of the pointer, in the form of: /// /// - The coordinates of the pointer in the global compositor space /// - The surface on top of which the cursor is, and the coordinates of its /// origin in the global compositor space (or `None` of the pointer is not /// on top of a client surface). /// /// This will internally take care of notifying the appropriate client objects /// of enter/motion/leave events. pub fn motion( &mut self, (x, y): (f64, f64), focus: Option<(WlSurface, (f64, f64))>, serial: Serial, time: u32, ) { // do we leave a surface ? 
let mut leave = true; self.inner.location = (x, y); if let Some((ref current_focus, _)) = self.inner.focus { if let Some((ref surface, _)) = focus { if current_focus.as_ref().equals(surface.as_ref()) { leave = false; } } } if leave { self.inner.with_focused_pointers(|pointer, surface| { pointer.leave(serial.into(), &surface); if pointer.as_ref().version() >= 5 { pointer.frame(); } }); self.inner.focus = None; (self.inner.image_callback)(CursorImageStatus::Default); } // do we enter one ? if let Some((surface, (sx, sy))) = focus { let entered = self.inner.focus.is_none(); // in all cases, update the focus, the coordinates of the surface // might have changed self.inner.focus = Some((surface, (sx, sy))); if entered { self.inner.with_focused_pointers(|pointer, surface| { pointer.enter(serial.into(), &surface, x - sx, y - sy); if pointer.as_ref().version() >= 5 { pointer.frame(); } }) } else { // we were on top of a surface and remained on it self.inner.with_focused_pointers(|pointer, _| { pointer.motion(time, x - sx, y - sy); if pointer.as_ref().version() >= 5 { pointer.frame(); } }) } } } /// Notify that a button was pressed /// /// This will internally send the appropriate button event to the client /// objects matching with the currently focused surface. pub fn button(&self, button: u32, state: ButtonState, serial: Serial, time: u32) { self.inner.with_focused_pointers(|pointer, _| { pointer.button(serial.into(), time, button, state); if pointer.as_ref().version() >= 5 { pointer.frame(); } }) } /// Notify that an axis was scrolled /// /// This will internally send the appropriate axis events to the client /// objects matching with the currently focused surface. 
pub fn axis(&mut self, details: AxisFrame) { self.inner.with_focused_pointers(|pointer, _| { // axis if details.axis.0 != 0.0 { pointer.axis(details.time, Axis::HorizontalScroll, details.axis.0); } if details.axis.1 != 0.0 { pointer.axis(details.time, Axis::VerticalScroll, details.axis.1); } if pointer.as_ref().version() >= 5 { // axis source if let Some(source) = details.source { pointer.axis_source(source); } // axis discrete if details.discrete.0 != 0 { pointer.axis_discrete(Axis::HorizontalScroll, details.discrete.0); } if details.discrete.1 != 0 { pointer.axis_discrete(Axis::VerticalScroll, details.discrete.1); } // stop if details.stop.0 { pointer.axis_stop(details.time, Axis::HorizontalScroll); } if details.stop.1 { pointer.axis_stop(details.time, Axis::VerticalScroll); } // frame pointer.frame(); } }); } } /// A frame of pointer axis events. /// /// Can be used with the builder pattern, e.g.: /// /// ```ignore /// AxisFrame::new() /// .source(AxisSource::Wheel) /// .discrete(Axis::Vertical, 6) /// .value(Axis::Vertical, 30, time) /// .stop(Axis::Vertical); /// ``` #[derive(Copy, Clone, Debug)] pub struct AxisFrame { source: Option<AxisSource>, time: u32, axis: (f64, f64), discrete: (i32, i32), stop: (bool, bool), } impl AxisFrame { /// Create a new frame of axis events pub fn new(time: u32) -> Self { AxisFrame { source: None, time, axis: (0.0, 0.0), discrete: (0, 0), stop: (false, false), } } /// Specify the source of the axis events /// /// This event is optional, if no source is known, you can ignore this call. /// Only one source event is allowed per frame. /// /// Using the [`AxisSource::Finger`] requires a stop event to be send, /// when the user lifts off the finger (not necessarily in the same frame). pub fn source(mut self, source: AxisSource) -> Self { self.source = Some(source); self } /// Specify discrete scrolling steps additionally to the computed value. 
/// /// This event is optional and gives the client additional information about /// the nature of the axis event. E.g. a scroll wheel might issue separate steps, /// while a touchpad may never issue this event as it has no steps. pub fn discrete(mut self, axis: Axis, steps: i32) -> Self { match axis { Axis::HorizontalScroll => { self.discrete.0 = steps; } Axis::VerticalScroll => { self.discrete.1 = steps; } _ => unreachable!(), }; self } /// The actual scroll value. This event is the only required one, but can also /// be send multiple times. The values off one frame will be accumulated by the client. pub fn value(mut self, axis: Axis, value: f64) -> Self { match axis { Axis::HorizontalScroll => { self.axis.0 = value; } Axis::VerticalScroll => { self.axis.1 = value; } _ => unreachable!(), }; self } /// Notification of stop of scrolling on an axis. /// /// This event is required for sources of the [`AxisSource::Finger`] type /// and otherwise optional. pub fn stop(mut self, axis: Axis) -> Self { match axis { Axis::HorizontalScroll => { self.stop.0 = true; } Axis::VerticalScroll => { self.stop.1 = true; } _ => unreachable!(), }; self } } pub(crate) fn create_pointer_handler<F, R>(token: CompositorToken<R>, cb: F) -> PointerHandle where R: Role<CursorImageRole> + 'static, F: FnMut(CursorImageStatus) + 'static, { PointerHandle { inner: Rc::new(RefCell::new(PointerInternal::new(token, cb))), } } pub(crate) fn implement_pointer<R>( pointer: Main<WlPointer>, handle: Option<&PointerHandle>, token: CompositorToken<R>, ) -> WlPointer where R: Role<CursorImageRole> + 'static, { let inner = handle.map(|h| h.inner.clone()); pointer.quick_assign(move |pointer, request, _data| { match request { Request::SetCursor { surface, hotspot_x, hotspot_y, .. } => { if let Some(ref inner) = inner { let mut guard = inner.borrow_mut(); // only allow setting the cursor icon if the current pointer focus // is of the same client let PointerInternal { ref mut image_callback, ref focus, .. 
} = *guard; if let Some((ref focus, _)) = *focus { if focus.as_ref().same_client_as(&pointer.as_ref()) { match surface { Some(surface) => { let role_data = CursorImageRole { hotspot: (hotspot_x, hotspot_y), }; // we gracefully tolerate the client to provide a surface that // already had the "CursorImage" role, as most clients will // always reuse the same surface (and they are right to do so!) if token.with_role_data(&surface, |data| *data = role_data).is_err() && token.give_role_with(&surface, role_data).is_err() { pointer.as_ref().post_error( wl_pointer::Error::Role as u32, "Given wl_surface has another role.".into(), ); return; } image_callback(CursorImageStatus::Image(surface)); } None => { image_callback(CursorImageStatus::Hidden); } } } } } } Request::Release => { // Our destructors already handle it } _ => unreachable!(), } }); if let Some(h) = handle { let inner = h.inner.clone(); pointer.assign_destructor(Filter::new(move |pointer: WlPointer, _, _| { inner .borrow_mut() .known_pointers .retain(|p| !p.as_ref().equals(&pointer.as_ref())) })) } pointer.deref().clone() } /* * Grabs definition */ // The default grab, the behavior when no particular grab is in progress struct DefaultGrab; impl PointerGrab for DefaultGrab { fn motion( &mut self, handle: &mut PointerInnerHandle<'_>, location: (f64, f64), focus: Option<(WlSurface, (f64, f64))>, serial: Serial, time: u32, ) { handle.motion(location, focus, serial, time); } fn button( &mut self, handle: &mut PointerInnerHandle<'_>, button: u32, state: ButtonState, serial: Serial, time: u32, ) { handle.button(button, state, serial, time); handle.set_grab( serial, ClickGrab { start_data: GrabStartData { focus: handle.current_focus().cloned(), button, location: handle.current_location(), }, }, ); } fn axis(&mut self, handle: &mut PointerInnerHandle<'_>, details: AxisFrame) { handle.axis(details); } fn start_data(&self) -> &GrabStartData { unreachable!() } } // A click grab, basic grab started when an user clicks a 
surface // to maintain it focused until the user releases the click. // // In case the user maintains several simultaneous clicks, release // the grab once all are released. struct ClickGrab { start_data: GrabStartData, } impl PointerGrab for ClickGrab { fn motion( &mut self, handle: &mut PointerInnerHandle<'_>, location: (f64, f64), _focus: Option<(WlSurface, (f64, f64))>, serial: Serial, time: u32, ) { handle.motion(location, self.start_data.focus.clone(), serial, time); } fn button( &mut self, handle: &mut PointerInnerHandle<'_>, button: u32, state: ButtonState, serial: Serial, time: u32, ) { handle.button(button, state, serial, time); if handle.current_pressed().is_empty() { // no more buttons are pressed, release the grab handle.unset_grab(serial, time); } } fn axis(&mut self, handle: &mut PointerInnerHandle<'_>, details: AxisFrame) { handle.axis(details); } fn start_data(&self) -> &GrabStartData { &self.start_data } }
34.030812
112
0.547329
61122735e595293b0f60f554de9270ec1cb66bd3
4,066
extern crate regex; #[macro_use] extern crate lazy_static; use regex::{RegexBuilder, Regex, Captures}; fn reg(s: &str) -> Regex { RegexBuilder::new(s) .multi_line(true) .build() .unwrap() } //All of the Regexs lazy_static!{ static ref CODE_BLOCK: Regex = reg(r"```(\s*)([\s\S]*?)(\s*)```"); static ref CODE_INLINE: Regex = reg(r"(`)(.*?)(`)"); static ref LINK: Regex = reg(r"\[([^\[]+)\]\(([^\)]+)\)"); static ref HEADING: Regex = reg(r"\n(#+\s*)(.*)"); static ref EMPHASIS: Regex = reg(r"(\*{1,2})(.*?)(\*{1,2})"); static ref STRIKETHROUGH: Regex = reg(r"(\~\~)(.*?)(\~\~)"); static ref HORIZONTAL: Regex = reg(r"\n((\-{3,})|(={3,}))"); static ref UNORDERED: Regex = reg(r"(\n\s*(\-|\+)\s.*)+"); static ref ORDERED: Regex = reg(r"(\n\s*([0-9]+\.)\s.*)+"); /* Markdown or HTML reserved symbols */ static ref LT: Regex = reg(r"<"); static ref GT: Regex = reg(r">"); static ref AMP: Regex = reg(r"&"); static ref AST: Regex = reg(r"\*"); static ref UND: Regex = reg(r"_"); static ref TIC: Regex = reg(r"`"); static ref EQL: Regex = reg(r"="); static ref HYP: Regex = reg(r"-"); static ref HASH: Regex = reg(r"#"); } //function to replace HTML or Markdown reserved symbols fn symbols(s: &str) -> String { HYP.replace_all(&EQL.replace_all(&TIC.replace_all(&UND.replace_all(&AST.replace_all(&LT.replace_all(&GT.replace_all(&HASH.replace_all(&AMP.replace_all(s, "&amp;"), "&#35;"), "&gt;"), "&lt;"), "&#42;"), "&#95;"), "&#96;"), "&#61;"), "&#45;").to_string() } /* The replacer functions */ fn code_block_replacer(cap: &Captures) -> String { format!("<pre>{}</pre>", symbols(&cap[2])) } fn code_inline_replacer(cap: &Captures) -> String { format!("<code>{}</code>", &cap[2]) } fn link_replacer(cap: &Captures) -> String { format!("<a href='{}'>{}</a>", &cap[2], &cap[1]) } fn heading_replacer(cap: &Captures) -> String { format!("\n<h{}>{}</h{}>", cap[1].len().to_string(), &cap[2], cap[1].len().to_string()) } fn emphasis_replacer(cap: &Captures) -> String { format!("<{}>{}</{}>", 
if{cap[1].len()==1}{"em"}else{"strong"}, &cap[2], if{cap[1].len()==1}{"em"}else{"strong"}) } fn rule_replacer(cap: &Captures) -> String { format!("\n<hr />") } fn unordered_replacer(cap: &Captures) -> String { let mut items = String::from(""); for i in cap[0].trim().split('\n') { items = format!("{}<li>{}</li>", items, &i[2..]); } format!("<ul>{}</ul>", items) } fn ordered_replacer(cap: &Captures) -> String { let mut items = String::from(""); for i in cap[0].trim().split('\n') { items = format!("{}<li>{}</li>", items, &i[i.find('.').unwrap()+2..]); } format!("<ol>{}</ol>", items) } //The main format function; call this to get markdown with the best results pub fn replace_all(s: &String) -> String { replace::unordered(&replace::ordered(&replace::rules(&replace::emphasis(&replace::headings(&replace::links(&replace::code_inline(&replace::code_blocks(s)))))))) } //Individual markdown replacement functions. pub mod replace { use crate::*; pub fn code_blocks(s: &String) -> String { CODE_BLOCK.replace_all(s, &code_block_replacer).to_string() } pub fn code_inline(s: &String) -> String { CODE_INLINE.replace_all(s, &code_inline_replacer).to_string() } pub fn links(s: &String) -> String { LINK.replace_all(s, &link_replacer).to_string() } pub fn headings(s: &String) -> String { HEADING.replace_all(s, &heading_replacer).to_string() } pub fn emphasis(s: &String) -> String { EMPHASIS.replace_all(s, &emphasis_replacer).to_string() } pub fn rules(s: &String) -> String { HORIZONTAL.replace_all(s, &rule_replacer).to_string() } pub fn unordered(s: &String) -> String { UNORDERED.replace_all(s, &unordered_replacer).to_string() } pub fn ordered(s: &String) -> String { ORDERED.replace_all(s, &ordered_replacer).to_string() } }
28.836879
256
0.575996
dd656ecf30e474957f61611351fd795398366142
2,447
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::T4CKR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct T4CKRR { bits: u8, } impl T4CKRR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Proxy"] pub struct _T4CKRW<'a> { w: &'a mut W, } impl<'a> _T4CKRW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:3"] #[inline] pub fn t4ckr(&self) -> T4CKRR { let bits = { const MASK: u8 = 15; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }; T4CKRR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:3"] #[inline] pub fn t4ckr(&mut self) -> _T4CKRW { _T4CKRW { w: self } } }
23.084906
59
0.487127
c1b62024d1afb57b8fa7b1e97ca3c9234069fafa
1,516
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::sync::Arc; use common_exception::Result; use common_meta_types::RoleIdentity; use common_planners::DropRolePlan; use common_planners::PlanNode; use common_tracing::tracing; use crate::sessions::QueryContext; use crate::sql::statements::AnalyzableStatement; use crate::sql::statements::AnalyzedResult; #[derive(Debug, Clone, PartialEq)] pub struct DfDropRole { pub if_exists: bool, pub role_identity: RoleIdentity, } #[async_trait::async_trait] impl AnalyzableStatement for DfDropRole { #[tracing::instrument(level = "debug", skip(self, _ctx), fields(ctx.id = _ctx.get_id().as_str()))] async fn analyze(&self, _ctx: Arc<QueryContext>) -> Result<AnalyzedResult> { Ok(AnalyzedResult::SimpleQuery(Box::new(PlanNode::DropRole( DropRolePlan { if_exists: self.if_exists, role_identity: self.role_identity.clone(), }, )))) } }
33.688889
102
0.713061
91757f83333be6c9642d704aa919b332893266ee
75,793
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::PACRC { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `TP7`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TP7R { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP7R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TP7R::_0 => false, TP7R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TP7R { match value { false => TP7R::_0, true => TP7R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TP7R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TP7R::_1 } } #[doc = "Possible values of the field `WP7`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WP7R { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is 
write protected."] _1, } impl WP7R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { WP7R::_0 => false, WP7R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> WP7R { match value { false => WP7R::_0, true => WP7R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == WP7R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == WP7R::_1 } } #[doc = "Possible values of the field `SP7`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SP7R { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP7R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SP7R::_0 => false, SP7R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SP7R { match value { false => SP7R::_0, true => SP7R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SP7R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SP7R::_1 } } #[doc = "Possible values of the field `TP6`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TP6R { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } 
impl TP6R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TP6R::_0 => false, TP6R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TP6R { match value { false => TP6R::_0, true => TP6R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TP6R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TP6R::_1 } } #[doc = "Possible values of the field `WP6`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WP6R { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP6R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { WP6R::_0 => false, WP6R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> WP6R { match value { false => WP6R::_0, true => WP6R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == WP6R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == WP6R::_1 } } #[doc = "Possible values of the field `SP6`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SP6R { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP6R { #[doc = r" Returns `true` if the bit 
is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SP6R::_0 => false, SP6R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SP6R { match value { false => SP6R::_0, true => SP6R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SP6R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SP6R::_1 } } #[doc = "Possible values of the field `TP5`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TP5R { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP5R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TP5R::_0 => false, TP5R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TP5R { match value { false => TP5R::_0, true => TP5R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TP5R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TP5R::_1 } } #[doc = "Possible values of the field `WP5`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WP5R { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP5R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" 
Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { WP5R::_0 => false, WP5R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> WP5R { match value { false => WP5R::_0, true => WP5R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == WP5R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == WP5R::_1 } } #[doc = "Possible values of the field `SP5`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SP5R { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP5R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SP5R::_0 => false, SP5R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SP5R { match value { false => SP5R::_0, true => SP5R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SP5R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SP5R::_1 } } #[doc = "Possible values of the field `TP4`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TP4R { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP4R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit 
is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TP4R::_0 => false, TP4R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TP4R { match value { false => TP4R::_0, true => TP4R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TP4R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TP4R::_1 } } #[doc = "Possible values of the field `WP4`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WP4R { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP4R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { WP4R::_0 => false, WP4R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> WP4R { match value { false => WP4R::_0, true => WP4R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == WP4R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == WP4R::_1 } } #[doc = "Possible values of the field `SP4`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SP4R { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP4R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) 
-> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SP4R::_0 => false, SP4R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SP4R { match value { false => SP4R::_0, true => SP4R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SP4R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SP4R::_1 } } #[doc = "Possible values of the field `TP3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TP3R { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TP3R::_0 => false, TP3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TP3R { match value { false => TP3R::_0, true => TP3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TP3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TP3R::_1 } } #[doc = "Possible values of the field `WP3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WP3R { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn 
bit(&self) -> bool { match *self { WP3R::_0 => false, WP3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> WP3R { match value { false => WP3R::_0, true => WP3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == WP3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == WP3R::_1 } } #[doc = "Possible values of the field `SP3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SP3R { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP3R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SP3R::_0 => false, SP3R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SP3R { match value { false => SP3R::_0, true => SP3R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SP3R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SP3R::_1 } } #[doc = "Possible values of the field `TP2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TP2R { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { 
match *self { TP2R::_0 => false, TP2R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TP2R { match value { false => TP2R::_0, true => TP2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TP2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TP2R::_1 } } #[doc = "Possible values of the field `WP2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WP2R { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { WP2R::_0 => false, WP2R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> WP2R { match value { false => WP2R::_0, true => WP2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == WP2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == WP2R::_1 } } #[doc = "Possible values of the field `SP2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SP2R { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP2R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SP2R::_0 => false, SP2R::_1 => 
true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SP2R { match value { false => SP2R::_0, true => SP2R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SP2R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SP2R::_1 } } #[doc = "Possible values of the field `TP1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TP1R { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TP1R::_0 => false, TP1R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TP1R { match value { false => TP1R::_0, true => TP1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TP1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TP1R::_1 } } #[doc = "Possible values of the field `WP1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WP1R { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { WP1R::_0 => false, WP1R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> 
WP1R { match value { false => WP1R::_0, true => WP1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == WP1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == WP1R::_1 } } #[doc = "Possible values of the field `SP1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SP1R { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP1R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SP1R::_0 => false, SP1R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SP1R { match value { false => SP1R::_0, true => SP1R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SP1R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SP1R::_1 } } #[doc = "Possible values of the field `TP0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TP0R { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TP0R::_0 => false, TP0R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TP0R { match value { 
false => TP0R::_0, true => TP0R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TP0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TP0R::_1 } } #[doc = "Possible values of the field `WP0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WP0R { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { WP0R::_0 => false, WP0R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> WP0R { match value { false => WP0R::_0, true => WP0R::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == WP0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == WP0R::_1 } } #[doc = "Possible values of the field `SP0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SP0R { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP0R { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SP0R::_0 => false, SP0R::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SP0R { match value { false => SP0R::_0, true => SP0R::_1, } } #[doc = 
"Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == SP0R::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == SP0R::_1 } } #[doc = "Values that can be written to the field `TP7`"] pub enum TP7W { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP7W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { TP7W::_0 => false, TP7W::_1 => true, } } } #[doc = r" Proxy"] pub struct _TP7W<'a> { w: &'a mut W, } impl<'a> _TP7W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: TP7W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Accesses from an untrusted master are allowed."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(TP7W::_0) } #[doc = "Accesses from an untrusted master are not allowed."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(TP7W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `WP7`"] pub enum WP7W { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP7W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { WP7W::_0 => false, WP7W::_1 => true, } } } #[doc = r" Proxy"] pub struct _WP7W<'a> { w: &'a mut W, } impl<'a> _WP7W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: WP7W) -> &'a mut W { { self.bit(variant._bits()) } } 
#[doc = "This peripheral allows write accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(WP7W::_0) } #[doc = "This peripheral is write protected."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(WP7W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SP7`"] pub enum SP7W { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP7W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SP7W::_0 => false, SP7W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SP7W<'a> { w: &'a mut W, } impl<'a> _SP7W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SP7W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral does not require supervisor privilege level for accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SP7W::_0) } #[doc = "This peripheral requires supervisor privilege level for accesses."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SP7W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that 
can be written to the field `TP6`"] pub enum TP6W { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP6W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { TP6W::_0 => false, TP6W::_1 => true, } } } #[doc = r" Proxy"] pub struct _TP6W<'a> { w: &'a mut W, } impl<'a> _TP6W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: TP6W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Accesses from an untrusted master are allowed."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(TP6W::_0) } #[doc = "Accesses from an untrusted master are not allowed."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(TP6W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `WP6`"] pub enum WP6W { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP6W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { WP6W::_0 => false, WP6W::_1 => true, } } } #[doc = r" Proxy"] pub struct _WP6W<'a> { w: &'a mut W, } impl<'a> _WP6W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: WP6W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral allows write accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(WP6W::_0) } #[doc = "This peripheral is write protected."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(WP6W::_1) } #[doc = 
r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SP6`"] pub enum SP6W { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP6W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SP6W::_0 => false, SP6W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SP6W<'a> { w: &'a mut W, } impl<'a> _SP6W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SP6W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral does not require supervisor privilege level for accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SP6W::_0) } #[doc = "This peripheral requires supervisor privilege level for accesses."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SP6W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 6; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `TP5`"] pub enum TP5W { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP5W { #[allow(missing_docs)] #[doc(hidden)] 
#[inline] pub fn _bits(&self) -> bool { match *self { TP5W::_0 => false, TP5W::_1 => true, } } } #[doc = r" Proxy"] pub struct _TP5W<'a> { w: &'a mut W, } impl<'a> _TP5W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: TP5W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Accesses from an untrusted master are allowed."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(TP5W::_0) } #[doc = "Accesses from an untrusted master are not allowed."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(TP5W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `WP5`"] pub enum WP5W { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP5W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { WP5W::_0 => false, WP5W::_1 => true, } } } #[doc = r" Proxy"] pub struct _WP5W<'a> { w: &'a mut W, } impl<'a> _WP5W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: WP5W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral allows write accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(WP5W::_0) } #[doc = "This peripheral is write protected."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(WP5W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn 
bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 9; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SP5`"] pub enum SP5W { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP5W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SP5W::_0 => false, SP5W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SP5W<'a> { w: &'a mut W, } impl<'a> _SP5W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SP5W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral does not require supervisor privilege level for accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SP5W::_0) } #[doc = "This peripheral requires supervisor privilege level for accesses."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SP5W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 10; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `TP4`"] pub enum TP4W { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP4W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { TP4W::_0 => false, TP4W::_1 => true, } } } #[doc = r" Proxy"] pub struct _TP4W<'a> { w: &'a mut W, } impl<'a> _TP4W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub 
fn variant(self, variant: TP4W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Accesses from an untrusted master are allowed."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(TP4W::_0) } #[doc = "Accesses from an untrusted master are not allowed."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(TP4W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 12; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `WP4`"] pub enum WP4W { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP4W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { WP4W::_0 => false, WP4W::_1 => true, } } } #[doc = r" Proxy"] pub struct _WP4W<'a> { w: &'a mut W, } impl<'a> _WP4W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: WP4W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral allows write accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(WP4W::_0) } #[doc = "This peripheral is write protected."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(WP4W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 13; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field 
`SP4`"] pub enum SP4W { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP4W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SP4W::_0 => false, SP4W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SP4W<'a> { w: &'a mut W, } impl<'a> _SP4W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SP4W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral does not require supervisor privilege level for accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SP4W::_0) } #[doc = "This peripheral requires supervisor privilege level for accesses."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SP4W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 14; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `TP3`"] pub enum TP3W { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { TP3W::_0 => false, TP3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _TP3W<'a> { w: &'a mut W, } impl<'a> _TP3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: TP3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Accesses from an untrusted master are allowed."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(TP3W::_0) } #[doc = "Accesses from an 
untrusted master are not allowed."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(TP3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 16; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `WP3`"] pub enum WP3W { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { WP3W::_0 => false, WP3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _WP3W<'a> { w: &'a mut W, } impl<'a> _WP3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: WP3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral allows write accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(WP3W::_0) } #[doc = "This peripheral is write protected."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(WP3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 17; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SP3`"] pub enum SP3W { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP3W { 
#[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SP3W::_0 => false, SP3W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SP3W<'a> { w: &'a mut W, } impl<'a> _SP3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SP3W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral does not require supervisor privilege level for accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SP3W::_0) } #[doc = "This peripheral requires supervisor privilege level for accesses."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SP3W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 18; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `TP2`"] pub enum TP2W { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { TP2W::_0 => false, TP2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _TP2W<'a> { w: &'a mut W, } impl<'a> _TP2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: TP2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Accesses from an untrusted master are allowed."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(TP2W::_0) } #[doc = "Accesses from an untrusted master are not allowed."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(TP2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field 
bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 20; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `WP2`"] pub enum WP2W { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { WP2W::_0 => false, WP2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _WP2W<'a> { w: &'a mut W, } impl<'a> _WP2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: WP2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral allows write accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(WP2W::_0) } #[doc = "This peripheral is write protected."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(WP2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 21; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SP2`"] pub enum SP2W { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SP2W::_0 => false, SP2W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SP2W<'a> { w: &'a mut W, } impl<'a> _SP2W<'a> { #[doc = r" Writes 
`variant` to the field"] #[inline] pub fn variant(self, variant: SP2W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral does not require supervisor privilege level for accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SP2W::_0) } #[doc = "This peripheral requires supervisor privilege level for accesses."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SP2W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 22; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `TP1`"] pub enum TP1W { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { TP1W::_0 => false, TP1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _TP1W<'a> { w: &'a mut W, } impl<'a> _TP1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: TP1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Accesses from an untrusted master are allowed."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(TP1W::_0) } #[doc = "Accesses from an untrusted master are not allowed."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(TP1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 24; self.w.bits &= !((MASK as 
u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `WP1`"] pub enum WP1W { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is write protected."] _1, } impl WP1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { WP1W::_0 => false, WP1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _WP1W<'a> { w: &'a mut W, } impl<'a> _WP1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: WP1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral allows write accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(WP1W::_0) } #[doc = "This peripheral is write protected."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(WP1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 25; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SP1`"] pub enum SP1W { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SP1W::_0 => false, SP1W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SP1W<'a> { w: &'a mut W, } impl<'a> _SP1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SP1W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral does not require supervisor privilege level for accesses."] #[inline] pub fn _0(self) -> &'a 
mut W { self.variant(SP1W::_0) } #[doc = "This peripheral requires supervisor privilege level for accesses."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SP1W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 26; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `TP0`"] pub enum TP0W { #[doc = "Accesses from an untrusted master are allowed."] _0, #[doc = "Accesses from an untrusted master are not allowed."] _1, } impl TP0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { TP0W::_0 => false, TP0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _TP0W<'a> { w: &'a mut W, } impl<'a> _TP0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: TP0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Accesses from an untrusted master are allowed."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(TP0W::_0) } #[doc = "Accesses from an untrusted master are not allowed."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(TP0W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 28; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `WP0`"] pub enum WP0W { #[doc = "This peripheral allows write accesses."] _0, #[doc = "This peripheral is 
write protected."] _1, } impl WP0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { WP0W::_0 => false, WP0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _WP0W<'a> { w: &'a mut W, } impl<'a> _WP0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: WP0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral allows write accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(WP0W::_0) } #[doc = "This peripheral is write protected."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(WP0W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 29; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SP0`"] pub enum SP0W { #[doc = "This peripheral does not require supervisor privilege level for accesses."] _0, #[doc = "This peripheral requires supervisor privilege level for accesses."] _1, } impl SP0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SP0W::_0 => false, SP0W::_1 => true, } } } #[doc = r" Proxy"] pub struct _SP0W<'a> { w: &'a mut W, } impl<'a> _SP0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SP0W) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "This peripheral does not require supervisor privilege level for accesses."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(SP0W::_0) } #[doc = "This peripheral requires supervisor privilege level for accesses."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(SP0W::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> 
&'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 30; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 0 - Trusted Protect"] #[inline] pub fn tp7(&self) -> TP7R { TP7R::_from({ const MASK: bool = true; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 1 - Write Protect"] #[inline] pub fn wp7(&self) -> WP7R { WP7R::_from({ const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 2 - Supervisor Protect"] #[inline] pub fn sp7(&self) -> SP7R { SP7R::_from({ const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 4 - Trusted Protect"] #[inline] pub fn tp6(&self) -> TP6R { TP6R::_from({ const MASK: bool = true; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 5 - Write Protect"] #[inline] pub fn wp6(&self) -> WP6R { WP6R::_from({ const MASK: bool = true; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 6 - Supervisor Protect"] #[inline] pub fn sp6(&self) -> SP6R { SP6R::_from({ const MASK: bool = true; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 8 - Trusted Protect"] #[inline] pub fn tp5(&self) -> TP5R { TP5R::_from({ const MASK: bool = true; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 9 - Write Protect"] #[inline] pub fn wp5(&self) -> WP5R { WP5R::_from({ const MASK: bool = true; const OFFSET: u8 = 9; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 10 - Supervisor Protect"] #[inline] pub fn sp5(&self) -> 
SP5R { SP5R::_from({ const MASK: bool = true; const OFFSET: u8 = 10; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 12 - Trusted Protect"] #[inline] pub fn tp4(&self) -> TP4R { TP4R::_from({ const MASK: bool = true; const OFFSET: u8 = 12; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 13 - Write Protect"] #[inline] pub fn wp4(&self) -> WP4R { WP4R::_from({ const MASK: bool = true; const OFFSET: u8 = 13; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 14 - Supervisor Protect"] #[inline] pub fn sp4(&self) -> SP4R { SP4R::_from({ const MASK: bool = true; const OFFSET: u8 = 14; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 16 - Trusted Protect"] #[inline] pub fn tp3(&self) -> TP3R { TP3R::_from({ const MASK: bool = true; const OFFSET: u8 = 16; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 17 - Write Protect"] #[inline] pub fn wp3(&self) -> WP3R { WP3R::_from({ const MASK: bool = true; const OFFSET: u8 = 17; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 18 - Supervisor Protect"] #[inline] pub fn sp3(&self) -> SP3R { SP3R::_from({ const MASK: bool = true; const OFFSET: u8 = 18; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 20 - Trusted Protect"] #[inline] pub fn tp2(&self) -> TP2R { TP2R::_from({ const MASK: bool = true; const OFFSET: u8 = 20; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 21 - Write Protect"] #[inline] pub fn wp2(&self) -> WP2R { WP2R::_from({ const MASK: bool = true; const OFFSET: u8 = 21; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 22 - Supervisor Protect"] #[inline] pub fn sp2(&self) -> SP2R { SP2R::_from({ const MASK: bool = true; const OFFSET: u8 = 22; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 24 - Trusted Protect"] #[inline] pub fn tp1(&self) -> TP1R { TP1R::_from({ const MASK: bool = true; const OFFSET: u8 = 24; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 25 - Write Protect"] 
#[inline] pub fn wp1(&self) -> WP1R { WP1R::_from({ const MASK: bool = true; const OFFSET: u8 = 25; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 26 - Supervisor Protect"] #[inline] pub fn sp1(&self) -> SP1R { SP1R::_from({ const MASK: bool = true; const OFFSET: u8 = 26; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 28 - Trusted Protect"] #[inline] pub fn tp0(&self) -> TP0R { TP0R::_from({ const MASK: bool = true; const OFFSET: u8 = 28; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 29 - Write Protect"] #[inline] pub fn wp0(&self) -> WP0R { WP0R::_from({ const MASK: bool = true; const OFFSET: u8 = 29; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 30 - Supervisor Protect"] #[inline] pub fn sp0(&self) -> SP0R { SP0R::_from({ const MASK: bool = true; const OFFSET: u8 = 30; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 0 - Trusted Protect"] #[inline] pub fn tp7(&mut self) -> _TP7W { _TP7W { w: self } } #[doc = "Bit 1 - Write Protect"] #[inline] pub fn wp7(&mut self) -> _WP7W { _WP7W { w: self } } #[doc = "Bit 2 - Supervisor Protect"] #[inline] pub fn sp7(&mut self) -> _SP7W { _SP7W { w: self } } #[doc = "Bit 4 - Trusted Protect"] #[inline] pub fn tp6(&mut self) -> _TP6W { _TP6W { w: self } } #[doc = "Bit 5 - Write Protect"] #[inline] pub fn wp6(&mut self) -> _WP6W { _WP6W { w: self } } #[doc = "Bit 6 - Supervisor Protect"] #[inline] pub fn sp6(&mut self) -> _SP6W { _SP6W { w: self } } #[doc = "Bit 8 - Trusted Protect"] #[inline] pub fn tp5(&mut self) -> _TP5W { _TP5W { w: self } } #[doc = "Bit 9 - Write Protect"] #[inline] pub fn wp5(&mut self) -> _WP5W { _WP5W { w: self } } #[doc = "Bit 10 - Supervisor Protect"] #[inline] pub fn sp5(&mut self) -> _SP5W { 
_SP5W { w: self } } #[doc = "Bit 12 - Trusted Protect"] #[inline] pub fn tp4(&mut self) -> _TP4W { _TP4W { w: self } } #[doc = "Bit 13 - Write Protect"] #[inline] pub fn wp4(&mut self) -> _WP4W { _WP4W { w: self } } #[doc = "Bit 14 - Supervisor Protect"] #[inline] pub fn sp4(&mut self) -> _SP4W { _SP4W { w: self } } #[doc = "Bit 16 - Trusted Protect"] #[inline] pub fn tp3(&mut self) -> _TP3W { _TP3W { w: self } } #[doc = "Bit 17 - Write Protect"] #[inline] pub fn wp3(&mut self) -> _WP3W { _WP3W { w: self } } #[doc = "Bit 18 - Supervisor Protect"] #[inline] pub fn sp3(&mut self) -> _SP3W { _SP3W { w: self } } #[doc = "Bit 20 - Trusted Protect"] #[inline] pub fn tp2(&mut self) -> _TP2W { _TP2W { w: self } } #[doc = "Bit 21 - Write Protect"] #[inline] pub fn wp2(&mut self) -> _WP2W { _WP2W { w: self } } #[doc = "Bit 22 - Supervisor Protect"] #[inline] pub fn sp2(&mut self) -> _SP2W { _SP2W { w: self } } #[doc = "Bit 24 - Trusted Protect"] #[inline] pub fn tp1(&mut self) -> _TP1W { _TP1W { w: self } } #[doc = "Bit 25 - Write Protect"] #[inline] pub fn wp1(&mut self) -> _WP1W { _WP1W { w: self } } #[doc = "Bit 26 - Supervisor Protect"] #[inline] pub fn sp1(&mut self) -> _SP1W { _SP1W { w: self } } #[doc = "Bit 28 - Trusted Protect"] #[inline] pub fn tp0(&mut self) -> _TP0W { _TP0W { w: self } } #[doc = "Bit 29 - Write Protect"] #[inline] pub fn wp0(&mut self) -> _WP0W { _WP0W { w: self } } #[doc = "Bit 30 - Supervisor Protect"] #[inline] pub fn sp0(&mut self) -> _SP0W { _SP0W { w: self } } }
25.947621
88
0.5044
fc6cb9123252f269b636e914a043c6e4852fa929
2,804
use crate::*; use crate::d3d::*; use winapi::shared::winerror::*; use winapi::um::d3dcommon::ID3DBlob; use std::fmt::{self, Debug, Display, Formatter}; /// { kind: [ErrorKind], method, errors } #[derive(Clone)] pub struct MethodErrorBlob { pub(crate) kind: ErrorKind, pub(crate) method: Option<&'static str>, pub(crate) errors: TextBlob, } impl MethodErrorBlob { /// Returns the corresponding [ErrorKind] for this error. pub fn kind(&self) -> ErrorKind { self.kind } pub(crate) fn new(method: &'static str, kind: impl Into<ErrorKind>) -> Self { Self { kind: kind.into(), method: Some(method), errors: Default::default() } } /// ### ⚠️ Safety ⚠️ /// * If `!SUCCEEDED(hr)`, this accesses and takes over ownership of `errors` and returns `Err(...)`. /// * Otherwise, `errors` is left untouched. /// /// ### Arguments /// * `method` - The method that failed /// * `hr` - A possibly successful win32 error code /// * `errors` - A `\0`-terminated blob of errors to be owned by <code>[Err]\([MethodErrorBlob]\)</code> if `!SUCCEEDED(hr)` pub(crate) unsafe fn check_blob(method: &'static str, hr: HRESULT, errors: *mut ID3DBlob) -> Result<(), Self> { if !SUCCEEDED(hr) { let errors = TextBlob::new(ReadOnlyBlob::from_raw_opt(errors)); Err(Self { kind: ErrorKind(hr), method: Some(method), errors, }) } else { Ok(()) } } } impl From<MethodErrorBlob> for ErrorKind { fn from(error: MethodErrorBlob ) -> ErrorKind { error.kind } } //impl From<ErrorKind> for MethodErrorBlob { fn from(error: ErrorKind ) -> Self { Self { kind: error, method: None, errors: Default::default() } } } impl From<MethodError> for MethodErrorBlob { fn from(error: MethodError) -> Self { Self { kind: error.kind(), method: Some(error.method()), errors: Default::default() } } } impl std::error::Error for MethodErrorBlob {} impl Debug for MethodErrorBlob { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { let mut ds = fmt.debug_struct("MethodErrorBlob"); ds.field("kind", &self.kind); if let Some(method) = self.method.as_ref() { 
ds.field("method", method); } if !self.errors.is_empty() { ds.field("errors", &self.errors.to_utf8_lossy()); } ds.finish() } } impl Display for MethodErrorBlob { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { let method = self.method.unwrap_or("thindx method"); write!(fmt, "{} failed ({:?})", method, self.kind)?; if !self.errors.is_empty() { write!(fmt, "\n{}\n", self.errors.to_utf8_lossy())?; } Ok(()) } }
36.415584
172
0.580956
28de70f8dd77912f1a36097da4935cf31882d28b
10,050
#![cfg_attr(not(feature = "std"), no_std)] #![deny( warnings, unused, future_incompatible, nonstandard_style, rust_2018_idioms )] #![forbid(unsafe_code)] #[macro_use] extern crate derivative; #[macro_use] extern crate ark_std; use crate::group::Group; use ark_ff::{ bytes::{FromBytes, ToBytes}, fields::{Field, PrimeField, SquareRootField}, UniformRand, }; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, ConstantSerializedSize}; use ark_std::{ fmt::{Debug, Display}, hash::Hash, ops::{Add, AddAssign, MulAssign, Neg, Sub, SubAssign}, vec::Vec, }; use num_traits::Zero; pub mod models; pub use self::models::*; pub mod group; pub mod msm; pub trait PairingEngine: Sized + 'static + Copy + Debug + Sync + Send + Eq + PartialEq { /// This is the scalar field of the G1/G2 groups. type Fr: PrimeField + SquareRootField; /// The projective representation of an element in G1. type G1Projective: ProjectiveCurve<BaseField = Self::Fq, ScalarField = Self::Fr, Affine = Self::G1Affine> + From<Self::G1Affine> + Into<Self::G1Affine> + MulAssign<Self::Fr>; // needed due to https://github.com/rust-lang/rust/issues/69640 /// The affine representation of an element in G1. type G1Affine: AffineCurve<BaseField = Self::Fq, ScalarField = Self::Fr, Projective = Self::G1Projective> + From<Self::G1Projective> + Into<Self::G1Projective> + Into<Self::G1Prepared>; /// A G1 element that has been preprocessed for use in a pairing. type G1Prepared: ToBytes + Default + Clone + Send + Sync + Debug + From<Self::G1Affine>; /// The projective representation of an element in G2. type G2Projective: ProjectiveCurve<BaseField = Self::Fqe, ScalarField = Self::Fr, Affine = Self::G2Affine> + From<Self::G2Affine> + Into<Self::G2Affine> + MulAssign<Self::Fr>; // needed due to https://github.com/rust-lang/rust/issues/69640 /// The affine representation of an element in G2. 
type G2Affine: AffineCurve<BaseField = Self::Fqe, ScalarField = Self::Fr, Projective = Self::G2Projective> + From<Self::G2Projective> + Into<Self::G2Projective> + Into<Self::G2Prepared>; /// A G2 element that has been preprocessed for use in a pairing. type G2Prepared: ToBytes + Default + Clone + Send + Sync + Debug + From<Self::G2Affine>; /// The base field that hosts G1. type Fq: PrimeField + SquareRootField; /// The extension field that hosts G2. type Fqe: SquareRootField; /// The extension field that hosts the target group of the pairing. type Fqk: Field; /// Perform a miller loop with some number of (G1, G2) pairs. #[must_use] fn miller_loop<'a, I>(i: I) -> Self::Fqk where I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>; /// Perform final exponentiation of the result of a miller loop. #[must_use] fn final_exponentiation(_: &Self::Fqk) -> Option<Self::Fqk>; /// Computes a product of pairings. #[must_use] fn product_of_pairings<'a, I>(i: I) -> Self::Fqk where I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>, { Self::final_exponentiation(&Self::miller_loop(i)).unwrap() } /// Performs multiple pairing operations #[must_use] fn pairing<G1, G2>(p: G1, q: G2) -> Self::Fqk where G1: Into<Self::G1Affine>, G2: Into<Self::G2Affine>, { let g1_prep = Self::G1Prepared::from(p.into()); let g2_prep = Self::G2Prepared::from(q.into()); Self::product_of_pairings(core::iter::once(&(g1_prep, g2_prep))) } } /// Projective representation of an elliptic curve point guaranteed to be /// in the correct prime order subgroup. 
pub trait ProjectiveCurve: Eq + 'static + Sized + ToBytes + FromBytes + Copy + Clone + Default + Send + Sync + Hash + Debug + Display + UniformRand + Zero + Neg<Output = Self> + Add<Self, Output = Self> + Sub<Self, Output = Self> + AddAssign<Self> + SubAssign<Self> + MulAssign<<Self as ProjectiveCurve>::ScalarField> + for<'a> Add<&'a Self, Output = Self> + for<'a> Sub<&'a Self, Output = Self> + for<'a> AddAssign<&'a Self> + for<'a> SubAssign<&'a Self> + core::iter::Sum<Self> + for<'a> core::iter::Sum<&'a Self> + From<<Self as ProjectiveCurve>::Affine> { const COFACTOR: &'static [u64]; type ScalarField: PrimeField + SquareRootField; type BaseField: Field; type Affine: AffineCurve<Projective = Self, ScalarField = Self::ScalarField, BaseField = Self::BaseField> + From<Self> + Into<Self>; /// Returns a fixed generator of unknown exponent. #[must_use] fn prime_subgroup_generator() -> Self; /// Normalizes a slice of projective elements so that /// conversion to affine is cheap. fn batch_normalization(v: &mut [Self]); /// Normalizes a slice of projective elements and outputs a vector /// containing the affine equivalents. fn batch_normalization_into_affine(v: &[Self]) -> Vec<Self::Affine> { let mut v = v.to_vec(); Self::batch_normalization(&mut v); v.into_iter().map(|v| v.into()).collect() } /// Checks if the point is already "normalized" so that /// cheap affine conversion is possible. #[must_use] fn is_normalized(&self) -> bool; /// Doubles this element. #[must_use] fn double(&self) -> Self { let mut copy = *self; copy.double_in_place(); copy } /// Doubles this element in place. fn double_in_place(&mut self) -> &mut Self; /// Converts self into the affine representation. fn into_affine(&self) -> Self::Affine { (*self).into() } /// Set `self` to be `self + other`, where `other: Self::Affine`. /// This is usually faster than adding `other` in projective form. 
fn add_mixed(mut self, other: &Self::Affine) -> Self { self.add_assign_mixed(other); self } /// Set `self` to be `self + other`, where `other: Self::Affine`. /// This is usually faster than adding `other` in projective form. fn add_assign_mixed(&mut self, other: &Self::Affine); /// Performs scalar multiplication of this element. fn mul<S: Into<<Self::ScalarField as PrimeField>::BigInt>>(mut self, other: S) -> Self { let mut res = Self::zero(); for b in ark_ff::BitIteratorBE::without_leading_zeros(other.into()) { res.double_in_place(); if b { res += self; } } self = res; self } } /// Affine representation of an elliptic curve point guaranteed to be /// in the correct prime order subgroup. pub trait AffineCurve: Eq + 'static + Sized + ToBytes + FromBytes + CanonicalSerialize + ConstantSerializedSize + CanonicalDeserialize + Copy + Clone + Default + Send + Sync + Hash + Debug + Display + Zero + Neg<Output = Self> + From<<Self as AffineCurve>::Projective> { const COFACTOR: &'static [u64]; type ScalarField: PrimeField + SquareRootField + Into<<Self::ScalarField as PrimeField>::BigInt>; type BaseField: Field; type Projective: ProjectiveCurve<Affine = Self, ScalarField = Self::ScalarField, BaseField = Self::BaseField> + From<Self> + Into<Self> + MulAssign<Self::ScalarField>; // needed due to https://github.com/rust-lang/rust/issues/69640 /// Returns a fixed generator of unknown exponent. #[must_use] fn prime_subgroup_generator() -> Self; /// Converts self into the projective representation. fn into_projective(&self) -> Self::Projective { (*self).into() } /// Returns a group element if the set of bytes forms a valid group element, /// otherwise returns None. This function is primarily intended for sampling /// random group elements from a hash-function or RNG output. fn from_random_bytes(bytes: &[u8]) -> Option<Self>; /// Performs scalar multiplication of this element with mixed addition. 
#[must_use] fn mul<S: Into<<Self::ScalarField as PrimeField>::BigInt>>(&self, other: S) -> Self::Projective; /// Multiply this element by the cofactor and output the /// resulting projective element. #[must_use] fn mul_by_cofactor_to_projective(&self) -> Self::Projective; /// Multiply this element by the cofactor. #[must_use] fn mul_by_cofactor(&self) -> Self { self.mul_by_cofactor_to_projective().into() } /// Multiply this element by the inverse of the cofactor in /// `Self::ScalarField`. #[must_use] fn mul_by_cofactor_inv(&self) -> Self; } impl<C: ProjectiveCurve> Group for C { type ScalarField = C::ScalarField; #[inline] #[must_use] fn double(&self) -> Self { let mut tmp = *self; tmp += self; tmp } #[inline] fn double_in_place(&mut self) -> &mut Self { <C as ProjectiveCurve>::double_in_place(self) } } /// Preprocess a G1 element for use in a pairing. pub fn prepare_g1<E: PairingEngine>(g: impl Into<E::G1Affine>) -> E::G1Prepared { let g: E::G1Affine = g.into(); E::G1Prepared::from(g) } /// Preprocess a G2 element for use in a pairing. pub fn prepare_g2<E: PairingEngine>(g: impl Into<E::G2Affine>) -> E::G2Prepared { let g: E::G2Affine = g.into(); E::G2Prepared::from(g) } /// A cycle of pairing-friendly elliptic curves. pub trait CycleEngine: Sized + 'static + Copy + Debug + Sync + Send where <Self::E2 as PairingEngine>::G1Projective: MulAssign<<Self::E1 as PairingEngine>::Fq>, <Self::E2 as PairingEngine>::G2Projective: MulAssign<<Self::E1 as PairingEngine>::Fq>, { type E1: PairingEngine; type E2: PairingEngine< Fr = <Self::E1 as PairingEngine>::Fq, Fq = <Self::E1 as PairingEngine>::Fr, >; }
31.018519
113
0.63403
f40173c90c02d0f5761eba41d384b1b3387832aa
797
use crate::partition; use std::collections::BTreeMap; pub trait SymbolLike { fn epsilon() -> Self; } #[derive(Clone, Eq, Ord, PartialEq, PartialOrd)] pub enum Symbol { EmptySet, Epsilon, Bracket(partition::Partition), } impl SymbolLike for Symbol { fn epsilon() -> Symbol { Symbol::Epsilon } } struct SymbolTable { table: BTreeMap<Symbol, usize>, } impl SymbolTable { fn new() -> Self { SymbolTable { table: BTreeMap::new(), } } /// Returns ID of inserted symbol. fn add(&mut self, symbol: Symbol) -> usize { if let Some(val) = self.table.get(&symbol) { *val } else { let id = self.table.len(); self.table.insert(symbol, id); id } } }
18.534884
52
0.548306
6a5fd6ec8fcfc6440e4a5f0ba464af7d7226a7db
8,820
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license. use crate::fs_util; use crate::http_cache::url_to_filename; use deno_core::url::{Host, Url}; use std::ffi::OsStr; use std::fs; use std::io; use std::path::Component; use std::path::Path; use std::path::PathBuf; use std::path::Prefix; use std::str; #[derive(Clone)] pub struct DiskCache { pub location: PathBuf, } fn with_io_context<T: AsRef<str>>( e: &std::io::Error, context: T, ) -> std::io::Error { std::io::Error::new(e.kind(), format!("{} (for '{}')", e, context.as_ref())) } impl DiskCache { /// `location` must be an absolute path. pub fn new(location: &Path) -> Self { assert!(location.is_absolute()); Self { location: location.to_owned(), } } /// Ensures the location of the cache. pub fn ensure_dir_exists(&self, path: &Path) -> io::Result<()> { if path.is_dir() { return Ok(()); } fs::create_dir_all(&path).map_err(|e| { io::Error::new(e.kind(), format!( "Could not create TypeScript compiler cache location: {:?}\nCheck the permission of the directory.", path )) }) } fn get_cache_filename(&self, url: &Url) -> Option<PathBuf> { let mut out = PathBuf::new(); let scheme = url.scheme(); out.push(scheme); match scheme { "wasm" => { let host = url.host_str().unwrap(); let host_port = match url.port() { // Windows doesn't support ":" in filenames, so we represent port using a // special string. 
Some(port) => format!("{}_PORT{}", host, port), None => host.to_string(), }; out.push(host_port); for path_seg in url.path_segments().unwrap() { out.push(path_seg); } } "http" | "https" | "data" | "blob" => out = url_to_filename(url)?, "file" => { let path = match url.to_file_path() { Ok(path) => path, Err(_) => return None, }; let mut path_components = path.components(); if cfg!(target_os = "windows") { if let Some(Component::Prefix(prefix_component)) = path_components.next() { // Windows doesn't support ":" in filenames, so we need to extract disk prefix // Example: file:///C:/deno/js/unit_test_runner.ts // it should produce: file\c\deno\js\unit_test_runner.ts match prefix_component.kind() { Prefix::Disk(disk_byte) | Prefix::VerbatimDisk(disk_byte) => { let disk = (disk_byte as char).to_string(); out.push(disk); } Prefix::UNC(server, share) | Prefix::VerbatimUNC(server, share) => { out.push("UNC"); let host = Host::parse(server.to_str().unwrap()).unwrap(); let host = host.to_string().replace(":", "_"); out.push(host); out.push(share); } _ => unreachable!(), } } } // Must be relative, so strip forward slash let mut remaining_components = path_components.as_path(); if let Ok(stripped) = remaining_components.strip_prefix("/") { remaining_components = stripped; }; out = out.join(remaining_components); } _ => return None, }; Some(out) } pub fn get_cache_filename_with_extension( &self, url: &Url, extension: &str, ) -> Option<PathBuf> { let base = self.get_cache_filename(url)?; match base.extension() { None => Some(base.with_extension(extension)), Some(ext) => { let original_extension = OsStr::to_str(ext).unwrap(); let final_extension = format!("{}.{}", original_extension, extension); Some(base.with_extension(final_extension)) } } } pub fn get(&self, filename: &Path) -> std::io::Result<Vec<u8>> { let path = self.location.join(filename); fs::read(&path) } pub fn set(&self, filename: &Path, data: &[u8]) -> std::io::Result<()> { let path = self.location.join(filename); 
match path.parent() { Some(parent) => self.ensure_dir_exists(parent), None => Ok(()), }?; fs_util::atomic_write_file(&path, data, crate::http_cache::CACHE_PERM) .map_err(|e| with_io_context(&e, format!("{:#?}", &path))) } } #[cfg(test)] mod tests { use super::*; use tempfile::TempDir; #[test] fn test_create_cache_if_dir_exits() { let cache_location = TempDir::new().unwrap(); let mut cache_path = cache_location.path().to_owned(); cache_path.push("foo"); let cache = DiskCache::new(&cache_path); cache .ensure_dir_exists(&cache.location) .expect("Testing expect:"); assert!(cache_path.is_dir()); } #[test] fn test_create_cache_if_dir_not_exits() { let temp_dir = TempDir::new().unwrap(); let mut cache_location = temp_dir.path().to_owned(); assert!(fs::remove_dir(&cache_location).is_ok()); cache_location.push("foo"); assert!(!cache_location.is_dir()); let cache = DiskCache::new(&cache_location); cache .ensure_dir_exists(&cache.location) .expect("Testing expect:"); assert!(cache_location.is_dir()); } #[test] fn test_get_cache_filename() { let cache_location = if cfg!(target_os = "windows") { PathBuf::from(r"C:\deno_dir\") } else { PathBuf::from("/deno_dir/") }; let cache = DiskCache::new(&cache_location); let mut test_cases = vec![ ( "http://deno.land/std/http/file_server.ts", "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf", ), ( "http://localhost:8000/std/http/file_server.ts", "http/localhost_PORT8000/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf", ), ( "https://deno.land/std/http/file_server.ts", "https/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf", ), ("wasm://wasm/d1c677ea", "wasm/wasm/d1c677ea"), ]; if cfg!(target_os = "windows") { test_cases.push(("file:///D:/a/1/s/format.ts", "file/D/a/1/s/format.ts")); // IPv4 localhost test_cases.push(( "file://127.0.0.1/d$/a/1/s/format.ts", "file/UNC/127.0.0.1/d$/a/1/s/format.ts", )); // IPv6 localhost test_cases.push(( 
"file://[0:0:0:0:0:0:0:1]/d$/a/1/s/format.ts", "file/UNC/[__1]/d$/a/1/s/format.ts", )); // shared folder test_cases.push(( "file://comp/t-share/a/1/s/format.ts", "file/UNC/comp/t-share/a/1/s/format.ts", )); } else { test_cases.push(( "file:///std/http/file_server.ts", "file/std/http/file_server.ts", )); } for test_case in &test_cases { let cache_filename = cache.get_cache_filename(&Url::parse(test_case.0).unwrap()); assert_eq!(cache_filename, Some(PathBuf::from(test_case.1))); } } #[test] fn test_get_cache_filename_with_extension() { let p = if cfg!(target_os = "windows") { "C:\\foo" } else { "/foo" }; let cache = DiskCache::new(&PathBuf::from(p)); let mut test_cases = vec![ ( "http://deno.land/std/http/file_server.ts", "js", "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf.js", ), ( "http://deno.land/std/http/file_server.ts", "js.map", "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf.js.map", ), ]; if cfg!(target_os = "windows") { test_cases.push(( "file:///D:/std/http/file_server", "js", "file/D/std/http/file_server.js", )); } else { test_cases.push(( "file:///std/http/file_server", "js", "file/std/http/file_server.js", )); } for test_case in &test_cases { assert_eq!( cache.get_cache_filename_with_extension( &Url::parse(test_case.0).unwrap(), test_case.1 ), Some(PathBuf::from(test_case.2)) ) } } #[test] fn test_get_cache_filename_invalid_urls() { let cache_location = if cfg!(target_os = "windows") { PathBuf::from(r"C:\deno_dir\") } else { PathBuf::from("/deno_dir/") }; let cache = DiskCache::new(&cache_location); let mut test_cases = vec!["unknown://localhost/test.ts"]; if cfg!(target_os = "windows") { test_cases.push("file://"); test_cases.push("file:///"); } for test_case in &test_cases { let cache_filename = cache.get_cache_filename(&Url::parse(test_case).unwrap()); assert_eq!(cache_filename, None); } } }
28.543689
108
0.577324
394c83d62d98079a70c4f0d425a6ec38dfa694e4
13,600
//! Defines a scrollbar component. See definition of [`Scrollbar`] for details. #![feature(option_result_contains)] #![feature(trait_alias)] #![warn(missing_copy_implementations)] #![warn(missing_debug_implementations)] #![warn(missing_docs)] #![warn(trivial_casts)] #![warn(trivial_numeric_casts)] #![warn(unsafe_code)] #![warn(unused_import_braces)] #![warn(unused_qualifications)] #![recursion_limit = "512"] use ensogl_core::prelude::*; use enso_frp as frp; use ensogl_core::animation::delayed::DelayedAnimation; use ensogl_core::application; use ensogl_core::application::Application; use ensogl_core::data::color; use ensogl_core::display; use ensogl_core::display::shape::StyleWatchFrp; use ensogl_core::Animation; use ensogl_hardcoded_theme as theme; use ensogl_selector as selector; use ensogl_selector::model::Model; use ensogl_selector::Bounds; // ================= // === Constants === // ================= // TODO: Some of those values could be defined by the theme instead. But currently, this does not // seem to be worth it because the FRP initialization introduces a lot of complexity, as // described at https://github.com/enso-org/ide/issues/1654. /// Amount the scrollbar moves on a single click, relative to the viewport size. const CLICK_JUMP_PERCENTAGE: f32 = 0.80; /// Width of the scrollbar in px. pub const WIDTH: f32 = 11.0; /// The amount of padding on each side inside the scrollbar. const PADDING: f32 = 2.0; /// The thumb will be displayed with at least this size to make it more visible and dragging easier. const MIN_THUMB_SIZE: f32 = 12.0; /// After an animation, the thumb will be visible for this time, before it hides again. const HIDE_DELAY: f32 = 1000.0; const ERROR_MARGIN_FOR_ACTIVITY_DETECTION: f32 = 0.1; // =========== // === Frp === // =========== ensogl_core::define_endpoints! { Input { /// Sets the length of the scrollbar as display object in px. set_length (f32), /// Sets the number of scroll units on the scroll bar. 
Should usually be the size of the /// scrolled area in px. set_max (f32), /// Sets the thumb size in scroll units. set_thumb_size (f32), /// Scroll smoothly by the given amount in scroll units. scroll_by (f32), /// Scroll smoothly to the given position in scroll units. scroll_to (f32), /// Jumps to the given position in scroll units without animation and without revealing the /// scrollbar. jump_to (f32), } Output { /// Scroll position in scroll units. thumb_position (f32), } } impl Frp { /// Initialize the FRP network. pub fn init(&self, app: &Application, model: &Model, style: &StyleWatchFrp) { let frp = &self; let network = &frp.network; let scene = app.display.scene(); let mouse = &scene.mouse.frp; let thumb_position = Animation::new(network); let thumb_color = color::Animation::new(network); let activity_cool_off = DelayedAnimation::new(network); activity_cool_off.frp.set_delay(HIDE_DELAY); activity_cool_off.frp.set_duration(0.0); frp::extend! { network resize <- frp.set_length.map(|&length| Vector2::new(length,WIDTH)); } let base_frp = selector::Frp::new(model, style, network, resize.clone(), mouse); model.use_track_handles(false); model.set_track_corner_round(true); model.show_background(false); model.show_left_overflow(false); model.show_right_overflow(false); model.set_padding(PADDING); let default_color = style.get_color(theme::component::slider::track::color); let hover_color = style.get_color(theme::component::slider::track::hover_color); frp::extend! { network // Scrolling and Jumping frp.scroll_to <+ frp.scroll_by.map2(&thumb_position.target,|delta,pos| *pos+*delta); // We will use this to reveal the scrollbar on scrolling. It has to be defined before // the following nodes that update `thumb_position.target`. 
active <- frp.scroll_to.map2(&thumb_position.target,|&new:&f32,&old:&f32| { (new - old).abs() > ERROR_MARGIN_FOR_ACTIVITY_DETECTION }).on_true(); unbounded_target_position <- any(&frp.scroll_to,&frp.jump_to); thumb_position.target <+ all_with3(&unbounded_target_position,&frp.set_thumb_size, &frp.set_max,|target,&size,&max| target.min(max-size).max(0.0)); thumb_position.skip <+ frp.jump_to.constant(()); frp.source.thumb_position <+ thumb_position.value; // === Mouse position in local coordinates === mouse_position <- mouse.position.map(f!([scene,model](pos) scene.screen_to_object_space(&model,*pos))); // We will initialize the mouse position with `Vector2(f32::NAN,f32::NAN)`, because the // default `Vector2(0.0,0.0)` would reveal the scrollbar before we get the actual mouse // coordinates. init_mouse_position <- source::<Vector2>(); mouse_position <- any(&mouse_position,&init_mouse_position); // === Color === init_color <- any_mut::<()>(); default_color <- all(&default_color,&init_color)._0().map(|c| color::Lch::from(*c)); hover_color <- all(&hover_color,&init_color)._0().map(|c| color::Lch::from(*c)); engaged <- base_frp.track_hover || base_frp.is_dragging_track; thumb_color.target_color <+ engaged.switch(&default_color,&hover_color); eval thumb_color.value((c) model.set_track_color(color::Rgba::from(*c))); // === Hiding === // We start a delayed animation whenever the bar is scrolled to a new place (it is // active). This will instantly reveal the scrollbar and hide it after the delay has // passed. activity_cool_off.frp.reset <+ active; activity_cool_off.frp.start <+ active; recently_active <- bool(&activity_cool_off.frp.on_end,&activity_cool_off.frp.on_reset); // The signed distance between the cursor and the edge of the scrollbar. If the cursor // is further left or right than the ends of the scrollbar then we count the distance as // infinite. We use this distance to reveal the scrollbar when approached by the cursor. 
// Returning infinity has the effect that we do not reveal it when the cursor approaches // from the sides. This could be handled differently, but the solution was chosen for // the simplicity of the implementation and the feeling of the interaction. vert_mouse_distance <- all_with(&mouse_position,&frp.set_length,|&pos,&length| { let scrollbar_x_range = (-length/2.0)..=(length/2.0); if scrollbar_x_range.contains(&pos.x) { pos.y.abs() - WIDTH / 2.0 } else { f32::INFINITY } }); thumb_color.target_alpha <+ all_with5(&recently_active,&base_frp.is_dragging_track, &vert_mouse_distance,&frp.set_thumb_size,&frp.set_max,Self::compute_target_alpha); // === Position on Screen === // Space that the thumb can actually move in inner_length <- frp.set_length.map(|length| *length - 2.0 * PADDING); // Thumb position as a number between 0 and 1 normalized_position <- all_with3(&frp.thumb_position,&frp.set_thumb_size,&frp.set_max, |&pos,&size,&max| pos / (max - size)); normalized_size <- all_with(&frp.set_thumb_size,&frp.set_max,|&size,&max| size / max); // Minimum thumb size in normalized units min_visual_size <- inner_length.map(|&length| MIN_THUMB_SIZE / length); // The size at which we render the thumb on screen, in normalized units. Can differ from // the actual thumb size if the thumb is smaller than the min. visual_size <- all_with(&normalized_size,&min_visual_size,|&size,&min| size.max(min).min(1.0)); // The position at which we render the thumb on screen, in normalized units. 
visual_start <- all_with(&normalized_position,&visual_size,|&pos,&size| pos * (1.0 - size)); visual_bounds <- all_with(&visual_start,&visual_size,|&start,&size| Bounds::new(start,start+size)); visual_center <- visual_bounds.map(|bounds| bounds.center()); thumb_center_px <- all_with(&visual_center,&inner_length, |normalized,length| (normalized - 0.5) * length); update_slider <- all(&visual_bounds,&resize); eval update_slider(((value,size)) model.set_background_range(*value,*size)); // === Clicking === frp.scroll_by <+ base_frp.background_click.map3(&thumb_center_px,&frp.set_thumb_size, |click_position,thumb_center,thumb_size| { let direction = if click_position.x > *thumb_center { 1.0 } else { -1.0 }; direction * thumb_size * CLICK_JUMP_PERCENTAGE }); // === Dragging === drag_started <- base_frp.is_dragging_track.on_change().on_true().constant(()); x <- all4(&mouse_position,&inner_length,&frp.set_max,&frp.thumb_position); x <- x.sample(&drag_started); drag_offset <- x.map(|(mouse_px,length_px,max,thumb_pos)| { let thumb_position_px = thumb_pos / max * length_px; mouse_px.x - thumb_position_px }); x <- all4(&mouse_position,&drag_offset,&inner_length,&frp.set_max); x <- x.gate(&base_frp.is_dragging_track); frp.jump_to <+ x.map(|(mouse_px,offset_px,length_px,max)| { let target_px = mouse_px.x - offset_px; target_px / length_px * max }); } // === Init Network === frp.set_length(200.0); frp.set_thumb_size(0.2); frp.set_max(1.0); init_mouse_position.emit(Vector2(f32::NAN, f32::NAN)); init_color.emit(()); } fn compute_target_alpha( &recently_active: &bool, &dragging: &bool, &cursor_distance: &f32, &thumb_size: &f32, &max: &f32, ) -> f32 { let thumb_fills_bar = thumb_size >= max; if thumb_fills_bar { 0.0 } else if recently_active || dragging { 1.0 } else { #[allow(clippy::collapsible_else_if)] if cursor_distance <= 0.0 { 1.0 } else { // The opacity approaches 0.7 when the cursor is right next to the bar and fades // linearly to 0.0 at 20 px distance. 
(0.7 - cursor_distance / 20.0).max(0.0) } } } } // =========================== // === Scrollbar Component === // =========================== /// Scrollbar component that can be used to implement scrollable components. /// /// We say "thumb" to mean the object inside the bar that indicates the scroll position and can be /// dragged to change that position. Clicking on the scrollbar on either side of the thumb will move /// the thumb a step in that direction. The scrollbar is hidden by default and will show when it is /// animated, dragged or approached by the cursor. /// /// The scrollbar has a horizontal orientation with the beginning on the left and the end on the /// right. But it can be rotated arbitrarily. The origin is in the center. /// /// All operations related to the scroll position take as argument a number of pixels describing a /// position or distance on the scrolled area. We call them scroll units. #[derive(Clone, CloneRef, Debug, Derivative)] pub struct Scrollbar { /// Public FRP api of the Component. pub frp: Rc<Frp>, model: Rc<Model>, /// Reference to the application the Component belongs to. Generally required for implementing /// `application::View` and initialising the `Model` and `Frp` and thus provided by the /// `Component`. pub app: Application, } impl Scrollbar { /// Constructor. 
pub fn new(app: &Application) -> Self { let app = app.clone_ref(); let model = Rc::new(Model::new(&app)); let frp = Frp::default(); let style = StyleWatchFrp::new(&app.display.scene().style_sheet); frp.init(&app, &model, &style); let frp = Rc::new(frp); Self { frp, model, app } } } impl display::Object for Scrollbar { fn display_object(&self) -> &display::object::Instance { self.model.display_object() } } impl Deref for Scrollbar { type Target = Frp; fn deref(&self) -> &Self::Target { &self.frp } } impl application::command::FrpNetworkProvider for Scrollbar { fn network(&self) -> &frp::Network { self.frp.network() } } impl application::View for Scrollbar { fn label() -> &'static str { "Scrollbar" } fn new(app: &Application) -> Self { Scrollbar::new(app) } fn app(&self) -> &Application { &self.app } }
39.534884
100
0.608897
6a326eb4ca39a971a7a212e03b6631fa4b4c5408
5,566
//! This module has the functionality to search the project and its dependencies for a certain item, //! by its name and a few criteria. //! The main reason for this module to exist is the fact that project's items and dependencies' items //! are located in different caches, with different APIs. use either::Either; use hir::{ import_map::{self, ImportKind}, AsAssocItem, Crate, ItemInNs, Semantics, }; use limit::Limit; use syntax::{ast, AstNode, SyntaxKind::NAME}; use crate::{ defs::{Definition, NameClass}, helpers::import_assets::NameToImport, symbol_index::{self, FileSymbol}, RootDatabase, }; /// A value to use, when uncertain which limit to pick. pub static DEFAULT_QUERY_SEARCH_LIMIT: Limit = Limit::new(40); /// Three possible ways to search for the name in associated and/or other items. #[derive(Debug, Clone, Copy)] pub enum AssocItemSearch { /// Search for the name in both associated and other items. Include, /// Search for the name in other items only. Exclude, /// Search for the name in the associated items only. AssocItemsOnly, } /// Searches for importable items with the given name in the crate and its dependencies. 
pub fn items_with_name<'a>( sema: &'a Semantics<'_, RootDatabase>, krate: Crate, name: NameToImport, assoc_item_search: AssocItemSearch, limit: Option<usize>, ) -> impl Iterator<Item = ItemInNs> + 'a { let _p = profile::span("items_with_name").detail(|| { format!( "Name: {}, crate: {:?}, assoc items: {:?}, limit: {:?}", name.text(), assoc_item_search, krate.display_name(sema.db).map(|name| name.to_string()), limit, ) }); let (mut local_query, mut external_query) = match name { NameToImport::Exact(exact_name) => { let mut local_query = symbol_index::Query::new(exact_name.clone()); local_query.exact(); let external_query = import_map::Query::new(exact_name) .name_only() .search_mode(import_map::SearchMode::Equals) .case_sensitive(); (local_query, external_query) } NameToImport::Fuzzy(fuzzy_search_string) => { let mut local_query = symbol_index::Query::new(fuzzy_search_string.clone()); let mut external_query = import_map::Query::new(fuzzy_search_string.clone()) .search_mode(import_map::SearchMode::Fuzzy) .name_only(); match assoc_item_search { AssocItemSearch::Include => {} AssocItemSearch::Exclude => { external_query = external_query.exclude_import_kind(ImportKind::AssociatedItem); } AssocItemSearch::AssocItemsOnly => { external_query = external_query.assoc_items_only(); } } if fuzzy_search_string.to_lowercase() != fuzzy_search_string { local_query.case_sensitive(); external_query = external_query.case_sensitive(); } (local_query, external_query) } }; if let Some(limit) = limit { external_query = external_query.limit(limit); local_query.limit(limit); } find_items(sema, krate, assoc_item_search, local_query, external_query) } fn find_items<'a>( sema: &'a Semantics<'_, RootDatabase>, krate: Crate, assoc_item_search: AssocItemSearch, local_query: symbol_index::Query, external_query: import_map::Query, ) -> impl Iterator<Item = ItemInNs> + 'a { let _p = profile::span("find_items"); let db = sema.db; let external_importables = krate.query_external_importables(db, 
external_query).map(|external_importable| { match external_importable { Either::Left(module_def) => ItemInNs::from(module_def), Either::Right(macro_def) => ItemInNs::from(macro_def), } }); // Query the local crate using the symbol index. let local_results = symbol_index::crate_symbols(db, krate.into(), local_query) .into_iter() .filter_map(move |local_candidate| get_name_definition(sema, &local_candidate)) .filter_map(|name_definition_to_import| match name_definition_to_import { Definition::ModuleDef(module_def) => Some(ItemInNs::from(module_def)), Definition::Macro(macro_def) => Some(ItemInNs::from(macro_def)), _ => None, }); external_importables.chain(local_results).filter(move |&item| match assoc_item_search { AssocItemSearch::Include => true, AssocItemSearch::Exclude => !is_assoc_item(item, sema.db), AssocItemSearch::AssocItemsOnly => is_assoc_item(item, sema.db), }) } fn get_name_definition( sema: &Semantics<'_, RootDatabase>, import_candidate: &FileSymbol, ) -> Option<Definition> { let _p = profile::span("get_name_definition"); let file_id = import_candidate.file_id; let candidate_node = import_candidate.ptr.to_node(sema.parse(file_id).syntax()); let candidate_name_node = if candidate_node.kind() != NAME { candidate_node.children().find(|it| it.kind() == NAME)? } else { candidate_node }; let name = ast::Name::cast(candidate_name_node)?; NameClass::classify(sema, &name)?.defined() } fn is_assoc_item(item: ItemInNs, db: &RootDatabase) -> bool { item.as_module_def().and_then(|module_def| module_def.as_assoc_item(db)).is_some() }
36.618421
101
0.640496
4b4ee2febfd06bad40bb3e53687a29ba5a73af4e
6,620
use std::{ cmp::Ordering, fmt::{Debug, Display, Formatter}, ops::Add, slice::SliceIndex, }; use crate::{ location::Location, utf8_parser::{IOk, IResultLookahead}, }; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Offset { Absolute(usize), Relative(usize), } impl Add<usize> for Offset { type Output = Offset; fn add(self, offset: usize) -> Self::Output { match self { Offset::Absolute(x) => Offset::Absolute(x + offset), _ => todo!(), } } } impl<'a> From<Input<'a>> for Location { fn from(i: Input<'a>) -> Self { match i.offset { Offset::Absolute(offset) => { assert!( i.input.is_char_boundary(offset), "offset not at char boundary" ); let line = i.input.bytes().take(offset).filter(|&b| b == b'\n').count() + 1; let (byte_ind, char_ind, _c) = get_char_at_offset(i.input, offset); if byte_ind != offset { println!("Input {:?}", i); assert_eq!(byte_ind, offset, "offset not at char boundary"); } let line_start = i .input .char_indices() .take(char_ind) .filter(|(_, c)| *c == '\n') .map(|(i, _c)| i) .last() .map(|i| i + 1) .unwrap_or(0); Location { line: line as u32, column: (char_ind - line_start + 1) as u32, } } Offset::Relative(_) => todo!(), } } } #[derive(Clone, Copy, Eq, PartialEq)] pub struct Input<'a> { offset: Offset, /// the complete input /// /// if `offset` is absolute input: &'a str, fragment: &'a str, } impl<'a> Input<'a> { pub fn new(input: &'a str) -> Self { Input { offset: Offset::Absolute(0), input, fragment: input, } } pub fn is_empty(&self) -> bool { self.fragment.is_empty() } pub fn len(&self) -> usize { self.fragment.len() } pub fn offset(&self) -> Offset { self.offset } pub fn offset_to(&self, other: &Self) -> usize { str_offset(self.fragment, other.fragment) } pub fn fragment(&self) -> &'a str { self.fragment } pub fn chars(&self) -> impl Iterator<Item = char> + 'a { self.fragment.chars() } pub fn char_indices(&self) -> impl Iterator<Item = (usize, char)> + 'a { self.fragment.char_indices() } pub fn take_split(self, count: usize) -> IOk<'a, Self> { 
(self.slice(count..), self.slice(..count)).into() } pub fn slice(&self, range: impl SliceIndex<str, Output = str>) -> Self { let next_fragment = &self.fragment[range]; let consumed_len = str_offset(self.fragment, next_fragment); if consumed_len == 0 { return Input { offset: self.offset, input: self.input, fragment: next_fragment, }; } let next_offset = self.offset + consumed_len; Input { offset: next_offset, input: self.input, fragment: next_fragment, } } } #[cfg(test)] impl<'a> crate::utf8_parser::test_util::TestMockNew for Input<'a> { fn new_mocked() -> Self { Input::new("") } } impl<'a> Debug for Input<'a> { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { Display::fmt(self, f) } } impl<'a> Display for Input<'a> { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!( f, "{} (`{}`)", Location::from(*self), self.fragment.get(..1).unwrap_or("eof"), ) } } impl PartialOrd for Input<'_> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { let first = self.fragment.as_ptr(); let second = other.fragment.as_ptr(); let first = first as usize; let second = second as usize; first.partial_cmp(&second) } } impl Ord for Input<'_> { fn cmp(&self, other: &Self) -> Ordering { let first = self.fragment.as_ptr(); let second = other.fragment.as_ptr(); let first = first as usize; let second = second as usize; first.cmp(&second) } } pub fn position(input: Input) -> IResultLookahead<Input> { Ok(input.take_split(0)) } /// returns (byte index, char index, char) #[inline] fn get_char_at_offset(input: &str, offset: usize) -> (usize, usize, char) { input .char_indices() .enumerate() .map(|(c_ind, (b_ind, c))| (b_ind, c_ind, c)) // we now have an iterator of (byte index, char index, char) .find(|(i, _, _)| *i >= offset) .unwrap_or((input.len(), input.chars().count(), '�')) } /// Byte offset between string slices fn str_offset(first: &str, second: &str) -> usize { let first = first.as_ptr(); let second = second.as_ptr(); second as usize - first as usize } 
#[cfg(test)] mod tests { use crate::{ location::Location, utf8_parser::{input::get_char_at_offset, Input}, }; #[test] fn test_location() { let input = Input::new("Foo(\na: true,\nb: false)"); assert_eq!( Location::from(input.take_split(0).remaining), Location { line: 1, column: 1 } ); assert_eq!( Location::from(input.take_split(1).remaining), Location { line: 1, column: 2 } ); assert_eq!( Location::from(input.take_split(5).remaining), Location { line: 2, column: 1 } ); assert_eq!( Location::from(input.take_split(6).remaining), Location { line: 2, column: 2 } ); assert_eq!( Location::from(input.take_split(14).remaining), Location { line: 3, column: 1 } ); } #[test] fn test_char_offset_basic() { assert_eq!(get_char_at_offset("123", 1), (1, 1, '2')); } #[test] fn test_char_offset_start() { assert_eq!(get_char_at_offset("123", 0), (0, 0, '1')); } #[test] fn test_char_offset_end() { assert_eq!(get_char_at_offset("123", 2), (2, 2, '3')); } #[test] fn test_char_offset_eof() { assert_eq!(get_char_at_offset("123", 3), (3, 3, '�')); } }
25.075758
92
0.505589
9cad71e30bddb85e8d78b798c70b8d32543339ca
8,894
use std::{io, io::prelude::Write}; use super::OutputFormatter; use crate::{ bench::fmt_bench_samples, console::{ConsoleTestState, OutputLocation}, term, test_result::TestResult, time, types::TestDesc, }; pub(crate) struct PrettyFormatter<T> { out: OutputLocation<T>, use_color: bool, time_options: Option<time::TestTimeOptions>, /// Number of columns to fill when aligning names max_name_len: usize, is_multithreaded: bool, } impl<T: Write> PrettyFormatter<T> { pub fn new( out: OutputLocation<T>, use_color: bool, max_name_len: usize, is_multithreaded: bool, time_options: Option<time::TestTimeOptions>, ) -> Self { PrettyFormatter { out, use_color, max_name_len, is_multithreaded, time_options } } #[cfg(test)] pub fn output_location(&self) -> &OutputLocation<T> { &self.out } pub fn write_ok(&mut self) -> io::Result<()> { self.write_short_result("ok", term::color::GREEN) } pub fn write_failed(&mut self) -> io::Result<()> { self.write_short_result("FAILED", term::color::RED) } pub fn write_ignored(&mut self) -> io::Result<()> { self.write_short_result("ignored", term::color::YELLOW) } pub fn write_allowed_fail(&mut self) -> io::Result<()> { self.write_short_result("FAILED (allowed)", term::color::YELLOW) } pub fn write_time_failed(&mut self) -> io::Result<()> { self.write_short_result("FAILED (time limit exceeded)", term::color::RED) } pub fn write_bench(&mut self) -> io::Result<()> { self.write_pretty("bench", term::color::CYAN) } pub fn write_short_result( &mut self, result: &str, color: term::color::Color, ) -> io::Result<()> { self.write_pretty(result, color) } pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> { match self.out { OutputLocation::Pretty(ref mut term) => { if self.use_color { term.fg(color)?; } term.write_all(word.as_bytes())?; if self.use_color { term.reset()?; } term.flush() } OutputLocation::Raw(ref mut stdout) => { stdout.write_all(word.as_bytes())?; stdout.flush() } } } pub fn write_plain<S: AsRef<str>>(&mut self, 
s: S) -> io::Result<()> { let s = s.as_ref(); self.out.write_all(s.as_bytes())?; self.out.flush() } fn write_time( &mut self, desc: &TestDesc, exec_time: Option<&time::TestExecTime>, ) -> io::Result<()> { if let (Some(opts), Some(time)) = (self.time_options, exec_time) { let time_str = format!(" <{}>", time); let color = if opts.colored { if opts.is_critical(desc, time) { Some(term::color::RED) } else if opts.is_warn(desc, time) { Some(term::color::YELLOW) } else { None } } else { None }; match color { Some(color) => self.write_pretty(&time_str, color)?, None => self.write_plain(&time_str)?, } } Ok(()) } fn write_results( &mut self, inputs: &Vec<(TestDesc, Vec<u8>)>, results_type: &str, ) -> io::Result<()> { let results_out_str = format!("\n{}:\n", results_type); self.write_plain(&results_out_str)?; let mut results = Vec::new(); let mut stdouts = String::new(); for &(ref f, ref stdout) in inputs { results.push(f.name.to_string()); if !stdout.is_empty() { stdouts.push_str(&format!("---- {} stdout ----\n", f.name)); let output = String::from_utf8_lossy(stdout); stdouts.push_str(&output); stdouts.push('\n'); } } if !stdouts.is_empty() { self.write_plain("\n")?; self.write_plain(&stdouts)?; } self.write_plain(&results_out_str)?; results.sort(); for name in &results { self.write_plain(&format!(" {}\n", name))?; } Ok(()) } pub fn write_successes(&mut self, state: &ConsoleTestState) -> io::Result<()> { self.write_results(&state.not_failures, "successes") } pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> { self.write_results(&state.failures, "failures") } pub fn write_time_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> { self.write_results(&state.time_failures, "failures (time limit exceeded)") } fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> { let name = desc.padded_name(self.max_name_len, desc.name.padding()); if let Some(test_mode) = desc.test_mode() { self.write_plain(&format!("test {} - {} ... 
", name, test_mode))?; } else { self.write_plain(&format!("test {} ... ", name))?; } Ok(()) } } impl<T: Write> OutputFormatter for PrettyFormatter<T> { fn write_run_start(&mut self, test_count: usize) -> io::Result<()> { let noun = if test_count != 1 { "tests" } else { "test" }; self.write_plain(&format!("\nrunning {} {}\n", test_count, noun)) } fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> { // When running tests concurrently, we should not print // the test's name as the result will be mis-aligned. // When running the tests serially, we print the name here so // that the user can see which test hangs. if !self.is_multithreaded { self.write_test_name(desc)?; } Ok(()) } fn write_result( &mut self, desc: &TestDesc, result: &TestResult, exec_time: Option<&time::TestExecTime>, _: &[u8], _: &ConsoleTestState, ) -> io::Result<()> { if self.is_multithreaded { self.write_test_name(desc)?; } match *result { TestResult::TrOk => self.write_ok()?, TestResult::TrFailed | TestResult::TrFailedMsg(_) => self.write_failed()?, TestResult::TrIgnored => self.write_ignored()?, TestResult::TrAllowedFail => self.write_allowed_fail()?, TestResult::TrBench(ref bs) => { self.write_bench()?; self.write_plain(&format!(": {}", fmt_bench_samples(bs)))?; } TestResult::TrTimedFail => self.write_time_failed()?, } self.write_time(desc, exec_time)?; self.write_plain("\n") } fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { self.write_plain(&format!( "test {} has been running for over {} seconds\n", desc.name, time::TEST_WARN_TIMEOUT_S )) } fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> { if state.options.display_output { self.write_successes(state)?; } let success = state.failed == 0; if !success { if !state.failures.is_empty() { self.write_failures(state)?; } if !state.time_failures.is_empty() { self.write_time_failures(state)?; } } self.write_plain("\ntest result: ")?; if success { // There's no parallelism at this point so it's 
safe to use color self.write_pretty("ok", term::color::GREEN)?; } else { self.write_pretty("FAILED", term::color::RED)?; } let s = if state.allowed_fail > 0 { format!( ". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out", state.passed, state.failed + state.allowed_fail, state.allowed_fail, state.ignored, state.measured, state.filtered_out ) } else { format!( ". {} passed; {} failed; {} ignored; {} measured; {} filtered out", state.passed, state.failed, state.ignored, state.measured, state.filtered_out ) }; self.write_plain(&s)?; if let Some(ref exec_time) = state.exec_time { let time_str = format!("; finished in {}", exec_time); self.write_plain(&time_str)?; } self.write_plain("\n\n")?; Ok(success) } }
30.668966
96
0.52541
5b1ce27f0799b6dbf9afc208d29d8619c31412e1
991
//! This crate is responsible on decoding function buffers. //! Its code is meant to be used as part of SVM Templates (i.e Smart-Contract) code. //! That's the reason why we add to the crate the `#![no_std]` annotation. //! (not using the Rust standard library should result in a smaller WASM file). //! Besides Smart-Contracts, this crate should be ready to use in other contexts. //! For example, a client such as `smapp` or the `Process Explorer` should be able to interpret an encoded `CallData` //! (which is part of the SVM transaction) in a friendly manner. //! //! For more info regarding the encoding scheme see the counterpart `svm-abi-encoder` crate. //! #![no_std] #![allow(missing_docs)] #![allow(unused)] #![deny(dead_code)] #![deny(unreachable_code)] mod calldata; mod cursor; mod decoder; pub use calldata::CallData; pub use cursor::Cursor; pub use decoder::{DecodeError, Decoder}; /// `ReturnData` is a type alias to `CallData` for now. pub type ReturnData = CallData;
34.172414
117
0.726539
8f7cb914c47354294c7342dbe17b88e577c02eb2
33,818
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::{Layout, LayoutTyper}; use rustc::mir::tcx::LvalueTy; use rustc::mir; use middle::lang_items::ExchangeMallocFnLangItem; use base; use builder::Builder; use callee; use common::{self, val_ty, C_bool, C_null, C_uint}; use common::{C_integral}; use adt; use machine; use monomorphize; use type_::Type; use type_of; use tvec; use value::Value; use Disr; use super::MirContext; use super::constant::const_scalar_checked_binop; use super::operand::{OperandRef, OperandValue}; use super::lvalue::LvalueRef; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_rvalue(&mut self, bcx: Builder<'a, 'tcx>, dest: LvalueRef<'tcx>, rvalue: &mir::Rvalue<'tcx>) -> Builder<'a, 'tcx> { debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", Value(dest.llval), rvalue); match *rvalue { mir::Rvalue::Use(ref operand) => { let tr_operand = self.trans_operand(&bcx, operand); // FIXME: consider not copying constants through stack. (fixable by translating // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand); bcx } mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => { let cast_ty = self.monomorphize(&cast_ty); if common::type_is_fat_ptr(bcx.ccx, cast_ty) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. 
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp); return bcx; } // Unsize of a nontrivial struct. I would prefer for // this to be eliminated by MIR translation, but // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. let operand = self.trans_operand(&bcx, source); let operand = operand.pack_if_pair(&bcx); let llref = match operand.val { OperandValue::Pair(..) => bug!(), OperandValue::Immediate(llval) => { // unsize from an immediate structure. We don't // really need a temporary alloca here, but // avoiding it would require us to have // `coerce_unsized_into` use extractvalue to // index into the struct, and this case isn't // important enough for it. debug!("trans_rvalue: creating ugly alloca"); let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp"); base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty); scratch } OperandValue::Ref(llref, align) => { LvalueRef::new_sized_ty(llref, operand.ty, align) } }; base::coerce_unsized_into(&bcx, &llref, &dest); bcx } mir::Rvalue::Repeat(ref elem, ref count) => { let tr_elem = self.trans_operand(&bcx, elem); let size = count.as_u64(bcx.tcx().sess.target.uint_type); let size = C_uint(bcx.ccx, size); let base = base::get_dataptr(&bcx, dest.llval); tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| { self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem); bcx.br(loop_bb); }) } mir::Rvalue::Aggregate(ref kind, ref operands) => { match *kind { mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => { let disr = Disr::for_variant(bcx.tcx(), adt_def, variant_index); let dest_ty = dest.ty.to_ty(bcx.tcx()); adt::trans_set_discr(&bcx, dest_ty, dest.llval, disr); for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. 
if !common::type_is_zero_size(bcx.ccx, op.ty) { let mut val = LvalueRef::new_sized( dest.llval, dest.ty, dest.alignment); let field_index = active_field_index.unwrap_or(i); val.ty = LvalueTy::Downcast { adt_def: adt_def, substs: self.monomorphize(&substs), variant_index: variant_index, }; let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index); self.store_operand(&bcx, lldest_i, align.to_align(), op); } } }, _ => { // If this is a tuple or closure, we need to translate GEP indices. let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx())); let translation = if let Layout::Univariant { ref variant, .. } = *layout { Some(&variant.memory_index) } else { None }; let alignment = dest.alignment; for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. if !common::type_is_zero_size(bcx.ccx, op.ty) { // Note: perhaps this should be StructGep, but // note that in some cases the values here will // not be structs but arrays. 
let i = if let Some(ref t) = translation { t[i] as usize } else { i }; let dest = bcx.gepi(dest.llval, &[0, i]); self.store_operand(&bcx, dest, alignment.to_align(), op); } } } } bcx } _ => { assert!(rvalue_creates_operand(rvalue)); let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp); bcx } } } pub fn trans_rvalue_operand(&mut self, bcx: Builder<'a, 'tcx>, rvalue: &mir::Rvalue<'tcx>) -> (Builder<'a, 'tcx>, OperandRef<'tcx>) { assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); match *rvalue { mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { let operand = self.trans_operand(&bcx, source); debug!("cast operand is {:?}", operand); let cast_ty = self.monomorphize(&cast_ty); let val = match *kind { mir::CastKind::ReifyFnPointer => { match operand.ty.sty { ty::TyFnDef(def_id, substs, _) => { OperandValue::Immediate( callee::resolve_and_get_fn(bcx.ccx, def_id, substs)) } _ => { bug!("{} cannot be reified to a fn ptr", operand.ty) } } } mir::CastKind::ClosureFnPointer => { match operand.ty.sty { ty::TyClosure(def_id, substs) => { let instance = monomorphize::resolve_closure( bcx.ccx.shared(), def_id, substs, ty::ClosureKind::FnOnce); OperandValue::Immediate(callee::get_fn(bcx.ccx, instance)) } _ => { bug!("{} cannot be cast to a fn ptr", operand.ty) } } } mir::CastKind::UnsafeFnPointer => { // this is a no-op at the LLVM level operand.val } mir::CastKind::Unsize => { // unsize targets other than to a fat pointer currently // can't be operands. assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty)); match operand.val { OperandValue::Pair(lldata, llextra) => { // unsize from a fat pointer - this is a // "trait-object-to-supertrait" coercion, for // example, // &'a fmt::Debug+Send => &'a fmt::Debug, // So we need to pointercast the base to ensure // the types match up. 
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty); let lldata = bcx.pointercast(lldata, llcast_ty); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { // "standard" unsize let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata, operand.ty, cast_ty); OperandValue::Pair(lldata, llextra) } OperandValue::Ref(..) => { bug!("by-ref operand {:?} in trans_rvalue_operand", operand); } } } mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => { let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty); let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty); if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val { if common::type_is_fat_ptr(bcx.ccx, cast_ty) { let ll_cft = ll_cast_ty.field_types(); let ll_fft = ll_from_ty.field_types(); let data_cast = bcx.pointercast(data_ptr, ll_cft[0]); assert_eq!(ll_cft[1].kind(), ll_fft[1].kind()); OperandValue::Pair(data_cast, meta_ptr) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. let llval = bcx.pointercast(data_ptr, ll_cast_ty); OperandValue::Immediate(llval) } } else { bug!("Unexpected non-Pair operand") } } mir::CastKind::Misc => { debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty)); let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty); let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty); let llval = operand.immediate(); let l = bcx.ccx.layout_of(operand.ty); let signed = if let Layout::CEnum { signed, min, max, .. } = *l { if max > min { // We want `table[e as usize]` to not // have bound checks, and this is the most // convenient place to put the `assume`. 
base::call_assume(&bcx, bcx.icmp( llvm::IntULE, llval, C_integral(common::val_ty(llval), max, false) )); } signed } else { operand.ty.is_signed() }; let newval = match (r_t_in, r_t_out) { (CastTy::Int(_), CastTy::Int(_)) => { bcx.intcast(llval, ll_t_out, signed) } (CastTy::Float, CastTy::Float) => { let srcsz = ll_t_in.float_width(); let dstsz = ll_t_out.float_width(); if dstsz > srcsz { bcx.fpext(llval, ll_t_out) } else if srcsz > dstsz { bcx.fptrunc(llval, ll_t_out) } else { llval } } (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_)) | (CastTy::RPtr(_), CastTy::Ptr(_)) => bcx.pointercast(llval, ll_t_out), (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => bcx.ptrtoint(llval, ll_t_out), (CastTy::Int(_), CastTy::Ptr(_)) => bcx.inttoptr(llval, ll_t_out), (CastTy::Int(_), CastTy::Float) if signed => bcx.sitofp(llval, ll_t_out), (CastTy::Int(_), CastTy::Float) => bcx.uitofp(llval, ll_t_out), (CastTy::Float, CastTy::Int(IntTy::I)) => bcx.fptosi(llval, ll_t_out), (CastTy::Float, CastTy::Int(_)) => bcx.fptoui(llval, ll_t_out), _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty) }; OperandValue::Immediate(newval) } }; let operand = OperandRef { val: val, ty: cast_ty }; (bcx, operand) } mir::Rvalue::Ref(_, bk, ref lvalue) => { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); let ty = tr_lvalue.ty.to_ty(bcx.tcx()); let ref_ty = bcx.tcx().mk_ref( bcx.tcx().mk_region(ty::ReErased), ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() } ); // Note: lvalues are indirect, so storing the `llval` into the // destination effectively creates a reference. 
let operand = if bcx.ccx.shared().type_is_sized(ty) { OperandRef { val: OperandValue::Immediate(tr_lvalue.llval), ty: ref_ty, } } else { OperandRef { val: OperandValue::Pair(tr_lvalue.llval, tr_lvalue.llextra), ty: ref_ty, } }; (bcx, operand) } mir::Rvalue::Len(ref lvalue) => { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); let operand = OperandRef { val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)), ty: bcx.tcx().types.usize, }; (bcx, operand) } mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { let lhs = self.trans_operand(&bcx, lhs); let rhs = self.trans_operand(&bcx, rhs); let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) { match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { self.trans_fat_ptr_binop(&bcx, op, lhs_addr, lhs_extra, rhs_addr, rhs_extra, lhs.ty) } _ => bug!() } } else { self.trans_scalar_binop(&bcx, op, lhs.immediate(), rhs.immediate(), lhs.ty) }; let operand = OperandRef { val: OperandValue::Immediate(llresult), ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty), }; (bcx, operand) } mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => { let lhs = self.trans_operand(&bcx, lhs); let rhs = self.trans_operand(&bcx, rhs); let result = self.trans_scalar_checked_binop(&bcx, op, lhs.immediate(), rhs.immediate(), lhs.ty); let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty); let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false); let operand = OperandRef { val: result, ty: operand_ty }; (bcx, operand) } mir::Rvalue::UnaryOp(op, ref operand) => { let operand = self.trans_operand(&bcx, operand); let lloperand = operand.immediate(); let is_float = operand.ty.is_fp(); let llval = match op { mir::UnOp::Not => bcx.not(lloperand), mir::UnOp::Neg => if is_float { bcx.fneg(lloperand) } else { bcx.neg(lloperand) } }; (bcx, OperandRef { val: OperandValue::Immediate(llval), ty: operand.ty, }) } mir::Rvalue::Discriminant(ref lvalue) => { let discr_lvalue = self.trans_lvalue(&bcx, lvalue); let 
enum_ty = discr_lvalue.ty.to_ty(bcx.tcx()); let discr_ty = rvalue.ty(&*self.mir, bcx.tcx()); let discr_type = type_of::immediate_type_of(bcx.ccx, discr_ty); let discr = adt::trans_get_discr(&bcx, enum_ty, discr_lvalue.llval, discr_lvalue.alignment, Some(discr_type), true); (bcx, OperandRef { val: OperandValue::Immediate(discr), ty: discr_ty }) } mir::Rvalue::Box(content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); let llty = type_of::type_of(bcx.ccx, content_ty); let llsize = machine::llsize_of(bcx.ccx, llty); let align = bcx.ccx.align_of(content_ty); let llalign = C_uint(bcx.ccx, align); let llty_ptr = llty.ptr_to(); let box_ty = bcx.tcx().mk_box(content_ty); // Allocate space: let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); } }; let instance = ty::Instance::mono(bcx.tcx(), def_id); let r = callee::get_fn(bcx.ccx, instance); let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr); let operand = OperandRef { val: OperandValue::Immediate(val), ty: box_ty, }; (bcx, operand) } mir::Rvalue::Use(ref operand) => { let operand = self.trans_operand(&bcx, operand); (bcx, operand) } mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) 
=> { bug!("cannot generate operand from rvalue {:?}", rvalue); } } } pub fn trans_scalar_binop(&mut self, bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, input_ty: Ty<'tcx>) -> ValueRef { let is_float = input_ty.is_fp(); let is_signed = input_ty.is_signed(); let is_nil = input_ty.is_nil(); let is_bool = input_ty.is_bool(); match op { mir::BinOp::Add => if is_float { bcx.fadd(lhs, rhs) } else { bcx.add(lhs, rhs) }, mir::BinOp::Sub => if is_float { bcx.fsub(lhs, rhs) } else { bcx.sub(lhs, rhs) }, mir::BinOp::Mul => if is_float { bcx.fmul(lhs, rhs) } else { bcx.mul(lhs, rhs) }, mir::BinOp::Div => if is_float { bcx.fdiv(lhs, rhs) } else if is_signed { bcx.sdiv(lhs, rhs) } else { bcx.udiv(lhs, rhs) }, mir::BinOp::Rem => if is_float { bcx.frem(lhs, rhs) } else if is_signed { bcx.srem(lhs, rhs) } else { bcx.urem(lhs, rhs) }, mir::BinOp::BitOr => bcx.or(lhs, rhs), mir::BinOp::BitAnd => bcx.and(lhs, rhs), mir::BinOp::BitXor => bcx.xor(lhs, rhs), mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs), mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil { C_bool(bcx.ccx, match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() }) } else if is_float { bcx.fcmp( base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs ) } else { let (lhs, rhs) = if is_bool { // FIXME(#36856) -- extend the bools into `i8` because // LLVM's i1 comparisons are broken. 
(bcx.zext(lhs, Type::i8(bcx.ccx)), bcx.zext(rhs, Type::i8(bcx.ccx))) } else { (lhs, rhs) }; bcx.icmp( base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs ) } } } pub fn trans_fat_ptr_binop(&mut self, bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs_addr: ValueRef, lhs_extra: ValueRef, rhs_addr: ValueRef, rhs_extra: ValueRef, _input_ty: Ty<'tcx>) -> ValueRef { match op { mir::BinOp::Eq => { bcx.and( bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra) ) } mir::BinOp::Ne => { bcx.or( bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr), bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra) ) } mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => { // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1) let (op, strict_op) = match op { mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT), mir::BinOp::Le => (llvm::IntULE, llvm::IntULT), mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT), mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT), _ => bug!(), }; bcx.or( bcx.icmp(strict_op, lhs_addr, rhs_addr), bcx.and( bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), bcx.icmp(op, lhs_extra, rhs_extra) ) ) } _ => { bug!("unexpected fat ptr binop"); } } } pub fn trans_scalar_checked_binop(&mut self, bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, input_ty: Ty<'tcx>) -> OperandValue { // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. if !bcx.ccx.check_overflow() { let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty); return OperandValue::Pair(val, C_bool(bcx.ccx, false)); } // First try performing the operation on constants, which // will only succeed if both operands are constant. // This is necessary to determine when an overflow Assert // will always panic at runtime, and produce a warning. 
if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) { return OperandValue::Pair(val, C_bool(bcx.ccx, of)); } let (val, of) = match op { // These are checked using intrinsics mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => { let oop = match op { mir::BinOp::Add => OverflowOp::Add, mir::BinOp::Sub => OverflowOp::Sub, mir::BinOp::Mul => OverflowOp::Mul, _ => unreachable!() }; let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty); let res = bcx.call(intrinsic, &[lhs, rhs], None); (bcx.extract_value(res, 0), bcx.extract_value(res, 1)) } mir::BinOp::Shl | mir::BinOp::Shr => { let lhs_llty = val_ty(lhs); let rhs_llty = val_ty(rhs); let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true); let outer_bits = bcx.and(rhs, invert_mask); let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty); (val, of) } _ => { bug!("Operator `{:?}` is not a checkable operator", op) } }; OperandValue::Pair(val, of) } } pub fn rvalue_creates_operand(rvalue: &mir::Rvalue) -> bool { match *rvalue { mir::Rvalue::Ref(..) | mir::Rvalue::Len(..) | mir::Rvalue::Cast(..) | // (*) mir::Rvalue::BinaryOp(..) | mir::Rvalue::CheckedBinaryOp(..) | mir::Rvalue::UnaryOp(..) | mir::Rvalue::Discriminant(..) | mir::Rvalue::Box(..) | mir::Rvalue::Use(..) => // (*) true, mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => false, } // (*) this is only true if the type is suitable } #[derive(Copy, Clone)] enum OverflowOp { Add, Sub, Mul } fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{TyInt, TyUint}; let tcx = bcx.tcx(); let new_sty = match ty.sty { TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] 
{ "16" => TyInt(I16), "32" => TyInt(I32), "64" => TyInt(I64), _ => panic!("unsupported target word size") }, TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] { "16" => TyUint(U16), "32" => TyUint(U32), "64" => TyUint(U64), _ => panic!("unsupported target word size") }, ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(), _ => panic!("tried to get overflow intrinsic for op applied to non-int type") }; let name = match oop { OverflowOp::Add => match new_sty { TyInt(I8) => "llvm.sadd.with.overflow.i8", TyInt(I16) => "llvm.sadd.with.overflow.i16", TyInt(I32) => "llvm.sadd.with.overflow.i32", TyInt(I64) => "llvm.sadd.with.overflow.i64", TyInt(I128) => "llvm.sadd.with.overflow.i128", TyUint(U8) => "llvm.uadd.with.overflow.i8", TyUint(U16) => "llvm.uadd.with.overflow.i16", TyUint(U32) => "llvm.uadd.with.overflow.i32", TyUint(U64) => "llvm.uadd.with.overflow.i64", TyUint(U128) => "llvm.uadd.with.overflow.i128", _ => unreachable!(), }, OverflowOp::Sub => match new_sty { TyInt(I8) => "llvm.ssub.with.overflow.i8", TyInt(I16) => "llvm.ssub.with.overflow.i16", TyInt(I32) => "llvm.ssub.with.overflow.i32", TyInt(I64) => "llvm.ssub.with.overflow.i64", TyInt(I128) => "llvm.ssub.with.overflow.i128", TyUint(U8) => "llvm.usub.with.overflow.i8", TyUint(U16) => "llvm.usub.with.overflow.i16", TyUint(U32) => "llvm.usub.with.overflow.i32", TyUint(U64) => "llvm.usub.with.overflow.i64", TyUint(U128) => "llvm.usub.with.overflow.i128", _ => unreachable!(), }, OverflowOp::Mul => match new_sty { TyInt(I8) => "llvm.smul.with.overflow.i8", TyInt(I16) => "llvm.smul.with.overflow.i16", TyInt(I32) => "llvm.smul.with.overflow.i32", TyInt(I64) => "llvm.smul.with.overflow.i64", TyInt(I128) => "llvm.smul.with.overflow.i128", TyUint(U8) => "llvm.umul.with.overflow.i8", TyUint(U16) => "llvm.umul.with.overflow.i16", TyUint(U32) => "llvm.umul.with.overflow.i32", TyUint(U64) => "llvm.umul.with.overflow.i64", TyUint(U128) => "llvm.umul.with.overflow.i128", _ => unreachable!(), }, }; 
bcx.ccx.get_intrinsic(&name) }
44.851459
100
0.432521
38464737ca58c5803aaeac92ed5fcc2bf08a7875
2,683
extern crate clap; use sp_lib::util::{common_args, datetime}; use clap::{Arg, App}; pub struct Arguments { calculate: String, ds_root: String, ds_name: String, symbol: String, window: usize, from: Option<datetime::LocalDate> } impl Arguments { pub fn new() -> Self { let parsed_args = App::new("Stock Portfolio Stats Tool") .version(common_args::app_version()) .about("Stats tool - describe and calculate") // Options .arg(common_args::ds_root()) .arg(common_args::ds_name()) .arg(Arg::with_name("calculate") .short("c") .long("calc") .help("Calculate stats, one of desc, divdesc, vwap, mvwap, roc") .required(true) .takes_value(true)) .arg(Arg::with_name("symbol") .short("y") .long("symbol") .help("Stock symbol") .required(true) .takes_value(true)) .arg(Arg::with_name("window") .short("w") .long("window") .help("Number of days window, required with mvwap and roc calculations") .takes_value(true)) .arg(Arg::with_name("from_date") .short("f") .long("from") .help("Describe and calculate starting from date YYYY-MM-DD") .takes_value(true)) .get_matches(); Arguments { calculate: String::from(parsed_args.value_of("calculate").unwrap()), ds_root: common_args::parsed_ds_root(&parsed_args).expect("Missing datastore root"), ds_name: common_args::parsed_ds_name(&parsed_args), symbol: String::from(parsed_args.value_of("symbol").unwrap()), window: match parsed_args.value_of("window") { Some(win) => win.parse::<usize>().expect("Invalid calculation window"), None => 0 }, from: match parsed_args.value_of("from_date") { Some(date) => Some(datetime::parse_date(date).expect("Invalid from date")), None => None } } } pub fn calculate(&self) -> &String { &self.calculate } pub fn ds_root(&self) -> &String { &self.ds_root } pub fn ds_name(&self) -> &String { &self.ds_name } pub fn symbol(&self) -> &String { &self.symbol } pub fn window(&self) -> usize { self.window } pub fn from(&self) -> Option<datetime::LocalDate> { self.from } }
30.488636
96
0.51435
ed941ac0fdb996601754e3278c0347c35f53920f
1,101
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Checks that extern fn pointers implement the full range of Fn traits. #![feature(unboxed_closures)] #![feature(unboxed_closures)] use std::ops::{Fn,FnMut,FnOnce}; fn square(x: int) -> int { x * x } fn call_it<F:Fn(int)->int>(f: &F, x: int) -> int { f(x) } fn call_it_mut<F:FnMut(int)->int>(f: &mut F, x: int) -> int { f(x) } fn call_it_once<F:FnOnce(int)->int>(f: F, x: int) -> int { f(x) } fn main() { let x = call_it(&square, 22); let y = call_it_mut(&mut square, 22); let z = call_it_once(square, 22); assert_eq!(x, square(22)); assert_eq!(y, square(22)); assert_eq!(z, square(22)); }
27.525
72
0.658492
ccb2b0ccc52440420ccb93ae1c150c3c46480abc
2,457
use cosmwasm_std::{Addr, BankMsg, coin, CosmosMsg, Env, MessageInfo, Response, StdError, SubMsg, Uint128}; use crate::states::Deposit; use valkyrie::mock_querier::{CustomDeps, custom_deps}; use crate::executions::withdraw; use valkyrie::test_constants::campaign::{campaign_env, DEPOSIT_AMOUNT, DEPOSIT_LOCK_PERIOD}; use cosmwasm_std::testing::mock_info; use valkyrie::errors::ContractError; use valkyrie::common::ContractResult; pub fn exec( deps: &mut CustomDeps, env: Env, info: MessageInfo, amount: Uint128, ) -> ContractResult<Response> { withdraw(deps.as_mut(), env, info, amount) } pub fn will_success( deps: &mut CustomDeps, sender: &str, amount: Uint128, ) -> (Env, MessageInfo, Response) { let env = campaign_env(); let info = mock_info(sender, &[]); let response = exec(deps, env.clone(), info.clone(), amount).unwrap(); (env, info, response) } #[test] fn succeed() { let mut deps = custom_deps(); super::instantiate::default(&mut deps); let (env,_ , _) = super::deposit::will_success(&mut deps, "Actor", Uint128::new(1000)); let mut deposit = Deposit::load(&deps.storage, &Addr::unchecked("Actor")).unwrap(); deposit.locked_amounts = vec![(DEPOSIT_AMOUNT, env.block.height + DEPOSIT_LOCK_PERIOD)]; deposit.save(&mut deps.storage).unwrap(); let withdraw_amount = Uint128::new(1000).checked_sub(DEPOSIT_AMOUNT).unwrap(); let (_, _, response) = will_success(&mut deps, "Actor", withdraw_amount); assert_eq!(response.messages, vec![SubMsg::new(CosmosMsg::Bank(BankMsg::Send { to_address: "Actor".to_string(), amount: vec![coin(withdraw_amount.u128(), "uusd")], }))]); } #[test] fn failed_overdrawn() { let mut deps = custom_deps(); super::instantiate::default(&mut deps); let (env, _, _) = super::deposit::will_success(&mut deps, "Actor", Uint128::new(1000)); let mut deposit = Deposit::load(&deps.storage, &Addr::unchecked("Actor")).unwrap(); deposit.locked_amounts = vec![(DEPOSIT_AMOUNT, env.block.height + DEPOSIT_LOCK_PERIOD)]; deposit.save(&mut deps.storage).unwrap(); let 
withdraw_amount = Uint128::new(1000).checked_sub(DEPOSIT_AMOUNT).unwrap() + Uint128::new(1); let result = exec( &mut deps, campaign_env(), mock_info("Actor", &[]), withdraw_amount, ); assert_eq!(result.unwrap_err(), ContractError::Std(StdError::generic_err("Overdraw deposit"))); }
34.125
106
0.674807
081948016021033c13ff731c348e9ea1576e6192
180
/// Source position #[derive(Copy, Clone, PartialEq, Debug)] pub(crate) struct Position { pub(crate) offset: usize, pub(crate) column: usize, pub(crate) line: usize, }
22.5
40
0.666667
d783f3e9401dfaade452b9a2b484cf5f73f2120c
3,386
use super::Expression; use nom::{ IResult, Err, error::ErrorKind, bytes::complete::{is_a, tag}, character::complete::{char, digit1, space0}, sequence::{delimited, pair, preceded}, branch::alt, combinator::{map, map_res, opt, recognize}, }; use std::str::{from_utf8, FromStr}; fn raw_ident(i: &str) -> IResult<&str, String> { map(is_a( "abcdefghijklmnopqrstuvwxyz \ ABCDEFGHIJKLMNOPQRSTUVWXYZ \ 0123456789 \ _-" ), |s:&str| s.to_string())(i) } fn integer(i: &str) -> IResult<&str, isize> { map_res( delimited( space0, recognize(pair(opt(tag("-")), digit1)), space0 ), FromStr::from_str )(i) } fn ident(i: &str) -> IResult<&str, Expression> { map(raw_ident, Expression::Identifier)(i) } fn postfix<'a>(expr: Expression) -> impl Fn(&'a str) -> IResult<&'a str, Expression> { let e2 = expr.clone(); let child = map(preceded(tag("."), raw_ident), move |id| Expression::Child(Box::new(expr.clone()), id)); let subscript = map(delimited(char('['), integer, char(']')), move |num| Expression::Subscript(Box::new(e2.clone()), num)); alt(( child, subscript )) } pub fn from_str(input: &str) -> Result<Expression, ErrorKind> { match ident(input) { Ok((mut rem, mut expr)) => { while !rem.is_empty() { match postfix(expr)(rem) { Ok((rem_, expr_)) => { rem = rem_; expr = expr_; } // Forward Incomplete and Error result => { return result.map(|(_, o)| o).map_err(to_error_kind); } } } Ok(expr) } // Forward Incomplete and Error result => result.map(|(_, o)| o).map_err(to_error_kind), } } pub fn to_error_kind(e: Err<(&str, ErrorKind)>) -> ErrorKind { match e { Err::Incomplete(_) => ErrorKind::Complete, Err::Failure((_, e)) | Err::Error((_, e)) => e, } } #[cfg(test)] mod test { use super::Expression::*; use super::*; #[test] fn test_id() { let parsed: Expression = from_str("abcd").unwrap(); assert_eq!(parsed, Identifier("abcd".into())); } #[test] fn test_id_dash() { let parsed: Expression = from_str("abcd-efgh").unwrap(); assert_eq!(parsed, Identifier("abcd-efgh".into())); } #[test] fn test_child() { 
let parsed: Expression = from_str("abcd.efgh").unwrap(); let expected = Child(Box::new(Identifier("abcd".into())), "efgh".into()); assert_eq!(parsed, expected); let parsed: Expression = from_str("abcd.efgh.ijkl").unwrap(); let expected = Child( Box::new(Child(Box::new(Identifier("abcd".into())), "efgh".into())), "ijkl".into(), ); assert_eq!(parsed, expected); } #[test] fn test_subscript() { let parsed: Expression = from_str("abcd[12]").unwrap(); let expected = Subscript(Box::new(Identifier("abcd".into())), 12); assert_eq!(parsed, expected); } #[test] fn test_subscript_neg() { let parsed: Expression = from_str("abcd[-1]").unwrap(); let expected = Subscript(Box::new(Identifier("abcd".into())), -1); assert_eq!(parsed, expected); } }
26.046154
125
0.53987
38ead9459b2b9645f108efe4cb5e86ff5c4d876f
2,641
mod custom_components; mod image; mod stats; mod util; use gdk::Screen; use gtk::{ prelude::{ApplicationExt, ApplicationExtManual, GtkWindowExt}, traits::{CssProviderExt, WidgetExt}, CssProvider, StyleContext, }; use util::get_widget; const TOP_MARGIN: i32 = 40; const RIGHT_MARGIN: i32 = 10; fn main() { let application = gtk::Application::new(Some("com.developomp.pomky"), Default::default()); // https://lazka.github.io/pgi-docs/Gio-2.0/classes/Application.html#Gio.Application.signals.startup application.connect_startup(|_| { setup_css(); }); // https://lazka.github.io/pgi-docs/Gio-2.0/classes/Application.html#Gio.Application.signals.activate application.connect_activate(build_ui); application.run(); } fn setup_css() { // Load the CSS file and add it to the provider let provider = CssProvider::new(); provider .load_from_data(include_bytes!("style.css")) .expect("Failed to load CSS data"); // Apply CSS StyleContext::add_provider_for_screen( &Screen::default().expect("Could not connect to a display."), &provider, gtk::STYLE_PROVIDER_PRIORITY_APPLICATION, ); } fn set_visual(window: &gtk::ApplicationWindow, _screen: Option<&gdk::Screen>) { if let Some(screen) = window.screen() { if let Some(ref visual) = screen.rgba_visual() { window.set_visual(Some(visual)); // crucial for transparency } } } fn build_ui(application: &gtk::Application) { // load design.ui let builder: gtk::Builder = gtk::Builder::from_string(include_str!("design.ui")); // ==========[ Window ]========== let window: gtk::ApplicationWindow = get_widget("window_main", &builder); window.set_application(Some(application)); window.connect_screen_changed(set_visual); window.connect_draw(|_, ctx| { // set transparent window background ctx.set_source_rgba(0.15, 0.15, 0.15, 0.5); ctx.paint().expect("Failed to paint background"); return gtk::Inhibit(false); }); set_visual(&window, None); // move window to the top-right corner of the screen with margin (compensating for the GNOME top bar) unsafe { window.move_( 
gdk::ffi::gdk_screen_width() - window.default_width() - RIGHT_MARGIN, TOP_MARGIN, ); } // =====[ Setup Stats ]===== stats::general::setup(&builder); stats::processes::setup(&builder); stats::cpu::setup(&builder); stats::memory::setup(&builder); stats::network::setup(&builder); stats::disk::setup(&builder); window.show(); }
27.8
105
0.645967
dbaae9614e21bc84596e0bc0b7fdf39a5e3d7266
3,728
use crate::core::Method; use crate::core::{Error, PeriodType, ValueType, Window}; use crate::helpers::Peekable; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; /// [Volume Weighed Moving Average](https://en.wikipedia.org/wiki/Moving_average#Weighted_moving_average) of specified `length` /// for timeseries of type ([`ValueType`], [`ValueType`]) which represents pair of values (`value`, `volume`) /// /// # Parameters /// /// `length` should be > `0` /// /// Has a single parameter `length`: [`PeriodType`] /// /// # Input type /// /// Input type is [`ValueType`] /// /// # Output type /// /// Output type is [`ValueType`] /// /// # Examples /// /// ``` /// use yata::prelude::*; /// use yata::methods::VWMA; /// /// // VWMA of length=3 /// let mut vwma = VWMA::new(3, &(3.0, 1.0)).unwrap(); /// /// // input value is a pair of f64 (value, weight) /// vwma.next(&(3.0, 1.0)); /// vwma.next(&(6.0, 1.0)); /// /// assert_eq!(vwma.next(&(9.0, 2.0)), 6.75); /// assert!((vwma.next(&(12.0, 0.5))- 8.571428571428571).abs() < 1e-10); /// ``` /// /// # Performance /// /// O(1) /// /// [`ValueType`]: crate::core::ValueType /// [`PeriodType`]: crate::core::PeriodType #[derive(Debug, Clone, Copy)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct VWMA { sum: ValueType, vol_sum: ValueType, window: Window<(ValueType, ValueType)>, } impl Method for VWMA { type Params = PeriodType; type Input = (ValueType, ValueType); type Output = ValueType; fn new(length: Self::Params, &value: &Self::Input) -> Result<Self, Error> { match length { 0 => Err(Error::WrongMethodParameters), length => Ok(Self { sum: value.0 * value.1 * length as ValueType, vol_sum: value.1 * length as ValueType, window: Window::new(length, value), }), } } #[inline] fn next(&mut self, &value: &Self::Input) -> Self::Output { let past_value = self.window.push(value); self.vol_sum += value.1 - past_value.1; self.sum += value.0.mul_add(value.1, -past_value.0 * past_value.1); self.sum / self.vol_sum 
} } impl Peekable<<Self as Method>::Output> for VWMA { fn peek(&self) -> <Self as Method>::Output { self.sum / self.vol_sum } } #[cfg(test)] #[allow(clippy::suboptimal_flops)] mod tests { use super::{Method, VWMA as TestingMethod}; use crate::core::ValueType; use crate::helpers::{assert_eq_float, RandomCandles}; use crate::methods::tests::test_const; #[test] fn test_vwma_const() { for i in 1..255 { let input = ((i as ValueType + 56.0) / 16.3251, 3.55); let mut method = TestingMethod::new(i, &input).unwrap(); let output = method.next(&input); test_const(&mut method, &input, &output); } } #[test] fn test_vwma1() { let mut candles = RandomCandles::default(); let mut ma = TestingMethod::new(1, &(candles.first().close, candles.first().volume)).unwrap(); candles.take(100).for_each(|x| { assert_eq_float(x.close, ma.next(&(x.close, x.volume))); }); } #[test] fn test_vwma() { let candles = RandomCandles::default(); let src: Vec<(ValueType, ValueType)> = candles.take(300).map(|x| (x.close, x.volume)).collect(); (1..255).for_each(|ma_length| { let mut ma = TestingMethod::new(ma_length, &src[0]).unwrap(); let ma_length = ma_length as usize; src.iter().enumerate().for_each(|(i, x)| { let mut slice: Vec<(ValueType, ValueType)> = Vec::with_capacity(ma_length); for x in 0..ma_length { slice.push(src[i.saturating_sub(x)]); } let sum = slice .iter() .fold(0.0, |s, (close, volume)| s + close * volume); let vol_sum = slice.iter().fold(0.0, |s, (_close, vol)| s + vol); let value2 = sum / vol_sum; assert_eq_float(value2, ma.next(x)); }); }); } }
25.020134
127
0.623391
23931491ef36b7b8117fb39ccc45d7e1938ccac4
11,682
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ //! Utilities for providing the hover feature use crate::{ lsp::{HoverContents, LanguageString, MarkedString}, lsp_runtime_error::{LSPRuntimeError, LSPRuntimeResult}, node_resolution_info::{NodeKind, NodeResolutionInfo}, server::LSPState, }; use crate::{server::SourcePrograms, LSPExtraDataProvider}; use common::PerfLogger; use graphql_ir::Value; use graphql_text_printer::print_value; use lsp_types::{request::HoverRequest, request::Request, Hover}; use schema::{SDLSchema, Schema}; use schema_documentation::SchemaDocumentation; use schema_print::print_directive; use std::sync::Arc; fn graphql_marked_string(value: String) -> MarkedString { MarkedString::LanguageString(LanguageString { language: "graphql".to_string(), value, }) } fn hover_content_wrapper(value: String) -> HoverContents { HoverContents::Scalar(graphql_marked_string(value)) } /// This will provide a more accurate information about some of the specific Relay directives /// that cannot be expressed via SDL fn argument_definition_hover_info(directive_name: &str) -> Option<HoverContents> { let content = match directive_name { "argumentDefinitions" => Some( r#" `@argumentDefinitions` is a directive used to specify arguments taken by a fragment. --- @see: https://relay.dev/docs/en/graphql-in-relay.html#argumentdefinitions "#, ), "arguments" => Some( r#" `@arguments` is a directive used to pass arguments to a fragment that was defined using `@argumentDefinitions`. --- @see: https://relay.dev/docs/en/graphql-in-relay.html#arguments "#, ), "uncheckedArguments_DEPRECATED" => Some( r#" DEPRECATED version of `@arguments` directive. `@arguments` is a directive used to pass arguments to a fragment that was defined using `@argumentDefinitions`. 
--- @see: https://relay.dev/docs/en/graphql-in-relay.html#arguments "#, ), _ => None, }; content.map(|value| HoverContents::Scalar(MarkedString::String(value.to_string()))) } fn get_hover_response_contents( node_resolution_info: NodeResolutionInfo, schema: &SDLSchema, schema_documentation: &Arc<SchemaDocumentation>, source_programs: &SourcePrograms, extra_data_provider: &dyn LSPExtraDataProvider, ) -> Option<HoverContents> { let kind = node_resolution_info.kind; match kind { NodeKind::Variable(type_name) => Some(hover_content_wrapper(type_name)), NodeKind::Directive(directive_name, argument_name) => { if let Some(argument_definition_hover_info) = argument_definition_hover_info(directive_name.lookup()) { return Some(argument_definition_hover_info); } let schema_directive = schema.get_directive(directive_name)?; if let Some(argument_name) = argument_name { let argument = schema_directive.arguments.named(argument_name)?; let content = format!( "{}: {}", argument_name, schema.get_type_string(&argument.type_) ); Some(hover_content_wrapper(content)) } else { let directive_text = print_directive(schema, &schema_directive); Some(hover_content_wrapper(directive_text)) } } NodeKind::FieldName => { let field = node_resolution_info .type_path .resolve_current_field(schema)?; let type_name = schema.get_type_string(&field.type_); let mut hover_contents: Vec<MarkedString> = vec![graphql_marked_string(format!( "{}: {}", field.name, type_name ))]; if let Some(field_description) = schema_documentation.get_field_description(&type_name, field.name.lookup()) { hover_contents.push(MarkedString::String(field_description.to_string())); } if let Some(type_description) = schema_documentation.get_type_description(&type_name) { hover_contents.push(MarkedString::String(type_description.to_string())); } if !field.arguments.is_empty() { let mut args_string: Vec<String> = vec!["This field accepts following arguments:".to_string()]; args_string.push("```".to_string()); for arg in 
field.arguments.iter() { let default_value = match &arg.default_value { Some(default_value) => format!(" = {}", default_value), None => "".to_string(), }; args_string.push(format!( "- {}: {}{}", arg.name, schema.get_type_string(&arg.type_), default_value, )); } args_string.push("```".to_string()); hover_contents.push(MarkedString::String(args_string.join("\n"))) } Some(HoverContents::Array(hover_contents)) } NodeKind::FieldArgument(field_name, argument_name) => { let type_ref = node_resolution_info .type_path .resolve_current_type_reference(schema)?; if type_ref.inner().is_object() || type_ref.inner().is_interface() { let field_id = schema.named_field(type_ref.inner(), field_name)?; let field = schema.field(field_id); let argument = field.arguments.named(argument_name)?; let content = format!( "{}: {}", argument_name, schema.get_type_string(&argument.type_) ); Some(hover_content_wrapper(content)) } else { None } } NodeKind::FragmentSpread(fragment_name) => { let project_name = node_resolution_info.project_name; if let Some(source_program) = source_programs.get(&project_name) { let fragment = source_program.fragment(fragment_name)?; let mut hover_contents: Vec<MarkedString> = vec![]; hover_contents.push(graphql_marked_string(format!( "fragment {} on {} {{ ... 
}}", fragment.name.item, schema.get_type_name(fragment.type_condition), ))); if !fragment.variable_definitions.is_empty() { let mut variables_string: Vec<String> = vec!["This fragment accepts following arguments:".to_string()]; variables_string.push("```".to_string()); for var in &fragment.variable_definitions { let default_value = match var.default_value.clone() { Some(default_value) => format!( ", default_value = {}", print_value(schema, &Value::Constant(default_value)) ), None => "".to_string(), }; variables_string.push(format!( "- {}: {}{}", var.name.item, schema.get_type_string(&var.type_), default_value, )); } variables_string.push("```".to_string()); hover_contents.push(MarkedString::String(variables_string.join("\n"))) } let fragment_name_details: Vec<&str> = fragment_name.lookup().split('_').collect(); // We expect the fragment name to be `ComponentName_propName` if fragment_name_details.len() == 2 { hover_contents.push(MarkedString::from_markdown(format!( r#" To consume this fragment spread, pass it to the component that defined it. For example: ```js <{} {}={{data.{}}} /> ``` "#, fragment_name_details[0], fragment_name_details[1], fragment_name_details[1], ))); } // We may log an error (later), if that is not the case. 
hover_contents.push(MarkedString::String( "@see: https://relay.dev/docs/en/thinking-in-relay#data-masking".to_string(), )); Some(HoverContents::Array(hover_contents)) } else { None } } NodeKind::OperationDefinition(operation) => { let search_token = if let Some(operation_name) = operation.name { operation_name.value.lookup().to_string() } else { return None; }; let extra_data = extra_data_provider.fetch_query_stats(search_token); if !extra_data.is_empty() { Some(HoverContents::Array( extra_data .iter() .map(|str| MarkedString::String(str.to_string())) .collect::<_>(), )) } else { None } } NodeKind::FragmentDefinition(fragment) => { let type_ = node_resolution_info .type_path .resolve_current_type_reference(schema)?; let title = graphql_marked_string(format!( "fragment {} on {} {{ .. }}", fragment.name.value, schema.get_type_name(type_.inner()) )); let hover_contents = vec![ title, MarkedString::String( r#"Fragments let you construct sets of fields, and then include them in queries where you need to. 
--- @see: https://graphql.org/learn/queries/#fragments "# .to_string(), ), ]; Some(HoverContents::Array(hover_contents)) } NodeKind::InlineFragment => None, NodeKind::TypeCondition(_) => None, } } pub(crate) fn on_hover<TPerfLogger: PerfLogger + 'static>( state: &mut LSPState<TPerfLogger>, params: <HoverRequest as Request>::Params, ) -> LSPRuntimeResult<<HoverRequest as Request>::Result> { let node_resolution_info = state.resolve_node(params)?; log::debug!("Hovering over {:?}", node_resolution_info); if let Some(schema) = state.get_schemas().get(&node_resolution_info.project_name) { let schema_documentation = state.extra_data_provider.get_schema_documentation( state.get_schema_name_for_project(&node_resolution_info.project_name), ); let contents = get_hover_response_contents( node_resolution_info, &schema, &schema_documentation, state.get_source_programs_ref(), state.extra_data_provider.as_ref(), ) .ok_or_else(|| { LSPRuntimeError::UnexpectedError("Unable to get hover contents".to_string()) })?; Ok(Some(Hover { contents, range: None, })) } else { Err(LSPRuntimeError::ExpectedError) } }
38.176471
111
0.557096
017d9ad78075010962d69f88dbd4d895b050c92e
2,614
use block_chunk::mesh::fast_mesh; use block_chunk::mesh::greedy_mesh; use block_chunk::mesh::BlockDescriptor; use block_chunk::Chunk; use criterion::{criterion_group, criterion_main, Criterion}; fn benchmark_set<const SIZE: usize>(c: &mut Criterion) { { let chunk = Chunk::<u32, SIZE>::default(); c.bench_function(&format!("chunk_{} fast_mesh box", SIZE), |b| { b.iter(|| { fast_mesh(&chunk, |_| { Some(BlockDescriptor { is_standard_square: true, is_transparent: false, texture_id: (), }) }) }) }); } { let chunk = Chunk::<u32, SIZE>::new_checker(0, 1); c.bench_function(&format!("chunk_{} fast_mesh checker", SIZE), |b| { b.iter(|| { fast_mesh(&chunk, |val| { if *val == 0 { None } else { Some(BlockDescriptor { is_standard_square: true, is_transparent: false, texture_id: (), }) } }) }) }); } { let chunk = Chunk::<u32, SIZE>::default(); c.bench_function(&format!("chunk_{} greedy_mesh box", SIZE), |b| { b.iter(|| { greedy_mesh(&chunk, |_| { Some(BlockDescriptor { is_standard_square: true, is_transparent: false, texture_id: (), }) }) }) }); } { let chunk = Chunk::<u32, SIZE>::new_checker(0, 1); c.bench_function(&format!("chunk_{} greedy_mesh checker", SIZE), |b| { b.iter(|| { greedy_mesh(&chunk, |val| { if *val == 0 { None } else { Some(BlockDescriptor { is_standard_square: true, is_transparent: false, texture_id: (), }) } }) }) }); } } pub fn meshable_chunk(c: &mut Criterion) { benchmark_set::<64>(c); benchmark_set::<32>(c); benchmark_set::<16>(c); benchmark_set::<8>(c); benchmark_set::<4>(c); } criterion_group!(benches, meshable_chunk); criterion_main!(benches);
30.045977
78
0.409717
d76e9ce56fb6bea659370eea73ecc38f246db01f
1,845
//! This example shows how to use `actix_web::HttpServer::on_connect` to access a lower-level socket //! properties and pass them to a handler through request-local data. //! //! For an example of extracting a client TLS certificate, see: //! <https://github.com/actix/examples/tree/HEAD/security/rustls-client-cert> use std::{any::Any, io, net::SocketAddr}; use actix_web::{ dev::Extensions, rt::net::TcpStream, web, App, HttpRequest, HttpResponse, HttpServer, Responder, }; #[allow(dead_code)] #[derive(Debug, Clone)] struct ConnectionInfo { bind: SocketAddr, peer: SocketAddr, ttl: Option<u32>, } async fn route_whoami(req: HttpRequest) -> impl Responder { match req.conn_data::<ConnectionInfo>() { Some(info) => HttpResponse::Ok().body(format!( "Here is some info about your connection:\n\n{:#?}", info )), None => { HttpResponse::InternalServerError().body("Missing expected request extension data") } } } fn get_conn_info(connection: &dyn Any, data: &mut Extensions) { if let Some(sock) = connection.downcast_ref::<TcpStream>() { data.insert(ConnectionInfo { bind: sock.local_addr().unwrap(), peer: sock.peer_addr().unwrap(), ttl: sock.ttl().ok(), }); } else { unreachable!("connection should only be plaintext since no TLS is set up"); } } #[actix_web::main] async fn main() -> io::Result<()> { env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); let bind = ("127.0.0.1", 8080); log::info!("staring server at http://{}:{}", &bind.0, &bind.1); HttpServer::new(|| App::new().default_service(web::to(route_whoami))) .on_connect(get_conn_info) .bind(bind)? .workers(1) .run() .await }
30.75
100
0.621138
fc52f48d77c646173edf6c43d7c18c02e943ea41
8,918
//! The Python grammar definition. //! //! The grammar is defined using a metagrammar syntax and autogenerated //! using a procedural macro defined in the [`metagrammar`] module. //! //! The output of the macro is a function `next_state` which encodes the Python //! grammar rules and can be used to build a Python parse tree based on input //! Python tokens from a [`lexer::Lexer`]. //! //! [`metagrammar`]: ../metagrammar/index.html //! [`lexer::Lexer`]: ../lexer/struct.Lexer.html extern crate metagrammar; use metagrammar::build_grammar; use lexer::tok; use lexer::tokens::Token; build_grammar! { SingleInput: "NEWLINE" | SimpleStmt | CompoundStmt "NEWLINE"; FileInput: ("NEWLINE" | Stmt)* "ENDMARKER"; EvalInput: TestList "NEWLINE"* "ENDMARKER"; Decorator: "@" DottedName [ "(" [ArgList] ")" ] "NEWLINE"; Decorators: Decorator+; Decorated: Decorators (ClassDef | FuncDef | AsyncFuncDef); AsyncFuncDef: "async" FuncDef; FuncDef: "def" "NAME" Parameters ["->" Test] ":" FuncBodySuite; Parameters: "(" [TypedArgsList] ")"; TypedArgsList: ( TfpDef ["=" Test] ("," TfpDef ["=" Test])* ( ["," [ "*" [TfpDef] ("," TfpDef ["=" Test])* (["," ["**" TfpDef [","]]]) | "**" TfpDef [","]] ] ) | "*" [TfpDef] ("," TfpDef ["=" Test])* (["," ["**" TfpDef [","]]]) | "**" TfpDef [","]); TfpDef: "NAME" [":" Test]; VarArgsList: VfpDef ["=" Test] ("," VfpDef ["=" Test])* [ "," [ "*" [VfpDef] ("," VfpDef ["=" Test])* ["," ["**" VfpDef [","]]] | "**" VfpDef [","] ]] | "*" [VfpDef] ("," VfpDef ["=" Test])* ["," ["**" VfpDef [","]]] | "**" VfpDef [","]; VfpDef: "NAME"; Stmt: SimpleStmt | CompoundStmt; SimpleStmt: SmallStmt (";" SmallStmt)* [";"] "NEWLINE"; SmallStmt: (ExprStmt | DelStmt | PassStmt | FlowStmt | ImportStmt | GlobalStmt | NonLocalStmt | AssertStmt); ExprStmt: TestListStarExpr (AnnAssign | AugAssign (YieldExpr|TestList) | [("=" (YieldExpr|TestListStarExpr))+] ); AnnAssign: ":" Test ["=" (YieldExpr|TestList)]; TestListStarExpr: (Test|StarExpr) ("," (Test|StarExpr))* [","]; AugAssign: ("+=" | 
"-=" | "*=" | "@=" | "/=" | "%=" | "&=" | "|=" | "^=" | "<<=" | ">>=" | "**=" | "//="); // For normal and annotated assignments, additional restrictions enforced by the interpreter DelStmt: "del" ExprList; PassStmt: "pass"; FlowStmt: BreakStmt | ContinueStmt | ReturnStmt | RaiseStmt | YieldStmt; BreakStmt: "break"; ContinueStmt: "continue"; ReturnStmt: "return" [TestListStarExpr]; YieldStmt: YieldExpr; RaiseStmt: "raise" [Test ["from" Test]]; ImportStmt: ImportName | ImportFrom; ImportName: "import" DottedAsNames; // note below: the ("." | "...") is necessary because "..." is tokenized as ELLIPSIS ImportFrom: ("from" (("." | "...")* DottedName | ("." | "...")+) "import" ("*" | "(" ImportAsNames ")" | ImportAsNames)); ImportAsName: "NAME" ["as" "NAME"]; DottedAsName: DottedName ["as" "NAME"]; ImportAsNames: ImportAsName ("," ImportAsName)* [","]; DottedAsNames: DottedAsName ("," DottedAsName)*; DottedName: "NAME" ("." "NAME")*; GlobalStmt: "global" "NAME" ("," "NAME")*; NonLocalStmt: "nonlocal" "NAME" ("," "NAME")*; AssertStmt: "assert" Test ["," Test]; CompoundStmt: IfStmt | WhileStmt | ForStmt | TryStmt | WithStmt | FuncDef | ClassDef | Decorated | AsyncStmt; AsyncStmt: "async" (FuncDef | WithStmt | ForStmt); IfStmt: "if" NamedExprTest ":" Suite ("elif" NamedExprTest ":" Suite)* ["else" ":" Suite]; WhileStmt: "while" NamedExprTest ":" Suite ["else" ":" Suite]; ForStmt: "for" ExprList "in" TestList ":" Suite ["else" ":" Suite]; TryStmt: ("try" ":" Suite ((ExceptClause ":" Suite)+ ["else" ":" Suite] ["finally" ":" Suite] | "finally" ":" Suite)); WithStmt: "with" WithItem ("," WithItem)* ":" Suite; WithItem: Test ["as" Expr]; // NB compile.c makes sure that the default except clause is last ExceptClause: "except" [Test ["as" "NAME"]]; Suite: SimpleStmt | "NEWLINE" "INDENT" Stmt+ "DEDENT"; NamedExprTest: Test [":=" Test]; Test: OrTest ["if" OrTest "else" Test] | LambdaDef; TestNoCond: OrTest | LambdaDefNoCond; LambdaDef: "lambda" [VarArgsList] ":" Test; 
LambdaDefNoCond: "lambda" [VarArgsList] ":" TestNoCond; OrTest: AndTest ("or" AndTest)*; AndTest: NotTest ("and" NotTest)*; NotTest: "not" NotTest | Comparison; Comparison: Expr (CompOp Expr)*; // <> isn't actually a valid comparison operator in Python. It's here for the // sake of a __future__ import described in PEP 401 (which really works :-) CompOp: "<"|">"|"=="|">="|"<="|"<>"|"!="|"in"|"not" "in"|"is"|"is" "not"; StarExpr: "*" Expr; Expr: XorExpr ("|" XorExpr)*; XorExpr: AndExpr ("^" AndExpr)*; AndExpr: ShiftExpr ("&" ShiftExpr)*; ShiftExpr: ArithExpr (("<<"|">>") ArithExpr)*; ArithExpr: Term (("+"|"-") Term)*; Term: Factor (("*"|"@"|"/"|"%"|"//") Factor)*; Factor: ("+"|"-"|"~") Factor | Power; Power: AtomExpr ["**" Factor]; AtomExpr: ["await"] Atom Trailer*; Atom: ("(" [YieldExpr|TestListComp] ")" | "[" [TestListComp] "]" | "{" [DictOrSetMaker] "}" | "NAME" | "NUMBER" | "STRING"+ | "..." | "None" | "True" | "False"); TestListComp: (NamedExprTest|StarExpr) ( CompFor | ("," (NamedExprTest|StarExpr))* [","] ); Trailer: "(" [ArgList] ")" | "[" SubScriptList "]" | "." "NAME"; SubScriptList: SubScript ("," SubScript)* [","]; SubScript: Test | [Test] ":" [Test] [SliceOp]; SliceOp: ":" [Test]; ExprList: (Expr|StarExpr) ("," (Expr|StarExpr))* [","]; TestList: Test ("," Test)* [","]; DictOrSetMaker: ( ((Test ":" Test | "**" Expr) (CompFor | ("," (Test ":" Test | "**" Expr))* [","])) | ((Test | StarExpr) (CompFor | ("," (Test | StarExpr))* [","])) ); ClassDef: "class" "NAME" ["(" [ArgList] ")"] ":" Suite; ArgList: Argument ("," Argument)* [","]; // The reason that keywords are test nodes instead of "NAME" is that using "NAME" // results in an ambiguity. ast.c makes sure it's a "NAME". // "test "=" test" is really "keyword "=" test", but we have no such token. // These need to be in a single rule to avoid grammar that is ambiguous // to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, // we explicitly match '*' here, too, to give it proper precedence. 
// Illegal combinations and orderings are blocked in ast.c: // multiple (test comp_for) arguments are blocked; keyword unpackings // that precede iterable unpackings are blocked; etc. Argument: ( Test [CompFor] | Test ":=" Test | Test "=" Test | "**" Test | "*" Test ); CompIter: CompFor | CompIf; SyncCompFor: "for" ExprList "in" OrTest [CompIter]; CompFor: ["async"] SyncCompFor; CompIf: "if" TestNoCond [CompIter]; // not used in grammar, but may appear in "node" passed from Parser to Compiler EncodingDecl: "NAME"; YieldExpr: "yield" [YieldArg]; YieldArg: "from" Test | TestListStarExpr; FuncBodySuite: SimpleStmt | "NEWLINE" "INDENT" Stmt+ "DEDENT"; FuncTypeInput: FuncType "NEWLINE"* "ENDMARKER"; FuncType: "(" [TypeList] ")" "->" Test; // typelist is a modified typedargslist (see above) TypeList: (Test ("," Test)* ["," ["*" [Test] ("," Test)* ["," "**" Test] | "**" Test]] | "*" [Test] ("," Test)* ["," "**" Test] | "**" Test); } #[cfg(test)] mod tests { use super::*; use lexer::tok; #[test] fn it_works() { let actual = next_state(NonTerminal::SingleInput, 0, &tok!("\n")); if let Next::Terminal(state, accept) = actual.unwrap() { assert_ne!(state, 0); assert_eq!(accept, true); } else { panic!("expected terminal"); } let actual = next_state(NonTerminal::SingleInput, 0, &tok!("not")); if let Next::NonTerminal(nt, state, accept) = actual.unwrap() { assert_ne!(state, 0); assert_eq!(nt, NonTerminal::SimpleStmt); assert_eq!(accept, true); } else { panic!("expected non terminal"); } let actual = next_state(NonTerminal::SingleInput, 0, &tok!("if")); if let Next::NonTerminal(nt, state, accept) = actual.unwrap() { assert_ne!(state, 0); assert_eq!(nt, NonTerminal::CompoundStmt); assert_eq!(accept, false); } else { panic!("expected non terminal"); } } }
41.47907
113
0.546871
61b9e8d038fcbe2adda30ba789d08a6159b13166
570
mod bounds_tree; mod naive; pub use self::bounds_tree::BoundsTreeBroadPhase; pub use self::naive::NaiveBroadPhase; use crate::collision::ContactConstraint; use crate::world::{Bodies, Body, ConstraintsMap}; pub type ProxyId = usize; pub trait BroadPhase { fn new_potential_pairs( &self, bodies: &Bodies, constraints: &mut ConstraintsMap<ContactConstraint>, ); fn create_proxy(&mut self, body: &Body) -> ProxyId; fn destroy_proxy(&mut self, proxy_id: ProxyId); fn update_proxy(&mut self, proxy_id: ProxyId, body: &Body); }
24.782609
63
0.701754
e8f71c18d3c07195bd96a3df1e683c60ecd31278
8,971
// Copyright 2020 Rob Patro, Avi Srivastava. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. extern crate bio_types; extern crate fasthash; extern crate quickersort; use crate as libradicl; use bio_types::strand::Strand; use fasthash::{sea, RandomState}; use std::collections::HashMap; use std::fmt; use std::str::FromStr; /** * Single-cell equivalence class **/ #[derive(Debug)] pub struct CellEQClass<'a> { // transcripts defining this eq. class pub transcripts: &'a Vec<u32>, // umis with multiplicities // the k-mer should be a k-mer class eventually pub umis: Vec<(u64, u32)>, } #[derive(Debug)] pub(super) struct EqMapEntry { pub umis: Vec<(u64, u32)>, pub eq_num: u32, } #[derive(Debug)] pub struct ProtocolInfo { // TODO: only makes sense // for single-strand protocols // right now. Expand to be generic. pub expected_ori: Strand, } #[derive(PartialEq, Debug, Clone, Copy)] pub enum ResolutionStrategy { Trivial, CellRangerLike, CellRangerLikeEM, Full, Parsimony, } impl fmt::Display for ResolutionStrategy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self) } } // Implement the trait impl FromStr for ResolutionStrategy { type Err = &'static str; fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "trivial" => Ok(ResolutionStrategy::Trivial), "cr-like" => Ok(ResolutionStrategy::CellRangerLike), "cr-like-em" => Ok(ResolutionStrategy::CellRangerLikeEM), "full" => Ok(ResolutionStrategy::Full), "parsimony" => Ok(ResolutionStrategy::Parsimony), _ => Err("no match"), } } } pub(super) struct EqMap { // for each equivalence class, holds the (umi, freq) pairs // and the id of that class pub eqc_info: Vec<EqMapEntry>, // the total number of refrence targets pub nref: u32, // the total size of the list of all reference // ids over all equivalence classes that occur in this // cell pub label_list_size: usize, // concatenated lists of the labels of all equivalence classes 
pub eq_labels: Vec<u32>, // vector that deliniates where each equivalence class label // begins and ends. The label for equivalence class i begins // at offset eq_label_starts[i], and it ends at // eq_label_starts[i+1]. The length of this vector is 1 greater // than the number of equivalence classes. pub eq_label_starts: Vec<u32>, // the number of equivalence class labels in which each reference // appears pub label_counts: Vec<u32>, // the offset into the global list of equivalence class ids // where the equivalence class list for each reference starts pub ref_offsets: Vec<u32>, // the concatenated list of all equivalence class labels for // all transcripts pub ref_labels: Vec<u32>, } impl EqMap { pub(super) fn num_eq_classes(&self) -> usize { self.eqc_info.len() } #[allow(dead_code)] pub(super) fn clear(&mut self) { self.eqc_info.clear(); // keep nref self.label_list_size = 0usize; self.eq_labels.clear(); self.eq_label_starts.clear(); // clear the label_counts, but resize // and fill with 0 self.label_counts.clear(); self.label_counts.resize(self.nref as usize, 0u32); self.ref_offsets.clear(); self.ref_labels.clear(); } pub(super) fn new(nref_in: u32) -> EqMap { EqMap { eqc_info: vec![], //HashMap::with_hasher(rs), nref: nref_in, label_list_size: 0usize, eq_labels: vec![], eq_label_starts: vec![], label_counts: vec![0; nref_in as usize], ref_offsets: vec![], ref_labels: vec![], } } pub(super) fn fill_ref_offsets(&mut self) { self.ref_offsets = self .label_counts .iter() .scan(0, |sum, i| { *sum += i; Some(*sum) }) .collect::<Vec<_>>(); self.ref_offsets.push(*self.ref_offsets.last().unwrap()); } pub(super) fn fill_label_sizes(&mut self) { self.ref_labels = vec![u32::MAX; self.label_list_size + 1]; } pub(super) fn init_from_chunk(&mut self, cell_chunk: &mut libradicl::Chunk) { // temporary map of equivalence class label to assigned // index. 
let s = RandomState::<sea::Hash64>::new(); let mut eqid_map: HashMap<Vec<u32>, u32, fasthash::RandomState<fasthash::sea::Hash64>> = HashMap::with_hasher(s); // gather the equivalence class info for r in &mut cell_chunk.reads { // TODO: ensure this is done upstream so we // don't have to do it here. // NOTE: should be done if collate was run. // r.refs.sort(); match eqid_map.get_mut(&r.refs) { // if we've seen this equivalence class before, just add the new // umi. Some(v) => { self.eqc_info[*v as usize].umis.push((r.umi, 1)); } // otherwise, add the new umi, but we also have some extra bookkeeping None => { // each reference in this equivalence class label // will have to point to this equivalence class id let eq_num = self.eqc_info.len() as u32; self.label_list_size += r.refs.len(); for r in r.refs.iter() { let ridx = *r as usize; self.label_counts[ridx] += 1; //ref_to_eqid[*r as usize].push(eq_num); } self.eq_label_starts.push(self.eq_labels.len() as u32); self.eq_labels.extend(&r.refs); self.eqc_info.push(EqMapEntry { umis: vec![(r.umi, 1)], eq_num, }); eqid_map.insert(r.refs.clone(), eq_num); //self.eqc_map.insert(r.refs.clone(), EqMapEntry { umis : vec![(r.umi,1)], eq_num }); } } } // final value to avoid special cases self.eq_label_starts.push(self.eq_labels.len() as u32); self.fill_ref_offsets(); self.fill_label_sizes(); // initially we inserted duplicate UMIs // here, collapse them and keep track of their count for idx in 0..self.num_eq_classes() { //} self.eqc_info.iter_mut().enumerate() { // for each reference in this // label, put it in the next free spot // TODO: @k3yavi, can we avoid this copy? 
let label = self.refs_for_eqc(idx as u32).to_vec(); //println!("{:?}", label); for r in label { // k.iter() { self.ref_offsets[r as usize] -= 1; self.ref_labels[self.ref_offsets[r as usize] as usize] = self.eqc_info[idx].eq_num; } let v = &mut self.eqc_info[idx]; // sort so dups are adjacent quickersort::sort(&mut v.umis[..]); // we need a copy of the vector b/c we // can't easily modify it in place // at least I haven't seen how (@k3yavi, help here if you can). let cv = v.umis.clone(); // since we have a copy, clear the original to fill it // with the new contents. v.umis.clear(); let mut count = 1; let mut cur_elem = cv.first().unwrap().0; for e in cv.iter().skip(1) { if e.0 == cur_elem { count += 1; } else { v.umis.push((cur_elem, count)); cur_elem = e.0; count = 1; } } // remember to push the last element, since we // won't see a subsequent "different" element. v.umis.push((cur_elem, count)); } } pub(super) fn eq_classes_containing(&self, r: u32) -> &[u32] { &self.ref_labels [(self.ref_offsets[r as usize] as usize)..(self.ref_offsets[(r + 1) as usize] as usize)] } pub(super) fn refs_for_eqc(&self, idx: u32) -> &[u32] { &self.eq_labels[(self.eq_label_starts[idx as usize] as usize) ..(self.eq_label_starts[(idx + 1) as usize] as usize)] } } #[derive(Debug)] pub(super) enum PUGEdgeType { NoEdge, BiDirected, XToY, YToX, } #[derive(Debug)] pub(super) struct PUGResolutionStatistics { pub used_alternative_strategy: bool, pub total_mccs: u64, pub ambiguous_mccs: u64, pub trivial_mccs: u64, }
33.225926
105
0.559581
4a30a70bfc9bb9109bbb7a180393c986cefbad51
4,030
#![cfg_attr(not(feature = "std"), no_std)] use frame_support::{decl_module, decl_storage, decl_event, decl_error, dispatch, traits::Get}; use frame_support::inherent::Vec; use frame_system::ensure_signed; #[cfg(test)] mod mock; #[cfg(test)] mod tests; //mod specifications; /// Configure the pallet by specifying the parameters and types on which it depends. pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event>; } // The pallet's runtime storage items. // https://substrate.dev/docs/en/knowledgebase/runtime/storage decl_storage! { trait Store for Module<T: Config> as IdentityModule { pub Identity get(fn get_identity): map hasher(blake2_128_concat) Vec<u8> => Option<T::AccountId>; // ( identity, attribute_key ) => attribute_value pub Attribute get(fn get_attribute): map hasher(blake2_128_concat) (Vec<u8>, Vec<u8>) => Vec<u8>; } } // Pallets use events to inform users when important changes are made. // https://substrate.dev/docs/en/knowledgebase/runtime/events decl_event!( pub enum Event<T> where AccountId = <T as frame_system::Config>::AccountId { IdentityCreated(Vec<u8>, AccountId), // Identity, Attribute Key, Attribute Value AttributeAdded(Vec<u8>, Vec<u8>, Vec<u8>), // Identity, Attribute Key AttributeRemoved(Vec<u8>, Vec<u8>), } ); // Errors inform users that something went wrong. decl_error! { pub enum Error for Module<T: Config> { IdentityAlreadyClaimed, IdentityNotFound, NotAuthorized, AttributeNotFound, } } decl_module! { pub struct Module<T: Config> for enum Call where origin: T::Origin { // Errors must be initialized if they are used by the pallet. type Error = Error<T>; // Events must be initialized if they are used by the pallet. 
fn deposit_event() = default; #[weight = 10_000 + T::DbWeight::get().reads_writes(1, 1)] pub fn create_identity( origin, identity: Vec<u8> ) -> dispatch::DispatchResult { let who = ensure_signed(origin)?; match <Identity<T>>::get(&identity) { // Return an error if signer is not identity owner None => { // Update storage. <Identity<T>>::insert(&identity, &who); // Emit an event. Self::deposit_event(RawEvent::IdentityCreated(identity, who)); // Return a successful DispatchResult Ok(()) }, Some(_) => Err(Error::<T>::IdentityAlreadyClaimed)? } } // Allows identity owners to add attribute to their identity (key, value) #[weight = 10_000 + T::DbWeight::get().reads_writes(1,1)] pub fn add_attribute( origin, identity: Vec<u8>, attribute_key: Vec<u8>, attribute_value: Vec<u8> ) -> dispatch::DispatchResult { let who = ensure_signed(origin)?; // Read a value from storage. match <Identity<T>>::get(&identity) { // Return an error if signer is not identity owner None => Err(Error::<T>::IdentityNotFound)?, Some(address) => { if address != who { return Err(Error::<T>::NotAuthorized)? } else{ Attribute::insert((&identity, &attribute_key), &attribute_value); Self::deposit_event(RawEvent::AttributeAdded(identity, attribute_key, attribute_value)); Ok(()) } }, } } // Allows identity owners to remove identity #[weight = 10_000 + T::DbWeight::get().reads_writes(1,1)] pub fn remove_attribute( origin, identity: Vec<u8>, attribute_key: Vec<u8>, ) -> dispatch::DispatchResult { let who = ensure_signed(origin)?; // Read a value from storage. match <Identity<T>>::get(&identity) { // Return an error if signer is not identity owner None => Err(Error::<T>::IdentityNotFound)?, Some(address) => { if address != who { return Err(Error::<T>::NotAuthorized)? } else{ Attribute::remove((&identity, &attribute_key)); Self::deposit_event(RawEvent::AttributeRemoved(identity, attribute_key)); Ok(()) } }, } } } }
29.202899
99
0.673449
38cab7e149a3d2863ccc6e36462b5c12c06c58fe
581
#[allow(dead_code)] pub fn next_call_by(account: &ink_env::AccountId) { // Get contract address. let callee = ink_env::account_id::<ink_env::DefaultEnvironment>().unwrap_or([0x0; 32].into()); // Create call. let mut data = ink_env::test::CallData::new(ink_env::call::Selector::new([0x00; 4])); data.push_arg(account); // Push the new execution context to set from as caller. ink_env::test::push_execution_context::<ink_env::DefaultEnvironment>( account.clone(), callee, 1000000, 1000000, data, ); }
29.05
89
0.629948
eb9e84220ed07f2b6cdb751c916fc1e0e1813130
551
use std::ops::Add; pub trait Encoder { type Size: Add<Output = Self::Size>; fn foo(&self) -> Self::Size; } pub trait SubEncoder : Encoder { type ActualSize; fn bar(&self) -> Self::Size; } impl<T> Encoder for T where T: SubEncoder { type Size = <Self as SubEncoder>::ActualSize; fn foo(&self) -> Self::Size { self.bar() + self.bar() } } pub struct UnitEncoder; impl SubEncoder for UnitEncoder { type ActualSize = (); fn bar(&self) {} } fn main() { fun(&UnitEncoder {}); } pub fn fun<R: Encoder>(encoder: &R) { encoder.foo(); }
14.128205
46
0.629764
38645fa66acff15a7064d5833efde4e0c04fd6c7
1,677
//! Minimal 1d solver //! //! Two functions are provided for the cases where the derivative is provided or not : //! - solver1d //! - solver1d_fd (fd stands for finite differences) //! //! # Examples //! ``` //! use util::solver_one_dimensional::{solver1d, solver1d_fd}; //! //! pub fn square2(x: f64) -> f64 { //! x.powi(2)-2.0 //! } //! pub fn dsquare(x: f64) -> f64 { //! 2.0*x //! } //! //! let x1 = solver1d(1.0, square2, dsquare, 50, 1e-6); //! let x2 = solver1d_fd(1.0, square2, 50, 1e-6, 1e-8); //! let x_sol = std::f64::consts::SQRT_2; //! //! println!("{}, {}", x1, x2); //! assert!((x1 - x_sol).abs() < 1e-5); //! assert!((x2 - x_sol).abs() < 1e-5);; //! ``` pub fn solver1d( init_guess: f64, func: fn(f64) -> f64, deriv: fn(f64) -> f64, max_iter: usize, tol: f64, ) -> f64 { let mut iter = 0; let mut res = func(init_guess); let mut error = res.abs(); let mut guess = init_guess; while error > tol && iter < max_iter { iter += 1; guess -= res / deriv(guess); res = func(guess); error = res.abs(); } guess } pub fn solver1d_fd( init_guess: f64, func: fn(f64) -> f64, max_iter: usize, tol: f64, dx: f64, ) -> f64 { let mut iter = 0; let mut res = func(init_guess); let mut error = res.abs(); let mut guess = init_guess; while error > tol && iter < max_iter { iter += 1; guess -= res / finite_diff(guess, res, func, dx); res = func(guess); error = res.abs(); } guess } fn finite_diff(x: f64, f_ref: f64, func: fn(f64) -> f64, dx: f64) -> f64 { let fx = func(x + dx); (fx - f_ref) / dx }
22.972603
86
0.533095
33f54d824efc6068660a5e30c953444bd273db7a
16,247
// Copyright 2015 The tiny-http Contributors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use common::{Header, HTTPVersion, StatusCode, HTTPDate}; use std::ascii::AsciiExt; use std::cmp::Ordering; use std::sync::mpsc::Receiver; use std::io::{self, Read, Write, Cursor}; use std::io::Result as IoResult; use std::fs::File; use std::str::FromStr; /// Object representing an HTTP response whose purpose is to be given to a `Request`. /// /// Some headers cannot be changed. Trying to define the value /// of one of these will have no effect: /// /// - `Accept-Ranges` /// - `Connection` /// - `Content-Range` /// - `Trailer` /// - `Transfer-Encoding` /// - `Upgrade` /// /// Some headers have special behaviors: /// /// - `Content-Encoding`: If you define this header, the library /// will assume that the data from the `Read` object has the specified encoding /// and will just pass-through. /// /// - `Content-Length`: The length of the data should be set manually /// using the `Reponse` object's API. Attempting to set the value of this /// header will be equivalent to modifying the size of the data but the header /// itself may not be present in the final result. /// pub struct Response<R> where R: Read { reader: R, status_code: StatusCode, headers: Vec<Header>, data_length: Option<usize>, chunked_threshold: Option<usize> } /// A `Response` without a template parameter. pub type ResponseBox = Response<Box<Read + Send>>; /// Transfer encoding to use when sending the message. 
/// Note that only *supported* encoding are listed here. #[derive(Copy, Clone)] enum TransferEncoding { Identity, Chunked, } impl FromStr for TransferEncoding { type Err = (); fn from_str(input: &str) -> Result<TransferEncoding, ()> { if input.eq_ignore_ascii_case("identity") { Ok(TransferEncoding::Identity) } else if input.eq_ignore_ascii_case("chunked") { Ok(TransferEncoding::Chunked) } else { Err(()) } } } /// Builds a Date: header with the current date. fn build_date_header() -> Header { let d = HTTPDate::new(); Header::from_bytes(&b"Date"[..], &d.to_string().into_bytes()[..]).unwrap() } fn write_message_header<W>(mut writer: W, http_version: &HTTPVersion, status_code: &StatusCode, headers: &[Header]) -> IoResult<()> where W: Write { // writing status line try!(write!(&mut writer, "HTTP/{}.{} {} {}\r\n", http_version.0, http_version.1, status_code.0, status_code.default_reason_phrase() )); // writing headers for header in headers.iter() { try!(writer.write_all(header.field.as_str().as_ref())); try!(write!(&mut writer, ": ")); try!(writer.write_all(header.value.as_str().as_ref())); try!(write!(&mut writer, "\r\n")); } // separator between header and data try!(write!(&mut writer, "\r\n")); Ok(()) } fn choose_transfer_encoding(request_headers: &[Header], http_version: &HTTPVersion, entity_length: &Option<usize>, has_additional_headers: bool, chunked_threshold: usize) -> TransferEncoding { use util; // HTTP 1.0 doesn't support other encoding if *http_version <= (1, 0) { return TransferEncoding::Identity; } // parsing the request's TE header let user_request = request_headers.iter() // finding TE .find(|h| h.field.equiv(&"TE")) // getting its value .map(|h| h.value.clone()) // getting the corresponding TransferEncoding .and_then(|value| { // getting list of requested elements let mut parse = util::parse_header_value(value.as_str()); // TODO: remove conversion // sorting elements by most priority parse.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(Ordering::Equal)); 
// trying to parse each requested encoding for value in parse.iter() { // q=0 are ignored if value.1 <= 0.0 { continue } match <TransferEncoding as FromStr>::from_str(value.0) { Ok(te) => return Some(te), _ => () // unrecognized/unsupported encoding }; } // encoding not found None }); // if let Some(user_request) = user_request { return user_request; } // if we have additional headers, using chunked if has_additional_headers { return TransferEncoding::Chunked; } // if we don't have a Content-Length, or if the Content-Length is too big, using chunks writer if entity_length.as_ref().map_or(true, |val| *val >= chunked_threshold) { return TransferEncoding::Chunked; } // Identity by default TransferEncoding::Identity } impl<R> Response<R> where R: Read { /// Creates a new Response object. /// /// The `additional_headers` argument is a receiver that /// may provide headers even after the response has been sent. /// /// All the other arguments are straight-forward. pub fn new(status_code: StatusCode, headers: Vec<Header>, data: R, data_length: Option<usize>, additional_headers: Option<Receiver<Header>>) -> Response<R> { let mut response = Response { reader: data, status_code: status_code, headers: Vec::with_capacity(16), data_length: data_length, chunked_threshold: None, }; for h in headers { response.add_header(h) } // dummy implementation if let Some(additional_headers) = additional_headers { for h in additional_headers.iter() { response.add_header(h) } } response } /// Set a threshold for `Content-Length` where we chose chunked /// transfer. Notice that chunked transfer might happen regardless of /// this threshold, for instance when the request headers indicate /// it is wanted or when there is no `Content-Length`. pub fn with_chunked_threshold(mut self, length: usize) -> Response<R>{ self.chunked_threshold = Some(length); self } /// The current `Content-Length` threshold for switching over to /// chunked transfer. The default is 32768 bytes. 
Notice that /// chunked transfer is mutually exclusive with sending a /// `Content-Length` header as per the HTTP spec. pub fn chunked_threshold(&self) -> usize { self.chunked_threshold.unwrap_or(32768) } /// Adds a header to the list. /// Does all the checks. pub fn add_header<H>(&mut self, header: H) where H: Into<Header> { let header = header.into(); // ignoring forbidden headers if header.field.equiv(&"Accept-Ranges") || header.field.equiv(&"Connection") || header.field.equiv(&"Content-Range") || header.field.equiv(&"Trailer") || header.field.equiv(&"Transfer-Encoding") || header.field.equiv(&"Upgrade") { return; } // if the header is Content-Length, setting the data length if header.field.equiv(&"Content-Length") { match <usize as FromStr>::from_str(header.value.as_str()) { Ok(val) => self.data_length = Some(val), Err(_) => () // wrong value for content-length }; return; } self.headers.push(header); } /// Returns the same request, but with an additional header. /// /// Some headers cannot be modified and some other have a /// special behavior. See the documentation above. #[inline] pub fn with_header<H>(mut self, header: H) -> Response<R> where H: Into<Header> { self.add_header(header.into()); self } /// Returns the same request, but with a different status code. #[inline] pub fn with_status_code<S>(mut self, code: S) -> Response<R> where S: Into<StatusCode> { self.status_code = code.into(); self } /// Returns the same request, but with different data. pub fn with_data<S>(self, reader: S, data_length: Option<usize>) -> Response<S> where S: Read { Response { reader: reader, headers: self.headers, status_code: self.status_code, data_length: data_length, chunked_threshold: None, } } /// Prints the HTTP response to a writer. /// /// This function is the one used to send the response to the client's socket. /// Therefore you shouldn't expect anything pretty-printed or even readable. 
/// /// The HTTP version and headers passed as arguments are used to /// decide which features (most notably, encoding) to use. /// /// Note: does not flush the writer. pub fn raw_print<W: Write>(mut self, mut writer: W, http_version: HTTPVersion, request_headers: &[Header], do_not_send_body: bool, upgrade: Option<&str>) -> IoResult<()> { let mut transfer_encoding = Some(choose_transfer_encoding(request_headers, &http_version, &self.data_length, false /* TODO */, self.chunked_threshold())); // add `Date` if not in the headers if self.headers.iter().find(|h| h.field.equiv(&"Date")).is_none() { self.headers.insert(0, build_date_header()); } // add `Server` if not in the headers if self.headers.iter().find(|h| h.field.equiv(&"Server")).is_none() { self.headers.insert(0, Header::from_bytes(&b"Server"[..], &b"tiny-http (Rust)"[..]).unwrap() ); } // handling upgrade if let Some(upgrade) = upgrade { self.headers.insert(0, Header::from_bytes(&b"Upgrade"[..], upgrade.as_bytes()).unwrap()); self.headers.insert(0, Header::from_bytes(&b"Connection"[..], &b"upgrade"[..]).unwrap()); transfer_encoding = None; } // if the transfer encoding is identity, the content length must be known ; therefore if // we don't know it, we buffer the entire response first here // while this is an expensive operation, it is only ever needed for clients using HTTP 1.0 let (mut reader, data_length) = match (self.data_length, transfer_encoding) { (Some(l), _) => (Box::new(self.reader) as Box<Read>, Some(l)), (None, Some(TransferEncoding::Identity)) => { let mut buf = Vec::new(); try!(self.reader.read_to_end(&mut buf)); let l = buf.len(); (Box::new(Cursor::new(buf)) as Box<Read>, Some(l)) }, _ => (Box::new(self.reader) as Box<Read>, None), }; // checking whether to ignore the body of the response let do_not_send_body = do_not_send_body || match self.status_code.0 { // sattus code 1xx, 204 and 304 MUST not include a body 100...199 | 204 | 304 => true, _ => false }; // preparing headers for transfer 
match transfer_encoding { Some(TransferEncoding::Chunked) => { self.headers.push( Header::from_bytes(&b"Transfer-Encoding"[..], &b"chunked"[..]).unwrap() ) }, Some(TransferEncoding::Identity) => { assert!(data_length.is_some()); let data_length = data_length.unwrap(); self.headers.push( Header::from_bytes(&b"Content-Length"[..], format!("{}", data_length).as_bytes()).unwrap() ) }, _ => () }; // sending headers try!(write_message_header(writer.by_ref(), &http_version, &self.status_code, &self.headers)); // sending the body if !do_not_send_body { match transfer_encoding { Some(TransferEncoding::Chunked) => { use chunked_transfer::Encoder; let mut writer = Encoder::new(writer); try!(io::copy(&mut reader, &mut writer)); }, Some(TransferEncoding::Identity) => { use util::EqualReader; assert!(data_length.is_some()); let data_length = data_length.unwrap(); if data_length >= 1 { let (mut equ_reader, _) = EqualReader::new(reader.by_ref(), data_length); try!(io::copy(&mut equ_reader, &mut writer)); } }, _ => () } } Ok(()) } } impl<R> Response<R> where R: Read + Send + 'static { /// Turns this response into a `Response<Box<Read + Send>>`. pub fn boxed(self) -> ResponseBox { Response { reader: Box::new(self.reader) as Box<Read + Send>, status_code: self.status_code, headers: self.headers, data_length: self.data_length, chunked_threshold: None, } } } impl Response<File> { /// Builds a new `Response` from a `File`. /// /// The `Content-Type` will **not** be automatically detected, /// you must set it yourself. 
pub fn from_file(file: File) -> Response<File> { let file_size = file.metadata().ok().map(|v| v.len() as usize); Response::new( StatusCode(200), Vec::with_capacity(0), file, file_size, None, ) } } impl Response<Cursor<Vec<u8>>> { pub fn from_data<D>(data: D) -> Response<Cursor<Vec<u8>>> where D: Into<Vec<u8>> { let data = data.into(); let data_len = data.len(); Response::new( StatusCode(200), Vec::with_capacity(0), Cursor::new(data), Some(data_len), None, ) } pub fn from_string<S>(data: S) -> Response<Cursor<Vec<u8>>> where S: Into<String> { let data = data.into(); let data_len = data.len(); Response::new( StatusCode(200), vec![ Header::from_bytes(&b"Content-Type"[..], &b"text/plain; charset=UTF-8"[..]).unwrap() ], Cursor::new(data.into_bytes()), Some(data_len), None, ) } } impl Response<io::Empty> { /// Builds an empty `Response` with the given status code. pub fn empty<S>(status_code: S) -> Response<io::Empty> where S: Into<StatusCode> { Response::new( status_code.into(), Vec::with_capacity(0), io::empty(), Some(0), None, ) } /// DEPRECATED. Use `empty` instead. pub fn new_empty(status_code: StatusCode) -> Response<io::Empty> { Response::empty(status_code) } } impl Clone for Response<io::Empty> { fn clone(&self) -> Response<io::Empty> { Response { reader: io::empty(), status_code: self.status_code.clone(), headers: self.headers.clone(), data_length: self.data_length.clone(), chunked_threshold: self.chunked_threshold.clone(), } } }
33.022358
110
0.565704
f5cecc739a3a183f90a04d2ef093355e9b752a20
6,065
#![allow(non_snake_case)] use either::Either; use futures::{Stream, StreamExt}; use serde::de::DeserializeOwned; use std::marker::PhantomData; use crate::api::resource::{KubeObject, Object, ObjectList, WatchEvent}; use crate::api::{DeleteParams, ListParams, LogParams, PatchParams, PostParams, RawApi}; use crate::client::{APIClient, Status}; use crate::Result; /// A typed Api variant that does not expose request internals /// /// The upsides of working with this rather than `RawApi` direct are: /// - easiers interface (no figuring out return types) /// - openapi types for free /// /// But the downsides are: /// - k8s-openapi dependency required (behind feature) /// - openapi types are unnecessarily heavy on Option use /// - memory intensive structs because they contain the full data /// - no control over requests (opinionated) #[derive(Clone)] pub struct Api<K> { /// The request creator object pub(in crate::api) api: RawApi, /// The client to use (from this library) pub(in crate::api) client: APIClient, /// sPec and statUs structs pub(in crate::api) phantom: PhantomData<K>, } /// Expose same interface as Api for controlling scope/group/versions/ns impl<K> Api<K> { pub fn within(mut self, ns: &str) -> Self { self.api = self.api.within(ns); self } pub fn group(mut self, group: &str) -> Self { self.api = self.api.group(group); self } pub fn version(mut self, version: &str) -> Self { self.api = self.api.version(version); self } } /// PUSH/PUT/POST/GET abstractions impl<K> Api<K> where K: Clone + DeserializeOwned + KubeObject, { pub async fn get(&self, name: &str) -> Result<K> { let req = self.api.get(name)?; self.client.request::<K>(req).await } pub async fn create(&self, pp: &PostParams, data: Vec<u8>) -> Result<K> { let req = self.api.create(&pp, data)?; self.client.request::<K>(req).await } pub async fn delete(&self, name: &str, dp: &DeleteParams) -> Result<Either<K, Status>> { let req = self.api.delete(name, &dp)?; self.client.request_status::<K>(req).await } pub 
async fn list(&self, lp: &ListParams) -> Result<ObjectList<K>> { let req = self.api.list(&lp)?; self.client.request::<ObjectList<K>>(req).await } pub async fn delete_collection( &self, lp: &ListParams, ) -> Result<Either<ObjectList<K>, Status>> { let req = self.api.delete_collection(&lp)?; self.client.request_status::<ObjectList<K>>(req).await } pub async fn patch(&self, name: &str, pp: &PatchParams, patch: Vec<u8>) -> Result<K> { let req = self.api.patch(name, &pp, patch)?; self.client.request::<K>(req).await } pub async fn replace(&self, name: &str, pp: &PostParams, data: Vec<u8>) -> Result<K> { let req = self.api.replace(name, &pp, data)?; self.client.request::<K>(req).await } pub async fn watch( &self, lp: &ListParams, version: &str, ) -> Result<impl Stream<Item = WatchEvent<K>>> { let req = self.api.watch(&lp, &version)?; self.client .request_events::<WatchEvent<K>>(req) .await .map(|stream| stream.filter_map(|e| async move { e.ok() })) } pub async fn get_status(&self, name: &str) -> Result<K> { let req = self.api.get_status(name)?; self.client.request::<K>(req).await } pub async fn patch_status(&self, name: &str, pp: &PatchParams, patch: Vec<u8>) -> Result<K> { let req = self.api.patch_status(name, &pp, patch)?; self.client.request::<K>(req).await } pub async fn replace_status(&self, name: &str, pp: &PostParams, data: Vec<u8>) -> Result<K> { let req = self.api.replace_status(name, &pp, data)?; self.client.request::<K>(req).await } } /// Marker trait for objects that has logs pub trait LoggingObject {} impl<K> Api<K> where K: Clone + DeserializeOwned + KubeObject + LoggingObject, { pub async fn log(&self, name: &str, lp: &LogParams) -> Result<String> { let req = self.api.log(name, lp)?; Ok(self.client.request_text(req).await?) } pub async fn log_stream(&self, name: &str, lp: &LogParams) -> Result<impl Stream<Item = Result<Vec<u8>>>> { let req = self.api.log(name, lp)?; Ok(self.client.request_text_stream(req).await?) 
} } /// Scale spec from api::autoscaling::v1 #[derive(Deserialize, Serialize, Clone, Debug)] pub struct ScaleSpec { pub replicas: Option<i32>, } /// Scale status from api::autoscaling::v1 #[derive(Deserialize, Serialize, Clone, Default, Debug)] pub struct ScaleStatus { pub replicas: i32, pub selector: Option<String>, } pub type Scale = Object<ScaleSpec, ScaleStatus>; /// Scale subresource /// /// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#scale-subresource impl<K> Api<K> where K: Clone + DeserializeOwned, { pub async fn get_scale(&self, name: &str) -> Result<Scale> { let req = self.api.get_scale(name)?; self.client.request::<Scale>(req).await } pub async fn patch_scale(&self, name: &str, pp: &PatchParams, patch: Vec<u8>) -> Result<Scale> { let req = self.api.patch_scale(name, &pp, patch)?; self.client.request::<Scale>(req).await } pub async fn replace_scale(&self, name: &str, pp: &PostParams, data: Vec<u8>) -> Result<Scale> { let req = self.api.replace_scale(name, &pp, data)?; self.client.request::<Scale>(req).await } } /// Api Constructor for CRDs /// /// Because it relies entirely on user definitions, this ctor does not rely on openapi. impl<K> Api<K> where K: Clone + DeserializeOwned, { pub fn customResource(client: APIClient, name: &str) -> Self { Self { api: RawApi::customResource(name), client, phantom: PhantomData, } } } // all other native impls in openapi.rs
33.508287
122
0.624567
4bd3802b85fb96a0e79fec43ffcdb6aab455f0a2
7,064
use core::marker::PhantomData; use crate::machine::machine_indices::*; use crate::machine::raw_block::*; use std::mem; use std::ops::{Index, IndexMut}; use std::ptr; #[derive(Debug)] struct StackTraits {} impl RawBlockTraits for StackTraits { #[inline] fn init_size() -> usize { 10 * 1024 * 1024 } #[inline] fn align() -> usize { mem::align_of::<Addr>() } #[inline] fn base_offset(base: *const u8) -> *const u8 { unsafe { base.offset(Self::align() as isize) } } } const fn prelude_size<Prelude>() -> usize { let size = mem::size_of::<Prelude>(); let align = mem::align_of::<Addr>(); (size & !(align - 1)) + align } #[derive(Debug)] pub struct Stack { buf: RawBlock<StackTraits>, _marker: PhantomData<Addr>, } impl Drop for Stack { fn drop(&mut self) { self.drop_in_place(); self.buf.deallocate(); } } #[derive(Debug, Clone, Copy)] pub struct FramePrelude { pub num_cells: usize, } #[derive(Debug)] pub struct AndFramePrelude { pub univ_prelude: FramePrelude, pub e: usize, pub cp: LocalCodePtr, pub interrupt_cp: LocalCodePtr, } #[derive(Debug)] pub struct AndFrame { pub prelude: AndFramePrelude, } impl AndFrame { pub fn size_of(num_cells: usize) -> usize { prelude_size::<AndFramePrelude>() + num_cells * mem::size_of::<Addr>() } } impl Index<usize> for AndFrame { type Output = Addr; fn index(&self, index: usize) -> &Self::Output { let prelude_offset = prelude_size::<AndFramePrelude>(); let index_offset = (index - 1) * mem::size_of::<Addr>(); unsafe { let ptr = mem::transmute::<&AndFrame, *const u8>(self); let ptr = ptr as usize + prelude_offset + index_offset; &*(ptr as *const Addr) } } } impl IndexMut<usize> for AndFrame { fn index_mut(&mut self, index: usize) -> &mut Self::Output { let prelude_offset = prelude_size::<AndFramePrelude>(); let index_offset = (index - 1) * mem::size_of::<Addr>(); unsafe { let ptr = mem::transmute::<&mut AndFrame, *const u8>(self); let ptr = ptr as usize + prelude_offset + index_offset; &mut *(ptr as *mut Addr) } } } #[derive(Debug)] pub struct 
OrFramePrelude { pub univ_prelude: FramePrelude, pub e: usize, pub cp: LocalCodePtr, pub b: usize, pub bp: LocalCodePtr, pub tr: usize, pub pstr_tr: usize, pub h: usize, pub b0: usize, pub attr_var_init_queue_b: usize, pub attr_var_init_bindings_b: usize, } #[derive(Debug)] pub struct OrFrame { pub prelude: OrFramePrelude, } impl Index<usize> for OrFrame { type Output = Addr; #[inline] fn index(&self, index: usize) -> &Self::Output { let prelude_offset = prelude_size::<OrFramePrelude>(); let index_offset = index * mem::size_of::<Addr>(); unsafe { let ptr = mem::transmute::<&OrFrame, *const u8>(self); let ptr = ptr as usize + prelude_offset + index_offset; &*(ptr as *const Addr) } } } impl IndexMut<usize> for OrFrame { #[inline] fn index_mut(&mut self, index: usize) -> &mut Self::Output { let prelude_offset = prelude_size::<OrFramePrelude>(); let index_offset = index * mem::size_of::<Addr>(); unsafe { let ptr = mem::transmute::<&mut OrFrame, *const u8>(self); let ptr = ptr as usize + prelude_offset + index_offset; &mut *(ptr as *mut Addr) } } } impl OrFrame { pub fn size_of(num_cells: usize) -> usize { prelude_size::<OrFramePrelude>() + num_cells * mem::size_of::<Addr>() } } impl Stack { pub fn new() -> Self { Stack { buf: RawBlock::new(), _marker: PhantomData } } pub fn allocate_and_frame(&mut self, num_cells: usize) -> usize { let frame_size = AndFrame::size_of(num_cells); unsafe { let new_top = self.buf.new_block(frame_size); let e = self.buf.top as usize - self.buf.base as usize; for idx in 0 .. 
num_cells { let offset = prelude_size::<AndFramePrelude>() + idx * mem::size_of::<Addr>(); ptr::write( (self.buf.top as usize + offset) as *mut Addr, Addr::StackCell(e, idx + 1), ); } let and_frame = &mut *(self.buf.top as *mut AndFrame); and_frame.prelude.univ_prelude.num_cells = num_cells; self.buf.top = new_top; e } } pub fn allocate_or_frame(&mut self, num_cells: usize) -> usize { let frame_size = OrFrame::size_of(num_cells); unsafe { let new_top = self.buf.new_block(frame_size); let b = self.buf.top as usize - self.buf.base as usize; for idx in 0 .. num_cells { let offset = prelude_size::<OrFramePrelude>() + idx * mem::size_of::<Addr>(); ptr::write( (self.buf.top as usize + offset) as *mut Addr, Addr::StackCell(b, idx), ); } let or_frame = &mut *(self.buf.top as *mut OrFrame); or_frame.prelude.univ_prelude.num_cells = num_cells; self.buf.top = new_top; b } } #[inline] pub fn index_and_frame(&self, e: usize) -> &AndFrame { unsafe { let ptr = self.buf.base as usize + e; &*(ptr as *const AndFrame) } } #[inline] pub fn index_and_frame_mut(&mut self, e: usize) -> &mut AndFrame { unsafe { let ptr = self.buf.base as usize + e; &mut *(ptr as *mut AndFrame) } } #[inline] pub fn index_or_frame(&self, b: usize) -> &OrFrame { unsafe { let ptr = self.buf.base as usize + b; &*(ptr as *const OrFrame) } } #[inline] pub fn index_or_frame_mut(&mut self, b: usize) -> &mut OrFrame { unsafe { let ptr = self.buf.base as usize + b; &mut *(ptr as *mut OrFrame) } } pub fn take(&mut self) -> Self { Stack { buf: self.buf.take(), _marker: PhantomData } } #[inline] pub fn truncate(&mut self, b: usize) { if b == 0 { self.inner_truncate(mem::align_of::<Addr>()); } else { self.inner_truncate(b); } } #[inline] fn inner_truncate(&mut self, b: usize) { let base = b + self.buf.base as usize; if base < self.buf.top as usize { self.buf.top = base as *const _; } } pub fn drop_in_place(&mut self) { self.truncate(mem::align_of::<Addr>()); debug_assert!(if self.buf.top.is_null() { self.buf.top == 
self.buf.base } else { self.buf.top as usize == self.buf.base as usize + mem::align_of::<Addr>() }); } }
25.13879
94
0.547282
f772ccb3e6649ac497234be4fc6811c829750e0f
3,678
// // Copyright 2015-2016 the slack-rs authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // use std::fmt; use std::io; use std::error; use std::string::FromUtf8Error; use api; /// `slack::Error` represents errors that can happen while using the `RtmClient` #[derive(Debug)] pub enum Error { /// Http client error Http(::reqwest::Error), /// WebSocket connection error WebSocket(::tungstenite::Error), /// Error decoding websocket text frame Utf8 Utf8(FromUtf8Error), /// Error parsing url Url(::reqwest::UrlError), /// Error decoding Json Json(::serde_json::Error), /// Slack Api Error Api(String), /// Errors that do not fit under the other types, Internal is for EG channel errors. 
Internal(String), } impl From<::reqwest::Error> for Error { fn from(err: ::reqwest::Error) -> Error { Error::Http(err) } } impl From<::reqwest::UrlError> for Error { fn from(err: ::reqwest::UrlError) -> Error { Error::Url(err) } } impl From<::tungstenite::Error> for Error { fn from(err: ::tungstenite::Error) -> Error { Error::WebSocket(err) } } impl From<::serde_json::Error> for Error { fn from(err: ::serde_json::Error) -> Error { Error::Json(err) } } impl From<io::Error> for Error { fn from(err: io::Error) -> Error { Error::Internal(format!("{:?}", err)) } } impl From<FromUtf8Error> for Error { fn from(err: FromUtf8Error) -> Error { Error::Utf8(err) } } impl From<api::rtm::StartError<::reqwest::Error>> for Error { fn from(err: api::rtm::StartError<::reqwest::Error>) -> Error { Error::Api(format!("rtm::StartError: {}", err)) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::Http(ref e) => write!(f, "Http (reqwest) Error: {:?}", e), Error::WebSocket(ref e) => write!(f, "Websocket Error: {:?}", e), Error::Utf8(ref e) => write!(f, "Utf8 decode Error: {:?}", e), Error::Url(ref e) => write!(f, "Url Error: {:?}", e), Error::Json(ref e) => write!(f, "Json Error: {:?}", e), Error::Api(ref st) => write!(f, "Slack Api Error: {:?}", st), Error::Internal(ref st) => write!(f, "Internal Error: {:?}", st), } } } impl error::Error for Error { fn description(&self) -> &str { match *self { Error::Http(ref e) => e.description(), Error::WebSocket(ref e) => e.description(), Error::Utf8(ref e) => e.description(), Error::Url(ref e) => e.description(), Error::Json(ref e) => e.description(), Error::Api(ref st) | Error::Internal(ref st) => st, } } fn cause(&self) -> Option<&error::Error> { match *self { Error::Http(ref e) => Some(e), Error::WebSocket(ref e) => Some(e), Error::Utf8(ref e) => Some(e), Error::Url(ref e) => Some(e), Error::Json(ref e) => Some(e), Error::Api(_) | Error::Internal(_) => None, } } }
29.66129
88
0.572594
21f8f3169a123b4a78d38b262830e12a244f5f8a
1,470
#![allow(incomplete_features)] #![allow(clippy::all)] #![feature(test)] #![feature(const_generics)] extern crate test; use test::{black_box, Bencher}; use staticvec::*; use std::io::Write; #[bench] fn extend_with_constant(b: &mut Bencher) { let mut v = StaticVec::<u8, 512>::new(); let cap = v.capacity(); b.iter(|| { v.clear(); let constant = black_box(1); v.extend((0..cap).map(move |_| constant)); v[511] }); b.bytes = v.capacity() as u64; } #[bench] fn extend_with_range(b: &mut Bencher) { let mut v = StaticVec::<u8, 512>::new(); let cap = v.capacity(); b.iter(|| { v.clear(); let range = 0..cap; v.extend(range.map(|x| black_box(x as u8))); v[511] }); b.bytes = v.capacity() as u64; } #[bench] fn extend_with_slice(b: &mut Bencher) { let mut v = StaticVec::<u8, 512>::new(); let data = [1; 512]; b.iter(|| { v.clear(); let iter = data.iter().map(|&x| x); v.extend(iter); v[511] }); b.bytes = v.capacity() as u64; } #[bench] fn extend_with_write(b: &mut Bencher) { let mut v = StaticVec::<u8, 512>::new(); let data = [1; 512]; b.iter(|| { v.clear(); v.write(&data[..]).ok(); v[511] }); b.bytes = v.capacity() as u64; } #[bench] fn extend_from_slice(b: &mut Bencher) { let mut v = StaticVec::<u8, 512>::new(); let data = [1; 512]; b.iter(|| { v.clear(); v.try_extend_from_slice(&data).ok(); v[511] }); b.bytes = v.capacity() as u64; }
19.342105
48
0.564626
deb598f6525c66fa65afef810538d57a20543923
10,484
use super::*; use std::time::SystemTime; const TIMEOUT: Duration = Duration::from_millis(640); // 640ms ought to be enough for anybody. const METRIC: &str = "inbound_tcp_accept_errors_total"; /// A helper that builds a proxy with the above detect timeout and a TCP server that always drops /// the accepted socket. async fn default_proxy() -> (proxy::Listening, client::Client) { // We provide a mocked TCP server that always immediately drops accepted socket. This should // trigger errors. let srv = tcp::server() .accept_fut(move |sock| async { drop(sock) }) .run() .await; let identity = identity::Identity::new( "foo-ns1", "foo.ns1.serviceaccount.identity.linkerd.cluster.local".to_string(), ); run_proxy(proxy::new().inbound(srv), identity).await } /// A helper that configures and runs the provided proxy builder with the above /// detect timeout and the provided inbound server and identity. async fn run_proxy( proxy: proxy::Proxy, identity::Identity { mut env, mut certify_rsp, .. }: identity::Identity, ) -> (proxy::Listening, client::Client) { // The identity service is needed for the proxy to start. let id_svc = { certify_rsp.valid_until = Some((SystemTime::now() + Duration::from_secs(666)).into()); controller::identity() .certify(move |_| certify_rsp) .run() .await }; env.put( app::env::ENV_INBOUND_DETECT_TIMEOUT, format!("{:?}", TIMEOUT), ); let proxy = proxy.identity(id_svc).run_with_test_env(env).await; // Wait for the proxy's identity to be certified. let admin_client = client::http1(proxy.metrics, "localhost"); assert_eventually!( admin_client .request(admin_client.request_builder("/ready").method("GET")) .await .unwrap() .status() == http::StatusCode::OK ); (proxy, admin_client) } fn metric(proxy: &proxy::Listening) -> metrics::MetricMatch { metrics::metric(METRIC).label("target_addr", proxy.inbound_server.as_ref().unwrap().addr) } /// Tests that the detect metric is labeled and incremented on timeout. 
#[tokio::test] async fn inbound_timeout() { let _trace = trace_init(); let (proxy, metrics) = default_proxy().await; let client = client::tcp(proxy.inbound); let _tcp_client = client.connect().await; tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case .await; metric(&proxy) .label("error", "tls_detect_timeout") .value(1u64) .assert_in(&metrics) .await; } /// Tests that the detect metric is labeled and incremented on I/O error. #[tokio::test] async fn inbound_io_err() { let _trace = trace_init(); let (proxy, metrics) = default_proxy().await; let client = client::tcp(proxy.inbound); let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; drop(tcp_client); metric(&proxy) .label("error", "io") .value(1u64) .assert_in(&metrics) .await; } /// Tests that the detect metric is not incremented when TLS is successfully /// detected. #[tokio::test] async fn inbound_success() { let _trace = trace_init(); let srv = server::http2().route("/", "hello world").run().await; let id_name = "foo.ns1.serviceaccount.identity.linkerd.cluster.local"; let identity = identity::Identity::new("foo-ns1", id_name.to_string()); let client_config = client::TlsConfig::new(identity.client_config.clone(), id_name); let (proxy, metrics) = run_proxy(proxy::new().inbound(srv), identity).await; let tls_client = client::http2_tls( proxy.inbound, "foo.ns1.svc.cluster.local", client_config.clone(), ); let no_tls_client = client::tcp(proxy.inbound); let metric = metric(&proxy) .label("error", "tls_detect_timeout") .value(1u64); // Connect with TLS. The metric should not be incremented. tls_client.get("/").await; assert!(metric.is_not_in(metrics.get("/metrics").await)); drop(tls_client); // Now, allow detection to time out. let tcp_client = no_tls_client.connect().await; tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case .await; drop(tcp_client); metric.clone().assert_in(&metrics).await; // Connect with a new TLS client. 
The metric value should not have changed. let tls_client = client::http2_tls(proxy.inbound, "foo.ns1.svc.cluster.local", client_config); tls_client.get("/").await; metric.assert_in(&metrics).await; } /// Tests both of the above cases together. #[tokio::test] async fn inbound_multi() { let _trace = trace_init(); let (proxy, metrics) = default_proxy().await; let client = client::tcp(proxy.inbound); let metric = metric(&proxy); let timeout_metric = metric.clone().label("error", "tls_detect_timeout"); let io_metric = metric.label("error", "io"); let tcp_client = client.connect().await; tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case .await; timeout_metric.clone().value(1u64).assert_in(&metrics).await; drop(tcp_client); let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; drop(tcp_client); io_metric.clone().value(1u64).assert_in(&metrics).await; timeout_metric.clone().value(1u64).assert_in(&metrics).await; let tcp_client = client.connect().await; tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case .await; io_metric.clone().value(1u64).assert_in(&metrics).await; timeout_metric.clone().value(2u64).assert_in(&metrics).await; drop(tcp_client); } /// Tests that TLS detect failure metrics are collected for the direct stack. #[tokio::test] async fn inbound_direct_multi() { let _trace = trace_init(); let srv = tcp::server() .accept_fut(move |sock| async { drop(sock) }) .run() .await; let identity = identity::Identity::new( "foo-ns1", "foo.ns1.serviceaccount.identity.linkerd.cluster.local".to_string(), ); // Configure the mock SO_ORIGINAL_DST addr to behave as though the // connection's original destination was the proxy's inbound listener. 
let proxy = proxy::new().inbound(srv).inbound_direct(); let (proxy, metrics) = run_proxy(proxy, identity).await; let client = client::tcp(proxy.inbound); let metric = metrics::metric(METRIC).label("target_addr", proxy.inbound); let timeout_metric = metric.clone().label("error", "tls_detect_timeout"); let no_tls_metric = metric.clone().label("error", "other"); let tcp_client = client.connect().await; tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case .await; timeout_metric.clone().value(1u64).assert_in(&metrics).await; drop(tcp_client); let tcp_client = client.connect().await; tcp_client.write(TcpFixture::HELLO_MSG).await; drop(tcp_client); no_tls_metric.clone().value(1u64).assert_in(&metrics).await; timeout_metric.clone().value(1u64).assert_in(&metrics).await; let tcp_client = client.connect().await; tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case .await; no_tls_metric.clone().value(1u64).assert_in(&metrics).await; timeout_metric.clone().value(2u64).assert_in(&metrics).await; drop(tcp_client); } /// Tests that the detect metric is not incremented when TLS is successfully /// detected by the direct stack. #[tokio::test] async fn inbound_direct_success() { let _trace = trace_init(); let srv = server::http2().route("/", "hello world").run().await; // Configure the mock SO_ORIGINAL_DST addr to behave as though the // connection's original destination was the proxy's inbound listener. let proxy1 = proxy::new().inbound(srv).inbound_direct(); let proxy1_id_name = "foo.ns1.serviceaccount.identity.linkerd.cluster.local"; let proxy1_id = identity::Identity::new("foo-ns1", proxy1_id_name.to_string()); let (proxy1, metrics) = run_proxy(proxy1, proxy1_id).await; // Route the connection through a second proxy, because inbound direct // connections require mutual authentication. 
let auth = "bar.ns1.svc.cluster.local"; let ctrl = controller::new(); let dst = format!( "{}:{}", auth, proxy1.inbound_server.as_ref().unwrap().addr.port() ); let _profile_out = ctrl.profile_tx_default(proxy1.inbound, auth); let dst = ctrl.destination_tx(dst); dst.send(controller::destination_add_tls( proxy1.inbound, proxy1_id_name, )); let ctrl = ctrl.run().await; let proxy2 = proxy::new().outbound_ip(proxy1.inbound).controller(ctrl); let proxy2_id_name = "bar.ns1.serviceaccount.identity.linkerd.cluster.local"; let proxy2_id = identity::Identity::new("bar-ns1", proxy2_id_name.to_string()); let (proxy2, _) = run_proxy(proxy2, proxy2_id).await; let tls_client = client::http1(proxy2.outbound, auth); let no_tls_client = client::tcp(proxy1.inbound); let metric = metrics::metric(METRIC) .label("target_addr", proxy1.inbound) .label("error", "tls_detect_timeout") .value(1u64); // Connect with TLS. The metric should not be incremented. // (This request will get a 502 because the inbound proxy doesn't know how // to resolve the "gateway"ed service, but that doesn't actually matter for // this test --- what we care about is that the TLS handshake is accepted). let _ = tls_client .request(tls_client.request_builder("/").method("GET")) .await; assert!(metric.is_not_in(metrics.get("/metrics").await)); drop(tls_client); // Now, allow detection to time out. let tcp_client = no_tls_client.connect().await; tokio::time::sleep(TIMEOUT + Duration::from_millis(15)) // just in case .await; drop(tcp_client); metric.clone().assert_in(&metrics).await; // Connect with a new TLS client. The metric value should not have changed. // (This request also 502s but it's fine). let tls_client = client::http1(proxy2.outbound, auth); let _ = tls_client .request(tls_client.request_builder("/").method("GET")) .await; metric.assert_in(&metrics).await; }
34.038961
98
0.662629
8983e6efa3a1517edc8c277cc4e5a00e51c86cc6
3,346
use std::collections::HashMap; use super::super::utils::http_get; use crate::{ error::{Error, Result}, Fees, Market, Precision, QuantityLimit, }; use crypto_market_type::MarketType; use serde::{Deserialize, Serialize}; use serde_json::Value; #[derive(Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] struct SpotMarket { symbol: String, symbol_partition: String, price_precision: i64, min_order_amt: String, id: String, state: String, base_currency: String, amount_precision: i64, max_order_amt: Option<String>, quote_currency: String, #[serde(flatten)] extra: HashMap<String, Value>, } #[derive(Serialize, Deserialize)] struct ResMsg { message: String, method: Option<String>, code: String, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] struct Response { datas: Vec<SpotMarket>, resMsg: ResMsg, } // See https://zbgapi.github.io/docs/spot/v1/en/#public-get-all-supported-trading-symbols fn fetch_spot_markets_raw() -> Result<Vec<SpotMarket>> { let txt = http_get("https://www.zbg.com/exchange/api/v1/common/symbols", None)?; let resp = serde_json::from_str::<Response>(&txt)?; if resp.resMsg.code != "1" { Err(Error(txt)) } else { let valid: Vec<SpotMarket> = resp .datas .into_iter() .filter(|x| x.state == "online") .collect(); Ok(valid) } } pub(super) fn fetch_spot_symbols() -> Result<Vec<String>> { let markets = fetch_spot_markets_raw()?; let symbols: Vec<String> = markets.into_iter().map(|m| m.symbol).collect(); Ok(symbols) } pub(super) fn fetch_spot_markets() -> Result<Vec<Market>> { let markets: Vec<Market> = fetch_spot_markets_raw()? 
.into_iter() .map(|m| { let info = serde_json::to_value(&m) .unwrap() .as_object() .unwrap() .clone(); let pair = crypto_pair::normalize_pair(&m.symbol, "zbg").unwrap(); let (base, quote) = { let v: Vec<&str> = pair.split('/').collect(); (v[0].to_string(), v[1].to_string()) }; Market { exchange: "zbg".to_string(), market_type: MarketType::Spot, symbol: m.symbol, base_id: m.base_currency, quote_id: m.quote_currency, settle_id: None, base, quote, settle: None, active: m.state == "online", margin: false, // TODO: need to find zbg spot fees fees: Fees { maker: 0.002, taker: 0.002, }, precision: Precision { tick_size: 1.0 / (10_i64.pow(m.price_precision as u32) as f64), lot_size: 1.0 / (10_i64.pow(m.amount_precision as u32) as f64), }, quantity_limit: Some(QuantityLimit { min: m.min_order_amt.parse::<f64>().unwrap(), max: None, }), contract_value: None, delivery_date: None, info, } }) .collect(); Ok(markets) }
29.610619
89
0.532875
21d34b3bc3ba55ac4d420aaeaa0e45d8d86b530f
9,019
use std::collections::HashMap; use prelude::*; /// Latest provides an operator that will maintain the last record for every group. /// /// Whenever a new record arrives for a group, the latest operator will negative the previous /// latest for that group. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Latest { us: Option<IndexPair>, src: IndexPair, key: usize, } impl Latest { /// Construct a new latest operator. /// /// `src` should be the ancestor the operation is performed over, and `keys` should be a list /// of fields used to group records by. The latest record *within each group* will be /// maintained. pub fn new(src: NodeIndex, key: usize) -> Latest { Latest { us: None, src: src.into(), key, } } } impl Ingredient for Latest { fn take(&mut self) -> NodeOperator { Clone::clone(self).into() } fn ancestors(&self) -> Vec<NodeIndex> { vec![self.src.as_global()] } fn on_connected(&mut self, _: &Graph) {} fn on_commit(&mut self, us: NodeIndex, remap: &HashMap<NodeIndex, IndexPair>) { self.src.remap(remap); self.us = Some(remap[&us]); } fn on_input( &mut self, _: &mut dyn Executor, from: LocalNodeIndex, rs: Records, _: &mut Tracer, replay_key_cols: Option<&[usize]>, _: &DomainNodes, state: &StateMap, ) -> ProcessingResult { debug_assert_eq!(from, *self.src); // find the current value for each group let us = self.us.unwrap(); let db = state .get(*us) .expect("latest must have its own state materialized"); let mut misses = Vec::new(); let mut lookups = Vec::new(); let mut out = Vec::with_capacity(rs.len()); { let currents = rs.into_iter().filter_map(|r| { // We don't allow standalone negatives as input to a latest. This is because it // would be very computationally expensive (and currently impossible) to find what // the *previous* latest was if the current latest was revoked. 
if !r.is_positive() { return None; } match db.lookup(&[self.key], &KeyType::Single(&r[self.key])) { LookupResult::Some(rs) => { if replay_key_cols.is_some() { lookups.push(Lookup { on: *us, cols: vec![self.key], key: vec![r[self.key].clone()], }); } debug_assert!(rs.len() <= 1, "a group had more than 1 result"); Some((r, rs)) } LookupResult::Missing => { // we don't actively materialize holes unless requested by a read. this // can't be a read, because reads cause replay, which fill holes with an // empty set before processing! misses.push(Miss { on: *us, lookup_idx: vec![self.key], lookup_cols: vec![self.key], replay_cols: replay_key_cols.map(Vec::from), record: r.extract().0, }); None } } }); // buffer emitted records for (r, current_row) in currents { if let Some(row) = current_row.into_iter().next() { out.push(Record::Negative(row.into_owned())); } // if there was a previous latest for this key, revoke old record out.push(r); } } // TODO: check that there aren't any standalone negatives ProcessingResult { results: out.into(), lookups, misses, } } fn suggest_indexes(&self, this: NodeIndex) -> HashMap<NodeIndex, Vec<usize>> { // index all key columns Some((this, vec![self.key])).into_iter().collect() } fn resolve(&self, col: usize) -> Option<Vec<(NodeIndex, usize)>> { Some(vec![(self.src.as_global(), col)]) } fn description(&self, detailed: bool) -> String { if !detailed { String::from("⧖") } else { format!("⧖ γ[{}]", self.key) } } fn parent_columns(&self, column: usize) -> Vec<(NodeIndex, Option<usize>)> { vec![(self.src.as_global(), Some(column))] } } #[cfg(test)] mod tests { use super::*; use ops; fn setup(key: usize, mat: bool) -> ops::test::MockGraph { let mut g = ops::test::MockGraph::new(); let s = g.add_base("source", &["x", "y"]); g.set_op("latest", &["x", "y"], Latest::new(s.as_global(), key), mat); g } // TODO: test when last *isn't* latest! 
#[test] fn it_describes() { let c = setup(0, false); assert_eq!(c.node().description(true), "⧖ γ[0]"); } #[test] fn it_forwards() { let mut c = setup(0, true); let u = vec![1.into(), 1.into()]; // first record for a group should emit just a positive let rs = c.narrow_one_row(u, true); assert_eq!(rs.len(), 1); let mut rs = rs.into_iter(); match rs.next().unwrap() { Record::Positive(r) => { assert_eq!(r[0], 1.into()); assert_eq!(r[1], 1.into()); } _ => unreachable!(), } let u = vec![2.into(), 2.into()]; // first record for a second group should also emit just a positive let rs = c.narrow_one_row(u, true); assert_eq!(rs.len(), 1); let mut rs = rs.into_iter(); match rs.next().unwrap() { Record::Positive(r) => { assert_eq!(r[0], 2.into()); assert_eq!(r[1], 2.into()); } _ => unreachable!(), } let u = vec![1.into(), 2.into()]; // new record for existing group should revoke the old latest, and emit the new let rs = c.narrow_one_row(u, true); assert_eq!(rs.len(), 2); let mut rs = rs.into_iter(); match rs.next().unwrap() { Record::Negative(r) => { assert_eq!(r[0], 1.into()); assert_eq!(r[1], 1.into()); } _ => unreachable!(), } match rs.next().unwrap() { Record::Positive(r) => { assert_eq!(r[0], 1.into()); assert_eq!(r[1], 2.into()); } _ => unreachable!(), } let u = vec![ (vec![1.into(), 1.into()], false), (vec![1.into(), 2.into()], false), (vec![1.into(), 3.into()], true), (vec![2.into(), 2.into()], false), (vec![2.into(), 4.into()], true), ]; // negatives and positives should still result in only one new current for each group let rs = c.narrow_one(u, true); assert_eq!(rs.len(), 4); // one - and one + for each group // group 1 lost 2 and gained 3 assert!(rs.iter().any(|r| if let Record::Negative(ref r) = *r { r[0] == 1.into() && r[1] == 2.into() } else { false })); assert!(rs.iter().any(|r| if let Record::Positive(ref r) = *r { r[0] == 1.into() && r[1] == 3.into() } else { false })); // group 2 lost 2 and gained 4 assert!(rs.iter().any(|r| if let Record::Negative(ref r) = 
*r { r[0] == 2.into() && r[1] == 2.into() } else { false })); assert!(rs.iter().any(|r| if let Record::Positive(ref r) = *r { r[0] == 2.into() && r[1] == 4.into() } else { false })); } #[test] fn it_suggests_indices() { let me = 1.into(); let c = setup(1, false); let idx = c.node().suggest_indexes(me); // should only add index on own columns assert_eq!(idx.len(), 1); assert!(idx.contains_key(&me)); // should only index on the group-by column assert_eq!(idx[&me], vec![1]); } #[test] fn it_resolves() { let c = setup(1, false); assert_eq!( c.node().resolve(0), Some(vec![(c.narrow_base_id().as_global(), 0)]) ); assert_eq!( c.node().resolve(1), Some(vec![(c.narrow_base_id().as_global(), 1)]) ); assert_eq!( c.node().resolve(2), Some(vec![(c.narrow_base_id().as_global(), 2)]) ); } }
30.993127
98
0.470673
e8cdbbdad48cd01b8e5f0b004789e3293061bd72
658
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(non_camel_case_types)] pub struct t { pub module_asm: ~str, pub meta_sect_name: ~str, pub data_layout: ~str, pub target_triple: ~str, pub cc_args: Vec<~str> , }
32.9
68
0.715805
39b710e0d5725779a8ce1348d5bc1769b730e7c7
69,679
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use ast::{Block, Crate, DeclLocal, ExprMac, PatMac}; use ast::{Local, Ident, MacInvocTT}; use ast::{ItemMac, Mrk, Stmt, StmtDecl, StmtMac, StmtExpr, StmtSemi}; use ast::TokenTree; use ast; use ext::mtwt; use ext::build::AstBuilder; use attr; use attr::AttrMetaMethods; use codemap; use codemap::{Span, Spanned, ExpnInfo, NameAndSpan, MacroBang, MacroAttribute}; use ext::base::*; use fold; use fold::*; use parse; use parse::token::{fresh_mark, fresh_name, intern}; use parse::token; use ptr::P; use util::small_vector::SmallVector; use visit; use visit::Visitor; enum Either<L,R> { Left(L), Right(R) } pub fn expand_expr(e: P<ast::Expr>, fld: &mut MacroExpander) -> P<ast::Expr> { e.and_then(|ast::Expr {id, node, span}| match node { // expr_mac should really be expr_ext or something; it's the // entry-point for all syntax extensions. ast::ExprMac(mac) => { let expanded_expr = match expand_mac_invoc(mac, span, |r| r.make_expr(), mark_expr, fld) { Some(expr) => expr, None => { return DummyResult::raw_expr(span); } }; // Keep going, outside-in. 
// let fully_expanded = fld.fold_expr(expanded_expr); fld.cx.bt_pop(); fully_expanded.map(|e| ast::Expr { id: ast::DUMMY_NODE_ID, node: e.node, span: span, }) } ast::ExprWhile(cond, body, opt_ident) => { let cond = fld.fold_expr(cond); let (body, opt_ident) = expand_loop_block(body, opt_ident, fld); fld.cx.expr(span, ast::ExprWhile(cond, body, opt_ident)) } // Desugar ExprWhileLet // From: `[opt_ident]: while let <pat> = <expr> <body>` ast::ExprWhileLet(pat, expr, body, opt_ident) => { // to: // // [opt_ident]: loop { // match <expr> { // <pat> => <body>, // _ => break // } // } // `<pat> => <body>` let pat_arm = { let body_expr = fld.cx.expr_block(body); fld.cx.arm(pat.span, vec![pat], body_expr) }; // `_ => break` let break_arm = { let pat_under = fld.cx.pat_wild(span); let break_expr = fld.cx.expr_break(span); fld.cx.arm(span, vec![pat_under], break_expr) }; // `match <expr> { ... }` let arms = vec![pat_arm, break_arm]; let match_expr = fld.cx.expr(span, ast::ExprMatch(expr, arms, ast::MatchWhileLetDesugar)); // `[opt_ident]: loop { ... 
}` let loop_block = fld.cx.block_expr(match_expr); let (loop_block, opt_ident) = expand_loop_block(loop_block, opt_ident, fld); fld.cx.expr(span, ast::ExprLoop(loop_block, opt_ident)) } // Desugar ExprIfLet // From: `if let <pat> = <expr> <body> [<elseopt>]` ast::ExprIfLet(pat, expr, body, mut elseopt) => { // to: // // match <expr> { // <pat> => <body>, // [_ if <elseopt_if_cond> => <elseopt_if_body>,] // _ => [<elseopt> | ()] // } // `<pat> => <body>` let pat_arm = { let body_expr = fld.cx.expr_block(body); fld.cx.arm(pat.span, vec![pat], body_expr) }; // `[_ if <elseopt_if_cond> => <elseopt_if_body>,]` let else_if_arms = { let mut arms = vec![]; loop { let elseopt_continue = elseopt .and_then(|els| els.and_then(|els| match els.node { // else if ast::ExprIf(cond, then, elseopt) => { let pat_under = fld.cx.pat_wild(span); arms.push(ast::Arm { attrs: vec![], pats: vec![pat_under], guard: Some(cond), body: fld.cx.expr_block(then) }); elseopt.map(|elseopt| (elseopt, true)) } _ => Some((P(els), false)) })); match elseopt_continue { Some((e, true)) => { elseopt = Some(e); } Some((e, false)) => { elseopt = Some(e); break; } None => { elseopt = None; break; } } } arms }; // `_ => [<elseopt> | ()]` let else_arm = { let pat_under = fld.cx.pat_wild(span); let else_expr = elseopt.unwrap_or_else(|| fld.cx.expr_lit(span, ast::LitNil)); fld.cx.arm(span, vec![pat_under], else_expr) }; let mut arms = Vec::with_capacity(else_if_arms.len() + 2); arms.push(pat_arm); arms.extend(else_if_arms.into_iter()); arms.push(else_arm); let match_expr = fld.cx.expr(span, ast::ExprMatch(expr, arms, ast::MatchIfLetDesugar)); fld.fold_expr(match_expr) } // Desugar support for ExprIfLet in the ExprIf else position ast::ExprIf(cond, blk, elseopt) => { let elseopt = elseopt.map(|els| els.and_then(|els| match els.node { ast::ExprIfLet(..) 
=> { // wrap the if-let expr in a block let span = els.span; let blk = P(ast::Block { view_items: vec![], stmts: vec![], expr: Some(P(els)), id: ast::DUMMY_NODE_ID, rules: ast::DefaultBlock, span: span }); fld.cx.expr_block(blk) } _ => P(els) })); let if_expr = fld.cx.expr(span, ast::ExprIf(cond, blk, elseopt)); if_expr.map(|e| noop_fold_expr(e, fld)) } ast::ExprLoop(loop_block, opt_ident) => { let (loop_block, opt_ident) = expand_loop_block(loop_block, opt_ident, fld); fld.cx.expr(span, ast::ExprLoop(loop_block, opt_ident)) } ast::ExprForLoop(pat, head, body, opt_ident) => { let pat = fld.fold_pat(pat); let head = fld.fold_expr(head); let (body, opt_ident) = expand_loop_block(body, opt_ident, fld); fld.cx.expr(span, ast::ExprForLoop(pat, head, body, opt_ident)) } ast::ExprFnBlock(capture_clause, fn_decl, block) => { let (rewritten_fn_decl, rewritten_block) = expand_and_rename_fn_decl_and_block(fn_decl, block, fld); let new_node = ast::ExprFnBlock(capture_clause, rewritten_fn_decl, rewritten_block); P(ast::Expr{id:id, node: new_node, span: fld.new_span(span)}) } ast::ExprProc(fn_decl, block) => { let (rewritten_fn_decl, rewritten_block) = expand_and_rename_fn_decl_and_block(fn_decl, block, fld); let new_node = ast::ExprProc(rewritten_fn_decl, rewritten_block); P(ast::Expr{id:id, node: new_node, span: fld.new_span(span)}) } _ => { P(noop_fold_expr(ast::Expr { id: id, node: node, span: span }, fld)) } }) } /// Expand a (not-ident-style) macro invocation. Returns the result /// of expansion and the mark which must be applied to the result. /// Our current interface doesn't allow us to apply the mark to the /// result until after calling make_expr, make_items, etc. 
fn expand_mac_invoc<T>(mac: ast::Mac, span: codemap::Span, parse_thunk: |Box<MacResult>|->Option<T>, mark_thunk: |T,Mrk|->T, fld: &mut MacroExpander) -> Option<T> { match mac.node { // it would almost certainly be cleaner to pass the whole // macro invocation in, rather than pulling it apart and // marking the tts and the ctxt separately. This also goes // for the other three macro invocation chunks of code // in this file. // Token-tree macros: MacInvocTT(pth, tts, _) => { if pth.segments.len() > 1u { fld.cx.span_err(pth.span, "expected macro name without module \ separators"); // let compilation continue return None; } let extname = pth.segments[0].identifier; let extnamestr = token::get_ident(extname); match fld.cx.syntax_env.find(&extname.name) { None => { fld.cx.span_err( pth.span, format!("macro undefined: '{}!'", extnamestr.get()).as_slice()); // let compilation continue None } Some(rc) => match *rc { NormalTT(ref expandfun, exp_span) => { fld.cx.bt_push(ExpnInfo { call_site: span, callee: NameAndSpan { name: extnamestr.get().to_string(), format: MacroBang, span: exp_span, }, }); let fm = fresh_mark(); let marked_before = mark_tts(tts.as_slice(), fm); // The span that we pass to the expanders we want to // be the root of the call stack. That's the most // relevant span and it's the actual invocation of // the macro. 
let mac_span = fld.cx.original_span(); let opt_parsed = { let expanded = expandfun.expand(fld.cx, mac_span, marked_before.as_slice()); parse_thunk(expanded) }; let parsed = match opt_parsed { Some(e) => e, None => { fld.cx.span_err( pth.span, format!("non-expression macro in expression position: {}", extnamestr.get().as_slice() ).as_slice()); return None; } }; Some(mark_thunk(parsed,fm)) } _ => { fld.cx.span_err( pth.span, format!("'{}' is not a tt-style macro", extnamestr.get()).as_slice()); None } } } } } } /// Rename loop label and expand its loop body /// /// The renaming procedure for loop is different in the sense that the loop /// body is in a block enclosed by loop head so the renaming of loop label /// must be propagated to the enclosed context. fn expand_loop_block(loop_block: P<Block>, opt_ident: Option<Ident>, fld: &mut MacroExpander) -> (P<Block>, Option<Ident>) { match opt_ident { Some(label) => { let new_label = fresh_name(&label); let rename = (label, new_label); // The rename *must not* be added to the pending list of current // syntax context otherwise an unrelated `break` or `continue` in // the same context will pick that up in the deferred renaming pass // and be renamed incorrectly. let mut rename_list = vec!(rename); let mut rename_fld = IdentRenamer{renames: &mut rename_list}; let renamed_ident = rename_fld.fold_ident(label); // The rename *must* be added to the enclosed syntax context for // `break` or `continue` to pick up because by definition they are // in a block enclosed by loop head. fld.cx.syntax_env.push_frame(); fld.cx.syntax_env.info().pending_renames.push(rename); let expanded_block = expand_block_elts(loop_block, fld); fld.cx.syntax_env.pop_frame(); (expanded_block, Some(renamed_ident)) } None => (fld.fold_block(loop_block), opt_ident) } } // eval $e with a new exts frame. // must be a macro so that $e isn't evaluated too early. macro_rules! 
with_exts_frame ( ($extsboxexpr:expr,$macros_escape:expr,$e:expr) => ({$extsboxexpr.push_frame(); $extsboxexpr.info().macros_escape = $macros_escape; let result = $e; $extsboxexpr.pop_frame(); result }) ) // When we enter a module, record it, for the sake of `module!` pub fn expand_item(it: P<ast::Item>, fld: &mut MacroExpander) -> SmallVector<P<ast::Item>> { let it = expand_item_modifiers(it, fld); let mut decorator_items = SmallVector::zero(); let mut new_attrs = Vec::new(); for attr in it.attrs.iter() { let mname = attr.name(); match fld.cx.syntax_env.find(&intern(mname.get())) { Some(rc) => match *rc { Decorator(ref dec) => { attr::mark_used(attr); fld.cx.bt_push(ExpnInfo { call_site: attr.span, callee: NameAndSpan { name: mname.get().to_string(), format: MacroAttribute, span: None } }); // we'd ideally decorator_items.push_all(expand_item(item, fld)), // but that double-mut-borrows fld let mut items: SmallVector<P<ast::Item>> = SmallVector::zero(); dec.expand(fld.cx, attr.span, &*attr.node.value, &*it, |item| items.push(item)); decorator_items.extend(items.into_iter() .flat_map(|item| expand_item(item, fld).into_iter())); fld.cx.bt_pop(); } _ => new_attrs.push((*attr).clone()), }, _ => new_attrs.push((*attr).clone()), } } let mut new_items = match it.node { ast::ItemMac(..) 
=> expand_item_mac(it, fld), ast::ItemMod(_) | ast::ItemForeignMod(_) => { fld.cx.mod_push(it.ident); let macro_escape = contains_macro_escape(new_attrs.as_slice()); let result = with_exts_frame!(fld.cx.syntax_env, macro_escape, noop_fold_item(it, fld)); fld.cx.mod_pop(); result }, _ => { let it = P(ast::Item { attrs: new_attrs, ..(*it).clone() }); noop_fold_item(it, fld) } }; new_items.push_all(decorator_items); new_items } fn expand_item_modifiers(mut it: P<ast::Item>, fld: &mut MacroExpander) -> P<ast::Item> { // partition the attributes into ItemModifiers and others let (modifiers, other_attrs) = it.attrs.partitioned(|attr| { match fld.cx.syntax_env.find(&intern(attr.name().get())) { Some(rc) => match *rc { Modifier(_) => true, _ => false }, _ => false } }); // update the attrs, leave everything else alone. Is this mutation really a good idea? it = P(ast::Item { attrs: other_attrs, ..(*it).clone() }); if modifiers.is_empty() { return it; } for attr in modifiers.iter() { let mname = attr.name(); match fld.cx.syntax_env.find(&intern(mname.get())) { Some(rc) => match *rc { Modifier(ref mac) => { attr::mark_used(attr); fld.cx.bt_push(ExpnInfo { call_site: attr.span, callee: NameAndSpan { name: mname.get().to_string(), format: MacroAttribute, span: None, } }); it = mac.expand(fld.cx, attr.span, &*attr.node.value, it); fld.cx.bt_pop(); } _ => unreachable!() }, _ => unreachable!() } } // expansion may have added new ItemModifiers expand_item_modifiers(it, fld) } /// Expand item_underscore fn expand_item_underscore(item: ast::Item_, fld: &mut MacroExpander) -> ast::Item_ { match item { ast::ItemFn(decl, fn_style, abi, generics, body) => { let (rewritten_fn_decl, rewritten_body) = expand_and_rename_fn_decl_and_block(decl, body, fld); let expanded_generics = fold::noop_fold_generics(generics,fld); ast::ItemFn(rewritten_fn_decl, fn_style, abi, expanded_generics, rewritten_body) } _ => noop_fold_item_underscore(item, fld) } } // does this attribute list contain 
"macro_escape" ? fn contains_macro_escape(attrs: &[ast::Attribute]) -> bool { attr::contains_name(attrs, "macro_escape") } // Support for item-position macro invocations, exactly the same // logic as for expression-position macro invocations. pub fn expand_item_mac(it: P<ast::Item>, fld: &mut MacroExpander) -> SmallVector<P<ast::Item>> { let (extname, path_span, tts) = match it.node { ItemMac(codemap::Spanned { node: MacInvocTT(ref pth, ref tts, _), .. }) => { (pth.segments[0].identifier, pth.span, (*tts).clone()) } _ => fld.cx.span_bug(it.span, "invalid item macro invocation") }; let extnamestr = token::get_ident(extname); let fm = fresh_mark(); let def_or_items = { let mut expanded = match fld.cx.syntax_env.find(&extname.name) { None => { fld.cx.span_err(path_span, format!("macro undefined: '{}!'", extnamestr).as_slice()); // let compilation continue return SmallVector::zero(); } Some(rc) => match *rc { NormalTT(ref expander, span) => { if it.ident.name != parse::token::special_idents::invalid.name { fld.cx .span_err(path_span, format!("macro {}! expects no ident argument, \ given '{}'", extnamestr, token::get_ident(it.ident)).as_slice()); return SmallVector::zero(); } fld.cx.bt_push(ExpnInfo { call_site: it.span, callee: NameAndSpan { name: extnamestr.get().to_string(), format: MacroBang, span: span } }); // mark before expansion: let marked_before = mark_tts(tts.as_slice(), fm); expander.expand(fld.cx, it.span, marked_before.as_slice()) } IdentTT(ref expander, span) => { if it.ident.name == parse::token::special_idents::invalid.name { fld.cx.span_err(path_span, format!("macro {}! 
expects an ident argument", extnamestr.get()).as_slice()); return SmallVector::zero(); } fld.cx.bt_push(ExpnInfo { call_site: it.span, callee: NameAndSpan { name: extnamestr.get().to_string(), format: MacroBang, span: span } }); // mark before expansion: let marked_tts = mark_tts(tts.as_slice(), fm); expander.expand(fld.cx, it.span, it.ident, marked_tts) } LetSyntaxTT(ref expander, span) => { if it.ident.name == parse::token::special_idents::invalid.name { fld.cx.span_err(path_span, format!("macro {}! expects an ident argument", extnamestr.get()).as_slice()); return SmallVector::zero(); } fld.cx.bt_push(ExpnInfo { call_site: it.span, callee: NameAndSpan { name: extnamestr.get().to_string(), format: MacroBang, span: span } }); // DON'T mark before expansion: expander.expand(fld.cx, it.span, it.ident, tts) } _ => { fld.cx.span_err(it.span, format!("{}! is not legal in item position", extnamestr.get()).as_slice()); return SmallVector::zero(); } } }; match expanded.make_def() { Some(def) => Left(def), None => Right(expanded.make_items()) } }; let items = match def_or_items { Left(MacroDef { name, ext }) => { // hidden invariant: this should only be possible as the // result of expanding a LetSyntaxTT, and thus doesn't // need to be marked. Not that it could be marked anyway. // create issue to recommend refactoring here? fld.cx.syntax_env.insert(intern(name.as_slice()), ext); if attr::contains_name(it.attrs.as_slice(), "macro_export") { fld.cx.exported_macros.push(it); } SmallVector::zero() } Right(Some(items)) => { items.into_iter() .map(|i| mark_item(i, fm)) .flat_map(|i| fld.fold_item(i).into_iter()) .collect() } Right(None) => { fld.cx.span_err(path_span, format!("non-item macro in item position: {}", extnamestr.get()).as_slice()); return SmallVector::zero(); } }; fld.cx.bt_pop(); items } /// Expand a stmt // // I don't understand why this returns a vector... it looks like we're // half done adding machinery to allow macros to expand into multiple statements. 
// Dispatch on the statement kind: macro invocations are expanded here,
// everything else falls through to expand_non_macro_stmt below.
fn expand_stmt(s: Stmt, fld: &mut MacroExpander) -> SmallVector<P<Stmt>> {
    let (mac, semi) = match s.node {
        StmtMac(mac, semi) => (mac, semi),
        _ => return expand_non_macro_stmt(s, fld)
    };
    let expanded_stmt = match expand_mac_invoc(mac, s.span,
                                               |r| r.make_stmt(),
                                               mark_stmt, fld) {
        Some(stmt) => stmt,
        None => {
            // expansion failed (error already reported); drop the statement
            return SmallVector::zero();
        }
    };

    // Keep going, outside-in.
    let fully_expanded = fld.fold_stmt(expanded_stmt);
    // pop the backtrace frame pushed during macro expansion
    // (presumably by expand_mac_invoc — confirm against its definition)
    fld.cx.bt_pop();

    if semi {
        // the invocation was written `m!(...);` — force a trailing semicolon
        // onto any bare-expression statements the macro produced
        fully_expanded.into_iter().map(|s| s.map(|Spanned {node, span}| {
            Spanned {
                node: match node {
                    StmtExpr(e, stmt_id) => StmtSemi(e, stmt_id),
                    _ => node /* might already have a semi */
                },
                span: span
            }
        })).collect()
    } else {
        fully_expanded
    }
}

// expand a non-macro stmt. this is essentially the fallthrough for
// expand_stmt, above.
fn expand_non_macro_stmt(Spanned {node, span: stmt_span}: Stmt, fld: &mut MacroExpander)
                         -> SmallVector<P<Stmt>> {
    // is it a let?
    match node {
        StmtDecl(decl, node_id) => decl.and_then(|Spanned {node: decl, span}| match decl {
            DeclLocal(local) => {
                // take it apart:
                let rewritten_local = local.map(|Local {id, pat, ty, init, source, span}| {
                    // expand the ty since TyFixedLengthVec contains an Expr
                    // and thus may have a macro use
                    let expanded_ty = fld.fold_ty(ty);
                    // expand the pat (it might contain macro uses):
                    let expanded_pat = fld.fold_pat(pat);
                    // find the PatIdents in the pattern:
                    // oh dear heaven... this is going to include the enum
                    // names, as well... but that should be okay, as long as
                    // the new names are gensyms for the old ones.
                    // generate fresh names, push them to a new pending list
                    let idents = pattern_bindings(&*expanded_pat);
                    let mut new_pending_renames =
                        idents.iter().map(|ident| (*ident, fresh_name(ident))).collect();
                    // rewrite the pattern using the new names (the old
                    // ones have already been applied):
                    let rewritten_pat = {
                        // nested binding to allow borrow to expire:
                        let mut rename_fld =
                            IdentRenamer{renames: &mut new_pending_renames};
                        rename_fld.fold_pat(expanded_pat)
                    };
                    // add them to the existing pending renames:
                    fld.cx.syntax_env.info().pending_renames
                          .extend(new_pending_renames.into_iter());
                    Local {
                        id: id,
                        ty: expanded_ty,
                        pat: rewritten_pat,
                        // also, don't forget to expand the init:
                        init: init.map(|e| fld.fold_expr(e)),
                        source: source,
                        span: span
                    }
                });
                SmallVector::one(P(Spanned {
                    node: StmtDecl(P(Spanned {
                            node: DeclLocal(rewritten_local),
                            span: span
                        }),
                        node_id),
                    span: stmt_span
                }))
            }
            _ => {
                // non-local decl (e.g. an item): no renaming needed,
                // use the default fold
                noop_fold_stmt(Spanned {
                    node: StmtDecl(P(Spanned {
                            node: decl,
                            span: span
                        }),
                        node_id),
                    span: stmt_span
                }, fld)
            }
        }),
        _ => {
            // not a decl at all: default fold
            noop_fold_stmt(Spanned {
                node: node,
                span: stmt_span
            }, fld)
        }
    }
}

// expand the arm of a 'match', renaming for macro hygiene
fn expand_arm(arm: ast::Arm, fld: &mut MacroExpander) -> ast::Arm {
    // expand pats... they might contain macro uses:
    let expanded_pats = arm.pats.move_map(|pat| fld.fold_pat(pat));
    if expanded_pats.len() == 0 {
        fail!("encountered match arm with 0 patterns");
    }
    // all of the pats must have the same set of bindings, so use the
    // first one to extract them and generate new names:
    let idents = pattern_bindings(&*expanded_pats[0]);
    let new_renames = idents.into_iter().map(|id| (id, fresh_name(&id))).collect();
    // apply the renaming, but only to the PatIdents:
    let mut rename_pats_fld = PatIdentRenamer{renames:&new_renames};
    let rewritten_pats = expanded_pats.move_map(|pat| rename_pats_fld.fold_pat(pat));
    // apply renaming and then expansion to the guard and the body:
    let mut rename_fld = IdentRenamer{renames:&new_renames};
    let rewritten_guard =
        arm.guard.map(|g| fld.fold_expr(rename_fld.fold_expr(g)));
    let rewritten_body = fld.fold_expr(rename_fld.fold_expr(arm.body));
    ast::Arm {
        attrs: arm.attrs.move_map(|x| fld.fold_attribute(x)),
        pats: rewritten_pats,
        guard: rewritten_guard,
        body: rewritten_body,
    }
}

/// A visitor that extracts the PatIdent (binding) paths
/// from a given thingy and puts them in a mutable
/// array
#[deriving(Clone)]
struct PatIdentFinder {
    // accumulates every identifier bound by a PatIdent node
    ident_accumulator: Vec<ast::Ident>
}

impl<'v> Visitor<'v> for PatIdentFinder {
    fn visit_pat(&mut self, pattern: &ast::Pat) {
        match *pattern {
            ast::Pat { id: _, node: ast::PatIdent(_, ref path1, ref inner), span: _ } => {
                self.ident_accumulator.push(path1.node);
                // visit optional subpattern of PatIdent:
                for subpat in inner.iter() {
                    self.visit_pat(&**subpat)
                }
            }
            // use the default traversal for non-PatIdents
            _ => visit::walk_pat(self, pattern)
        }
    }
}

/// find the PatIdent paths in a pattern
fn pattern_bindings(pat: &ast::Pat) -> Vec<ast::Ident> {
    let mut name_finder = PatIdentFinder{ident_accumulator:Vec::new()};
    name_finder.visit_pat(pat);
    name_finder.ident_accumulator
}

/// find the PatIdent paths in a fn
fn fn_decl_arg_bindings(fn_decl: &ast::FnDecl) -> Vec<ast::Ident> {
    let mut pat_idents = PatIdentFinder{ident_accumulator:Vec::new()};
    // collect the bindings from every argument pattern
    for arg in fn_decl.inputs.iter() {
        pat_idents.visit_pat(&*arg.pat);
    }
    pat_idents.ident_accumulator
}

// expand a block. pushes a new exts_frame, then calls expand_block_elts
pub fn expand_block(blk: P<Block>, fld: &mut MacroExpander) -> P<Block> {
    // see note below about treatment of exts table
    with_exts_frame!(fld.cx.syntax_env,false,
                     expand_block_elts(blk, fld))
}

// expand the elements of a block.
pub fn expand_block_elts(b: P<Block>, fld: &mut MacroExpander) -> P<Block> {
    b.map(|Block {id, view_items, stmts, expr, rules, span}| {
        let new_view_items = view_items.into_iter().map(|x| fld.fold_view_item(x)).collect();
        let new_stmts = stmts.into_iter().flat_map(|x| {
            // perform all pending renames
            let renamed_stmt = {
                let pending_renames = &mut fld.cx.syntax_env.info().pending_renames;
                let mut rename_fld = IdentRenamer{renames:pending_renames};
                rename_fld.fold_stmt(x).expect_one("rename_fold didn't return one value")
            };
            // expand macros in the statement
            fld.fold_stmt(renamed_stmt).into_iter()
        }).collect();
        // the trailing expression gets the same rename-then-expand treatment
        let new_expr = expr.map(|x| {
            let expr = {
                let pending_renames = &mut fld.cx.syntax_env.info().pending_renames;
                let mut rename_fld = IdentRenamer{renames:pending_renames};
                rename_fld.fold_expr(x)
            };
            fld.fold_expr(expr)
        });
        Block {
            id: fld.new_id(id),
            view_items: new_view_items,
            stmts: new_stmts,
            expr: new_expr,
            rules: rules,
            span: span
        }
    })
}

// Expand a pattern: only PatMac (a macro invocation in pattern position)
// needs work; everything else takes the default fold.
fn expand_pat(p: P<ast::Pat>, fld: &mut MacroExpander) -> P<ast::Pat> {
    match p.node {
        PatMac(_) => {}
        _ => return noop_fold_pat(p, fld)
    }
    p.map(|ast::Pat {node, span, ..}| {
        let (pth, tts) = match node {
            PatMac(mac) => match mac.node {
                MacInvocTT(pth, tts, _) => {
                    (pth, tts)
                }
            },
            _ => unreachable!()
        };
        // macro names must be a single path segment (no `a::b!`)
        if pth.segments.len() > 1u {
            fld.cx.span_err(pth.span, "expected macro name without module separators");
            return DummyResult::raw_pat(span);
        }
        let extname = pth.segments[0].identifier;
        let extnamestr = token::get_ident(extname);
        let marked_after = match fld.cx.syntax_env.find(&extname.name) {
            None => {
                fld.cx.span_err(pth.span,
                                format!("macro undefined: '{}!'",
                                        extnamestr).as_slice());
                // let compilation continue
                return DummyResult::raw_pat(span);
            }
            Some(rc) => match *rc {
                NormalTT(ref expander, tt_span) => {
                    fld.cx.bt_push(ExpnInfo {
                        call_site: span,
                        callee: NameAndSpan {
                            name: extnamestr.get().to_string(),
                            format: MacroBang,
                            span: tt_span
                        }
                    });

                    // hygiene: mark the tokens going in...
                    let fm = fresh_mark();
                    let marked_before = mark_tts(tts.as_slice(), fm);
                    let mac_span = fld.cx.original_span();

                    let expanded = match expander.expand(fld.cx,
                                        mac_span,
                                        marked_before.as_slice()).make_pat() {
                        Some(e) => e,
                        None => {
                            fld.cx.span_err(
                                pth.span,
                                format!(
                                    "non-pattern macro in pattern position: {}",
                                    extnamestr.get()
                                ).as_slice()
                            );
                            return DummyResult::raw_pat(span);
                        }
                    };

                    // mark after:
                    mark_pat(expanded,fm)
                }
                _ => {
                    fld.cx.span_err(span,
                                    format!("{}! is not legal in pattern position",
                                            extnamestr.get()).as_slice());
                    return DummyResult::raw_pat(span);
                }
            }
        };

        // recursively expand the result, outside-in
        let fully_expanded =
            fld.fold_pat(marked_after).node.clone();
        fld.cx.bt_pop();

        ast::Pat {
            id: ast::DUMMY_NODE_ID,
            node: fully_expanded,
            span: span
        }
    })
}

/// A tree-folder that applies every rename in its (mutable) list
/// to every identifier, including both bindings and varrefs
/// (and lots of things that will turn out to be neither)
pub struct IdentRenamer<'a> {
    renames: &'a mtwt::RenameList,
}

impl<'a> Folder for IdentRenamer<'a> {
    fn fold_ident(&mut self, id: Ident) -> Ident {
        // renaming happens in the syntax context, not the name itself
        Ident {
            name: id.name,
            ctxt: mtwt::apply_renames(self.renames, id.ctxt),
        }
    }
    fn fold_mac(&mut self, macro: ast::Mac) -> ast::Mac {
        fold::noop_fold_mac(macro, self)
    }
}

/// A tree-folder that applies every rename in its list to
/// the idents that are in PatIdent patterns. This is more narrowly
/// focused than IdentRenamer, and is needed for FnDecl,
/// where we want to rename the args but not the fn name or the generics etc.
// (doc comment for this struct precedes it in the file)
pub struct PatIdentRenamer<'a> {
    renames: &'a mtwt::RenameList,
}

impl<'a> Folder for PatIdentRenamer<'a> {
    fn fold_pat(&mut self, pat: P<ast::Pat>) -> P<ast::Pat> {
        // only PatIdent nodes are renamed; all others get the default fold
        match pat.node {
            ast::PatIdent(..) => {},
            _ => return noop_fold_pat(pat, self)
        }

        pat.map(|ast::Pat {id, node, span}| match node {
            ast::PatIdent(binding_mode, Spanned{span: sp, node: ident}, sub) => {
                // apply the renames to the binding's syntax context
                let new_ident = Ident{name: ident.name,
                                      ctxt: mtwt::apply_renames(self.renames, ident.ctxt)};
                let new_node =
                    ast::PatIdent(binding_mode,
                                  Spanned{span: self.new_span(sp), node: new_ident},
                                  sub.map(|p| self.fold_pat(p)));
                ast::Pat {
                    id: id,
                    node: new_node,
                    span: self.new_span(span)
                }
            },
            _ => unreachable!()
        })
    }
    fn fold_mac(&mut self, macro: ast::Mac) -> ast::Mac {
        fold::noop_fold_mac(macro, self)
    }
}

// expand a method
fn expand_method(m: P<ast::Method>, fld: &mut MacroExpander) -> SmallVector<P<ast::Method>> {
    m.and_then(|m| match m.node {
        ast::MethDecl(ident, generics, abi, explicit_self, fn_style, decl, body, vis) => {
            let id = fld.new_id(m.id);
            // expand + hygienically rename the arg bindings in decl and body
            let (rewritten_fn_decl, rewritten_body)
                = expand_and_rename_fn_decl_and_block(decl,body,fld);
            SmallVector::one(P(ast::Method {
                    attrs: m.attrs.move_map(|a| fld.fold_attribute(a)),
                    id: id,
                    span: fld.new_span(m.span),
                    node: ast::MethDecl(fld.fold_ident(ident),
                                        noop_fold_generics(generics, fld),
                                        abi,
                                        fld.fold_explicit_self(explicit_self),
                                        fn_style,
                                        rewritten_fn_decl,
                                        rewritten_body,
                                        vis)
                }))
        },
        ast::MethMac(mac) => {
            // a macro invocation in method position: expand it to methods
            let maybe_new_methods =
                expand_mac_invoc(mac, m.span,
                                 |r| r.make_methods(),
                                 |meths, mark| meths.move_map(|m| mark_method(m, mark)),
                                 fld);

            let new_methods = match maybe_new_methods {
                Some(methods) => methods,
                None => SmallVector::zero()
            };

            // expand again if necessary
            let new_methods = new_methods.into_iter()
                                  .flat_map(|m| fld.fold_method(m).into_iter()).collect();
            fld.cx.bt_pop();
            new_methods
        }
    })
}

/// Given a fn_decl and a block and a MacroExpander, expand the fn_decl, then use the
/// PatIdents in its arguments to perform renaming in the FnDecl and
/// the block, returning both the new FnDecl and the new Block.
fn expand_and_rename_fn_decl_and_block(fn_decl: P<ast::FnDecl>, block: P<ast::Block>,
                                       fld: &mut MacroExpander)
    -> (P<ast::FnDecl>, P<ast::Block>) {
    let expanded_decl = fld.fold_fn_decl(fn_decl);
    let idents = fn_decl_arg_bindings(&*expanded_decl);
    let renames =
        idents.iter().map(|id : &ast::Ident| (*id,fresh_name(id))).collect();
    // first, a renamer for the PatIdents, for the fn_decl:
    let mut rename_pat_fld = PatIdentRenamer{renames: &renames};
    let rewritten_fn_decl = rename_pat_fld.fold_fn_decl(expanded_decl);
    // now, a renamer for *all* idents, for the body:
    let mut rename_fld = IdentRenamer{renames: &renames};
    let rewritten_body = fld.fold_block(rename_fld.fold_block(block));
    (rewritten_fn_decl,rewritten_body)
}

/// A tree-folder that performs macro expansion
pub struct MacroExpander<'a, 'b:'a> {
    pub cx: &'a mut ExtCtxt<'b>,
}

// The Folder impl is how the expander walks the whole AST: each node kind
// is dispatched to the corresponding expand_* function above.
impl<'a, 'b> Folder for MacroExpander<'a, 'b> {
    fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
        expand_expr(expr, self)
    }

    fn fold_pat(&mut self, pat: P<ast::Pat>) -> P<ast::Pat> {
        expand_pat(pat, self)
    }

    fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
        expand_item(item, self)
    }

    fn fold_item_underscore(&mut self, item: ast::Item_) -> ast::Item_ {
        expand_item_underscore(item, self)
    }

    fn fold_stmt(&mut self, stmt: P<ast::Stmt>) -> SmallVector<P<ast::Stmt>> {
        stmt.and_then(|stmt| expand_stmt(stmt, self))
    }

    fn fold_block(&mut self, block: P<Block>) -> P<Block> {
        expand_block(block, self)
    }

    fn fold_arm(&mut self, arm: ast::Arm) -> ast::Arm {
        expand_arm(arm, self)
    }

    fn fold_method(&mut self, method: P<ast::Method>) -> SmallVector<P<ast::Method>> {
        expand_method(method, self)
    }

    fn new_span(&mut self, span: Span) -> Span {
        new_span(self.cx, span)
    }
}

// Attach the current expansion backtrace to a span.
fn new_span(cx: &ExtCtxt, sp: Span) -> Span {
    /* this discards information in the case of macro-defining macros */
    Span {
        lo: sp.lo,
        hi: sp.hi,
        expn_id: cx.backtrace(),
    }
}

// Knobs controlling a single run of expand_crate.
pub struct ExpansionConfig {
    pub crate_name: String,
    pub deriving_hash_type_parameter: bool,
    pub enable_quotes: bool,
    // maximum expansion depth before giving up
    pub recursion_limit: uint,
}

impl ExpansionConfig {
    pub fn default(crate_name: String) -> ExpansionConfig {
        ExpansionConfig {
            crate_name: crate_name,
            deriving_hash_type_parameter: false,
            enable_quotes: false,
            recursion_limit: 64,
        }
    }
}

// Macros exported from another crate, as serialized source strings.
pub struct ExportedMacros {
    pub crate_name: Ident,
    pub macros: Vec<String>,
}

// Entry point: expand every macro invocation in the crate.
pub fn expand_crate(parse_sess: &parse::ParseSess,
                    cfg: ExpansionConfig,
                    // these are the macros being imported to this crate:
                    imported_macros: Vec<ExportedMacros>,
                    user_exts: Vec<NamedSyntaxExtension>,
                    c: Crate) -> Crate {
    let mut cx = ExtCtxt::new(parse_sess, c.config.clone(), cfg);
    let mut expander = MacroExpander {
        cx: &mut cx,
    };

    // re-parse and register each imported macro before expanding the crate
    for ExportedMacros { crate_name, macros } in imported_macros.into_iter() {
        let name = format!("<{} macros>", token::get_ident(crate_name))
            .into_string();

        for source in macros.into_iter() {
            let item = parse::parse_item_from_source_str(name.clone(),
                                                         source,
                                                         expander.cx.cfg(),
                                                         expander.cx.parse_sess())
                    .expect("expected a serialized item");
            expand_item_mac(item, &mut expander);
        }
    }

    // register user-supplied syntax extensions
    for (name, extension) in user_exts.into_iter() {
        expander.cx.syntax_env.insert(name, extension);
    }

    let mut ret = expander.fold_crate(c);
    ret.exported_macros = expander.cx.exported_macros.clone();
    parse_sess.span_diagnostic.handler().abort_if_errors();
    return ret;
}

// HYGIENIC CONTEXT EXTENSION:
// all of these functions are for walking over
// ASTs and making some change to the context of every
// element that has one. a CtxtFn is a trait-ified
// version of a closure in (SyntaxContext -> SyntaxContext).
// the ones defined here include: // Marker - add a mark to a context // A Marker adds the given mark to the syntax context struct Marker { mark: Mrk } impl Folder for Marker { fn fold_ident(&mut self, id: Ident) -> Ident { ast::Ident { name: id.name, ctxt: mtwt::apply_mark(self.mark, id.ctxt) } } fn fold_mac(&mut self, Spanned {node, span}: ast::Mac) -> ast::Mac { Spanned { node: match node { MacInvocTT(path, tts, ctxt) => { MacInvocTT(self.fold_path(path), self.fold_tts(tts.as_slice()), mtwt::apply_mark(self.mark, ctxt)) } }, span: span, } } } // apply a given mark to the given token trees. Used prior to expansion of a macro. fn mark_tts(tts: &[TokenTree], m: Mrk) -> Vec<TokenTree> { noop_fold_tts(tts, &mut Marker{mark:m}) } // apply a given mark to the given expr. Used following the expansion of a macro. fn mark_expr(expr: P<ast::Expr>, m: Mrk) -> P<ast::Expr> { Marker{mark:m}.fold_expr(expr) } // apply a given mark to the given pattern. Used following the expansion of a macro. fn mark_pat(pat: P<ast::Pat>, m: Mrk) -> P<ast::Pat> { Marker{mark:m}.fold_pat(pat) } // apply a given mark to the given stmt. Used following the expansion of a macro. fn mark_stmt(expr: P<ast::Stmt>, m: Mrk) -> P<ast::Stmt> { Marker{mark:m}.fold_stmt(expr) .expect_one("marking a stmt didn't return exactly one stmt") } // apply a given mark to the given item. Used following the expansion of a macro. fn mark_item(expr: P<ast::Item>, m: Mrk) -> P<ast::Item> { Marker{mark:m}.fold_item(expr) .expect_one("marking an item didn't return exactly one item") } // apply a given mark to the given item. Used following the expansion of a macro. 
fn mark_method(expr: P<ast::Method>, m: Mrk) -> P<ast::Method> { Marker{mark:m}.fold_method(expr) .expect_one("marking an item didn't return exactly one method") } /// Check that there are no macro invocations left in the AST: pub fn check_for_macros(sess: &parse::ParseSess, krate: &ast::Crate) { visit::walk_crate(&mut MacroExterminator{sess:sess}, krate); } /// A visitor that ensures that no macro invocations remain in an AST. struct MacroExterminator<'a>{ sess: &'a parse::ParseSess } impl<'a, 'v> Visitor<'v> for MacroExterminator<'a> { fn visit_mac(&mut self, macro: &ast::Mac) { self.sess.span_diagnostic.span_bug(macro.span, "macro exterminator: expected AST \ with no macro invocations"); } } #[cfg(test)] mod test { use super::{pattern_bindings, expand_crate, contains_macro_escape}; use super::{PatIdentFinder, IdentRenamer, PatIdentRenamer, ExpansionConfig}; use ast; use ast::{Attribute_, AttrOuter, MetaWord, Name}; use attr; use codemap; use codemap::Spanned; use ext::mtwt; use fold::Folder; use parse; use parse::token; use ptr::P; use util::parser_testing::{string_to_parser}; use util::parser_testing::{string_to_pat, string_to_crate, strs_to_idents}; use visit; use visit::Visitor; // a visitor that extracts the paths // from a given thingy and puts them in a mutable // array (passed in to the traversal) #[deriving(Clone)] struct PathExprFinderContext { path_accumulator: Vec<ast::Path> , } impl<'v> Visitor<'v> for PathExprFinderContext { fn visit_expr(&mut self, expr: &ast::Expr) { match expr.node { ast::ExprPath(ref p) => { self.path_accumulator.push(p.clone()); // not calling visit_path, but it should be fine. 
} _ => visit::walk_expr(self, expr) } } } // find the variable references in a crate fn crate_varrefs(the_crate : &ast::Crate) -> Vec<ast::Path> { let mut path_finder = PathExprFinderContext{path_accumulator:Vec::new()}; visit::walk_crate(&mut path_finder, the_crate); path_finder.path_accumulator } /// A Visitor that extracts the identifiers from a thingy. // as a side note, I'm starting to want to abstract over these.... struct IdentFinder { ident_accumulator: Vec<ast::Ident> } impl<'v> Visitor<'v> for IdentFinder { fn visit_ident(&mut self, _: codemap::Span, id: ast::Ident){ self.ident_accumulator.push(id); } } /// Find the idents in a crate fn crate_idents(the_crate: &ast::Crate) -> Vec<ast::Ident> { let mut ident_finder = IdentFinder{ident_accumulator: Vec::new()}; visit::walk_crate(&mut ident_finder, the_crate); ident_finder.ident_accumulator } // these following tests are quite fragile, in that they don't test what // *kind* of failure occurs. fn test_ecfg() -> ExpansionConfig { ExpansionConfig::default("test".to_string()) } // make sure that macros can't escape fns #[should_fail] #[test] fn macros_cant_escape_fns_test () { let src = "fn bogus() {macro_rules! z (() => (3+4))}\ fn inty() -> int { z!() }".to_string(); let sess = parse::new_parse_sess(); let crate_ast = parse::parse_crate_from_source_str( "<test>".to_string(), src, Vec::new(), &sess); // should fail: expand_crate(&sess,test_ecfg(),vec!(),vec!(),crate_ast); } // make sure that macros can't escape modules #[should_fail] #[test] fn macros_cant_escape_mods_test () { let src = "mod foo {macro_rules! z (() => (3+4))}\ fn inty() -> int { z!() }".to_string(); let sess = parse::new_parse_sess(); let crate_ast = parse::parse_crate_from_source_str( "<test>".to_string(), src, Vec::new(), &sess); expand_crate(&sess,test_ecfg(),vec!(),vec!(),crate_ast); } // macro_escape modules should allow macros to escape #[test] fn macros_can_escape_flattened_mods_test () { let src = "#[macro_escape] mod foo {macro_rules! 
z (() => (3+4))}\ fn inty() -> int { z!() }".to_string(); let sess = parse::new_parse_sess(); let crate_ast = parse::parse_crate_from_source_str( "<test>".to_string(), src, Vec::new(), &sess); expand_crate(&sess, test_ecfg(), vec!(), vec!(), crate_ast); } #[test] fn test_contains_flatten (){ let attr1 = make_dummy_attr ("foo"); let attr2 = make_dummy_attr ("bar"); let escape_attr = make_dummy_attr ("macro_escape"); let attrs1 = vec!(attr1.clone(), escape_attr, attr2.clone()); assert_eq!(contains_macro_escape(attrs1.as_slice()),true); let attrs2 = vec!(attr1,attr2); assert_eq!(contains_macro_escape(attrs2.as_slice()),false); } // make a MetaWord outer attribute with the given name fn make_dummy_attr(s: &str) -> ast::Attribute { Spanned { span:codemap::DUMMY_SP, node: Attribute_ { id: attr::mk_attr_id(), style: AttrOuter, value: P(Spanned { node: MetaWord(token::intern_and_get_ident(s)), span: codemap::DUMMY_SP, }), is_sugared_doc: false, } } } fn expand_crate_str(crate_str: String) -> ast::Crate { let ps = parse::new_parse_sess(); let crate_ast = string_to_parser(&ps, crate_str).parse_crate_mod(); // the cfg argument actually does matter, here... expand_crate(&ps,test_ecfg(),vec!(),vec!(),crate_ast) } // find the pat_ident paths in a crate fn crate_bindings(the_crate : &ast::Crate) -> Vec<ast::Ident> { let mut name_finder = PatIdentFinder{ident_accumulator:Vec::new()}; visit::walk_crate(&mut name_finder, the_crate); name_finder.ident_accumulator } #[test] fn macro_tokens_should_match(){ expand_crate_str( "macro_rules! m((a)=>(13)) fn main(){m!(a);}".to_string()); } // should be able to use a bound identifier as a literal in a macro definition: #[test] fn self_macro_parsing(){ expand_crate_str( "macro_rules! foo ((zz) => (287u;)) fn f(zz : int) {foo!(zz);}".to_string() ); } // renaming tests expand a crate and then check that the bindings match // the right varrefs. The specification of the test case includes the // text of the crate, and also an array of arrays. 
Each element in the // outer array corresponds to a binding in the traversal of the AST // induced by visit. Each of these arrays contains a list of indexes, // interpreted as the varrefs in the varref traversal that this binding // should match. So, for instance, in a program with two bindings and // three varrefs, the array ~[~[1,2],~[0]] would indicate that the first // binding should match the second two varrefs, and the second binding // should match the first varref. // // Put differently; this is a sparse representation of a boolean matrix // indicating which bindings capture which identifiers. // // Note also that this matrix is dependent on the implicit ordering of // the bindings and the varrefs discovered by the name-finder and the path-finder. // // The comparisons are done post-mtwt-resolve, so we're comparing renamed // names; differences in marks don't matter any more. // // oog... I also want tests that check "bound-identifier-=?". That is, // not just "do these have the same name", but "do they have the same // name *and* the same marks"? Understanding this is really pretty painful. // in principle, you might want to control this boolean on a per-varref basis, // but that would make things even harder to understand, and might not be // necessary for thorough testing. type RenamingTest = (&'static str, Vec<Vec<uint>>, bool); #[test] fn automatic_renaming () { let tests: Vec<RenamingTest> = vec!(// b & c should get new names throughout, in the expr too: ("fn a() -> int { let b = 13; let c = b; b+c }", vec!(vec!(0,1),vec!(2)), false), // both x's should be renamed (how is this causing a bug?) ("fn main () {let x: int = 13;x;}", vec!(vec!(0)), false), // the use of b after the + should be renamed, the other one not: ("macro_rules! f (($x:ident) => (b + $x)) fn a() -> int { let b = 13; f!(b)}", vec!(vec!(1)), false), // the b before the plus should not be renamed (requires marks) ("macro_rules! 
f (($x:ident) => ({let b=9; ($x + b)})) fn a() -> int { f!(b)}", vec!(vec!(1)), false), // the marks going in and out of letty should cancel, allowing that $x to // capture the one following the semicolon. // this was an awesome test case, and caught a *lot* of bugs. ("macro_rules! letty(($x:ident) => (let $x = 15;)) macro_rules! user(($x:ident) => ({letty!($x); $x})) fn main() -> int {user!(z)}", vec!(vec!(0)), false) ); for (idx,s) in tests.iter().enumerate() { run_renaming_test(s,idx); } } // no longer a fixme #8062: this test exposes a *potential* bug; our system does // not behave exactly like MTWT, but a conversation with Matthew Flatt // suggests that this can only occur in the presence of local-expand, which // we have no plans to support. ... unless it's needed for item hygiene.... #[ignore] #[test] fn issue_8062(){ run_renaming_test( &("fn main() {let hrcoo = 19; macro_rules! getx(()=>(hrcoo)); getx!();}", vec!(vec!(0)), true), 0) } // FIXME #6994: // the z flows into and out of two macros (g & f) along one path, and one // (just g) along the other, so the result of the whole thing should // be "let z_123 = 3; z_123" #[ignore] #[test] fn issue_6994(){ run_renaming_test( &("macro_rules! g (($x:ident) => ({macro_rules! f(($y:ident)=>({let $y=3;$x}));f!($x)})) fn a(){g!(z)}", vec!(vec!(0)),false), 0) } // match variable hygiene. Should expand into // fn z() {match 8 {x_1 => {match 9 {x_2 | x_2 if x_2 == x_1 => x_2 + x_1}}}} #[test] fn issue_9384(){ run_renaming_test( &("macro_rules! bad_macro (($ex:expr) => ({match 9 {x | x if x == $ex => x + $ex}})) fn z() {match 8 {x => bad_macro!(x)}}", // NB: the third "binding" is the repeat of the second one. vec!(vec!(1,3),vec!(0,2),vec!(0,2)), true), 0) } // interpolated nodes weren't getting labeled. // should expand into // fn main(){let g1_1 = 13; g1_1}} #[test] fn pat_expand_issue_15221(){ run_renaming_test( &("macro_rules! inner ( ($e:pat ) => ($e)) macro_rules! 
outer ( ($e:pat ) => (inner!($e))) fn main() { let outer!(g) = 13; g;}", vec!(vec!(0)), true), 0) } // create a really evil test case where a $x appears inside a binding of $x // but *shouldn't* bind because it was inserted by a different macro.... // can't write this test case until we have macro-generating macros. // method arg hygiene // method expands to fn get_x(&self_0, x_1:int) {self_0 + self_2 + x_3 + x_1} #[test] fn method_arg_hygiene(){ run_renaming_test( &("macro_rules! inject_x (()=>(x)) macro_rules! inject_self (()=>(self)) struct A; impl A{fn get_x(&self, x: int) {self + inject_self!() + inject_x!() + x;} }", vec!(vec!(0),vec!(3)), true), 0) } // ooh, got another bite? // expands to struct A; impl A {fn thingy(&self_1) {self_1;}} #[test] fn method_arg_hygiene_2(){ run_renaming_test( &("struct A; macro_rules! add_method (($T:ty) => (impl $T { fn thingy(&self) {self;} })) add_method!(A)", vec!(vec!(0)), true), 0) } // item fn hygiene // expands to fn q(x_1:int){fn g(x_2:int){x_2 + x_1};} #[test] fn issue_9383(){ run_renaming_test( &("macro_rules! bad_macro (($ex:expr) => (fn g(x:int){ x + $ex })) fn q(x:int) { bad_macro!(x); }", vec!(vec!(1),vec!(0)),true), 0) } // closure arg hygiene (ExprFnBlock) // expands to fn f(){(|x_1 : int| {(x_2 + x_1)})(3);} #[test] fn closure_arg_hygiene(){ run_renaming_test( &("macro_rules! inject_x (()=>(x)) fn f(){(|x : int| {(inject_x!() + x)})(3);}", vec!(vec!(1)), true), 0) } // closure arg hygiene (ExprProc) // expands to fn f(){(proc(x_1 : int) {(x_2 + x_1)})(3);} #[test] fn closure_arg_hygiene_2(){ run_renaming_test( &("macro_rules! inject_x (()=>(x)) fn f(){ (proc(x : int){(inject_x!() + x)})(3); }", vec!(vec!(1)), true), 0) } // macro_rules in method position. Sadly, unimplemented. #[test] fn macro_in_method_posn(){ expand_crate_str( "macro_rules! 
my_method (() => (fn thirteen(&self) -> int {13})) struct A; impl A{ my_method!()} fn f(){A.thirteen;}".to_string()); } // another nested macro // expands to impl Entries {fn size_hint(&self_1) {self_1;} #[test] fn item_macro_workaround(){ run_renaming_test( &("macro_rules! item { ($i:item) => {$i}} struct Entries; macro_rules! iterator_impl { () => { item!( impl Entries { fn size_hint(&self) { self;}})}} iterator_impl! { }", vec!(vec!(0)), true), 0) } // run one of the renaming tests fn run_renaming_test(t: &RenamingTest, test_idx: uint) { let invalid_name = token::special_idents::invalid.name; let (teststr, bound_connections, bound_ident_check) = match *t { (ref str,ref conns, bic) => (str.to_string(), conns.clone(), bic) }; let cr = expand_crate_str(teststr.to_string()); let bindings = crate_bindings(&cr); let varrefs = crate_varrefs(&cr); // must be one check clause for each binding: assert_eq!(bindings.len(),bound_connections.len()); for (binding_idx,shouldmatch) in bound_connections.iter().enumerate() { let binding_name = mtwt::resolve(bindings[binding_idx]); let binding_marks = mtwt::marksof(bindings[binding_idx].ctxt, invalid_name); // shouldmatch can't name varrefs that don't exist: assert!((shouldmatch.len() == 0) || (varrefs.len() > *shouldmatch.iter().max().unwrap())); for (idx,varref) in varrefs.iter().enumerate() { let print_hygiene_debug_info = || { // good lord, you can't make a path with 0 segments, can you? 
let final_varref_ident = match varref.segments.last() { Some(pathsegment) => pathsegment.identifier, None => fail!("varref with 0 path segments?") }; let varref_name = mtwt::resolve(final_varref_ident); let varref_idents : Vec<ast::Ident> = varref.segments.iter().map(|s| s.identifier) .collect(); println!("varref #{}: {}, resolves to {}",idx, varref_idents, varref_name); let string = token::get_ident(final_varref_ident); println!("varref's first segment's string: \"{}\"", string.get()); println!("binding #{}: {}, resolves to {}", binding_idx, bindings[binding_idx], binding_name); mtwt::with_sctable(|x| mtwt::display_sctable(x)); }; if shouldmatch.contains(&idx) { // it should be a path of length 1, and it should // be free-identifier=? or bound-identifier=? to the given binding assert_eq!(varref.segments.len(),1); let varref_name = mtwt::resolve(varref.segments[0].identifier); let varref_marks = mtwt::marksof(varref.segments[0] .identifier .ctxt, invalid_name); if !(varref_name==binding_name) { println!("uh oh, should match but doesn't:"); print_hygiene_debug_info(); } assert_eq!(varref_name,binding_name); if bound_ident_check { // we're checking bound-identifier=?, and the marks // should be the same, too: assert_eq!(varref_marks,binding_marks.clone()); } } else { let varref_name = mtwt::resolve(varref.segments[0].identifier); let fail = (varref.segments.len() == 1) && (varref_name == binding_name); // temp debugging: if fail { println!("failure on test {}",test_idx); println!("text of test case: \"{}\"", teststr); println!(""); println!("uh oh, matches but shouldn't:"); print_hygiene_debug_info(); } assert!(!fail); } } } } #[test] fn fmt_in_macro_used_inside_module_macro() { let crate_str = "macro_rules! fmt_wrap(($b:expr)=>($b.to_string())) macro_rules! 
foo_module (() => (mod generated { fn a() { let xx = 147; fmt_wrap!(xx);}})) foo_module!() ".to_string(); let cr = expand_crate_str(crate_str); // find the xx binding let bindings = crate_bindings(&cr); let cxbinds: Vec<&ast::Ident> = bindings.iter().filter(|b| { let ident = token::get_ident(**b); let string = ident.get(); "xx" == string }).collect(); let cxbinds: &[&ast::Ident] = cxbinds.as_slice(); let cxbind = match cxbinds { [b] => b, _ => fail!("expected just one binding for ext_cx") }; let resolved_binding = mtwt::resolve(*cxbind); let varrefs = crate_varrefs(&cr); // the xx binding should bind all of the xx varrefs: for (idx,v) in varrefs.iter().filter(|p| { p.segments.len() == 1 && "xx" == token::get_ident(p.segments[0].identifier).get() }).enumerate() { if mtwt::resolve(v.segments[0].identifier) != resolved_binding { println!("uh oh, xx binding didn't match xx varref:"); println!("this is xx varref \\# {}", idx); println!("binding: {}", cxbind); println!("resolves to: {}", resolved_binding); println!("varref: {}", v.segments[0].identifier); println!("resolves to: {}", mtwt::resolve(v.segments[0].identifier)); mtwt::with_sctable(|x| mtwt::display_sctable(x)); } assert_eq!(mtwt::resolve(v.segments[0].identifier), resolved_binding); }; } #[test] fn pat_idents(){ let pat = string_to_pat( "(a,Foo{x:c @ (b,9),y:Bar(4,d)})".to_string()); let idents = pattern_bindings(&*pat); assert_eq!(idents, strs_to_idents(vec!("a","c","b","d"))); } // test the list of identifier patterns gathered by the visitor. Note that // 'None' is listed as an identifier pattern because we don't yet know that // it's the name of a 0-ary variant, and that 'i' appears twice in succession. 
#[test] fn crate_bindings_test(){ let the_crate = string_to_crate("fn main (a : int) -> int {|b| { match 34 {None => 3, Some(i) | i => j, Foo{k:z,l:y} => \"banana\"}} }".to_string()); let idents = crate_bindings(&the_crate); assert_eq!(idents, strs_to_idents(vec!("a","b","None","i","i","z","y"))); } // test the IdentRenamer directly #[test] fn ident_renamer_test () { let the_crate = string_to_crate("fn f(x : int){let x = x; x}".to_string()); let f_ident = token::str_to_ident("f"); let x_ident = token::str_to_ident("x"); let int_ident = token::str_to_ident("int"); let renames = vec!((x_ident,Name(16))); let mut renamer = IdentRenamer{renames: &renames}; let renamed_crate = renamer.fold_crate(the_crate); let idents = crate_idents(&renamed_crate); let resolved : Vec<ast::Name> = idents.iter().map(|id| mtwt::resolve(*id)).collect(); assert_eq!(resolved,vec!(f_ident.name,Name(16),int_ident.name,Name(16),Name(16),Name(16))); } // test the PatIdentRenamer; only PatIdents get renamed #[test] fn pat_ident_renamer_test () { let the_crate = string_to_crate("fn f(x : int){let x = x; x}".to_string()); let f_ident = token::str_to_ident("f"); let x_ident = token::str_to_ident("x"); let int_ident = token::str_to_ident("int"); let renames = vec!((x_ident,Name(16))); let mut renamer = PatIdentRenamer{renames: &renames}; let renamed_crate = renamer.fold_crate(the_crate); let idents = crate_idents(&renamed_crate); let resolved : Vec<ast::Name> = idents.iter().map(|id| mtwt::resolve(*id)).collect(); let x_name = x_ident.name; assert_eq!(resolved,vec!(f_ident.name,Name(16),int_ident.name,Name(16),x_name,x_name)); } }
39.411199
99
0.506595
872627b29cd74e364a39a960473ace585d1b7d14
1,417
use crate::postgres::database::Postgres; #[derive(Debug)] #[repr(u8)] pub enum TransactionStatus { /// Not in a transaction block. Idle = b'I', /// In a transaction block. Transaction = b'T', /// In a _failed_ transaction block. Queries will be rejected until block is ended. Error = b'E', } /// `ReadyForQuery` is sent whenever the database is ready for a new query cycle. #[derive(Debug)] pub struct ReadyForQuery { status: TransactionStatus, } impl ReadyForQuery { pub(crate) fn read(buf: &[u8]) -> crate::Result<Postgres, Self> { Ok(Self { status: match buf[0] { b'I' => TransactionStatus::Idle, b'T' => TransactionStatus::Transaction, b'E' => TransactionStatus::Error, status => { return Err(protocol_err!( "received {:?} for TransactionStatus in ReadyForQuery", status ) .into()); } }, }) } } #[cfg(test)] mod tests { use super::{ReadyForQuery, TransactionStatus}; use matches::assert_matches; const READY_FOR_QUERY: &[u8] = b"E"; #[test] fn it_decodes_ready_for_query() { let message = ReadyForQuery::read(READY_FOR_QUERY).unwrap(); assert_matches!(message.status, TransactionStatus::Error); } }
25.303571
87
0.557516
72d41938e758ec4612ab5961d7e84827026d2288
234
// Generated by `scripts/generate.js` pub type VkRayTracingShaderGroupType = super::super::khr::VkRayTracingShaderGroupType; #[doc(hidden)] pub type RawVkRayTracingShaderGroupType = super::super::khr::RawVkRayTracingShaderGroupType;
39
92
0.816239
48c86c260816f9536dc5bfad76dba9455f9c442f
10,184
// Generated from definition io.k8s.api.core.v1.SecurityContext /// SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence. #[derive(Clone, Debug, Default, PartialEq)] pub struct SecurityContext { /// AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN pub allow_privilege_escalation: Option<bool>, /// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. pub capabilities: Option<crate::api::core::v1::Capabilities>, /// Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. pub privileged: Option<bool>, /// Whether this container has a read-only root filesystem. Default is false. pub read_only_root_filesystem: Option<bool>, /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. pub run_as_group: Option<i64>, /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. pub run_as_non_root: Option<bool>, /// The UID to run the entrypoint of the container process. 
Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. pub run_as_user: Option<i64>, /// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. pub se_linux_options: Option<crate::api::core::v1::SELinuxOptions>, } impl<'de> crate::serde::Deserialize<'de> for SecurityContext { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_allow_privilege_escalation, Key_capabilities, Key_privileged, Key_read_only_root_filesystem, Key_run_as_group, Key_run_as_non_root, Key_run_as_user, Key_se_linux_options, Other, } impl<'de> crate::serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error { Ok(match v { "allowPrivilegeEscalation" => Field::Key_allow_privilege_escalation, "capabilities" => Field::Key_capabilities, "privileged" => Field::Key_privileged, "readOnlyRootFilesystem" => Field::Key_read_only_root_filesystem, "runAsGroup" => Field::Key_run_as_group, "runAsNonRoot" => Field::Key_run_as_non_root, "runAsUser" => Field::Key_run_as_user, "seLinuxOptions" => Field::Key_se_linux_options, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> crate::serde::de::Visitor<'de> 
for Visitor { type Value = SecurityContext; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("SecurityContext") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> { let mut value_allow_privilege_escalation: Option<bool> = None; let mut value_capabilities: Option<crate::api::core::v1::Capabilities> = None; let mut value_privileged: Option<bool> = None; let mut value_read_only_root_filesystem: Option<bool> = None; let mut value_run_as_group: Option<i64> = None; let mut value_run_as_non_root: Option<bool> = None; let mut value_run_as_user: Option<i64> = None; let mut value_se_linux_options: Option<crate::api::core::v1::SELinuxOptions> = None; while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? { match key { Field::Key_allow_privilege_escalation => value_allow_privilege_escalation = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_capabilities => value_capabilities = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_privileged => value_privileged = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_read_only_root_filesystem => value_read_only_root_filesystem = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_run_as_group => value_run_as_group = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_run_as_non_root => value_run_as_non_root = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_run_as_user => value_run_as_user = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_se_linux_options => value_se_linux_options = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(SecurityContext { allow_privilege_escalation: value_allow_privilege_escalation, capabilities: value_capabilities, privileged: value_privileged, read_only_root_filesystem: 
value_read_only_root_filesystem, run_as_group: value_run_as_group, run_as_non_root: value_run_as_non_root, run_as_user: value_run_as_user, se_linux_options: value_se_linux_options, }) } } deserializer.deserialize_struct( "SecurityContext", &[ "allowPrivilegeEscalation", "capabilities", "privileged", "readOnlyRootFilesystem", "runAsGroup", "runAsNonRoot", "runAsUser", "seLinuxOptions", ], Visitor, ) } } impl crate::serde::Serialize for SecurityContext { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer { let mut state = serializer.serialize_struct( "SecurityContext", self.allow_privilege_escalation.as_ref().map_or(0, |_| 1) + self.capabilities.as_ref().map_or(0, |_| 1) + self.privileged.as_ref().map_or(0, |_| 1) + self.read_only_root_filesystem.as_ref().map_or(0, |_| 1) + self.run_as_group.as_ref().map_or(0, |_| 1) + self.run_as_non_root.as_ref().map_or(0, |_| 1) + self.run_as_user.as_ref().map_or(0, |_| 1) + self.se_linux_options.as_ref().map_or(0, |_| 1), )?; if let Some(value) = &self.allow_privilege_escalation { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "allowPrivilegeEscalation", value)?; } if let Some(value) = &self.capabilities { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "capabilities", value)?; } if let Some(value) = &self.privileged { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "privileged", value)?; } if let Some(value) = &self.read_only_root_filesystem { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "readOnlyRootFilesystem", value)?; } if let Some(value) = &self.run_as_group { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "runAsGroup", value)?; } if let Some(value) = &self.run_as_non_root { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "runAsNonRoot", value)?; } if let Some(value) = &self.run_as_user { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "runAsUser", value)?; } if 
let Some(value) = &self.se_linux_options { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "seLinuxOptions", value)?; } crate::serde::ser::SerializeStruct::end(state) } }
56.893855
421
0.618421
fb725dc19581e1da64fabba8475844dcfe108725
34
// fn write() {} // fn read() {}
8.5
16
0.382353
7aa175fd19ad69c0c00f5eab1a62c4021cb72055
1,250
//! Peripheral access API for STM32F7 microcontrollers //! (generated using [svd2rust](https://github.com/rust-embedded/svd2rust) //! 0.14.0) //! //! You can find an overview of the API here: //! [svd2rust/#peripheral-api](https://docs.rs/svd2rust/0.14.0/svd2rust/#peripheral-api) //! //! For more details see the README here: //! [stm32-rs](https://github.com/stm32-rs/stm32-rs) //! //! This crate supports all STM32F7 devices; for the complete list please //! see: //! [stm32f7](https://github.com/stm32-rs/stm32-rs/tree/master/stm32f7) //! //! Due to doc build limitations, not all devices may be shown on docs.rs; //! a representative few have been selected instead. For a complete list of //! available registers and fields see: [stm32-rs Device Coverage](https://stm32.agg.io/rs) #![allow(non_camel_case_types)] #![no_std] extern crate vcell; extern crate bare_metal; extern crate cortex_m; #[cfg(feature = "rt")] extern crate cortex_m_rt; #[cfg(feature = "stm32f7x2")] pub mod stm32f7x2; #[cfg(feature = "stm32f7x3")] pub mod stm32f7x3; #[cfg(feature = "stm32f7x5")] pub mod stm32f7x5; #[cfg(feature = "stm32f7x6")] pub mod stm32f7x6; #[cfg(feature = "stm32f7x7")] pub mod stm32f7x7; #[cfg(feature = "stm32f7x9")] pub mod stm32f7x9;
26.595745
91
0.7096
38741a1d4f8bd99df5a3c4a001ea9b14e0d0322f
8,995
/// this module manages reading and translating /// the arguments passed on launch of the application. use { crate::{ app::{App, AppContext}, conf::Conf, display, errors::{ProgramError, TreeBuildError}, launchable::Launchable, shell_install::{ShellInstall, ShellInstallState}, tree::TreeOptions, verb::VerbStore, }, clap::{self, ArgMatches}, crossterm::{ self, cursor, event::{DisableMouseCapture, EnableMouseCapture}, terminal::{EnterAlternateScreen, LeaveAlternateScreen}, QueueableCommand, }, std::{ env, io::{self, Write}, path::{Path, PathBuf}, str::FromStr, }, }; /// launch arguments related to installation /// (not used by the application after the first step) struct InstallLaunchArgs { install: Option<bool>, // installation is required set_install_state: Option<ShellInstallState>, // the state to set print_shell_function: Option<String>, // shell function to print on stdout } impl InstallLaunchArgs { fn from(cli_args: &ArgMatches<'_>) -> Result<Self, ProgramError> { let mut install = None; if let Ok(s) = env::var("BR_INSTALL") { if s == "yes" { install = Some(true); } else if s == "no" { install = Some(false); } else { warn!("Unexpected value of BR_INSTALL: {:?}", s); } } // the cli arguments may override the env var value if cli_args.is_present("install") { install = Some(true); } else if cli_args.value_of("cmd-export-path").is_some() { install = Some(false); } let print_shell_function = cli_args .value_of("print-shell-function") .map(str::to_string); let set_install_state = cli_args .value_of("set-install-state") .map(ShellInstallState::from_str) .transpose()?; Ok(Self { install, set_install_state, print_shell_function, }) } } /// the parsed program launch arguments which are kept for the /// life of the program pub struct AppLaunchArgs { pub root: PathBuf, // what should be the initial root pub file_export_path: Option<String>, // where to write the produced path (if required with --out) pub cmd_export_path: Option<String>, // where to write the produced 
command (if required with --outcmd) pub tree_options: TreeOptions, // initial tree options pub commands: Option<String>, // commands passed as cli argument, still unparsed pub height: Option<u16>, // an optional height to replace the screen's one pub no_style: bool, // whether to remove all styles (including colors) #[cfg(feature="client-server")] pub listen: Option<String>, } #[cfg(not(windows))] fn canonicalize_root(root: &Path) -> io::Result<PathBuf> { root.canonicalize() } #[cfg(windows)] fn canonicalize_root(root: &Path) -> io::Result<PathBuf> { Ok(if root.is_relative() { env::current_dir()?.join(root) } else { root.to_path_buf() }) } fn get_root_path(cli_args: &ArgMatches<'_>) -> Result<PathBuf, ProgramError> { let mut root = cli_args .value_of("ROOT") .map_or(env::current_dir()?, PathBuf::from); if !root.exists() { return Err(TreeBuildError::FileNotFound { path: format!("{:?}", &root), }.into()); } if !root.is_dir() { // we try to open the parent directory if the passed file isn't one if let Some(parent) = root.parent() { info!("Passed path isn't a directory => opening parent instead"); root = parent.to_path_buf(); } else { // let's give up return Err(TreeBuildError::NotADirectory { path: format!("{:?}", &root), }.into()); } } Ok(canonicalize_root(&root)?) 
} /// run the application, and maybe return a launchable /// which must be run after broot pub fn run() -> Result<Option<Launchable>, ProgramError> { let clap_app = crate::clap::clap_app(); // parse the launch arguments we got from cli let cli_matches = clap_app.get_matches(); // read the install related arguments let install_args = InstallLaunchArgs::from(&cli_matches)?; // execute installation things required by launch args let mut must_quit = false; if let Some(state) = install_args.set_install_state { state.write_file()?; must_quit = true; } if let Some(shell) = &install_args.print_shell_function { ShellInstall::print(shell)?; must_quit = true; } if must_quit { return Ok(None); } // read the list of specific config files let specific_conf: Option<Vec<PathBuf>> = cli_matches .value_of("conf") .map(|s| s.split(';').map(PathBuf::from).collect()); // if we don't run on a specific config file, we check the // configuration if specific_conf.is_none() && install_args.install != Some(false) { let mut shell_install = ShellInstall::new(install_args.install == Some(true)); shell_install.check()?; if shell_install.should_quit { return Ok(None); } } // read the configuration file(s): either the standard one // or the ones required by the launch args let mut config = match &specific_conf { Some(conf_paths) => { let mut conf = Conf::default(); for path in conf_paths { conf.read_file(path)?; } conf } _ => Conf::from_default_location()?, }; // tree options are built from the default_flags // found in the config file(s) (if any) then overriden // by the cli args let mut tree_options = TreeOptions::default(); if !config.default_flags.is_empty() { let clap_app = crate::clap::clap_app().setting(clap::AppSettings::NoBinaryName); let flags_args = format!("-{}", &config.default_flags); let conf_matches = clap_app.get_matches_from(vec![&flags_args]); tree_options.apply(&conf_matches); } tree_options.apply(&cli_matches); if let Some(format) = &config.date_time_format { 
tree_options.set_date_time_format(format.clone()); } // verb store is completed from the config file(s) let mut verb_store = VerbStore::default(); verb_store.init(&mut config); // reading the other arguments let file_export_path = cli_matches.value_of("file-export-path").map(str::to_string); let cmd_export_path = cli_matches.value_of("cmd-export-path").map(str::to_string); let commands = cli_matches.value_of("commands").map(str::to_string); let no_style = cli_matches.is_present("no-style"); let height = cli_matches.value_of("height").and_then(|s| s.parse().ok()); let root = get_root_path(&cli_matches)?; #[cfg(feature="client-server")] if let Some(server_name) = cli_matches.value_of("send") { use crate::{ command::Sequence, net::{Client, Message}, }; let client = Client::new(server_name); if let Some(seq) = &commands { let message = Message::Sequence(Sequence::new_local(seq.to_string())); client.send(&message)?; } else if !cli_matches.is_present("get-root") { let message = Message::Command(format!(":focus {}", root.to_string_lossy())); client.send(&message)?; }; if cli_matches.is_present("get-root") { client.send(&Message::GetRoot)?; } return Ok(None); } let launch_args = AppLaunchArgs { root, file_export_path, cmd_export_path, tree_options, commands, height, no_style, #[cfg(feature="client-server")] listen: cli_matches.value_of("listen").map(str::to_string), }; let context = AppContext::from(launch_args, verb_store, &config); let mut w = display::writer(); let app = App::new(&context)?; w.queue(EnterAlternateScreen)?; w.queue(cursor::DisableBlinking)?; w.queue(cursor::Hide)?; if !config.disable_mouse_capture { w.queue(EnableMouseCapture)?; } let r = app.run(&mut w, &context, &config); if !config.disable_mouse_capture { w.queue(DisableMouseCapture)?; } w.queue(cursor::Show)?; w.queue(cursor::EnableBlinking)?; w.queue(LeaveAlternateScreen)?; w.flush()?; r } /// wait for user input, return `true` if they didn't answer 'n' pub fn ask_authorization() -> Result<bool, 
ProgramError> { let mut answer = String::new(); io::stdin().read_line(&mut answer)?; let answer = answer.trim(); Ok(!matches!(answer, "n" | "N")) }
34.07197
108
0.601334
72b1895748f42179af0f19fa536bf883898f136c
3,268
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use crate::root_variables::InferVariablesVisitor; use common::NamedItem; use graphql_ir::{ FragmentDefinition, OperationDefinition, Program, ValidationError, ValidationMessage, ValidationResult, Validator, }; use interner::{Intern, StringKey}; pub fn validate_unused_variables(program: &Program) -> ValidationResult<()> { ValidateUnusedVariables::new(program).validate_program(program) } pub struct ValidateUnusedVariables<'program> { visitor: InferVariablesVisitor<'program>, ignore_directive_name: StringKey, } impl<'program> ValidateUnusedVariables<'program> { fn new(program: &'program Program) -> Self { Self { visitor: InferVariablesVisitor::new(program), ignore_directive_name: "DEPRECATED__relay_ignore_unused_variables_error".intern(), } } } /// Validates that there are no unused variables in the operation. /// former `graphql-js`` NoUnusedVariablesRule impl Validator for ValidateUnusedVariables<'_> { const NAME: &'static str = "ValidateUnusedVariables"; const VALIDATE_ARGUMENTS: bool = false; const VALIDATE_DIRECTIVES: bool = false; fn validate_operation(&mut self, operation: &OperationDefinition) -> ValidationResult<()> { let variables = self.visitor.infer_operation_variables(operation); let unused_variables: Vec<_> = operation .variable_definitions .iter() .filter(|var| !variables.contains_key(&var.name.item)) .collect(); let ignore_directive = operation.directives.named(self.ignore_directive_name); if !unused_variables.is_empty() && ignore_directive.is_none() { let is_plural = unused_variables.len() > 1; return Err(vec![ValidationError::new( ValidationMessage::UnusedVariables { operation_name: operation.name.item, variables_string: format!( "Variable{} '${}' {}", if is_plural { "s" } else { "" }, unused_variables .iter() .map(|var| var.name.item.lookup()) .collect::<Vec<_>>() 
.join("', '$"), if is_plural { "are" } else { "is" }, ), }, unused_variables .into_iter() .map(|var| var.name.location) .collect(), )]); } if unused_variables.is_empty() { if let Some(directive) = ignore_directive { return Err(vec![ValidationError::new( ValidationMessage::UnusedIgnoreUnusedVariablesDirective { operation_name: operation.name.item, }, vec![directive.name.location], )]); } } Ok(()) } fn validate_fragment(&mut self, _: &FragmentDefinition) -> ValidationResult<()> { Ok(()) } }
36.719101
95
0.578335
fcf978bbff9c0941c9d0557705dc5a8cb8a78ad3
4,900
#![forbid(unsafe_code)] // #![warn(clippy::cargo)] #![deny(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] mod app; mod components; mod keys; mod poll; mod queue; mod spinner; mod strings; mod tabs; mod ui; mod version; use crate::{app::App, poll::QueueEvent}; use asyncgit::AsyncNotification; use backtrace::Backtrace; use crossbeam_channel::{tick, unbounded, Receiver, Select}; use crossterm::{ terminal::{ disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen, }, ExecutableCommand, Result, }; use io::Write; use log::error; use scopeguard::defer; use scopetime::scope_time; use simplelog::{Config, LevelFilter, WriteLogger}; use spinner::Spinner; use std::{ env, fs, fs::File, io, panic, time::{Duration, Instant}, }; use tui::{ backend::{Backend, CrosstermBackend}, Terminal, }; static TICK_INTERVAL: Duration = Duration::from_secs(5); static SPINNER_INTERVAL: Duration = Duration::from_millis(50); fn main() -> Result<()> { setup_logging(); if invalid_path() { eprintln!("invalid git path"); return Ok(()); } enable_raw_mode()?; io::stdout().execute(EnterAlternateScreen)?; defer! 
{ io::stdout().execute(LeaveAlternateScreen).unwrap(); disable_raw_mode().unwrap(); } set_panic_handlers(); let mut terminal = start_terminal(io::stdout())?; let (tx_git, rx_git) = unbounded(); let mut app = App::new(&tx_git); let rx_input = poll::start_polling_thread(); let ticker = tick(TICK_INTERVAL); let spinner_ticker = tick(SPINNER_INTERVAL); app.update(); draw(&mut terminal, &mut app)?; let mut spinner = Spinner::default(); loop { let events: Vec<QueueEvent> = select_event( &rx_input, &rx_git, &ticker, &spinner_ticker, ); { scope_time!("loop"); let mut needs_draw = true; for e in events { match e { QueueEvent::InputEvent(ev) => app.event(ev), QueueEvent::Tick => app.update(), QueueEvent::GitEvent(ev) => app.update_git(ev), QueueEvent::SpinnerUpdate => { needs_draw = false; spinner.update() } } } if needs_draw { draw(&mut terminal, &mut app)?; } spinner.draw(&mut terminal, app.any_work_pending())?; if app.is_quit() { break; } } } Ok(()) } fn draw<B: Backend>( terminal: &mut Terminal<B>, app: &mut App, ) -> io::Result<()> { terminal.draw(|mut f| app.draw(&mut f)) } fn invalid_path() -> bool { !asyncgit::is_repo(asyncgit::CWD) } fn select_event( rx_input: &Receiver<Vec<QueueEvent>>, rx_git: &Receiver<AsyncNotification>, rx_ticker: &Receiver<Instant>, rx_spinner: &Receiver<Instant>, ) -> Vec<QueueEvent> { let mut events: Vec<QueueEvent> = Vec::new(); let mut sel = Select::new(); sel.recv(rx_input); sel.recv(rx_git); sel.recv(rx_ticker); sel.recv(rx_spinner); let oper = sel.select(); let index = oper.index(); match index { 0 => oper.recv(rx_input).map(|inputs| events.extend(inputs)), 1 => oper .recv(rx_git) .map(|ev| events.push(QueueEvent::GitEvent(ev))), 2 => oper .recv(rx_ticker) .map(|_| events.push(QueueEvent::Tick)), 3 => oper .recv(rx_spinner) .map(|_| events.push(QueueEvent::SpinnerUpdate)), _ => panic!("unknown select source"), } .unwrap(); events } fn start_terminal<W: Write>( buf: W, ) -> io::Result<Terminal<CrosstermBackend<W>>> { let backend = 
CrosstermBackend::new(buf); let mut terminal = Terminal::new(backend)?; terminal.hide_cursor()?; terminal.clear()?; Ok(terminal) } fn setup_logging() { if env::var("GITUI_LOGGING").is_ok() { let mut path = dirs::cache_dir().unwrap(); path.push("gitui"); path.push("gitui.log"); fs::create_dir_all(path.parent().unwrap()).unwrap(); let _ = WriteLogger::init( LevelFilter::Trace, Config::default(), File::create(path).unwrap(), ); } } fn set_panic_handlers() { // regular panic handler panic::set_hook(Box::new(|e| { let backtrace = Backtrace::new(); error!("panic: {:?}\ntrace:\n{:?}", e, backtrace); })); // global threadpool rayon_core::ThreadPoolBuilder::new() .panic_handler(|e| { error!("thread panic: {:?}", e); panic!(e) }) .num_threads(4) .build_global() .unwrap(); }
23.333333
69
0.559592
f4632d03aebcfc2dfbb38496c6d775aac468a531
9,992
use crate::rule_prelude::*; use ast::*; use SyntaxKind::*; declare_lint! { /** Disallow for loops which update their counter in the wrong direction. A for loop with a counter may update its value in the wrong direction. that is to say, if i made a counter with a value of `0`, if the for statement checked if `counter < 10` and the update went `counter--`, that loop would be infinite. This is because `counter` will never be smaller than `10` because `counter--` always yields a value smaller than 10. A for loop which does this is almost always a bug because it is either unreachable or infinite. ## Incorrect Code Examples ```ignore for (var i = 0; i < 10; i--) { /* infinite loop */ } ``` ```ignore for (var i = 10; i >= 20; i++) { /* unreachable */ } ``` ## Correct Code Examples ```ignore for (var i = 0; i < 10; i++) { } ``` */ #[derive(Default)] ForDirection, errors, "for-direction" } #[typetag::serde] impl CstRule for ForDirection { fn check_node(&self, node: &SyntaxNode, ctx: &mut RuleCtx) -> Option<()> { if let Some(test) = node .try_to::<ForStmt>() .and_then(|f| f.test()) .and_then(|test| test.expr()) { let for_stmt = node.to::<ForStmt>(); if for_stmt.update().is_some() && test.syntax().try_to::<BinExpr>()?.lhs()?.syntax().kind() == NAME_REF { let test_bin = test.syntax().to::<BinExpr>(); if test_bin.rhs().is_none() || for_stmt.init().is_none() { return None; } let counter = test_bin.lhs().unwrap().syntax().to::<NameRef>(); let op = test_bin.op()?; let wrong_direction = if op == BinOp::LessThan || op == BinOp::LessThanOrEqual { -1 } else if op == BinOp::GreaterThan || op == BinOp::GreaterThanOrEqual { 1 } else { return None; }; if let Some(direction) = update_direction(&for_stmt, &counter) { if direction == wrong_direction { throw_err(for_stmt, &counter, ctx); } } } } None } } fn update_direction(for_stmt: &ForStmt, counter: &NameRef) -> Option<i8> { let update = for_stmt.update()?.syntax().first_child()?; match update.kind() { UNARY_EXPR => { let expr = 
update.to::<UnaryExpr>(); if expr.expr()?.syntax().try_to::<NameRef>()?.syntax().text() == counter.syntax().text() { let op = expr.op().unwrap(); Some(if op == UnaryOp::Increment { 1 } else { -1 }) } else { None } } ASSIGN_EXPR => assign_direction(update.to(), counter), _ => None, } } fn assign_direction(assign: AssignExpr, counter: &NameRef) -> Option<i8> { if assign.lhs()?.syntax().text() == counter.syntax().text() { match assign.op()? { AssignOp::AddAssign => maybe_negate_direction(assign.rhs()?, 1), AssignOp::SubtractAssign => maybe_negate_direction(assign.rhs()?, -1), _ => Some(0), } } else { None } } fn maybe_negate_direction(rhs: Expr, direction: i8) -> Option<i8> { Some(match rhs { Expr::UnaryExpr(unexpr) => { if unexpr.op()? == UnaryOp::Minus { -direction } else { direction } } Expr::NameRef(_) => 0, _ => direction, }) } // TODO: we can say if the loop is unreachable once we have number parsing fn throw_err(for_stmt: ForStmt, counter: &NameRef, ctx: &mut RuleCtx) { let bin = for_stmt .test() .unwrap() .syntax() .first_child() .unwrap() .to::<BinExpr>(); let lhs = bin.lhs().unwrap().syntax().trimmed_text(); let rhs = bin.rhs().unwrap().syntax().clone(); let op = bin.op().unwrap(); if let Some(lit) = rhs .try_to::<Literal>() .filter(|literal| literal.is_number()) { if try_offer_context(&for_stmt, counter, op, lit, ctx).is_some() { return; } } let err = ctx .err( "for-direction", "For loop is updating the counter in the wrong direction", ) .secondary( for_stmt.test().unwrap().range(), format!( "this test is checking if `{}` is {} `{}`...", lhs, lt_gt_name(op), rhs ), ) .primary( for_stmt.update().unwrap().range(), format!( "...but `{}` is updating in the same direction", for_stmt.update().unwrap().syntax().trimmed_text() ), ); ctx.add_err(err); } fn lt_gt_name(op: BinOp) -> &'static str { match op { BinOp::LessThan => "less than", BinOp::LessThanOrEqual => "less than or equal to", BinOp::GreaterThan => "greater than", BinOp::GreaterThanOrEqual => "greater 
than or equal to", _ => unreachable!(), } } /// try to offer even more context around the error if we know the initial numeric value of the counter fn try_offer_context( for_stmt: &ForStmt, counter: &NameRef, op: BinOp, checked_value: Literal, ctx: &mut RuleCtx, ) -> Option<()> { let init = for_stmt.init()?; let initial_value = match init.inner().unwrap() { ForHead::Decl(decl) => { let decl = decl.declared().find(|declarator| { declarator.pattern().map_or(false, |pat| { if let Pattern::SinglePattern(single) = pat { single.syntax().text() == counter.syntax().text() } else { false } }) })?; decl.value()? } ForHead::Expr(Expr::AssignExpr(assign)) => { assign.lhs().and_then(|lhs| { if let PatternOrExpr::Expr(Expr::NameRef(name)) = lhs { Some(name).filter(|name| name.syntax().text() == counter.syntax().text()) } else { None } })?; assign.rhs()? } _ => return None, }; let mut err = ctx.err( "for-direction", "For loop is updating the counter in the wrong direction", ); if let Some(LiteralKind::Number(num)) = initial_value .syntax() .try_to::<Literal>() .map(|lit| lit.kind()) { if is_initially_unreachable(num, checked_value.as_number().unwrap(), op) { err = err .secondary( init.syntax().trimmed_range(), format!( "{} is first declared as `{}`...", counter.syntax().text(), initial_value.syntax().text() ), ) .secondary( for_stmt.test().unwrap().range(), format!( "...which makes this test unreachable because `{}` is not {} `{}`...", initial_value.syntax().text(), lt_gt_name(op), checked_value.syntax().text() ), ) .primary( for_stmt.update().unwrap().range(), "...and this update will never make it true", ); } else { err = err .secondary( init.syntax(), format!( "{} is first declared as `{}`...", counter.syntax().text(), initial_value.syntax().text() ), ) .secondary( for_stmt.test().unwrap().range(), format!( "...which makes this test always true because `{}` is always {} `{}`...", initial_value.syntax().text(), lt_gt_name(op), checked_value.syntax().text() ), ) .primary( 
for_stmt.update().unwrap().range(), "...and this update will never make the condition false", ); } ctx.add_err(err); return Some(()); } None } fn is_initially_unreachable(initial_value: f64, checked_value: f64, op: BinOp) -> bool { match op { BinOp::LessThan => initial_value >= checked_value, BinOp::LessThanOrEqual => initial_value > checked_value, BinOp::GreaterThan => initial_value <= checked_value, BinOp::GreaterThanOrEqual => initial_value < checked_value, _ => unreachable!(), } } rule_tests! { ForDirection::default(), err: { "for (var i = 0; i < 10; i--) {}", "for(let i = 0; i < 2; i--) {}", "for(let i = 0; i <= 2; i += -1) {}", "for(let i = 2; i >= 0; i -= -1) {}", "for(let i = 0; i < 2; i -= 1) {}", "for(let i = 2; i > 2; i++) {}", "for(let i = 2; i > 2; i += 1) {}", "for(let i = 5n; i < 2; i--) {}" }, ok: { "for (var i = 0; i < 10; i++) {}", "for(let i = 2; i > 2; i -= 1) {}", "for(let i = 2; i >= 0; i -= 1) {}", "for(let i = 2; i > 2; i += -1) {}", "for(let i = 2; i >= 0; i += -1) {}", "for(let i = 0; i < 3;) {}", "for(let i = 5; i < 2; i |= 2) {}", "for(let i = 5n; i < 2n; i &= 2) {}" } }
31.720635
117
0.458867
1cebfaa3be992321bf5da4e1c5696ebb90f35fe4
4,373
use super::id_generation::TicketId; use super::recap::Status; /// `chrono` is the go-to crate in the Rust ecosystem when working with time. /// `DateTime` deals with timezone-aware datetimes - it takes the timezone as a type parameter. /// `DateTime<Utc>` is the type for datetimes expressed in the coordinated universal time. /// See: /// - https://en.wikipedia.org/wiki/Coordinated_Universal_Time /// - https://docs.rs/chrono/0.4.11/chrono/ use chrono::{DateTime, Utc}; use std::collections::HashMap; struct TicketStore { data: HashMap<TicketId, Ticket>, current_id: TicketId, } /// When we retrieve a ticket we saved, we'd like to receive with it a bunch of metadata: /// - the generated id; /// - the datetime of its creation. /// /// Make the necessary changes without touching the types of the inputs and the returned /// objects in our methods! /// You can make inputs mutable, if needed. impl TicketStore { pub fn new() -> TicketStore { TicketStore { data: HashMap::new(), current_id: 0, } } pub fn save(&mut self, mut ticket: Ticket) -> TicketId { let id = self.generate_id(); ticket.id = id; ticket.created_at = Some(Utc::now()); self.data.insert(id, ticket); id } pub fn get(&self, id: &TicketId) -> Option<&Ticket> { self.data.get(id) } fn generate_id(&mut self) -> TicketId { self.current_id += 1; self.current_id } } #[derive(Debug, Clone, PartialEq)] pub struct Ticket { title: String, description: String, status: Status, created_at: Option<DateTime<Utc>>, id: TicketId, } impl Ticket { pub fn title(&self) -> &String { &self.title } pub fn description(&self) -> &String { &self.description } pub fn status(&self) -> &Status { &self.status } // The datetime when the ticket was saved in the store, if it was saved. pub fn created_at(&self) -> Option<DateTime<Utc>> { self.created_at } // The id associated with the ticket when it was saved in the store, if it was saved. 
pub fn id(&self) -> Option<&TicketId> { if self.id == (0 as u32) { return None; } Some(&self.id) } } pub fn create_ticket(title: String, description: String, status: Status) -> Ticket { if title.is_empty() { panic!("Title cannot be empty!"); } if title.len() > 50 { panic!("A title cannot be longer than 50 characters!"); } if description.len() > 3000 { panic!("A description cannot be longer than 3000 characters!"); } Ticket { title, description, status, id: 0, created_at: None, } } #[cfg(test)] mod tests { use super::*; use fake::{Fake, Faker}; #[test] fn ticket_creation() { // Not saved to store, so no id, no created_at. let ticket = generate_ticket(Status::ToDo); assert!(ticket.id().is_none()); assert!(ticket.created_at().is_none()); } #[test] fn a_ticket_with_a_home() { let ticket = generate_ticket(Status::ToDo); let mut store = TicketStore::new(); let ticket_id = store.save(ticket.clone()); let retrieved_ticket = store.get(&ticket_id).unwrap(); assert_eq!(Some(&ticket_id), retrieved_ticket.id()); assert_eq!(&ticket.title, retrieved_ticket.title()); assert_eq!(&ticket.description, retrieved_ticket.description()); assert_eq!(&ticket.status, retrieved_ticket.status()); assert!(retrieved_ticket.created_at().is_some()); } #[test] fn a_missing_ticket() { let ticket_store = TicketStore::new(); let ticket_id = Faker.fake(); assert_eq!(ticket_store.get(&ticket_id), None); } #[test] fn id_generation_is_monotonic() { let n_tickets = 100; let mut store = TicketStore::new(); for expected_id in 1..n_tickets { let ticket = generate_ticket(Status::ToDo); let ticket_id = store.save(ticket); assert_eq!(expected_id, ticket_id); } } fn generate_ticket(status: Status) -> Ticket { let description = (0..3000).fake(); let title = (1..50).fake(); create_ticket(title, description, status) } }
26.993827
95
0.601418
e59b97922bea131e931791cbff303f0bdd21c185
1,090
#![feature(min_const_generics)] use std::mem::transmute; fn get_flag<const FlagSet: bool, const ShortName: char>() -> Option<char> { if FlagSet { Some(ShortName) } else { None } } union CharRaw { byte: u8, character: char, } union BoolRaw { byte: u8, boolean: bool, } const char_raw: CharRaw = CharRaw { byte: 0xFF }; const bool_raw: BoolRaw = BoolRaw { byte: 0x42 }; fn main() { // Test that basic cases don't work assert!(get_flag::<true, 'c'>().is_some()); assert!(get_flag::<false, 'x'>().is_none()); get_flag::<false, 0xFF>(); //~^ ERROR mismatched types get_flag::<7, 'c'>(); //~^ ERROR mismatched types get_flag::<42, 0x5ad>(); //~^ ERROR mismatched types //~| ERROR mismatched types get_flag::<false, { unsafe { char_raw.character } }>(); //~^ ERROR it is undefined behavior get_flag::<{ unsafe { bool_raw.boolean } }, 'z'>(); //~^ ERROR it is undefined behavior get_flag::<{ unsafe { bool_raw.boolean } }, { unsafe { char_raw.character } }>(); //~^ ERROR it is undefined behavior //~| ERROR it is undefined behavior }
23.695652
83
0.629358
e4d8ddc3c7d25cd19061972627a214820b9f259f
572
#![cfg(feature = "lightsail")] extern crate rusoto_core; extern crate rusoto_lightsail; use rusoto_lightsail::{Lightsail, LightsailClient, GetDomainsRequest}; use rusoto_core::{DefaultCredentialsProvider, Region, default_tls_client}; #[test] fn should_list_domains() { let credentials = DefaultCredentialsProvider::new().unwrap(); let client = LightsailClient::new(default_tls_client().unwrap(), credentials, Region::UsEast1); let request = GetDomainsRequest::default(); let result = client.get_domains(&request).unwrap(); println!("{:#?}", result); }
33.647059
99
0.746503
56b9173c5f2ca515484281186e15b8ed94a34b13
1,517
//! Resets authority on an auction account. use crate::{ errors::AuctionError, processor::{AuctionData, BASE_AUCTION_DATA_SIZE}, utils::assert_owned_by, PREFIX, }; use { borsh::{BorshDeserialize, BorshSerialize}, solana_program::{ account_info::{next_account_info, AccountInfo}, entrypoint::ProgramResult, msg, pubkey::Pubkey, }, }; pub fn set_authority(program_id: &Pubkey, accounts: &[AccountInfo]) -> ProgramResult { msg!("+ Processing SetAuthority"); let account_iter = &mut accounts.iter(); let auction_act = next_account_info(account_iter)?; let current_authority = next_account_info(account_iter)?; let new_authority = next_account_info(account_iter)?; let mut auction = AuctionData::from_account_info(auction_act)?; assert_owned_by(auction_act, program_id)?; if auction.authority != *current_authority.key { return Err(AuctionError::InvalidAuthority.into()); } if !current_authority.is_signer { return Err(AuctionError::InvalidAuthority.into()); } // Make sure new authority actually exists in some form. if new_authority.data_is_empty() || new_authority.lamports() == 0 { msg!("Disallowing new authority because it does not exist."); return Err(AuctionError::InvalidAuthority.into()); } auction.authority = *new_authority.key; auction.serialize(&mut *auction_act.data.borrow_mut())?; Ok(()) }
31.604167
87
0.666447
0ade8d7a75a44467e67739b3a3b472232a78358f
10,078
//! The `Poh` module provides an object for generating a Proof of History. use log::*; use solana_sdk::hash::{hash, hashv, Hash}; use std::time::{Duration, Instant}; pub struct Poh { pub hash: Hash, num_hashes: u64, hashes_per_tick: u64, remaining_hashes: u64, ticks_per_slot: u64, tick_number: u64, slot_start_time: Instant, } #[derive(Debug)] pub struct PohEntry { pub num_hashes: u64, pub hash: Hash, } impl Poh { pub fn new(hash: Hash, hashes_per_tick: Option<u64>) -> Self { Self::new_with_slot_info(hash, hashes_per_tick, 0, 0) } pub fn new_with_slot_info( hash: Hash, hashes_per_tick: Option<u64>, ticks_per_slot: u64, tick_number: u64, ) -> Self { let hashes_per_tick = hashes_per_tick.unwrap_or(std::u64::MAX); assert!(hashes_per_tick > 1); let now = Instant::now(); Poh { hash, num_hashes: 0, hashes_per_tick, remaining_hashes: hashes_per_tick, ticks_per_slot, tick_number, slot_start_time: now, } } pub fn reset(&mut self, hash: Hash, hashes_per_tick: Option<u64>) { // retains ticks_per_slot: this cannot change without restarting the validator let tick_number = 0; let mut poh = Poh::new_with_slot_info(hash, hashes_per_tick, self.ticks_per_slot, tick_number); std::mem::swap(&mut poh, self); } pub fn target_poh_time(&self, target_ns_per_tick: u64) -> Instant { assert!(self.hashes_per_tick > 0); let offset_tick_ns = target_ns_per_tick * self.tick_number; let offset_ns = target_ns_per_tick * self.num_hashes / self.hashes_per_tick; self.slot_start_time + Duration::from_nanos(offset_ns + offset_tick_ns) } pub fn hash(&mut self, max_num_hashes: u64) -> bool { let num_hashes = std::cmp::min(self.remaining_hashes - 1, max_num_hashes); for _ in 0..num_hashes { self.hash = hash(&self.hash.as_ref()); } self.num_hashes += num_hashes; self.remaining_hashes -= num_hashes; assert!(self.remaining_hashes > 0); self.remaining_hashes == 1 // Return `true` if caller needs to `tick()` next } pub fn record(&mut self, mixin: Hash) -> Option<PohEntry> { if self.remaining_hashes == 1 { return 
None; // Caller needs to `tick()` first } self.hash = hashv(&[&self.hash.as_ref(), &mixin.as_ref()]); let num_hashes = self.num_hashes + 1; self.num_hashes = 0; self.remaining_hashes -= 1; Some(PohEntry { num_hashes, hash: self.hash, }) } pub fn tick(&mut self) -> Option<PohEntry> { self.hash = hash(&self.hash.as_ref()); self.num_hashes += 1; self.remaining_hashes -= 1; // If the hashes_per_tick is variable (std::u64::MAX) then always generate a tick. // Otherwise only tick if there are no remaining hashes if self.hashes_per_tick < std::u64::MAX && self.remaining_hashes != 0 { return None; } let num_hashes = self.num_hashes; self.remaining_hashes = self.hashes_per_tick; self.num_hashes = 0; self.tick_number += 1; Some(PohEntry { num_hashes, hash: self.hash, }) } } pub fn compute_hash_time_ns(hashes_sample_size: u64) -> u64 { info!("Running {} hashes...", hashes_sample_size); let mut v = Hash::default(); let start = Instant::now(); for _ in 0..hashes_sample_size { v = hash(&v.as_ref()); } start.elapsed().as_nanos() as u64 } pub fn compute_hashes_per_tick(duration: Duration, hashes_sample_size: u64) -> u64 { let elapsed = compute_hash_time_ns(hashes_sample_size) / (1000 * 1000); duration.as_millis() as u64 * hashes_sample_size / elapsed } #[cfg(test)] mod tests { use crate::poh::{Poh, PohEntry}; use matches::assert_matches; use solana_sdk::hash::{hash, hashv, Hash}; use std::time::Duration; fn verify(initial_hash: Hash, entries: &[(PohEntry, Option<Hash>)]) -> bool { let mut current_hash = initial_hash; for (entry, mixin) in entries { assert_ne!(entry.num_hashes, 0); for _ in 1..entry.num_hashes { current_hash = hash(&current_hash.as_ref()); } current_hash = match mixin { Some(mixin) => hashv(&[&current_hash.as_ref(), &mixin.as_ref()]), None => hash(&current_hash.as_ref()), }; if current_hash != entry.hash { return false; } } true } #[test] fn test_target_poh_time() { let zero = Hash::default(); for target_ns_per_tick in 10..12 { let mut poh = Poh::new(zero, None); 
assert_eq!(poh.target_poh_time(target_ns_per_tick), poh.slot_start_time); poh.tick_number = 2; assert_eq!( poh.target_poh_time(target_ns_per_tick), poh.slot_start_time + Duration::from_nanos(target_ns_per_tick * 2) ); let mut poh = Poh::new(zero, Some(5)); assert_eq!(poh.target_poh_time(target_ns_per_tick), poh.slot_start_time); poh.tick_number = 2; assert_eq!( poh.target_poh_time(target_ns_per_tick), poh.slot_start_time + Duration::from_nanos(target_ns_per_tick * 2) ); poh.num_hashes = 3; assert_eq!( poh.target_poh_time(target_ns_per_tick), poh.slot_start_time + Duration::from_nanos(target_ns_per_tick * 2 + target_ns_per_tick * 3 / 5) ); } } #[test] #[should_panic(expected = "assertion failed: hashes_per_tick > 1")] fn test_target_poh_time_hashes_per_tick() { let zero = Hash::default(); let poh = Poh::new(zero, Some(0)); let target_ns_per_tick = 10; poh.target_poh_time(target_ns_per_tick); } #[test] fn test_poh_verify() { let zero = Hash::default(); let one = hash(&zero.as_ref()); let two = hash(&one.as_ref()); let one_with_zero = hashv(&[&zero.as_ref(), &zero.as_ref()]); let mut poh = Poh::new(zero, None); assert!(verify( zero, &[ (poh.tick().unwrap(), None), (poh.record(zero).unwrap(), Some(zero)), (poh.record(zero).unwrap(), Some(zero)), (poh.tick().unwrap(), None), ], )); assert!(verify( zero, &[( PohEntry { num_hashes: 1, hash: one, }, None )], )); assert!(verify( zero, &[( PohEntry { num_hashes: 2, hash: two, }, None )] )); assert!(verify( zero, &[( PohEntry { num_hashes: 1, hash: one_with_zero, }, Some(zero) )] )); assert!(!verify( zero, &[( PohEntry { num_hashes: 1, hash: zero, }, None )] )); assert!(verify( zero, &[ ( PohEntry { num_hashes: 1, hash: one_with_zero, }, Some(zero) ), ( PohEntry { num_hashes: 1, hash: hash(&one_with_zero.as_ref()), }, None ) ] )); } #[test] #[should_panic] fn test_poh_verify_assert() { verify( Hash::default(), &[( PohEntry { num_hashes: 0, hash: Hash::default(), }, None, )], ); } #[test] fn test_poh_tick() { let mut poh = 
Poh::new(Hash::default(), Some(2)); assert_eq!(poh.remaining_hashes, 2); assert!(poh.tick().is_none()); assert_eq!(poh.remaining_hashes, 1); assert_matches!(poh.tick(), Some(PohEntry { num_hashes: 2, .. })); assert_eq!(poh.remaining_hashes, 2); // Ready for the next tick } #[test] fn test_poh_tick_large_batch() { let mut poh = Poh::new(Hash::default(), Some(2)); assert_eq!(poh.remaining_hashes, 2); assert!(poh.hash(1_000_000)); // Stop hashing before the next tick assert_eq!(poh.remaining_hashes, 1); assert!(poh.hash(1_000_000)); // Does nothing... assert_eq!(poh.remaining_hashes, 1); poh.tick(); assert_eq!(poh.remaining_hashes, 2); // Ready for the next tick } #[test] fn test_poh_tick_too_soon() { let mut poh = Poh::new(Hash::default(), Some(2)); assert_eq!(poh.remaining_hashes, 2); assert!(poh.tick().is_none()); } #[test] fn test_poh_record_not_permitted_at_final_hash() { let mut poh = Poh::new(Hash::default(), Some(10)); assert!(poh.hash(9)); assert_eq!(poh.remaining_hashes, 1); assert!(poh.record(Hash::default()).is_none()); // <-- record() rejected to avoid exceeding hashes_per_tick assert_matches!(poh.tick(), Some(PohEntry { num_hashes: 10, .. })); assert_matches!( poh.record(Hash::default()), Some(PohEntry { num_hashes: 1, .. }) // <-- record() ok ); assert_eq!(poh.remaining_hashes, 9); } }
30.44713
115
0.51925
fcfde5136dd35776705581e1b45b5cea71597347
3,754
//! Tool that generates interval reports from profiling data. //! //! # Usage //! //! The tool reads a //! [JSON-formatted event log](https://github.com/enso-org/design/blob/main/epics/profiling/implementation.md#file-format) //! from stdin, and writes a report to stdout. //! //! For example: //! //! ```console //! ~/git/enso/data $ cargo run --bin intervals < profile.json | less //! ``` // === Features === #![feature(test)] // === Standard Linter Configuration === #![deny(non_ascii_idents)] #![warn(unsafe_code)] // === Non-Standard Linter Configuration === #![deny(unconditional_recursion)] #![warn(missing_copy_implementations)] #![warn(missing_debug_implementations)] #![warn(missing_docs)] #![warn(trivial_casts)] #![warn(trivial_numeric_casts)] #![warn(unused_import_braces)] use enso_prelude::*; use enso_profiler::format::AnyMetadata; use enso_profiler_data as data; use std::collections; // ============ // === main === // ============ fn main() { use std::io::Read; let mut log = String::new(); std::io::stdin().read_to_string(&mut log).unwrap(); let profile: data::Profile<AnyMetadata> = log.parse().unwrap(); let mut aggregator = data::aggregate::Aggregator::default(); aggregator.add_profile(&profile); let root = data::aggregate::Frame::from(aggregator); let funcs = FuncCollector::run(&root); let kv_to_func = |(label, timings)| Func { label, timings }; let mut funcs: Vec<_> = funcs.into_iter().map(kv_to_func).collect(); funcs.sort_unstable_by(|a, b| a.timings.self_duration.total_cmp(&b.timings.self_duration)); println!("self_duration total_duration count profiler"); for Func { label, timings } in funcs.iter().rev() { let FuncTimings { total_duration, self_duration, count } = timings; println!("{:>6.1} {:>6.1} {} {}", self_duration, total_duration, count, label); } let mut total_duration = 0.0; for Func { timings, .. 
} in funcs.iter() { total_duration += timings.self_duration; } println!("0.0 {:>6.1} 1 (total_self_duration)", total_duration); } // ===================== // === FuncCollector === // ===================== /// Aggregates all intervals created by a particular profiler, abstracting away where in the stack /// it occurs. #[derive(Default)] struct FuncCollector { funcs: HashMap<Label, FuncTimings>, } impl FuncCollector { /// Aggregate all intervals created by a particular profiler. fn run(root: &data::aggregate::Frame) -> collections::HashMap<Label, FuncTimings> { let mut collector = FuncCollector::default(); for (label, frame) in &root.children { collector.visit(label, frame); } let FuncCollector { funcs, .. } = collector; funcs } } impl FuncCollector { /// Add time spent in an interval to the running sums; recurse into children. fn visit(&mut self, label: &Label, frame: &data::aggregate::Frame) { let func = self.funcs.entry(label.clone()).or_default(); func.self_duration += frame.self_duration(); func.total_duration += frame.total_duration(); func.count += frame.interval_count(); for (label, frame) in &frame.children { self.visit(label, frame); } } } type Label = ImString; // =================== // === FuncTimings === // =================== /// Aggregate of all time spent in a particular profiler's intervals. #[derive(Default)] struct FuncTimings { total_duration: f64, self_duration: f64, count: usize, } // ============ // === Func === // ============ /// Identifies a profiler, and contains information about the time spent in its intervals. struct Func { label: Label, timings: FuncTimings, }
28.656489
122
0.628396
89cbbe8d4775c3d46340605f016dd0c2df3428ab
2,768
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::ops; use enumflags2::bitflags; use enumflags2::make_bitflags; use enumflags2::BitFlags; #[bitflags] #[repr(u64)] #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug, Eq, PartialEq)] pub enum UserPrivilegeType { // UsagePrivilege is a synonym for “no privileges” Usage = 1 << 0, // Privilege to create databases and tables. Create = 1 << 1, // Privilege to select rows from tables in a database. Select = 1 << 2, // Privilege to insert into tables in a database. Insert = 1 << 3, // Privilege to SET variables. 
Set = 1 << 4, } const ALL_PRIVILEGES: BitFlags<UserPrivilegeType> = make_bitflags!( UserPrivilegeType::{Create | Select | Insert | Set} ); #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Default, Debug, Eq, PartialEq)] pub struct UserPrivilege { privileges: BitFlags<UserPrivilegeType>, } impl UserPrivilege { pub fn empty() -> Self { UserPrivilege { privileges: BitFlags::empty(), } } pub fn all_privileges() -> Self { ALL_PRIVILEGES.into() } pub fn set_privilege(&mut self, privilege: UserPrivilegeType) { self.privileges |= privilege; } pub fn has_privilege(&self, privilege: UserPrivilegeType) -> bool { self.privileges.contains(privilege) } pub fn set_all_privileges(&mut self) { self.privileges |= ALL_PRIVILEGES; } } impl ops::BitOr for UserPrivilege { type Output = Self; #[inline(always)] fn bitor(self, other: Self) -> Self { Self { privileges: self.privileges | other.privileges, } } } impl ops::BitOrAssign for UserPrivilege { #[inline(always)] fn bitor_assign(&mut self, other: Self) { self.privileges |= other.privileges } } impl From<UserPrivilege> for BitFlags<UserPrivilegeType> { fn from(privilege: UserPrivilege) -> BitFlags<UserPrivilegeType> { privilege.privileges } } impl From<BitFlags<UserPrivilegeType>> for UserPrivilege { fn from(privileges: BitFlags<UserPrivilegeType>) -> UserPrivilege { UserPrivilege { privileges } } }
27.405941
91
0.667269
26445bf61408d9a62d63fa53052c1c94cd8bfa59
585
use std::path::PathBuf; use std::collections::BTreeMap; use serde::{Serialize, Deserialize}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct FontMetadata { pub atlas_path: PathBuf, pub atlas_width: u32, pub atlas_height: u32, pub line_height: u32, pub glyphs: BTreeMap<String, GlyphMetadata>, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GlyphMetadata { pub x: u32, pub y: u32, pub width: u32, pub height: u32, pub hori_bearing_x: i32, pub hori_bearing_y: i32, pub advance_x: i32, pub advance_y: i32, }
22.5
48
0.683761