// Source dataset columns: hexsha (stringlengths 40) | size (int64) | content |
// avg_line_length (float64) | max_line_length (int64) | alphanum_fraction
// (float64). Each file below is introduced by its hexsha and size; the
// derived per-file statistics are dropped.
// ---- file 4a7c33ba56a6c11f31cfad661cab78c2655aea28 (25,272 bytes) ----
#[doc = "Reader of register TAMPCTRL"]
pub type R = crate::R<u32, super::TAMPCTRL>;
#[doc = "Writer for register TAMPCTRL"]
pub type W = crate::W<u32, super::TAMPCTRL>;
#[doc = "Register TAMPCTRL `reset()`'s with value 0"]
impl crate::ResetValue for super::TAMPCTRL {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Tamper Input 0 Action\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IN0ACT_A {
#[doc = "0: Off (Disabled)"]
OFF,
#[doc = "1: Wake without timestamp"]
WAKE,
#[doc = "2: Capture timestamp"]
CAPTURE,
#[doc = "3: Compare IN0 to OUT"]
ACTL,
}
impl From<IN0ACT_A> for u8 {
#[inline(always)]
fn from(variant: IN0ACT_A) -> Self {
match variant {
IN0ACT_A::OFF => 0,
IN0ACT_A::WAKE => 1,
IN0ACT_A::CAPTURE => 2,
IN0ACT_A::ACTL => 3,
}
}
}
#[doc = "Reader of field `IN0ACT`"]
pub type IN0ACT_R = crate::R<u8, IN0ACT_A>;
impl IN0ACT_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> IN0ACT_A {
match self.bits {
0 => IN0ACT_A::OFF,
1 => IN0ACT_A::WAKE,
2 => IN0ACT_A::CAPTURE,
3 => IN0ACT_A::ACTL,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `OFF`"]
#[inline(always)]
pub fn is_off(&self) -> bool {
*self == IN0ACT_A::OFF
}
#[doc = "Checks if the value of the field is `WAKE`"]
#[inline(always)]
pub fn is_wake(&self) -> bool {
*self == IN0ACT_A::WAKE
}
#[doc = "Checks if the value of the field is `CAPTURE`"]
#[inline(always)]
pub fn is_capture(&self) -> bool {
*self == IN0ACT_A::CAPTURE
}
#[doc = "Checks if the value of the field is `ACTL`"]
#[inline(always)]
pub fn is_actl(&self) -> bool {
*self == IN0ACT_A::ACTL
}
}
#[doc = "Write proxy for field `IN0ACT`"]
pub struct IN0ACT_W<'a> {
w: &'a mut W,
}
impl<'a> IN0ACT_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
    pub fn variant(self, variant: IN0ACT_A) -> &'a mut W {
        self.bits(variant.into())
    }
#[doc = "Off (Disabled)"]
#[inline(always)]
pub fn off(self) -> &'a mut W {
self.variant(IN0ACT_A::OFF)
}
#[doc = "Wake without timestamp"]
#[inline(always)]
pub fn wake(self) -> &'a mut W {
self.variant(IN0ACT_A::WAKE)
}
#[doc = "Capture timestamp"]
#[inline(always)]
pub fn capture(self) -> &'a mut W {
self.variant(IN0ACT_A::CAPTURE)
}
#[doc = "Compare IN0 to OUT"]
#[inline(always)]
pub fn actl(self) -> &'a mut W {
self.variant(IN0ACT_A::ACTL)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x03) | ((value as u32) & 0x03);
self.w
}
}
#[doc = "Tamper Input 1 Action\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IN1ACT_A {
#[doc = "0: Off (Disabled)"]
OFF,
#[doc = "1: Wake without timestamp"]
WAKE,
#[doc = "2: Capture timestamp"]
CAPTURE,
#[doc = "3: Compare IN1 to OUT"]
ACTL,
}
impl From<IN1ACT_A> for u8 {
#[inline(always)]
fn from(variant: IN1ACT_A) -> Self {
match variant {
IN1ACT_A::OFF => 0,
IN1ACT_A::WAKE => 1,
IN1ACT_A::CAPTURE => 2,
IN1ACT_A::ACTL => 3,
}
}
}
#[doc = "Reader of field `IN1ACT`"]
pub type IN1ACT_R = crate::R<u8, IN1ACT_A>;
impl IN1ACT_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> IN1ACT_A {
match self.bits {
0 => IN1ACT_A::OFF,
1 => IN1ACT_A::WAKE,
2 => IN1ACT_A::CAPTURE,
3 => IN1ACT_A::ACTL,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `OFF`"]
#[inline(always)]
pub fn is_off(&self) -> bool {
*self == IN1ACT_A::OFF
}
#[doc = "Checks if the value of the field is `WAKE`"]
#[inline(always)]
pub fn is_wake(&self) -> bool {
*self == IN1ACT_A::WAKE
}
#[doc = "Checks if the value of the field is `CAPTURE`"]
#[inline(always)]
pub fn is_capture(&self) -> bool {
*self == IN1ACT_A::CAPTURE
}
#[doc = "Checks if the value of the field is `ACTL`"]
#[inline(always)]
pub fn is_actl(&self) -> bool {
*self == IN1ACT_A::ACTL
}
}
#[doc = "Write proxy for field `IN1ACT`"]
pub struct IN1ACT_W<'a> {
w: &'a mut W,
}
impl<'a> IN1ACT_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
    pub fn variant(self, variant: IN1ACT_A) -> &'a mut W {
        self.bits(variant.into())
    }
#[doc = "Off (Disabled)"]
#[inline(always)]
pub fn off(self) -> &'a mut W {
self.variant(IN1ACT_A::OFF)
}
#[doc = "Wake without timestamp"]
#[inline(always)]
pub fn wake(self) -> &'a mut W {
self.variant(IN1ACT_A::WAKE)
}
#[doc = "Capture timestamp"]
#[inline(always)]
pub fn capture(self) -> &'a mut W {
self.variant(IN1ACT_A::CAPTURE)
}
#[doc = "Compare IN1 to OUT"]
#[inline(always)]
pub fn actl(self) -> &'a mut W {
self.variant(IN1ACT_A::ACTL)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 2)) | (((value as u32) & 0x03) << 2);
self.w
}
}
#[doc = "Tamper Input 2 Action\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IN2ACT_A {
#[doc = "0: Off (Disabled)"]
OFF,
#[doc = "1: Wake without timestamp"]
WAKE,
#[doc = "2: Capture timestamp"]
CAPTURE,
#[doc = "3: Compare IN2 to OUT"]
ACTL,
}
impl From<IN2ACT_A> for u8 {
#[inline(always)]
fn from(variant: IN2ACT_A) -> Self {
match variant {
IN2ACT_A::OFF => 0,
IN2ACT_A::WAKE => 1,
IN2ACT_A::CAPTURE => 2,
IN2ACT_A::ACTL => 3,
}
}
}
#[doc = "Reader of field `IN2ACT`"]
pub type IN2ACT_R = crate::R<u8, IN2ACT_A>;
impl IN2ACT_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> IN2ACT_A {
match self.bits {
0 => IN2ACT_A::OFF,
1 => IN2ACT_A::WAKE,
2 => IN2ACT_A::CAPTURE,
3 => IN2ACT_A::ACTL,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `OFF`"]
#[inline(always)]
pub fn is_off(&self) -> bool {
*self == IN2ACT_A::OFF
}
#[doc = "Checks if the value of the field is `WAKE`"]
#[inline(always)]
pub fn is_wake(&self) -> bool {
*self == IN2ACT_A::WAKE
}
#[doc = "Checks if the value of the field is `CAPTURE`"]
#[inline(always)]
pub fn is_capture(&self) -> bool {
*self == IN2ACT_A::CAPTURE
}
#[doc = "Checks if the value of the field is `ACTL`"]
#[inline(always)]
pub fn is_actl(&self) -> bool {
*self == IN2ACT_A::ACTL
}
}
#[doc = "Write proxy for field `IN2ACT`"]
pub struct IN2ACT_W<'a> {
w: &'a mut W,
}
impl<'a> IN2ACT_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
    pub fn variant(self, variant: IN2ACT_A) -> &'a mut W {
        self.bits(variant.into())
    }
#[doc = "Off (Disabled)"]
#[inline(always)]
pub fn off(self) -> &'a mut W {
self.variant(IN2ACT_A::OFF)
}
#[doc = "Wake without timestamp"]
#[inline(always)]
pub fn wake(self) -> &'a mut W {
self.variant(IN2ACT_A::WAKE)
}
#[doc = "Capture timestamp"]
#[inline(always)]
pub fn capture(self) -> &'a mut W {
self.variant(IN2ACT_A::CAPTURE)
}
#[doc = "Compare IN2 to OUT"]
#[inline(always)]
pub fn actl(self) -> &'a mut W {
self.variant(IN2ACT_A::ACTL)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 4)) | (((value as u32) & 0x03) << 4);
self.w
}
}
#[doc = "Tamper Input 3 Action\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IN3ACT_A {
#[doc = "0: Off (Disabled)"]
OFF,
#[doc = "1: Wake without timestamp"]
WAKE,
#[doc = "2: Capture timestamp"]
CAPTURE,
#[doc = "3: Compare IN3 to OUT"]
ACTL,
}
impl From<IN3ACT_A> for u8 {
#[inline(always)]
fn from(variant: IN3ACT_A) -> Self {
match variant {
IN3ACT_A::OFF => 0,
IN3ACT_A::WAKE => 1,
IN3ACT_A::CAPTURE => 2,
IN3ACT_A::ACTL => 3,
}
}
}
#[doc = "Reader of field `IN3ACT`"]
pub type IN3ACT_R = crate::R<u8, IN3ACT_A>;
impl IN3ACT_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> IN3ACT_A {
match self.bits {
0 => IN3ACT_A::OFF,
1 => IN3ACT_A::WAKE,
2 => IN3ACT_A::CAPTURE,
3 => IN3ACT_A::ACTL,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `OFF`"]
#[inline(always)]
pub fn is_off(&self) -> bool {
*self == IN3ACT_A::OFF
}
#[doc = "Checks if the value of the field is `WAKE`"]
#[inline(always)]
pub fn is_wake(&self) -> bool {
*self == IN3ACT_A::WAKE
}
#[doc = "Checks if the value of the field is `CAPTURE`"]
#[inline(always)]
pub fn is_capture(&self) -> bool {
*self == IN3ACT_A::CAPTURE
}
#[doc = "Checks if the value of the field is `ACTL`"]
#[inline(always)]
pub fn is_actl(&self) -> bool {
*self == IN3ACT_A::ACTL
}
}
#[doc = "Write proxy for field `IN3ACT`"]
pub struct IN3ACT_W<'a> {
w: &'a mut W,
}
impl<'a> IN3ACT_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
    pub fn variant(self, variant: IN3ACT_A) -> &'a mut W {
        self.bits(variant.into())
    }
#[doc = "Off (Disabled)"]
#[inline(always)]
pub fn off(self) -> &'a mut W {
self.variant(IN3ACT_A::OFF)
}
#[doc = "Wake without timestamp"]
#[inline(always)]
pub fn wake(self) -> &'a mut W {
self.variant(IN3ACT_A::WAKE)
}
#[doc = "Capture timestamp"]
#[inline(always)]
pub fn capture(self) -> &'a mut W {
self.variant(IN3ACT_A::CAPTURE)
}
#[doc = "Compare IN3 to OUT"]
#[inline(always)]
pub fn actl(self) -> &'a mut W {
self.variant(IN3ACT_A::ACTL)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 6)) | (((value as u32) & 0x03) << 6);
self.w
}
}
#[doc = "Tamper Input 4 Action\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IN4ACT_A {
#[doc = "0: Off (Disabled)"]
OFF,
#[doc = "1: Wake without timestamp"]
WAKE,
#[doc = "2: Capture timestamp"]
CAPTURE,
#[doc = "3: Compare IN4 to OUT"]
ACTL,
}
impl From<IN4ACT_A> for u8 {
#[inline(always)]
fn from(variant: IN4ACT_A) -> Self {
match variant {
IN4ACT_A::OFF => 0,
IN4ACT_A::WAKE => 1,
IN4ACT_A::CAPTURE => 2,
IN4ACT_A::ACTL => 3,
}
}
}
#[doc = "Reader of field `IN4ACT`"]
pub type IN4ACT_R = crate::R<u8, IN4ACT_A>;
impl IN4ACT_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> IN4ACT_A {
match self.bits {
0 => IN4ACT_A::OFF,
1 => IN4ACT_A::WAKE,
2 => IN4ACT_A::CAPTURE,
3 => IN4ACT_A::ACTL,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `OFF`"]
#[inline(always)]
pub fn is_off(&self) -> bool {
*self == IN4ACT_A::OFF
}
#[doc = "Checks if the value of the field is `WAKE`"]
#[inline(always)]
pub fn is_wake(&self) -> bool {
*self == IN4ACT_A::WAKE
}
#[doc = "Checks if the value of the field is `CAPTURE`"]
#[inline(always)]
pub fn is_capture(&self) -> bool {
*self == IN4ACT_A::CAPTURE
}
#[doc = "Checks if the value of the field is `ACTL`"]
#[inline(always)]
pub fn is_actl(&self) -> bool {
*self == IN4ACT_A::ACTL
}
}
#[doc = "Write proxy for field `IN4ACT`"]
pub struct IN4ACT_W<'a> {
w: &'a mut W,
}
impl<'a> IN4ACT_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
    pub fn variant(self, variant: IN4ACT_A) -> &'a mut W {
        self.bits(variant.into())
    }
#[doc = "Off (Disabled)"]
#[inline(always)]
pub fn off(self) -> &'a mut W {
self.variant(IN4ACT_A::OFF)
}
#[doc = "Wake without timestamp"]
#[inline(always)]
pub fn wake(self) -> &'a mut W {
self.variant(IN4ACT_A::WAKE)
}
#[doc = "Capture timestamp"]
#[inline(always)]
pub fn capture(self) -> &'a mut W {
self.variant(IN4ACT_A::CAPTURE)
}
#[doc = "Compare IN4 to OUT"]
#[inline(always)]
pub fn actl(self) -> &'a mut W {
self.variant(IN4ACT_A::ACTL)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 8)) | (((value as u32) & 0x03) << 8);
self.w
}
}
#[doc = "Reader of field `TAMLVL0`"]
pub type TAMLVL0_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TAMLVL0`"]
pub struct TAMLVL0_W<'a> {
w: &'a mut W,
}
impl<'a> TAMLVL0_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Reader of field `TAMLVL1`"]
pub type TAMLVL1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TAMLVL1`"]
pub struct TAMLVL1_W<'a> {
w: &'a mut W,
}
impl<'a> TAMLVL1_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
self.w
}
}
#[doc = "Reader of field `TAMLVL2`"]
pub type TAMLVL2_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TAMLVL2`"]
pub struct TAMLVL2_W<'a> {
w: &'a mut W,
}
impl<'a> TAMLVL2_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
#[doc = "Reader of field `TAMLVL3`"]
pub type TAMLVL3_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TAMLVL3`"]
pub struct TAMLVL3_W<'a> {
w: &'a mut W,
}
impl<'a> TAMLVL3_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19);
self.w
}
}
#[doc = "Reader of field `TAMLVL4`"]
pub type TAMLVL4_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TAMLVL4`"]
pub struct TAMLVL4_W<'a> {
w: &'a mut W,
}
impl<'a> TAMLVL4_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
self.w
}
}
#[doc = "Reader of field `DEBNC0`"]
pub type DEBNC0_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DEBNC0`"]
pub struct DEBNC0_W<'a> {
w: &'a mut W,
}
impl<'a> DEBNC0_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
self.w
}
}
#[doc = "Reader of field `DEBNC1`"]
pub type DEBNC1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DEBNC1`"]
pub struct DEBNC1_W<'a> {
w: &'a mut W,
}
impl<'a> DEBNC1_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25);
self.w
}
}
#[doc = "Reader of field `DEBNC2`"]
pub type DEBNC2_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DEBNC2`"]
pub struct DEBNC2_W<'a> {
w: &'a mut W,
}
impl<'a> DEBNC2_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26);
self.w
}
}
#[doc = "Reader of field `DEBNC3`"]
pub type DEBNC3_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DEBNC3`"]
pub struct DEBNC3_W<'a> {
w: &'a mut W,
}
impl<'a> DEBNC3_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27);
self.w
}
}
#[doc = "Reader of field `DEBNC4`"]
pub type DEBNC4_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DEBNC4`"]
pub struct DEBNC4_W<'a> {
w: &'a mut W,
}
impl<'a> DEBNC4_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 28)) | (((value as u32) & 0x01) << 28);
self.w
}
}
impl R {
#[doc = "Bits 0:1 - Tamper Input 0 Action"]
#[inline(always)]
pub fn in0act(&self) -> IN0ACT_R {
IN0ACT_R::new((self.bits & 0x03) as u8)
}
#[doc = "Bits 2:3 - Tamper Input 1 Action"]
#[inline(always)]
pub fn in1act(&self) -> IN1ACT_R {
IN1ACT_R::new(((self.bits >> 2) & 0x03) as u8)
}
#[doc = "Bits 4:5 - Tamper Input 2 Action"]
#[inline(always)]
pub fn in2act(&self) -> IN2ACT_R {
IN2ACT_R::new(((self.bits >> 4) & 0x03) as u8)
}
#[doc = "Bits 6:7 - Tamper Input 3 Action"]
#[inline(always)]
pub fn in3act(&self) -> IN3ACT_R {
IN3ACT_R::new(((self.bits >> 6) & 0x03) as u8)
}
#[doc = "Bits 8:9 - Tamper Input 4 Action"]
#[inline(always)]
pub fn in4act(&self) -> IN4ACT_R {
IN4ACT_R::new(((self.bits >> 8) & 0x03) as u8)
}
#[doc = "Bit 16 - Tamper Level Select 0"]
#[inline(always)]
pub fn tamlvl0(&self) -> TAMLVL0_R {
TAMLVL0_R::new(((self.bits >> 16) & 0x01) != 0)
}
#[doc = "Bit 17 - Tamper Level Select 1"]
#[inline(always)]
pub fn tamlvl1(&self) -> TAMLVL1_R {
TAMLVL1_R::new(((self.bits >> 17) & 0x01) != 0)
}
#[doc = "Bit 18 - Tamper Level Select 2"]
#[inline(always)]
pub fn tamlvl2(&self) -> TAMLVL2_R {
TAMLVL2_R::new(((self.bits >> 18) & 0x01) != 0)
}
#[doc = "Bit 19 - Tamper Level Select 3"]
#[inline(always)]
pub fn tamlvl3(&self) -> TAMLVL3_R {
TAMLVL3_R::new(((self.bits >> 19) & 0x01) != 0)
}
#[doc = "Bit 20 - Tamper Level Select 4"]
#[inline(always)]
pub fn tamlvl4(&self) -> TAMLVL4_R {
TAMLVL4_R::new(((self.bits >> 20) & 0x01) != 0)
}
#[doc = "Bit 24 - Debouncer Enable 0"]
#[inline(always)]
pub fn debnc0(&self) -> DEBNC0_R {
DEBNC0_R::new(((self.bits >> 24) & 0x01) != 0)
}
#[doc = "Bit 25 - Debouncer Enable 1"]
#[inline(always)]
pub fn debnc1(&self) -> DEBNC1_R {
DEBNC1_R::new(((self.bits >> 25) & 0x01) != 0)
}
#[doc = "Bit 26 - Debouncer Enable 2"]
#[inline(always)]
pub fn debnc2(&self) -> DEBNC2_R {
DEBNC2_R::new(((self.bits >> 26) & 0x01) != 0)
}
#[doc = "Bit 27 - Debouncer Enable 3"]
#[inline(always)]
pub fn debnc3(&self) -> DEBNC3_R {
DEBNC3_R::new(((self.bits >> 27) & 0x01) != 0)
}
#[doc = "Bit 28 - Debouncer Enable 4"]
#[inline(always)]
pub fn debnc4(&self) -> DEBNC4_R {
DEBNC4_R::new(((self.bits >> 28) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:1 - Tamper Input 0 Action"]
#[inline(always)]
pub fn in0act(&mut self) -> IN0ACT_W {
IN0ACT_W { w: self }
}
#[doc = "Bits 2:3 - Tamper Input 1 Action"]
#[inline(always)]
pub fn in1act(&mut self) -> IN1ACT_W {
IN1ACT_W { w: self }
}
#[doc = "Bits 4:5 - Tamper Input 2 Action"]
#[inline(always)]
pub fn in2act(&mut self) -> IN2ACT_W {
IN2ACT_W { w: self }
}
#[doc = "Bits 6:7 - Tamper Input 3 Action"]
#[inline(always)]
pub fn in3act(&mut self) -> IN3ACT_W {
IN3ACT_W { w: self }
}
#[doc = "Bits 8:9 - Tamper Input 4 Action"]
#[inline(always)]
pub fn in4act(&mut self) -> IN4ACT_W {
IN4ACT_W { w: self }
}
#[doc = "Bit 16 - Tamper Level Select 0"]
#[inline(always)]
pub fn tamlvl0(&mut self) -> TAMLVL0_W {
TAMLVL0_W { w: self }
}
#[doc = "Bit 17 - Tamper Level Select 1"]
#[inline(always)]
pub fn tamlvl1(&mut self) -> TAMLVL1_W {
TAMLVL1_W { w: self }
}
#[doc = "Bit 18 - Tamper Level Select 2"]
#[inline(always)]
pub fn tamlvl2(&mut self) -> TAMLVL2_W {
TAMLVL2_W { w: self }
}
#[doc = "Bit 19 - Tamper Level Select 3"]
#[inline(always)]
pub fn tamlvl3(&mut self) -> TAMLVL3_W {
TAMLVL3_W { w: self }
}
#[doc = "Bit 20 - Tamper Level Select 4"]
#[inline(always)]
pub fn tamlvl4(&mut self) -> TAMLVL4_W {
TAMLVL4_W { w: self }
}
#[doc = "Bit 24 - Debouncer Enable 0"]
#[inline(always)]
pub fn debnc0(&mut self) -> DEBNC0_W {
DEBNC0_W { w: self }
}
#[doc = "Bit 25 - Debouncer Enable 1"]
#[inline(always)]
pub fn debnc1(&mut self) -> DEBNC1_W {
DEBNC1_W { w: self }
}
#[doc = "Bit 26 - Debouncer Enable 2"]
#[inline(always)]
pub fn debnc2(&mut self) -> DEBNC2_W {
DEBNC2_W { w: self }
}
#[doc = "Bit 27 - Debouncer Enable 3"]
#[inline(always)]
pub fn debnc3(&mut self) -> DEBNC3_W {
DEBNC3_W { w: self }
}
#[doc = "Bit 28 - Debouncer Enable 4"]
#[inline(always)]
pub fn debnc4(&mut self) -> DEBNC4_W {
DEBNC4_W { w: self }
}
}
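// Hedged usage sketch for the generated TAMPCTRL API above. How the register
// handle (`rtc.tampctrl` below) is obtained depends on the rest of the
// generated crate and is illustrative; only the field methods themselves are
// defined in this module:
//
//     rtc.tampctrl.write(|w| {
//         w.in0act().wake()          // input 0: wake without timestamp
//             .in1act().capture()    // input 1: capture timestamp
//             .tamlvl0().set_bit()   // input 0: tamper level select
//             .debnc0().set_bit()    // input 0: enable debouncer
//     });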
// ---- file 793f8373851613ab4e14fcc05c5f2ff88413f047 (8,129 bytes) ----
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT
#![forbid(unsafe_code)]
use std::path::{Path, PathBuf};
use std::{collections::HashMap, convert::TryFrom};
use std::{fs, io};
use hex::{FromHex, FromHexError};
use serde_json::Value;
use thiserror::Error;
use crypto::{crypto_box::PublicKeyError, hash::CryptoboxPublicKeyHash};
use crypto::{
crypto_box::{random_keypair, PublicKey, SecretKey},
proof_of_work::ProofOfWork,
};
#[derive(Error, Debug)]
pub enum IdentityError {
#[error("I/O error: {reason}")]
IoError { reason: io::Error },
#[error("Serde error, reason: {reason}")]
IdentitySerdeError { reason: serde_json::Error },
#[error("Invalid field error, reason: {reason}")]
IdentityFieldError { reason: String },
#[error("Invalid public key, reason: {reason}")]
InvalidPublicKeyError { reason: FromHexError },
#[error("Identity invalid peer_id check")]
InvalidPeerIdError,
#[error("Public key error: {0}")]
PublicKeyError(PublicKeyError),
}
impl From<PublicKeyError> for IdentityError {
fn from(source: PublicKeyError) -> Self {
Self::PublicKeyError(source)
}
}
/// This node's identity information, compatible with Tezos.
#[derive(Clone, Debug)]
pub struct Identity {
    /// The peer_id is the calculated hash of the public key [`crypto_box::PublicKey`]
pub peer_id: CryptoboxPublicKeyHash,
/// Hex encoded public key: [`crypto_box::PublicKey`]
pub public_key: PublicKey,
/// Hex encoded secret key: [`crypto_box::SecretKey`]
pub secret_key: SecretKey,
/// Hex encoded pow: [`crypto::ProofOfWork`]
pub proof_of_work_stamp: ProofOfWork,
}
impl Identity {
pub fn generate(expected_pow: f64) -> Result<Self, PublicKeyError> {
let (sk, pk, peer_id) = random_keypair()?;
let pow = ProofOfWork::generate(&pk, expected_pow);
Ok(Identity {
peer_id,
public_key: pk,
secret_key: sk,
proof_of_work_stamp: pow,
})
}
pub fn check_peer_id(&self) -> Result<(), IdentityError> {
if self.peer_id == self.public_key.public_key_hash()? {
Ok(())
} else {
Err(IdentityError::InvalidPeerIdError)
}
}
pub fn from_json(json: &str) -> Result<Identity, IdentityError> {
let identity: HashMap<String, Value> = serde_json::from_str(json)
.map_err(|e| IdentityError::IdentitySerdeError { reason: e })?;
let peer_id_str = identity
.get("peer_id")
.ok_or(IdentityError::IdentityFieldError {
reason: "Missing 'peer_id'".to_string(),
})?
.as_str()
.ok_or(IdentityError::IdentityFieldError {
reason: "Missing valid 'peer_id'".to_string(),
})?;
let peer_id = CryptoboxPublicKeyHash::try_from(peer_id_str).map_err(|e| {
IdentityError::IdentityFieldError {
reason: format!("Missing valid 'peer_id': {}", e),
}
})?;
let public_key_str = identity
.get("public_key")
.ok_or(IdentityError::IdentityFieldError {
reason: "Missing 'public_key'".to_string(),
})?
.as_str()
.ok_or(IdentityError::IdentityFieldError {
reason: "Missing valid 'public_key'".to_string(),
})?;
let public_key =
PublicKey::from_hex(public_key_str).map_err(|e| IdentityError::IdentityFieldError {
reason: format!("Missing valid 'public_key': {}", e),
})?;
let secret_key_str = identity
.get("secret_key")
.ok_or(IdentityError::IdentityFieldError {
reason: "Missing 'secret_key'".to_string(),
})?
.as_str()
.ok_or(IdentityError::IdentityFieldError {
reason: "Missing valid 'secret_key'".to_string(),
})?;
let secret_key =
SecretKey::from_hex(secret_key_str).map_err(|e| IdentityError::IdentityFieldError {
reason: format!("Missing valid 'secret_key': {}", e),
})?;
let proof_of_work_stamp_str = identity
.get("proof_of_work_stamp")
.ok_or(IdentityError::IdentityFieldError {
reason: "Missing 'proof_of_work_stamp'".to_string(),
})?
.as_str()
.ok_or(IdentityError::IdentityFieldError {
reason: "Missing valid 'proof_of_work_stamp'".to_string(),
})?;
let proof_of_work_stamp = ProofOfWork::from_hex(proof_of_work_stamp_str).map_err(|e| {
IdentityError::IdentityFieldError {
reason: format!("Missing valid 'proof_of_work_stamp': {}", e),
}
})?;
Ok(Identity {
peer_id,
public_key,
secret_key,
proof_of_work_stamp,
})
}
pub fn as_json(&self) -> Result<String, IdentityError> {
let mut identity: HashMap<&'static str, String> = Default::default();
identity.insert("peer_id", self.peer_id.to_base58_check());
identity.insert("public_key", hex::encode(self.public_key.as_ref()));
identity.insert("secret_key", hex::encode(self.secret_key.as_ref()));
identity.insert(
"proof_of_work_stamp",
hex::encode(self.proof_of_work_stamp.as_ref()),
);
serde_json::to_string(&identity)
.map_err(|e| IdentityError::IdentitySerdeError { reason: e })
}
pub fn peer_id(&self) -> CryptoboxPublicKeyHash {
self.peer_id.clone()
}
}
/// Loads an identity from a Tezos configuration file.
pub fn load_identity<P: AsRef<Path>>(
identity_json_file_path: P,
) -> Result<Identity, IdentityError> {
    let contents = fs::read_to_string(identity_json_file_path)
        .map_err(|e| IdentityError::IoError { reason: e })?;
    Identity::from_json(&contents)
}
/// Stores provided identity into the file specified by path
pub fn store_identity(path: &Path, identity: &Identity) -> Result<(), IdentityError> {
let identity_json = identity.as_json()?;
    fs::write(path, &identity_json).map_err(|e| IdentityError::IoError { reason: e })?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_identity_generate() -> Result<(), anyhow::Error> {
// generate
let identity = Identity::generate(16f64)?;
// check
assert!(identity.check_peer_id().is_ok());
// convert json and back
let converted = identity.as_json()?;
let converted = Identity::from_json(&converted)?;
        assert!(converted.check_peer_id().is_ok());
assert_eq!(identity.peer_id, converted.peer_id);
assert_eq!(identity.public_key, converted.public_key);
assert_eq!(identity.secret_key, converted.secret_key);
assert_eq!(identity.proof_of_work_stamp, converted.proof_of_work_stamp);
Ok(())
}
#[test]
fn test_identity_json_serde_generated_by_tezos() -> Result<(), anyhow::Error> {
let expected_json = serde_json::json!(
{
"peer_id": "idtqxHUjbjbCfaDn4jczoPGsnhacKX",
"public_key": "a072c7b3e477142689cadee638078b377df5e5793e3cea529d0b718cde59f212",
"secret_key": "d37c77a8643c7f7fce9219e9769ed4dd23bc542265da47a64a2613bd199ad74e",
"proof_of_work_stamp": "0cfe810d9b4591f0f50721b6811f2981a4274e9d0593bbd0"
}
);
let converted = Identity::from_json(serde_json::to_string(&expected_json)?.as_str())?;
assert!(converted.check_peer_id().is_ok());
let converted = converted.as_json()?;
let converted = Identity::from_json(&converted)?;
assert!(converted.check_peer_id().is_ok());
let converted = converted.as_json()?;
// check
assert_json_diff::assert_json_eq!(
serde_json::from_str::<serde_json::Value>(&converted)?,
expected_json
);
Ok(())
}
}
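// Hedged usage sketch: the proof-of-work difficulty and file path below are
// illustrative; a real caller would take both from its node configuration.
#[cfg(test)]
mod usage_sketch {
    use super::*;
    use std::path::PathBuf;

    #[test]
    #[ignore] // writes into the working directory
    fn generate_store_load_roundtrip() -> Result<(), IdentityError> {
        let identity = Identity::generate(16f64)?;
        let path = PathBuf::from("identity.sketch.json");
        store_identity(&path, &identity)?;
        let loaded = load_identity(&path)?;
        loaded.check_peer_id()?;
        Ok(())
    }
}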
// ---- file 33eb838e19be676c73b2abca89f0d5e6970c13d4 (19,734 bytes) ----
use crate::bitboard::{BitBoard, EMPTY};
use crate::board::Board;
use crate::chess_move::ChessMove;
use crate::magic::between;
use crate::movegen::piece_type::*;
use crate::piece::{Piece, NUM_PROMOTION_PIECES, PROMOTION_PIECES};
use crate::square::Square;
use crate::File;
use arrayvec::ArrayVec;
use nodrop::NoDrop;
use std::iter::ExactSizeIterator;
use std::mem;
#[derive(Copy, Clone, PartialEq, PartialOrd)]
pub struct SquareAndBitBoard {
square: Square,
bitboard: BitBoard,
promotion: bool,
}
impl SquareAndBitBoard {
pub fn new(sq: Square, bb: BitBoard, promotion: bool) -> SquareAndBitBoard {
SquareAndBitBoard {
square: sq,
bitboard: bb,
            promotion,
}
}
}
pub type MoveList = NoDrop<ArrayVec<[SquareAndBitBoard; 18]>>;
/// An incremental move generator
///
/// This structure enumerates moves slightly slower than board.enumerate_moves(...),
/// but has some extra features, such as:
///
/// * Being an iterator
/// * Not requiring you to create a buffer
/// * Only iterating moves that match a certain pattern
/// * Being iterable multiple times (such as iterating once for all captures, then iterating again
/// for all quiets)
/// * Doing as little work early on as possible, so that if you are not going to look at every move, the
/// structure moves faster
/// * Being able to iterate pseudo legal moves, while keeping the (nearly) free legality checks in
/// place
///
/// # Examples
///
/// ```
/// use chess::MoveGen;
/// use chess::Board;
/// use chess::EMPTY;
/// use chess::construct;
///
/// // create a board with the initial position
/// let board = Board::default();
///
/// // create an iterable
/// let mut iterable = MoveGen::new_legal(&board);
///
/// // make sure .len() works.
/// assert_eq!(iterable.len(), 20); // the .len() function does *not* consume the iterator
///
/// // lets iterate over targets.
/// let targets = board.color_combined(!board.side_to_move());
/// iterable.set_iterator_mask(*targets);
///
/// // count the number of targets
/// let mut count = 0;
/// for _ in &mut iterable {
/// count += 1;
/// // This move captures one of my opponent's pieces (with the exception of en passant)
/// }
///
/// // now, iterate over the rest of the moves
/// iterable.set_iterator_mask(!EMPTY);
/// for _ in &mut iterable {
/// count += 1;
/// // This move does not capture anything
/// }
///
/// // make sure it works
/// assert_eq!(count, 20);
///
/// ```
pub struct MoveGen {
moves: MoveList,
promotion_index: usize,
iterator_mask: BitBoard,
index: usize,
}
impl MoveGen {
#[inline(always)]
fn enumerate_moves(board: &Board) -> MoveList {
let checkers = *board.checkers();
let mask = !board.color_combined(board.side_to_move());
let mut movelist = NoDrop::new(ArrayVec::<[SquareAndBitBoard; 18]>::new());
if let Some(ep) = board.en_passant() {
let capture_square = ep.backward(!board.side_to_move()).expect("Valid square");
let pawns = board.pieces(Piece::Pawn) & !mask;
let ep_attacker_rank = ep.get_rank();
if ep.get_file() != File::A {
let attacker = Square::make_square(ep_attacker_rank, ep.get_file().left());
if PawnType::legal_ep_move(&board, attacker, capture_square)
&& (pawns & BitBoard::from_square(attacker)).popcnt() == 1
{
movelist.push(SquareAndBitBoard::new(
attacker,
BitBoard::from_square(capture_square),
false,
));
}
}
if ep.get_file() != File::H {
let attacker = Square::make_square(ep_attacker_rank, ep.get_file().right());
if PawnType::legal_ep_move(&board, attacker, capture_square)
&& (pawns & BitBoard::from_square(attacker)).popcnt() == 1
{
movelist.push(SquareAndBitBoard::new(
attacker,
BitBoard::from_square(capture_square),
false,
));
}
}
} else {
if checkers == EMPTY {
PawnType::legals::<NotInCheckType>(&mut movelist, &board, mask);
KnightType::legals::<NotInCheckType>(&mut movelist, &board, mask);
BishopType::legals::<NotInCheckType>(&mut movelist, &board, mask);
RookType::legals::<NotInCheckType>(&mut movelist, &board, mask);
QueenType::legals::<NotInCheckType>(&mut movelist, &board, mask);
KingType::legals::<NotInCheckType>(&mut movelist, &board, mask);
} else if checkers.popcnt() == 1 {
PawnType::legals::<InCheckType>(&mut movelist, &board, mask);
KnightType::legals::<InCheckType>(&mut movelist, &board, mask);
BishopType::legals::<InCheckType>(&mut movelist, &board, mask);
RookType::legals::<InCheckType>(&mut movelist, &board, mask);
QueenType::legals::<InCheckType>(&mut movelist, &board, mask);
KingType::legals::<InCheckType>(&mut movelist, &board, mask);
} else {
KingType::legals::<InCheckType>(&mut movelist, &board, mask);
}
}
movelist
}
/// Create a new `MoveGen` structure, only generating legal moves
#[inline(always)]
pub fn new_legal(board: &Board) -> MoveGen {
MoveGen {
moves: MoveGen::enumerate_moves(board),
promotion_index: 0,
iterator_mask: !EMPTY,
index: 0,
}
}
/// Never, ever, iterate any moves that land on the following squares
pub fn remove_mask(&mut self, mask: BitBoard) {
for x in 0..self.moves.len() {
self.moves[x].bitboard &= !mask;
}
}
/// Never, ever, iterate this move
pub fn remove_move(&mut self, chess_move: ChessMove) -> bool {
for x in 0..self.moves.len() {
if self.moves[x].square == chess_move.get_source() {
self.moves[x].bitboard &= !BitBoard::from_square(chess_move.get_dest());
return true;
}
}
false
}
/// For now, Only iterate moves that land on the following squares
/// Note: Once iteration is completed, you can pass in a mask of ! `EMPTY`
/// to get the remaining moves, or another mask
pub fn set_iterator_mask(&mut self, mask: BitBoard) {
self.iterator_mask = mask;
self.index = 0;
// the iterator portion of this struct relies on the invariant that
// the bitboards at the beginning of the moves[] array are the only
// ones used. As a result, we must partition the list such that the
// assumption is true.
// first, find the first non-used moves index, and store that in i
let mut i = 0;
while i < self.moves.len() && self.moves[i].bitboard & self.iterator_mask != EMPTY {
i += 1;
}
// next, find each element past i where the moves are used, and store
// that in i. Then, increment i to point to a new unused slot.
for j in (i + 1)..self.moves.len() {
if self.moves[j].bitboard & self.iterator_mask != EMPTY {
let backup = self.moves[i];
self.moves[i] = self.moves[j];
self.moves[j] = backup;
i += 1;
}
}
}
/// This function checks the legality *only for moves generated by `MoveGen`*.
///
/// Calling this function for moves not generated by `MoveGen` will result in possibly
/// incorrect results, and making that move on the `Board` will result in undefined behavior.
/// This function may panic! if these rules are not followed.
///
/// If you are validating a move from a user, you should call the .legal() function.
pub fn legal_quick(board: &Board, chess_move: ChessMove) -> bool {
let piece = board.piece_on(chess_move.get_source()).unwrap();
match piece {
Piece::Rook => true,
Piece::Bishop => true,
Piece::Knight => true,
Piece::Queen => true,
Piece::Pawn => {
if chess_move.get_source().get_file() != chess_move.get_dest().get_file()
&& board.piece_on(chess_move.get_dest()).is_none()
{
// en-passant
PawnType::legal_ep_move(board, chess_move.get_source(), chess_move.get_dest())
} else {
true
}
}
Piece::King => {
let bb = between(chess_move.get_source(), chess_move.get_dest());
if bb.popcnt() == 1 {
// castles
if !KingType::legal_king_move(board, bb.to_square()) {
false
} else {
KingType::legal_king_move(board, chess_move.get_dest())
}
} else {
KingType::legal_king_move(board, chess_move.get_dest())
}
}
}
}
/// Fastest perft test with this structure
pub fn movegen_perft_test(board: &Board, depth: usize) -> usize {
let iterable = MoveGen::new_legal(board);
let mut result: usize = 0;
        if depth == 1 {
            // at depth 1 the perft count is just the number of legal moves
            iterable.len()
} else {
for m in iterable {
let mut bresult = mem::MaybeUninit::<Board>::uninit();
unsafe {
board.make_move(m, &mut *bresult.as_mut_ptr());
result += MoveGen::movegen_perft_test(&*bresult.as_ptr(), depth - 1);
}
}
result
}
}
#[cfg(test)]
/// Do a perft test after splitting the moves up into two groups
pub fn movegen_perft_test_piecewise(board: &Board, depth: usize) -> usize {
let mut iterable = MoveGen::new_legal(board);
let targets = board.color_combined(!board.side_to_move());
let mut result: usize = 0;
if depth == 1 {
iterable.set_iterator_mask(*targets);
result += iterable.len();
iterable.set_iterator_mask(!targets);
result += iterable.len();
result
} else {
iterable.set_iterator_mask(*targets);
for x in &mut iterable {
let mut bresult = mem::MaybeUninit::<Board>::uninit();
unsafe {
board.make_move(x, &mut *bresult.as_mut_ptr());
result += MoveGen::movegen_perft_test(&*bresult.as_ptr(), depth - 1);
}
}
iterable.set_iterator_mask(!EMPTY);
for x in &mut iterable {
let mut bresult = mem::MaybeUninit::<Board>::uninit();
unsafe {
board.make_move(x, &mut *bresult.as_mut_ptr());
result += MoveGen::movegen_perft_test(&*bresult.as_ptr(), depth - 1);
}
}
result
}
}
}
impl ExactSizeIterator for MoveGen {
/// Give the exact length of this iterator
fn len(&self) -> usize {
let mut result = 0;
for i in 0..self.moves.len() {
if self.moves[i].bitboard & self.iterator_mask == EMPTY {
break;
}
if self.moves[i].promotion {
result += ((self.moves[i].bitboard & self.iterator_mask).popcnt() as usize)
* NUM_PROMOTION_PIECES;
} else {
result += (self.moves[i].bitboard & self.iterator_mask).popcnt() as usize;
}
}
result
}
}
impl Iterator for MoveGen {
type Item = ChessMove;
/// Give a size_hint to some functions that need it
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
/// Find the next chess move.
fn next(&mut self) -> Option<ChessMove> {
if self.index >= self.moves.len()
|| self.moves[self.index].bitboard & self.iterator_mask == EMPTY
{
// are we done?
None
} else if self.moves[self.index].promotion {
let moves = &mut self.moves[self.index];
let dest = (moves.bitboard & self.iterator_mask).to_square();
// deal with potential promotions for this pawn
let result = ChessMove::new(
moves.square,
dest,
Some(PROMOTION_PIECES[self.promotion_index]),
);
self.promotion_index += 1;
if self.promotion_index >= NUM_PROMOTION_PIECES {
moves.bitboard ^= BitBoard::from_square(dest);
self.promotion_index = 0;
if moves.bitboard & self.iterator_mask == EMPTY {
self.index += 1;
}
}
Some(result)
} else {
// not a promotion move, so its a 'normal' move as far as this function is concerned
let moves = &mut self.moves[self.index];
let dest = (moves.bitboard & self.iterator_mask).to_square();
moves.bitboard ^= BitBoard::from_square(dest);
if moves.bitboard & self.iterator_mask == EMPTY {
self.index += 1;
}
Some(ChessMove::new(moves.square, dest, None))
}
}
}
#[cfg(test)]
use crate::board_builder::BoardBuilder;
#[cfg(test)]
use std::collections::HashSet;
#[cfg(test)]
use std::convert::TryInto;
#[cfg(test)]
use std::str::FromStr;
#[cfg(test)]
fn movegen_perft_test(fen: String, depth: usize, result: usize) {
let board: Board = BoardBuilder::from_str(&fen).unwrap().try_into().unwrap();
assert_eq!(MoveGen::movegen_perft_test(&board, depth), result);
assert_eq!(MoveGen::movegen_perft_test_piecewise(&board, depth), result);
}
#[test]
fn movegen_en_passant_1() {
movegen_perft_test(
"rnbqkbnr/ppp1ppp1/8/3pP2p/8/8/PPPP1PPP/RNBQKBNR w KQkq d6 0 3".to_owned(),
1,
1,
)
}
#[test]
fn movegen_en_passant_2() {
movegen_perft_test("7r/8/8/5b2/4kpPp/7K/7P/7B b - g3 0 1".to_owned(), 1, 0)
}
#[test]
#[ignore]
fn movegen_perft_kiwipete() {
movegen_perft_test(
"r3k2r/p1ppqpb1/bn2pnp1/3PN3/1p2P3/2N2Q1p/PPPBBPPP/R3K2R w KQkq - 0 1".to_owned(),
5,
193690690,
);
}
#[test]
#[ignore]
fn movegen_perft_1() {
movegen_perft_test("8/5bk1/8/2Pp4/8/1K6/8/8 w - d6 0 1".to_owned(), 6, 824064);
}
#[test]
#[ignore]
fn movegen_perft_2() {
movegen_perft_test("8/8/1k6/8/2pP4/8/5BK1/8 b - d3 0 1".to_owned(), 6, 824064);
}
#[test]
#[ignore]
fn movegen_perft_3() {
movegen_perft_test("8/8/1k6/2b5/2pP4/8/5K2/8 b - d3 0 1".to_owned(), 6, 1440467);
}
#[test]
#[ignore]
fn movegen_perft_4() {
movegen_perft_test("8/5k2/8/2Pp4/2B5/1K6/8/8 w - d6 0 1".to_owned(), 6, 1440467);
}
#[test]
#[ignore]
fn movegen_perft_5() {
movegen_perft_test("5k2/8/8/8/8/8/8/4K2R w K - 0 1".to_owned(), 6, 661072);
}
#[test]
#[ignore]
fn movegen_perft_6() {
movegen_perft_test("4k2r/8/8/8/8/8/8/5K2 b k - 0 1".to_owned(), 6, 661072);
}
#[test]
#[ignore]
fn movegen_perft_7() {
movegen_perft_test("3k4/8/8/8/8/8/8/R3K3 w Q - 0 1".to_owned(), 6, 803711);
}
#[test]
#[ignore]
fn movegen_perft_8() {
movegen_perft_test("r3k3/8/8/8/8/8/8/3K4 b q - 0 1".to_owned(), 6, 803711);
}
#[test]
#[ignore]
fn movegen_perft_9() {
movegen_perft_test(
"r3k2r/1b4bq/8/8/8/8/7B/R3K2R w KQkq - 0 1".to_owned(),
4,
1274206,
);
}
#[test]
#[ignore]
fn movegen_perft_10() {
movegen_perft_test(
"r3k2r/7b/8/8/8/8/1B4BQ/R3K2R b KQkq - 0 1".to_owned(),
4,
1274206,
);
}
#[test]
#[ignore]
fn movegen_perft_11() {
movegen_perft_test(
"r3k2r/8/3Q4/8/8/5q2/8/R3K2R b KQkq - 0 1".to_owned(),
4,
1720476,
);
}
#[test]
#[ignore]
fn movegen_perft_12() {
movegen_perft_test(
"r3k2r/8/5Q2/8/8/3q4/8/R3K2R w KQkq - 0 1".to_owned(),
4,
1720476,
);
}
#[test]
#[ignore]
fn movegen_perft_13() {
movegen_perft_test("2K2r2/4P3/8/8/8/8/8/3k4 w - - 0 1".to_owned(), 6, 3821001);
}
#[test]
#[ignore]
fn movegen_perft_14() {
movegen_perft_test("3K4/8/8/8/8/8/4p3/2k2R2 b - - 0 1".to_owned(), 6, 3821001);
}
#[test]
#[ignore]
fn movegen_perft_15() {
movegen_perft_test("8/8/1P2K3/8/2n5/1q6/8/5k2 b - - 0 1".to_owned(), 5, 1004658);
}
#[test]
#[ignore]
fn movegen_perft_16() {
movegen_perft_test("5K2/8/1Q6/2N5/8/1p2k3/8/8 w - - 0 1".to_owned(), 5, 1004658);
}
#[test]
#[ignore]
fn movegen_perft_17() {
movegen_perft_test("4k3/1P6/8/8/8/8/K7/8 w - - 0 1".to_owned(), 6, 217342);
}
#[test]
#[ignore]
fn movegen_perft_18() {
movegen_perft_test("8/k7/8/8/8/8/1p6/4K3 b - - 0 1".to_owned(), 6, 217342);
}
#[test]
#[ignore]
fn movegen_perft_19() {
movegen_perft_test("8/P1k5/K7/8/8/8/8/8 w - - 0 1".to_owned(), 6, 92683);
}
#[test]
#[ignore]
fn movegen_perft_20() {
movegen_perft_test("8/8/8/8/8/k7/p1K5/8 b - - 0 1".to_owned(), 6, 92683);
}
#[test]
#[ignore]
fn movegen_perft_21() {
movegen_perft_test("K1k5/8/P7/8/8/8/8/8 w - - 0 1".to_owned(), 6, 2217);
}
#[test]
#[ignore]
fn movegen_perft_22() {
movegen_perft_test("8/8/8/8/8/p7/8/k1K5 b - - 0 1".to_owned(), 6, 2217);
}
#[test]
#[ignore]
fn movegen_perft_23() {
movegen_perft_test("8/k1P5/8/1K6/8/8/8/8 w - - 0 1".to_owned(), 7, 567584);
}
#[test]
#[ignore]
fn movegen_perft_24() {
movegen_perft_test("8/8/8/8/1k6/8/K1p5/8 b - - 0 1".to_owned(), 7, 567584);
}
#[test]
#[ignore]
fn movegen_perft_25() {
movegen_perft_test("8/8/2k5/5q2/5n2/8/5K2/8 b - - 0 1".to_owned(), 4, 23527);
}
#[test]
#[ignore]
fn movegen_perft_26() {
movegen_perft_test("8/5k2/8/5N2/5Q2/2K5/8/8 w - - 0 1".to_owned(), 4, 23527);
}
#[test]
#[ignore]
fn movegen_issue_15() {
let board =
BoardBuilder::from_str("rnbqkbnr/ppp2pp1/4p3/3N4/3PpPp1/8/PPP3PP/R1B1KBNR b KQkq f3 0 1")
.unwrap()
.try_into()
.unwrap();
let _ = MoveGen::new_legal(&board);
}
#[cfg(test)]
fn move_of(m: &str) -> ChessMove {
let promo = if m.len() > 4 {
Some(match m.as_bytes()[4] {
b'q' => Piece::Queen,
b'r' => Piece::Rook,
b'b' => Piece::Bishop,
b'n' => Piece::Knight,
_ => panic!("unrecognized uci move: {}", m),
})
} else {
None
};
ChessMove::new(
Square::from_str(&m[..2]).unwrap(),
Square::from_str(&m[2..4]).unwrap(),
promo,
)
}
#[test]
#[ignore]
fn test_masked_move_gen() {
let board =
Board::from_str("r1bqkb1r/pp3ppp/5n2/2ppn1N1/4pP2/1BN1P3/PPPP2PP/R1BQ1RK1 w kq - 0 9")
.unwrap();
let mut capture_moves = MoveGen::new_legal(&board);
let targets = *board.color_combined(!board.side_to_move());
capture_moves.set_iterator_mask(targets);
let expected = vec![
move_of("f4e5"),
move_of("b3d5"),
move_of("g5e4"),
move_of("g5f7"),
move_of("g5h7"),
move_of("c3e4"),
move_of("c3d5"),
];
assert_eq!(
capture_moves.collect::<HashSet<_>>(),
expected.into_iter().collect()
);
}
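// Hedged sketch: `remove_move` prunes a single move before iteration. From the
// initial position (20 legal moves), removing e2e4 leaves 19.
#[test]
fn movegen_remove_move_sketch() {
    let board = Board::default();
    let mut movegen = MoveGen::new_legal(&board);
    assert!(movegen.remove_move(move_of("e2e4")));
    assert_eq!(movegen.len(), 19);
}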
// ---- file f54bcef6d7966551170e325185931138ecdb28e3 (1,949 bytes) ----
//! Macro definitions in Enso.
use crate::macros::literal::Literal;
use crate::prelude::*;
use itertools::Itertools;
// ==================
// === Definition ===
// ==================
/// A macro definition.
///
/// A macro definition consists of a name, which identifies the macro to users, and a list of
/// [sections](`Section`). The sections are the most important portion of the macro definition, as
/// they define the literal portions of the token stream on which the macro will match.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
#[allow(missing_docs)]
pub struct Definition {
pub name: String,
pub sections: Vec<Section>,
}
impl Definition {
/// Constructor.
pub fn new(name: impl Str, sections: Vec<Section>) -> Self {
let name = name.into();
Self { name, sections }
}
/// Get the path for the definition.
///
/// The definition's path consists of the headers of each of the sections that make it up, and
/// describes the literals that must be matched for the macro to match.
pub fn path(&self) -> Vec<Literal> {
self.sections.iter().map(|s| s.start_symbol.clone()).collect_vec()
}
}
// ===============
// === Section ===
// ===============
/// A section in a macro, representing both a literal section header to match against, and the
/// tokens that the section contains.
///
/// The literal is the _most_ important portion of a section, as they are constants that allow the
/// macro resolver to divide up the input token stream based on these constants.
#[derive(Clone, Debug, Eq, PartialEq)]
#[allow(missing_docs)]
pub struct Section {
start_symbol: Literal, // TODO Pattern
}
impl Section {
/// Constructor.
pub fn new(symbol: Literal) -> Self {
Self { start_symbol: symbol }
}
/// Get a reference to the literal that heads the section.
pub fn start_symbol(&self) -> &Literal {
&self.start_symbol
}
}
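// Hedged usage sketch: building a definition for an `if ... then ... else ...`
// macro. The `Literal` constructor is illustrative (its API lives in
// `crate::macros::literal` and is not visible here):
//
//     let definition = Definition::new("if_then_else", vec![
//         Section::new(Literal::from("if")),
//         Section::new(Literal::from("then")),
//         Section::new(Literal::from("else")),
//     ]);
//     assert_eq!(definition.path().len(), 3);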
// ---- file 019a28c7cb5b89231a9fc9433dcecfdc27d89099 (8,185 bytes) ----
use clap::ArgMatches;
use config::{Config, ConfigError, File, FileFormat, Source};
use crossbeam_channel::*;
use tantivy::merge_policy::*;
use std::str::FromStr;
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
pub const HEADER: &str = r#"
______ __ _ ____ __
/_ __/__ ___ / / (_) / __/__ ___ _________/ /
/ / / _ \(_-</ _ \/ / _\ \/ -_) _ `/ __/ __/ _ \
/_/ \___/___/_//_/_/ /___/\__/\_,_/_/ \__/_//_/
Such Relevance, Much Index, Many Search, Wow
"#;
#[derive(PartialEq)]
pub enum MergePolicyType {
Log,
NoMerge,
}
#[derive(Deserialize, Clone)]
pub struct ConfigMergePolicy {
kind: String,
min_merge_size: Option<usize>,
min_layer_size: Option<u32>,
level_log_size: Option<f64>,
}
impl ConfigMergePolicy {
pub fn get_kind(&self) -> MergePolicyType {
match self.kind.to_ascii_lowercase().as_ref() {
"log" => MergePolicyType::Log,
"nomerge" => MergePolicyType::NoMerge,
_ => panic!("Unknown Merge Typed Defined"),
}
}
}
#[derive(Deserialize, Clone)]
pub struct Settings {
#[serde(default = "Settings::default_host")]
pub host: String,
#[serde(default = "Settings::default_port")]
pub port: u16,
#[serde(default = "Settings::default_path")]
pub path: String,
#[serde(default = "Settings::default_level")]
pub log_level: String,
#[serde(default = "Settings::default_writer_memory")]
pub writer_memory: usize,
#[serde(default = "Settings::default_json_parsing_threads")]
pub json_parsing_threads: usize,
#[serde(default = "Settings::default_auto_commit_duration")]
pub auto_commit_duration: u64,
#[serde(default = "Settings::default_bulk_buffer_size")]
pub bulk_buffer_size: usize,
#[serde(default = "Settings::default_merge_policy")]
pub merge_policy: ConfigMergePolicy,
#[serde(default = "Settings::default_consul_host")]
pub consul_host: String,
#[serde(default = "Settings::default_consul_port")]
pub consul_port: u16,
}
impl Default for Settings {
fn default() -> Self {
Self {
host: Settings::default_host(),
port: Settings::default_port(),
path: Settings::default_path(),
log_level: Settings::default_level(),
writer_memory: Settings::default_writer_memory(),
json_parsing_threads: Settings::default_json_parsing_threads(),
auto_commit_duration: Settings::default_auto_commit_duration(),
bulk_buffer_size: Settings::default_bulk_buffer_size(),
merge_policy: Settings::default_merge_policy(),
consul_host: Settings::default_consul_host(),
consul_port: Settings::default_consul_port(),
}
}
}
impl FromStr for Settings {
type Err = ConfigError;
fn from_str(cfg: &str) -> Result<Self, ConfigError> { Self::from_config(File::from_str(cfg, FileFormat::Toml)) }
}
impl Settings {
pub fn new(path: &str) -> Result<Self, ConfigError> { Self::from_config(File::with_name(path)) }
pub fn from_args(args: &ArgMatches) -> Self {
Self {
host: args.value_of("host").unwrap().to_string(),
port: args.value_of("port").unwrap().parse::<u16>().expect("Invalid port given."),
path: args.value_of("path").unwrap().to_string(),
log_level: args.value_of("level").unwrap().to_string(),
consul_host: args.value_of("consul-host").unwrap().to_string(),
consul_port: args
.value_of("consul-port")
.unwrap()
.parse::<u16>()
.expect("Invalid port given for Consul."),
..Default::default()
}
}
pub fn from_config<T: Source + Send + Sync + 'static>(c: T) -> Result<Self, ConfigError> {
let mut cfg = Config::new();
match cfg.merge(c) {
Ok(_) => {}
Err(e) => panic!("Problem with config file: {}", e),
};
cfg.try_into()
}
pub fn default_pretty() -> bool { false }
pub fn default_result_limit() -> usize { 100 }
pub fn default_host() -> String { "localhost".to_string() }
pub fn default_path() -> String { "data/".to_string() }
pub fn default_port() -> u16 { 8080 }
pub fn default_level() -> String { "info".to_string() }
pub fn default_writer_memory() -> usize { 200_000_000 }
pub fn default_json_parsing_threads() -> usize { 4 }
pub fn default_bulk_buffer_size() -> usize { 10000 }
pub fn default_auto_commit_duration() -> u64 { 10 }
pub fn default_merge_policy() -> ConfigMergePolicy {
ConfigMergePolicy {
kind: "log".to_string(),
min_merge_size: None,
min_layer_size: None,
level_log_size: None,
}
}
pub fn default_consul_host() -> String { "localhost".to_string() }
pub fn default_consul_port() -> u16 { 8500 }
pub fn get_channel<T>(&self) -> (Sender<T>, Receiver<T>) {
if self.bulk_buffer_size == 0 {
unbounded::<T>()
} else {
bounded::<T>(self.bulk_buffer_size)
}
}
    pub fn get_merge_policy(&self) -> Box<dyn MergePolicy> {
match self.merge_policy.get_kind() {
MergePolicyType::Log => {
let mut mp = LogMergePolicy::default();
if let Some(v) = self.merge_policy.level_log_size {
mp.set_level_log_size(v);
}
if let Some(v) = self.merge_policy.min_layer_size {
mp.set_min_layer_size(v);
}
if let Some(v) = self.merge_policy.min_merge_size {
mp.set_min_merge_size(v);
}
Box::new(mp)
}
MergePolicyType::NoMerge => Box::new(NoMergePolicy::default()),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn valid_default_config() {
let default = Settings::from_str("").unwrap();
assert_eq!(default.host, "localhost");
assert_eq!(default.port, 8080);
assert_eq!(default.path, "data/");
assert_eq!(default.writer_memory, 200_000_000);
assert_eq!(default.log_level, "info");
assert_eq!(default.json_parsing_threads, 4);
assert_eq!(default.bulk_buffer_size, 10000);
assert_eq!(default.merge_policy.kind, "log");
assert_eq!(default.merge_policy.level_log_size, None);
assert_eq!(default.merge_policy.min_layer_size, None);
assert_eq!(default.merge_policy.min_merge_size, None);
assert_eq!(default.consul_port, 8500);
assert_eq!(default.consul_host, "localhost");
}
#[test]
fn valid_merge_policy() {
let cfg = r#"
[merge_policy]
kind = "log"
level_log_size = 10.5
min_layer_size = 20
min_merge_size = 30"#;
let config = Settings::from_str(cfg).unwrap();
assert_eq!(config.merge_policy.level_log_size.unwrap(), 10.5);
assert_eq!(config.merge_policy.min_layer_size.unwrap(), 20);
assert_eq!(config.merge_policy.min_merge_size.unwrap(), 30);
}
#[test]
fn valid_no_merge_policy() {
let cfg = r#"
[merge_policy]
kind = "nomerge""#;
let config = Settings::from_str(cfg).unwrap();
assert!(config.merge_policy.get_kind() == MergePolicyType::NoMerge);
assert_eq!(config.merge_policy.kind, "nomerge");
assert_eq!(config.merge_policy.level_log_size, None);
assert_eq!(config.merge_policy.min_layer_size, None);
assert_eq!(config.merge_policy.min_merge_size, None);
}
#[test]
#[should_panic]
fn bad_config_file() { Settings::new("asdf/casdf").unwrap(); }
#[test]
#[should_panic]
fn bad_merge_type() {
let cfg = r#"
[merge_policy]
kind = "asdf1234""#;
let config = Settings::from_str(cfg).unwrap();
config.get_merge_policy();
}
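    // Hedged usage sketch: a zero bulk_buffer_size yields an unbounded
    // channel, and the default merge policy kind is "log".
    #[test]
    fn usage_sketch_channel_and_policy() {
        let config = Settings::from_str("bulk_buffer_size = 0").unwrap();
        let (_tx, _rx) = config.get_channel::<String>();
        assert!(config.merge_policy.get_kind() == MergePolicyType::Log);
    }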
}
// ---- file 7181f295057b64876fa990478a014a6dbd619d40 (2,006 bytes) ----
//! ra_db defines basic database traits. Concrete DB is defined by ra_analysis.
mod cancelation;
mod syntax_ptr;
mod input;
mod loc2id;
pub mod mock;
use std::sync::Arc;
use ra_editor::LineIndex;
use ra_syntax::{TextUnit, TextRange, SourceFileNode};
pub use crate::{
cancelation::{Canceled, Cancelable},
syntax_ptr::LocalSyntaxPtr,
input::{
FilesDatabase, FileId, CrateId, SourceRoot, SourceRootId, CrateGraph, Dependency,
FileTextQuery, FileSourceRootQuery, SourceRootQuery, LocalRootsQuery, LibraryRootsQuery, CrateGraphQuery,
FileRelativePathQuery
},
loc2id::{LocationIntener, NumericId},
};
#[macro_export]
macro_rules! impl_numeric_id {
($id:ident) => {
impl $crate::NumericId for $id {
fn from_u32(id: u32) -> Self {
$id(id)
}
fn to_u32(self) -> u32 {
self.0
}
}
};
}
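// Hedged usage sketch of the macro above: it expects a tuple struct wrapping a
// single `u32`. `ExampleId` is illustrative and not part of this crate's API.
#[cfg(test)]
mod numeric_id_sketch {
    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct ExampleId(u32);
    impl_numeric_id!(ExampleId);

    #[test]
    fn roundtrip() {
        use crate::NumericId;
        assert_eq!(ExampleId::from_u32(92).to_u32(), 92);
    }
}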
pub trait BaseDatabase: salsa::Database {
fn check_canceled(&self) -> Cancelable<()> {
if self.salsa_runtime().is_current_revision_canceled() {
Err(Canceled::new())
} else {
Ok(())
}
}
}
salsa::query_group! {
pub trait SyntaxDatabase: crate::input::FilesDatabase + BaseDatabase {
fn source_file(file_id: FileId) -> SourceFileNode {
type SourceFileQuery;
}
fn file_lines(file_id: FileId) -> Arc<LineIndex> {
type FileLinesQuery;
}
}
}
fn source_file(db: &impl SyntaxDatabase, file_id: FileId) -> SourceFileNode {
let text = db.file_text(file_id);
SourceFileNode::parse(&*text)
}
fn file_lines(db: &impl SyntaxDatabase, file_id: FileId) -> Arc<LineIndex> {
let text = db.file_text(file_id);
Arc::new(LineIndex::new(&*text))
}
#[derive(Clone, Copy, Debug)]
pub struct FilePosition {
pub file_id: FileId,
pub offset: TextUnit,
}
#[derive(Clone, Copy, Debug)]
pub struct FileRange {
pub file_id: FileId,
pub range: TextRange,
}
// ---- file 76c5d11d6210c84b9ecaaf211c2e0861d33648bd (285 bytes) ----
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
pub const INVALID_OBJECT_ID: u64 = 0;
// This only exists in the root store.
pub const SUPER_BLOCK_OBJECT_ID: u64 = 1;
// ---- file 29794c16494c22d9488081dec354757047b96982 (4,156 bytes) ----
use core::{
cmp::Ordering,
hash::{Hash, Hasher},
num::NonZeroU64,
};
use necsim_core_bond::{ClosedUnitF64, NonNegativeF64, PositiveF64};
use super::{
CoalescenceSampler, DispersalSampler, EmigrationExit, Habitat, LineageReference, LineageStore,
RngCore, SpeciationProbability, TurnoverRate,
};
use crate::{
event::PackedEvent, landscape::IndexedLocation,
simulation::partial::event_sampler::PartialSimulation,
};
#[allow(clippy::inline_always, clippy::inline_fn_without_body)]
#[contract_trait]
pub trait EventSampler<
H: Habitat,
G: RngCore,
R: LineageReference<H>,
S: LineageStore<H, R>,
X: EmigrationExit<H, G, R, S>,
D: DispersalSampler<H, G>,
C: CoalescenceSampler<H, R, S>,
T: TurnoverRate<H>,
N: SpeciationProbability<H>,
>: crate::cogs::Backup + core::fmt::Debug
{
#[must_use]
#[debug_ensures(if ret.is_none() { simulation.lineage_store.get(
old(lineage_reference.clone())
).is_none() } else { true }, "lineage emigrated if no event is returned")]
#[debug_ensures(ret.as_ref().map_or(true, |event: &PackedEvent| {
event.event_time == event_time
}), "event occurs at event_time")]
#[debug_ensures(ret.as_ref().map_or(true, |event: &PackedEvent| {
event.prior_time == prior_time
}), "event's prior time is prior_time")]
fn sample_event_for_lineage_at_indexed_location_time_or_emigrate(
&mut self,
lineage_reference: R,
indexed_location: IndexedLocation,
prior_time: NonNegativeF64,
event_time: PositiveF64,
simulation: &mut PartialSimulation<H, G, R, S, X, D, C, T, N>,
rng: &mut G,
) -> Option<PackedEvent>;
}
// The time of a speciation sample can be stored as a NonZeroU64 as:
// - an f64 can be stored as its u64 binary representation
// - a speciation sample is generated at an event time
// - every event must happen at a strictly greater time than the previous one
// - the simulation starts at time 0.0
#[derive(Clone, Debug)]
#[cfg_attr(feature = "cuda", derive(rust_cuda::rustacuda_core::DeviceCopy))]
#[cfg_attr(feature = "cuda", rustacuda(core = "rust_cuda::rustacuda_core"))]
pub struct SpeciationSample {
indexed_location: IndexedLocation,
time: NonZeroU64,
speciation_sample: ClosedUnitF64,
}
impl SpeciationSample {
#[must_use]
pub fn new(
indexed_location: IndexedLocation,
time: PositiveF64,
speciation_sample: ClosedUnitF64,
) -> Self {
// From the precondition time > 0.0_f64, we know that time =/= 0.0_f64
// i.e. time =/= 0_u64
Self {
indexed_location,
time: unsafe { NonZeroU64::new_unchecked(time.get().to_bits()) },
speciation_sample,
}
}
}
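// A minimal illustration of the bit-packing argument above (a hypothetical
// test, not part of the crate's API): every strictly positive, finite f64 has
// a non-zero IEEE-754 bit pattern, so an event time round-trips losslessly
// through `NonZeroU64`.
#[cfg(test)]
mod time_bits_tests {
    use core::num::NonZeroU64;

    #[test]
    fn positive_f64_roundtrips_through_nonzero_u64() {
        let time = 42.5_f64; // stands in for a `PositiveF64` event time
        let packed = NonZeroU64::new(time.to_bits()).expect("positive f64 => non-zero bits");
        assert_eq!(f64::from_bits(packed.get()), time);
    }
}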
impl PartialEq for SpeciationSample {
fn eq(&self, other: &Self) -> bool {
self.speciation_sample.cmp(&other.speciation_sample) == Ordering::Equal
&& f64::from_bits(self.time.get()).total_cmp(&f64::from_bits(other.time.get()))
== Ordering::Equal
&& self.indexed_location == other.indexed_location
}
}
impl Eq for SpeciationSample {}
impl Hash for SpeciationSample {
fn hash<S: Hasher>(&self, state: &mut S) {
self.indexed_location.hash(state);
self.time.hash(state);
self.speciation_sample.hash(state);
}
}
impl PartialOrd for SpeciationSample {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for SpeciationSample {
fn cmp(&self, other: &Self) -> Ordering {
self.speciation_sample.cmp(&other.speciation_sample)
}
}
#[allow(clippy::module_name_repetitions)]
pub trait MinSpeciationTrackingEventSampler<
H: Habitat,
G: RngCore,
R: LineageReference<H>,
S: LineageStore<H, R>,
X: EmigrationExit<H, G, R, S>,
D: DispersalSampler<H, G>,
C: CoalescenceSampler<H, R, S>,
T: TurnoverRate<H>,
N: SpeciationProbability<H>,
>: EventSampler<H, G, R, S, X, D, C, T, N>
{
fn replace_min_speciation(&mut self, new: Option<SpeciationSample>)
-> Option<SpeciationSample>;
}
| 31.24812 | 98 | 0.659288 |
612fbf56d7a57e5b99e7f1787838a0149f51701b
| 13,408 |
use ff::{BitIterator, Field, PrimeField};
use std::ops::{AddAssign, MulAssign, Neg, SubAssign};
use subtle::CtOption;
use super::{montgomery, JubjubEngine, JubjubParams, PrimeOrder, Unknown};
use rand_core::RngCore;
use std::marker::PhantomData;
use std::io::{self, Read, Write};
// Represents the affine point (X/Z, Y/Z) via the extended
// twisted Edwards coordinates.
//
// See "Twisted Edwards Curves Revisited"
// Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson
#[derive(Debug)]
pub struct Point<E: JubjubEngine, Subgroup> {
x: E::Fr,
y: E::Fr,
t: E::Fr,
z: E::Fr,
_marker: PhantomData<Subgroup>,
}
fn convert_subgroup<E: JubjubEngine, S1, S2>(from: &Point<E, S1>) -> Point<E, S2> {
Point {
x: from.x,
y: from.y,
t: from.t,
z: from.z,
_marker: PhantomData,
}
}
impl<E: JubjubEngine> From<&Point<E, Unknown>> for Point<E, Unknown> {
fn from(p: &Point<E, Unknown>) -> Point<E, Unknown> {
p.clone()
}
}
impl<E: JubjubEngine> From<Point<E, PrimeOrder>> for Point<E, Unknown> {
fn from(p: Point<E, PrimeOrder>) -> Point<E, Unknown> {
convert_subgroup(&p)
}
}
impl<E: JubjubEngine> From<&Point<E, PrimeOrder>> for Point<E, Unknown> {
fn from(p: &Point<E, PrimeOrder>) -> Point<E, Unknown> {
convert_subgroup(p)
}
}
impl<E: JubjubEngine, Subgroup> Clone for Point<E, Subgroup> {
fn clone(&self) -> Self {
convert_subgroup(self)
}
}
impl<E: JubjubEngine, Subgroup> PartialEq for Point<E, Subgroup> {
fn eq(&self, other: &Point<E, Subgroup>) -> bool {
// p1 = (x1/z1, y1/z1)
// p2 = (x2/z2, y2/z2)
// Deciding that these two points are equal is a matter of
// determining that x1/z1 = x2/z2, or equivalently that
// x1*z2 = x2*z1, and similarly for y.
let mut x1 = self.x;
x1.mul_assign(&other.z);
let mut y1 = self.y;
y1.mul_assign(&other.z);
let mut x2 = other.x;
x2.mul_assign(&self.z);
let mut y2 = other.y;
y2.mul_assign(&self.z);
x1 == x2 && y1 == y2
}
}
impl<E: JubjubEngine> Point<E, Unknown> {
pub fn read<R: Read>(mut reader: R, params: &E::Params) -> io::Result<Self> {
let mut y_repr = <E::Fr as PrimeField>::Repr::default();
reader.read_exact(y_repr.as_mut())?;
let x_sign = (y_repr.as_ref()[31] >> 7) == 1;
y_repr.as_mut()[31] &= 0x7f;
match E::Fr::from_repr(y_repr) {
Some(y) => {
let p = Self::get_for_y(y, x_sign, params);
if bool::from(p.is_some()) {
Ok(p.unwrap())
} else {
Err(io::Error::new(io::ErrorKind::InvalidInput, "not on curve"))
}
}
None => Err(io::Error::new(
io::ErrorKind::InvalidInput,
"y is not in field",
)),
}
}
pub fn get_for_y(y: E::Fr, sign: bool, params: &E::Params) -> CtOption<Self> {
// Given a y on the curve, x^2 = (y^2 - 1) / (dy^2 + 1)
// This is defined for all valid y-coordinates,
// as dy^2 + 1 = 0 has no solution in Fr.
// tmp1 = y^2
let mut tmp1 = y.square();
// tmp2 = (y^2 * d) + 1
let mut tmp2 = tmp1;
tmp2.mul_assign(params.edwards_d());
tmp2.add_assign(&E::Fr::one());
// tmp1 = y^2 - 1
tmp1.sub_assign(&E::Fr::one());
tmp2.invert().and_then(|tmp2| {
// tmp1 = (y^2 - 1) / (dy^2 + 1)
tmp1.mul_assign(&tmp2);
tmp1.sqrt().map(|mut x| {
if x.is_odd() != sign {
x = x.neg();
}
let mut t = x;
t.mul_assign(&y);
Point {
x,
y,
t,
z: E::Fr::one(),
_marker: PhantomData,
}
})
})
}
/// This guarantees the point is in the prime order subgroup
#[must_use]
pub fn mul_by_cofactor(&self, params: &E::Params) -> Point<E, PrimeOrder> {
let tmp = self.double(params).double(params).double(params);
convert_subgroup(&tmp)
}
pub fn rand<R: RngCore>(rng: &mut R, params: &E::Params) -> Self {
loop {
let y = E::Fr::random(rng);
let sign = rng.next_u32() % 2 != 0;
let p = Self::get_for_y(y, sign, params);
if bool::from(p.is_some()) {
return p.unwrap();
}
}
}
}
impl<E: JubjubEngine, Subgroup> Point<E, Subgroup> {
pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
let (x, y) = self.to_xy();
assert_eq!(E::Fr::NUM_BITS, 255);
let mut y_repr = y.to_repr();
if x.is_odd() {
y_repr.as_mut()[31] |= 0x80;
}
writer.write_all(y_repr.as_ref())
}
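    // Encoding sketch (as implemented by `read`/`write` above): a point is the
    // 32-byte little-endian y-coordinate, with the parity of x packed into the
    // most significant bit of the final byte.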
/// Convert from a Montgomery point
pub fn from_montgomery(m: &montgomery::Point<E, Subgroup>, params: &E::Params) -> Self {
match m.to_xy() {
None => {
// Map the point at infinity to the neutral element.
Point::zero()
}
Some((x, y)) => {
// The map from a Montgomery curve is defined as:
// (x, y) -> (u, v) where
// u = x / y
// v = (x - 1) / (x + 1)
//
// This map is not defined for y = 0 and x = -1.
//
// y = 0 is a valid point only for x = 0:
// y^2 = x^3 + A.x^2 + x
// 0 = x^3 + A.x^2 + x
// 0 = x(x^2 + A.x + 1)
// We have: x = 0 OR x^2 + A.x + 1 = 0
// x^2 + A.x + 1 = 0
// (2.x + A)^2 = A^2 - 4 (Complete the square.)
// The left hand side is a square, and so if A^2 - 4
// is nonsquare, there is no solution. Indeed, A^2 - 4
// is nonsquare.
//
// (0, 0) is a point of order 2, and so we map it to
// (0, -1) in the twisted Edwards curve, which is the
// only point of order 2 that is not the neutral element.
if y.is_zero() {
// This must be the point (0, 0) as above.
Point {
x: E::Fr::zero(),
y: E::Fr::one().neg(),
t: E::Fr::zero(),
z: E::Fr::one(),
_marker: PhantomData,
}
} else {
// Otherwise, as stated above, the mapping is still
// not defined at x = -1. However, x = -1 is not
// on the curve when A - 2 is nonsquare:
// y^2 = x^3 + A.x^2 + x
// y^2 = (-1) + A + (-1)
// y^2 = A - 2
// Indeed, A - 2 is nonsquare.
//
// We need to map into (projective) extended twisted
// Edwards coordinates (X, Y, T, Z) which represents
// the point (X/Z, Y/Z) with Z nonzero and T = XY/Z.
//
// Thus, we compute...
//
// u = x(x + 1)
// v = y(x - 1)
// t = x(x - 1)
                    // z = y(x + 1) (Cannot be zero, as above.)
//
// ... which represents the point ( x / y , (x - 1) / (x + 1) )
// as required by the mapping and preserves the property of
// the auxiliary coordinate t.
//
// We need to scale the coordinate, so u and t will have
// an extra factor s.
// u = xs
let mut u = x;
u.mul_assign(params.scale());
// v = x - 1
let mut v = x;
v.sub_assign(&E::Fr::one());
// t = xs(x - 1)
let mut t = u;
t.mul_assign(&v);
// z = (x + 1)
let mut z = x;
z.add_assign(&E::Fr::one());
// u = xs(x + 1)
u.mul_assign(&z);
// z = y(x + 1)
z.mul_assign(&y);
// v = y(x - 1)
v.mul_assign(&y);
Point {
x: u,
y: v,
t,
z,
_marker: PhantomData,
}
}
}
}
}
/// Attempts to cast this as a prime order element, failing if it's
/// not in the prime order subgroup.
pub fn as_prime_order(&self, params: &E::Params) -> Option<Point<E, PrimeOrder>> {
if self.mul(E::Fs::char(), params) == Point::zero() {
Some(convert_subgroup(self))
} else {
None
}
}
pub fn zero() -> Self {
Point {
x: E::Fr::zero(),
y: E::Fr::one(),
t: E::Fr::zero(),
z: E::Fr::one(),
_marker: PhantomData,
}
}
/// Convert to affine coordinates
pub fn to_xy(&self) -> (E::Fr, E::Fr) {
let zinv = self.z.invert().unwrap();
let mut x = self.x;
x.mul_assign(&zinv);
let mut y = self.y;
y.mul_assign(&zinv);
(x, y)
}
#[must_use]
pub fn negate(&self) -> Self {
let mut p = self.clone();
p.x = p.x.neg();
p.t = p.t.neg();
p
}
#[must_use]
pub fn double(&self, _: &E::Params) -> Self {
// See "Twisted Edwards Curves Revisited"
// Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson
// Section 3.3
// http://hyperelliptic.org/EFD/g1p/auto-twisted-extended.html#doubling-dbl-2008-hwcd
// A = X1^2
let a = self.x.square();
// B = Y1^2
let b = self.y.square();
// C = 2*Z1^2
let c = self.z.square().double();
// D = a*A
// = -A
let d = a.neg();
// E = (X1+Y1)^2 - A - B
let mut e = self.x;
e.add_assign(&self.y);
e = e.square();
e.add_assign(&d); // -A = D
e.sub_assign(&b);
// G = D+B
let mut g = d;
g.add_assign(&b);
// F = G-C
let mut f = g;
f.sub_assign(&c);
// H = D-B
let mut h = d;
h.sub_assign(&b);
// X3 = E*F
let mut x3 = e;
x3.mul_assign(&f);
// Y3 = G*H
let mut y3 = g;
y3.mul_assign(&h);
// T3 = E*H
let mut t3 = e;
t3.mul_assign(&h);
// Z3 = F*G
let mut z3 = f;
z3.mul_assign(&g);
Point {
x: x3,
y: y3,
t: t3,
z: z3,
_marker: PhantomData,
}
}
#[must_use]
pub fn add(&self, other: &Self, params: &E::Params) -> Self {
// See "Twisted Edwards Curves Revisited"
// Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson
// 3.1 Unified Addition in E^e
// A = x1 * x2
let mut a = self.x;
a.mul_assign(&other.x);
// B = y1 * y2
let mut b = self.y;
b.mul_assign(&other.y);
// C = d * t1 * t2
let mut c = *params.edwards_d();
c.mul_assign(&self.t);
c.mul_assign(&other.t);
// D = z1 * z2
let mut d = self.z;
d.mul_assign(&other.z);
// H = B - aA
// = B + A
let mut h = b;
h.add_assign(&a);
// E = (x1 + y1) * (x2 + y2) - A - B
// = (x1 + y1) * (x2 + y2) - H
let mut e = self.x;
e.add_assign(&self.y);
{
let mut tmp = other.x;
tmp.add_assign(&other.y);
e.mul_assign(&tmp);
}
e.sub_assign(&h);
// F = D - C
let mut f = d;
f.sub_assign(&c);
// G = D + C
let mut g = d;
g.add_assign(&c);
// x3 = E * F
let mut x3 = e;
x3.mul_assign(&f);
// y3 = G * H
let mut y3 = g;
y3.mul_assign(&h);
// t3 = E * H
let mut t3 = e;
t3.mul_assign(&h);
// z3 = F * G
let mut z3 = f;
z3.mul_assign(&g);
Point {
x: x3,
y: y3,
t: t3,
z: z3,
_marker: PhantomData,
}
}
#[must_use]
pub fn mul<S: Into<<E::Fs as PrimeField>::Repr>>(&self, scalar: S, params: &E::Params) -> Self {
// Standard double-and-add scalar multiplication
let mut res = Self::zero();
for b in BitIterator::<u8, _>::new(scalar.into()) {
res = res.double(params);
if b {
res = res.add(self, params);
}
}
res
}
}
| 27.87526 | 100 | 0.420122 |
798288feb6ecb2088e558c6179b4a8e08c34c67e
| 4,444 |
use ast;
use keyword::Keyword;
use scanner::Token;
use stream::Stream;
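// Grammar accepted by this parser (a sketch inferred from the functions below,
// not an authoritative spec):
//
//   program   := function*
//   function  := type-name fn-name '(' ')' block
//   block     := '{' statement* '}'
//   statement := 'return' expr ';'
//   expr      := num-literal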
#[derive(PartialEq, Eq, Debug)]
pub struct ParseError {
reason: String,
}
fn expect_next(token_stream: &mut Stream<Token>) -> Result<Token, ParseError> {
token_stream.next().ok_or(ParseError {
reason: "premature end".to_string(),
})
}
fn expect_next_eql(token_stream: &mut Stream<Token>, exp: Token) -> Result<(), ParseError> {
let tok = expect_next(token_stream)?;
if tok != exp {
Err(ParseError {
reason: format!("expected token: {:?}. actual: {:?}", exp, tok),
})
} else {
Ok(())
}
}
pub fn parse_expr(token_stream: &mut Stream<Token>) -> Result<ast::Expr, ParseError> {
let tok = expect_next(token_stream)?;
match tok {
Token::NumLiteral(s) => Ok(ast::Expr::NumLiteral(s)),
_ => Err(ParseError {
reason: "parse_expr: Expect NumLiteral".to_string(),
}),
}
}
pub fn parse_statement(token_stream: &mut Stream<Token>) -> Result<ast::Statement, ParseError> {
let _ = expect_next_eql(token_stream, Token::Symbol("return".to_string()))?;
let expr = parse_expr(token_stream)?;
let _ = expect_next_eql(token_stream, Token::Semi)?;
return Ok(ast::Statement::Return(Box::new(expr)));
}
pub fn parse_block(token_stream: &mut Stream<Token>) -> Result<ast::Block, ParseError> {
let _ = expect_next_eql(token_stream, Token::OpenCur)?;
let mut statements = Vec::new();
loop {
match token_stream.peek().ok_or(ParseError {
reason: "Premature end".to_string(),
})? {
Token::CloseCur => {
break;
}
_ => {
let stmt = parse_statement(token_stream)?;
statements.push(stmt);
}
}
}
let _ = expect_next_eql(token_stream, Token::CloseCur)?;
Ok(ast::Block { statements })
}
pub fn parse_function(token_stream: &mut Stream<Token>) -> Result<ast::Function, ParseError> {
let return_typename = expect_next(token_stream)?
.get_symbol_string()
.and_then(|name| {
match Keyword::from_str(&name) {
Some(_) => None, // reject keywords
None => Some(name),
}
})
.ok_or(ParseError {
reason: "invalid return typename type".to_string(),
})?;
let function_name = expect_next(token_stream)?
.get_symbol_string()
.and_then(|name| {
match Keyword::from_str(&name) {
Some(_) => None, // reject keywords
None => Some(name),
}
})
.ok_or(ParseError {
reason: "invalid function name type".to_string(),
})?;
let _ = expect_next_eql(token_stream, Token::OpenPar)?;
let _ = expect_next_eql(token_stream, Token::ClosePar)?;
let block = parse_block(token_stream)?;
Ok(ast::Function {
return_type: ast::Type {
name: return_typename,
},
name: function_name,
parameters: Vec::new(),
block,
})
}
pub fn parse_program(token_stream: &mut Stream<Token>) -> Result<ast::Program, ParseError> {
let mut functions = vec![];
while !token_stream.is_exhausted() {
functions.push(parse_function(token_stream)?);
}
return Ok(ast::Program { functions });
}
#[test]
fn test_parser() {
let tokens = vec![
Token::Symbol("int".to_string()),
Token::Symbol("main".to_string()),
Token::OpenPar,
Token::ClosePar,
Token::OpenCur,
Token::Symbol("return".to_string()),
Token::NumLiteral("0".to_string()),
Token::Semi,
Token::CloseCur,
];
let exp_ast = ast::Program {
functions: vec![
ast::Function {
return_type: ast::Type {
name: "int".to_string(),
},
name: "main".to_string(),
parameters: vec![],
block: ast::Block {
statements: vec![
ast::Statement::Return(Box::new(ast::Expr::NumLiteral("0".to_string()))),
],
},
},
],
};
let mut token_stream = Stream::new(tokens);
let ast = parse_program(&mut token_stream).unwrap();
assert_eq!(true, token_stream.is_exhausted());
assert_eq!(exp_ast, ast);
}
| 29.236842 | 97 | 0.555581 |
bfdbf62ac533baa1630395ba8b85cdc148e59e76
| 3,253 |
// Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! This crate provides implementations of the streaming AEAD primitive.
//!
//! AEAD encryption assures the confidentiality and authenticity of the data.
//! This primitive is CPA secure.
#![deny(broken_intra_doc_links)]
use std::sync::Once;
use tink_core::registry::register_key_manager;
mod aes_ctr_hmac_key_manager;
pub use aes_ctr_hmac_key_manager::*;
mod aes_gcm_hkdf_key_manager;
pub use aes_gcm_hkdf_key_manager::*;
mod decrypt_reader;
use decrypt_reader::*;
mod streamingaead_factory;
pub use streamingaead_factory::*;
mod streamingaead_key_templates;
pub use streamingaead_key_templates::*;
pub mod subtle;
/// The [upstream Tink](https://github.com/google/tink) version that this Rust
/// port is based on.
pub const UPSTREAM_VERSION: &str = "1.6.0";
static INIT: Once = Once::new();
/// Initialize the `tink-streaming-aead` crate, registering its primitives so they are available via
/// tink-core.
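///
/// A minimal usage sketch (template accessor per the registrations below;
/// error handling elided):
///
/// ```ignore
/// tink_streaming_aead::init();
/// let kh = tink_core::keyset::Handle::new(
///     &tink_streaming_aead::aes256_gcm_hkdf_4kb_key_template(),
/// ).unwrap();
/// let sa = tink_streaming_aead::new(&kh).unwrap();
/// ```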
pub fn init() {
INIT.call_once(|| {
register_key_manager(std::sync::Arc::new(AesCtrHmacKeyManager::default()))
.expect("tink_streaming_aead::init() failed"); // safe: init
register_key_manager(std::sync::Arc::new(AesGcmHkdfKeyManager::default()))
.expect("tink_streaming_aead::init() failed"); // safe: init
tink_core::registry::register_template_generator(
"AES128_CTR_HMAC_SHA256_4KB",
aes128_ctr_hmac_sha256_segment_4kb_key_template,
);
tink_core::registry::register_template_generator(
"AES128_CTR_HMAC_SHA256_1MB",
aes128_ctr_hmac_sha256_segment_1mb_key_template,
);
tink_core::registry::register_template_generator(
"AES256_CTR_HMAC_SHA256_4KB",
aes256_ctr_hmac_sha256_segment_4kb_key_template,
);
tink_core::registry::register_template_generator(
"AES256_CTR_HMAC_SHA256_1MB",
aes256_ctr_hmac_sha256_segment_1mb_key_template,
);
tink_core::registry::register_template_generator(
"AES128_GCM_HKDF_4KB",
aes128_gcm_hkdf_4kb_key_template,
);
tink_core::registry::register_template_generator(
"AES128_GCM_HKDF_1MB",
aes128_gcm_hkdf_1mb_key_template,
);
tink_core::registry::register_template_generator(
"AES256_GCM_HKDF_4KB",
aes256_gcm_hkdf_4kb_key_template,
);
tink_core::registry::register_template_generator(
"AES256_GCM_HKDF_1MB",
aes256_gcm_hkdf_1mb_key_template,
);
});
}
| 36.550562 | 100 | 0.686443 |
69b24c5500c1c8cbc6c9006a1abfb6b716377c10
| 998 |
use indyrs::{cache, future::Future, IndyError, PoolHandle, WalletHandle};
pub fn get_schema_cache(
pool_handle: PoolHandle,
wallet_handle: WalletHandle,
submitter_did: &str,
id: &str,
options_json: &str,
) -> Result<String, IndyError> {
cache::get_schema(pool_handle, wallet_handle, submitter_did, id, options_json).wait()
}
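// Illustrative call (the DID, schema id, and option values are example data;
// the option names follow the indy-sdk cache documentation):
//
//     let schema_json = get_schema_cache(
//         pool_handle,
//         wallet_handle,
//         "Th7MpTaRZVRYnPiabds81Y",
//         "Th7MpTaRZVRYnPiabds81Y:2:gvt:1.0",
//         r#"{"noCache": false, "noUpdate": false, "noStore": false, "minFresh": -1}"#,
//     )?;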
pub fn get_cred_def_cache(
pool_handle: PoolHandle,
wallet_handle: WalletHandle,
submitter_did: &str,
id: &str,
options_json: &str,
) -> Result<String, IndyError> {
cache::get_cred_def(pool_handle, wallet_handle, submitter_did, id, options_json).wait()
}
pub fn purge_schema_cache(
wallet_handle: WalletHandle,
options_json: &str,
) -> Result<(), IndyError> {
cache::purge_schema_cache(wallet_handle, options_json).wait()
}
pub fn purge_cred_def_cache(
wallet_handle: WalletHandle,
options_json: &str,
) -> Result<(), IndyError> {
cache::purge_cred_def_cache(wallet_handle, options_json).wait()
}
| 27.722222 | 91 | 0.714429 |
29e1aa2bdba01e0f735765d83f1311d08946d086
| 3,325 |
use crate::args;
use crate::utils::{get_crate_name, get_rustdoc};
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use syn::{Data, DeriveInput, Error, LitInt, Result};
pub fn generate(object_args: &args::Object, input: &DeriveInput) -> Result<TokenStream> {
let crate_name = get_crate_name(object_args.internal);
let ident = &input.ident;
let gql_typename = object_args
.name
.clone()
.unwrap_or_else(|| ident.to_string());
let desc = object_args
.desc
.clone()
.or_else(|| get_rustdoc(&input.attrs).ok().flatten())
.map(|s| quote! { Some(#s) })
.unwrap_or_else(|| quote! {None});
let s = match &input.data {
Data::Struct(e) => e,
_ => return Err(Error::new_spanned(input, "It should be a struct")),
};
let mut types = Vec::new();
for field in &s.fields {
types.push(&field.ty);
}
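    // Fold the struct fields into a right-nested chain of `MergedObject`
    // wrappers; e.g. for three fields the value side becomes
    // `MergedObject(&self.2, MergedObject(&self.1, MergedObject(&self.0, MergedObjectSubscriptionTail)))`.
    // `merged_type` below mirrors the same nesting at the type level so that
    // the registry metadata lines up with the value built here.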
let create_merged_obj = {
let mut obj = quote! { #crate_name::MergedObjectSubscriptionTail };
for i in 0..types.len() {
let n = LitInt::new(&format!("{}", i), Span::call_site());
obj = quote! { #crate_name::MergedObject(&self.#n, #obj) };
}
quote! {
#obj
}
};
let merged_type = {
let mut obj = quote! { #crate_name::MergedObjectTail };
for ty in &types {
obj = quote! { #crate_name::MergedObject::<#ty, #obj> };
}
obj
};
let expanded = quote! {
#[allow(clippy::all, clippy::pedantic)]
impl #crate_name::Type for #ident {
fn type_name() -> ::std::borrow::Cow<'static, str> {
::std::borrow::Cow::Borrowed(#gql_typename)
}
fn create_type_info(registry: &mut #crate_name::registry::Registry) -> String {
registry.create_type::<Self, _>(|registry| {
#merged_type::create_type_info(registry);
let mut fields = Default::default();
if let Some(#crate_name::registry::MetaType::Object {
fields: obj_fields,
..
}) = registry.types.remove(&*#merged_type::type_name()) {
fields = obj_fields;
}
#crate_name::registry::MetaType::Object {
name: #gql_typename.to_string(),
description: #desc,
fields,
cache_control: Default::default(),
extends: false,
keys: None,
}
})
}
}
#[allow(clippy::all, clippy::pedantic)]
#[#crate_name::async_trait::async_trait]
impl #crate_name::SubscriptionType for #ident {
async fn create_field_stream(&self, idx: usize, ctx: &#crate_name::Context<'_>, schema_env: #crate_name::SchemaEnv, query_env: #crate_name::QueryEnv) -> #crate_name::Result<::std::pin::Pin<Box<dyn #crate_name::futures::Stream<Item = #crate_name::Result<#crate_name::serde_json::Value>> + Send>>> {
#create_merged_obj.create_field_stream(idx, ctx, schema_env, query_env).await
}
}
};
Ok(expanded.into())
}
| 35.37234 | 309 | 0.528421 |
16e9cb538919cbc173db9a898e51315be0c56a8c
| 2,083 |
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test linked failure
// A port of task-killjoin to use a class with a dtor to manage
// the join.
use std::cell::Cell;
use std::comm::*;
use std::ptr;
use std::task;
struct notify {
ch: Chan<bool>,
v: @Cell<bool>,
}
#[unsafe_destructor]
impl Drop for notify {
fn drop(&mut self) {
unsafe {
error!("notify: task=%? v=%x unwinding=%b b=%b",
0,
ptr::to_unsafe_ptr(&(*(self.v))) as uint,
task::failing(),
*(self.v));
let b = *(self.v);
self.ch.send(b);
}
}
}
fn notify(ch: Chan<bool>, v: @Cell<bool>) -> notify {
notify {
ch: ch,
v: v
}
}
fn joinable(f: proc()) -> Port<bool> {
fn wrapper(c: Chan<bool>, f: ||) {
let b = @Cell::new(false);
error!("wrapper: task=%? allocated v=%x",
0,
ptr::to_unsafe_ptr(&b) as uint);
let _r = notify(c, b);
f();
*b = true;
}
let (p, c) = stream();
do task::spawn_unlinked {
let ccc = c;
wrapper(ccc, f)
}
p
}
fn join(port: Port<bool>) -> bool {
port.recv()
}
fn supervised() {
// Deschedule to make sure the supervisor joins before we
// fail. This is currently not needed because the supervisor
// runs first, but I can imagine that changing.
error!("supervised task=%?", 0);
task::deschedule();
fail!();
}
fn supervisor() {
error!("supervisor task=%?", 0);
let t = joinable(supervised);
join(t);
}
pub fn main() {
join(joinable(supervisor));
}
| 23.670455 | 68 | 0.55977 |
6116157c0670db2037feb465a67ba0beca8c29c4
| 9,961 |
#![warn(missing_docs)]
// TODO: fix `catch!` macro (see issue #132)
#![cfg_attr(feature = "cargo-clippy", allow(redundant_closure_call))]
//! # Safe JNI Bindings in Rust
//!
//! This crate provides a (mostly) safe way to implement methods in Java using
//! the JNI. Because who wants to *actually* write Java?
//!
//! ## Getting Started
//!
//! Naturally, any ffi-related project is going to require some code in both
//! languages that we're trying to make communicate. Java requires all native
//! methods to adhere to the Java Native Interface (JNI), so we first have to
//! define our function signature from Java, and then we can write Rust that
//! will adhere to it.
//!
//! ### The Java side
//!
//! First, you need a Java class definition. `HelloWorld.java`:
//!
//! ```java
//! class HelloWorld {
//! // This declares that the static `hello` method will be provided
//! // a native library.
//! private static native String hello(String input);
//!
//! static {
//! // This actually loads the shared object that we'll be creating.
//! // The actual location of the .so or .dll may differ based on your
//! // platform.
//! System.loadLibrary("mylib");
//! }
//!
//! // The rest is just regular ol' Java!
//! public static void main(String[] args) {
//! String output = HelloWorld.hello("josh");
//! System.out.println(output);
//! }
//! }
//! ```
//!
//! Compile this to a class file with `javac HelloWorld.java`.
//!
//! Trying to run it now will give us the error `Exception in thread "main"
//! java.lang.UnsatisfiedLinkError: no mylib in java.library.path` since we
//! haven't written our native code yet.
//!
//! To do that, first we need the name and type signature that our Rust function
//! needs to adhere to. Luckily, the Java compiler can generate that for you!
//! Run `javac -h . HelloWorld.java` and you'll get a `HelloWorld.h` written to
//! your directory. It should look something like this:
//!
//! ```c
//! /* DO NOT EDIT THIS FILE - it is machine generated */
//! #include <jni.h>
//! /* Header for class HelloWorld */
//!
//! #ifndef _Included_HelloWorld
//! #define _Included_HelloWorld
//! #ifdef __cplusplus
//! extern "C" {
//! #endif
//! /*
//! * Class: HelloWorld
//! * Method: hello
//! * Signature: (Ljava/lang/String;)Ljava/lang/String;
//! */
//! JNIEXPORT jstring JNICALL Java_HelloWorld_hello
//! (JNIEnv *, jclass, jstring);
//!
//! #ifdef __cplusplus
//! }
//! #endif
//! #endif
//! ```
//!
//! It's a C header, but luckily for us, the types will mostly match up. Let's
//! make our crate that's going to compile to our native library.
//!
//! ### The Rust side
//!
//! Create your crate with `cargo new mylib`. This will create a directory
//! `mylib` that has everything needed to build a basic crate with `cargo`. We
//! need to make a couple of changes to `Cargo.toml` before we do anything else.
//!
//! * Under `[dependencies]`, add `jni = "0.12.1"`
//! * Add a new `[lib]` section and under it, `crate_type = ["cdylib"]`.
//!
//! Now, if you run `cargo build` from inside the crate directory, you should
//! see a `libmylib.so` (if you're on linux/OSX) in the `target/debug`
//! directory.
//!
//! The last thing we need to do is to define our exported method. Add this to
//! your crate's `src/lib.rs`:
//!
//! ```rust,ignore
//! extern crate jni;
//!
//! // This is the interface to the JVM that we'll call the majority of our
//! // methods on.
//! use jni::JNIEnv;
//!
//! // These objects are what you should use as arguments to your native
//! // function. They carry extra lifetime information to prevent them escaping
//! // this context and getting used after being GC'd.
//! use jni::objects::{JClass, JString};
//!
//! // This is just a pointer. We'll be returning it from our function. We
//! // can't return one of the objects with lifetime information because the
//! // lifetime checker won't let us.
//! use jni::sys::jstring;
//!
//! // This keeps Rust from "mangling" the name and making it unique for this
//! // crate.
//! #[no_mangle]
//! // This turns off linter warnings because the name doesn't conform to
//! // conventions.
//! #[allow(non_snake_case)]
//! pub extern "system" fn Java_HelloWorld_hello(env: JNIEnv,
//! // This is the class that owns our static method. It's not going to be used,
//! // but still must be present to match the expected signature of a static
//! // native method.
//! class: JClass,
//! input: JString)
//! -> jstring {
//! // First, we have to get the string out of Java. Check out the `strings`
//! // module for more info on how this works.
//! let input: String =
//! env.get_string(input).expect("Couldn't get java string!").into();
//!
//! // Then we have to create a new Java string to return. Again, more info
//! // in the `strings` module.
//! let output = env.new_string(format!("Hello, {}!", input))
//! .expect("Couldn't create java string!");
//!
//! // Finally, extract the raw pointer to return.
//! output.into_inner()
//! }
//! ```
//!
//! Note that the type signature for our function is almost identical to the one
//! from the generated header, aside from our lifetime-carrying arguments.
//!
//! ### Final steps
//!
//! That's it! Build your crate and try to run your Java class again.
//!
//! ... Same error as before you say? Well that's because JVM is looking for
//! `mylib` in all the wrong places. This will differ by platform thanks to
//! different linker/loader semantics, but on Linux, you can simply `export
//! LD_LIBRARY_PATH=/path/to/mylib/target/debug`. Now, you should get the
//! expected output `Hello, josh!` from your Java class.
//!
//! ## Launching JVM from Rust
//!
//! If you need to use the part of the [Invocation API]
//! that allows to launch a JVM
//! from a native process, you must enable `invocation` feature.
//! The application will require linking to the dynamic `jvm`
//! library, which is distributed with the JVM.
//!
//! During build time, the JVM installation path is determined:
//! 1. By `JAVA_HOME` environment variable, if it is set.
//! 2. Otherwise — from `java` output.
//!
//! It is recommended to set `JAVA_HOME` to have reproducible builds, especially, in case of multiple VMs installed.
//!
//! At application run time, you must specify the path
//! to the `jvm` library so that the loader can locate it.
//! * On **Windows**, append the path to `jvm.dll` to `PATH` environment variable.
//! * On **MacOS**, append the path to `libjvm.dylib` to `DYLD_LIBRARY_PATH` environment variable.
//! * On **Linux**, append the path to `libjvm.so` to `LD_LIBRARY_PATH` environment variable.
//!
//! The exact relative path to `jvm` library is version-specific.
//!
//! For more information - see documentation in [build.rs](https://github.com/jni-rs/jni-rs/tree/master/build.rs).
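//!
//! A minimal launch sketch (requires the `invocation` feature; error handling
//! elided):
//!
//! ```rust,ignore
//! use jni::{InitArgsBuilder, JNIVersion, JavaVM};
//!
//! let jvm_args = InitArgsBuilder::new()
//!     .version(JNIVersion::V8)
//!     .option("-Xcheck:jni")
//!     .build()
//!     .unwrap();
//! let jvm = JavaVM::new(jvm_args).unwrap();
//! let env = jvm.attach_current_thread().unwrap();
//! ```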
//!
//! ## See Also
//!
//! ### Examples
//! - [Example project][jni-rs-example]
//! - Our [integration tests][jni-rs-its] and [benchmarks][jni-rs-benches]
//!
//! ### JNI Documentation
//! - [Java Native Interface Specification][jni-spec]
//! - [JNI tips][jni-tips] — general tips on JNI development and some Android-specific
//!
//! ### Open-Source Users
//! - The Servo browser engine Android [port][users-servo]
//! - The Exonum framework [Java Binding][users-ejb]
//! - MaidSafe [Java Binding][users-maidsafe]
//!
//! ### Other Projects Simplifying Java and Rust Communication
//! - Consider [JNR][projects-jnr] if you just need to use a native library with C interface
//! - Watch OpenJDK [Project Panama][projects-panama] which aims to enable using native libraries
//! with no JNI code
//! - Consider [GraalVM][projects-graalvm] — a recently released VM that gives zero-cost
//! interoperability between various languages (including Java and [Rust][graalvm-rust] compiled
//! into LLVM-bitcode)
//!
//! [Invocation API]: https://docs.oracle.com/en/java/javase/11/docs/specs/jni/invocation.html
//! [jni-spec]: https://docs.oracle.com/en/java/javase/11/docs/specs/jni/index.html
//! [jni-tips]: https://developer.android.com/training/articles/perf-jni
//! [jni-rs-example]: https://github.com/jni-rs/jni-rs/tree/master/example
//! [jni-rs-its]: https://github.com/jni-rs/jni-rs/tree/master/tests
//! [jni-rs-benches]: https://github.com/jni-rs/jni-rs/tree/master/benches
//! [users-servo]: https://github.com/servo/servo/tree/master/ports/libsimpleservo
//! [users-ejb]: https://github.com/exonum/exonum-java-binding/tree/master/exonum-java-binding/core/rust
//! [users-maidsafe]: https://github.com/maidsafe/safe_client_libs/tree/master/safe_app_jni
//! [projects-jnr]: https://github.com/jnr/jnr-ffi/
//! [projects-graalvm]: http://www.graalvm.org/docs/why-graal/#for-java-programs
//! [graalvm-rust]: http://www.graalvm.org/docs/reference-manual/languages/llvm/#running-rust
//! [projects-panama]: https://jdk.java.net/panama/
/// Bindgen-generated definitions. Mirrors `jni.h` and `jni_md.h`.
extern crate jni_sys;
/// `jni-sys` re-exports
pub mod sys;
#[macro_use]
extern crate log;
#[macro_use]
extern crate error_chain;
extern crate combine;
extern crate cesu8;
mod wrapper {
mod version;
pub use self::version::*;
#[macro_use]
mod macros;
/// Errors. Do you really need more explanation?
pub mod errors;
/// Descriptors for classes and method IDs.
pub mod descriptors;
/// Parser for java type signatures.
pub mod signature;
/// Wrappers for object pointers returned from the JVM.
pub mod objects;
/// String types for going to/from java strings.
pub mod strings;
/// Actual communication with the JVM
mod jnienv;
pub use self::jnienv::*;
/// Java VM interface
mod java_vm;
pub use self::java_vm::*;
}
pub use wrapper::*;
| 37.874525 | 116 | 0.661279 |
0ae9d1672c198eed6cd0272eac0bfdd9ace72896
| 32,183 |
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Library for filesystem management in rust.
//!
//! This library is analogous to the fs-management library in zircon. It provides support for
//! formatting, mounting, unmounting, and fsck-ing. It is implemented in a similar way to the C++
//! version - it uses the blobfs command line tool present in the base image. In order to use this
//! library inside of a sandbox, the following must be added to the relevant component manifest
//! file -
//!
//! ```
//! "sandbox": {
//! "services": [
//! "fuchsia.process.Launcher",
//! "fuchsia.tracing.provider.Registry"
//! ]
//! }
//! ```
//!
//! and the projects BUILD.gn file must contain
//!
//! ```
//! package("foo") {
//! deps = [
//! "//src/storage/bin/blobfs",
//! "//src/storage/bin/minfs",
//! ...
//! ]
//! binaries = [
//! { name = "blobfs" },
//! { name = "minfs" },
//! ...
//! ]
//! ...
//! }
//! ```
//!
//! for components v1. For components v2, add `/svc/fuchsia.process.Launcher` to `use` and add the
//! binaries as dependencies to your component.
//!
//! This library currently doesn't work outside of a component (the filesystem utility binary paths
//! are hard-coded strings).
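//!
//! A minimal usage sketch (the device path and mount point are illustrative;
//! error handling elided):
//!
//! ```ignore
//! use fs_management::Blobfs;
//!
//! let mut blobfs = Blobfs::new("/dev/class/block/000")?;
//! blobfs.format()?;
//! blobfs.mount("/test-fs-root")?;
//! // ... read and write through the mount point ...
//! blobfs.unmount()?;
//! ```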
pub mod asynchronous;
mod error;
use {
anyhow::{format_err, Context as _, Error},
cstr::cstr,
fdio::{service_connect_at, spawn_etc, Namespace, SpawnAction, SpawnOptions},
fidl::endpoints::DiscoverableProtocolMarker,
fidl_fuchsia_fs::AdminSynchronousProxy,
fidl_fuchsia_io as fio,
fuchsia_runtime::{HandleInfo, HandleType},
fuchsia_zircon::{self as zx, AsHandleRef, Task},
fuchsia_zircon_status as zx_status,
std::ffi::CStr,
};
// Re-export errors as public.
pub use error::{
BindError, CommandError, KillError, LaunchProcessError, QueryError, ServeError, ShutdownError,
};
/// Constants for fuchsia.io/FilesystemInfo.fs_type
/// Keep in sync with VFS_TYPE_* types in //zircon/system/public/zircon/device/vfs.h
pub mod vfs_type {
pub const BLOBFS: u32 = 0x9e694d21;
pub const FATFS: u32 = 0xce694d21;
pub const MINFS: u32 = 0x6e694d21;
pub const MEMFS: u32 = 0x3e694d21;
pub const FACTORYFS: u32 = 0x1e694d21;
pub const FXFS: u32 = 0x73667866;
pub const F2FS: u32 = 0xfe694d21;
}
/// Stores state of the mounted filesystem instance
struct FSInstance {
process: zx::Process,
mount_point: String,
export_root: zx::Channel,
}
impl FSInstance {
/// Mount the filesystem partition that exists on the provided block device, allowing it to
/// receive requests on the root channel. In order to be mounted in the traditional sense, the
/// client side of the provided root channel needs to be bound to a path in a namespace
/// somewhere.
fn mount(
block_device: zx::Channel,
args: Vec<&CStr>,
mount_point: &str,
) -> Result<Self, Error> {
let (export_root, server_end) = fidl::endpoints::create_endpoints::<fio::NodeMarker>()?;
let export_root = fio::DirectorySynchronousProxy::new(export_root.into_channel());
let actions = vec![
// export root handle is passed in as a PA_DIRECTORY_REQUEST handle at argument 0
SpawnAction::add_handle(
HandleInfo::new(HandleType::DirectoryRequest, 0),
server_end.into(),
),
// device handle is passed in as a PA_USER0 handle at argument 1
SpawnAction::add_handle(HandleInfo::new(HandleType::User0, 1), block_device.into()),
];
let process = launch_process(&args, actions)?;
// Wait until the filesystem is ready to take incoming requests. We want
// mount errors to show before we bind to the namespace.
let (root_dir, server_end) = fidl::endpoints::create_endpoints::<fio::NodeMarker>()?;
export_root.open(
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::POSIX_EXECUTABLE
| fio::OpenFlags::POSIX_WRITABLE,
0,
"root",
server_end.into(),
)?;
let root_dir = fio::DirectorySynchronousProxy::new(root_dir.into_channel());
let _: fio::NodeInfo = root_dir.describe(zx::Time::INFINITE).context("failed to mount")?;
let namespace = Namespace::installed().context("failed to get installed namespace")?;
namespace
.bind(mount_point, root_dir.into_channel())
.context("failed to bind client channel into default namespace")?;
Ok(Self {
process,
mount_point: mount_point.to_string(),
export_root: export_root.into_channel(),
})
}
/// Unmount the filesystem partition. The partition must already be mounted.
fn unmount(self) -> Result<(), Error> {
let (client_chan, server_chan) = zx::Channel::create()?;
service_connect_at(
&self.export_root,
fidl_fuchsia_fs::AdminMarker::PROTOCOL_NAME,
server_chan,
)?;
let admin_proxy = AdminSynchronousProxy::new(client_chan);
admin_proxy.shutdown(zx::Time::INFINITE)?;
let namespace = Namespace::installed().context("failed to get installed namespace")?;
namespace
.unbind(&self.mount_point)
.context("failed to unbind filesystem from default namespace")
}
/// Get `FileSystemInfo` struct from which one can find out things like
/// free space, used space, block size, etc.
fn query_filesystem(&self) -> Result<Box<fio::FilesystemInfo>, Error> {
let (client_chan, server_chan) = zx::Channel::create()?;
let namespace = Namespace::installed().context("failed to get installed namespace")?;
namespace
.connect(&self.mount_point, fio::OpenFlags::RIGHT_READABLE, server_chan)
.context("failed to connect to filesystem")?;
let proxy = fio::DirectorySynchronousProxy::new(client_chan);
let (status, result) = proxy
.query_filesystem(zx::Time::INFINITE)
.context("failed to query filesystem info")?;
zx_status::Status::ok(status).context("failed to query filesystem info")?;
result.ok_or(format_err!("querying filesystem info got empty result"))
}
/// Terminate the filesystem process and force unmount the mount point
fn kill(self) -> Result<(), Error> {
let namespace = Namespace::installed().context("failed to get installed namespace")?;
namespace
.unbind(&self.mount_point)
.context("failed to unbind filesystem from default namespace")?;
self.process.kill().context("Could not kill filesystem process")
}
}
fn launch_process(
args: &[&CStr],
mut actions: Vec<SpawnAction<'_>>,
) -> Result<zx::Process, LaunchProcessError> {
match spawn_etc(
&zx::Handle::invalid().into(),
SpawnOptions::CLONE_ALL,
args[0],
args,
None,
&mut actions,
) {
Ok(process) => Ok(process),
Err((status, message)) => Err(LaunchProcessError {
args: args.iter().map(|&a| a.to_owned()).collect(),
status,
message,
}),
}
}
fn run_command_and_wait_for_clean_exit(
args: Vec<&CStr>,
block_device: zx::Channel,
) -> Result<(), Error> {
let actions = vec![
// device handle is passed in as a PA_USER0 handle at argument 1
SpawnAction::add_handle(HandleInfo::new(HandleType::User0, 1), block_device.into()),
];
let process = launch_process(&args, actions)?;
let _signals = process
.wait_handle(zx::Signals::PROCESS_TERMINATED, zx::Time::INFINITE)
        .context("failed to wait for process to complete")?;
let info = process.info().context("failed to get process info")?;
if !zx::ProcessInfoFlags::from_bits(info.flags).unwrap().contains(zx::ProcessInfoFlags::EXITED)
|| info.return_code != 0
{
return Err(format_err!("process returned non-zero exit code ({})", info.return_code));
}
Ok(())
}
/// Describes the configuration for a particular native filesystem.
pub trait FSConfig {
/// Path to the filesystem binary
fn binary_path(&self) -> &CStr;
/// Arguments passed to the binary for all subcommands
fn generic_args(&self) -> Vec<&CStr>;
/// Arguments passed to the binary for formatting
fn format_args(&self) -> Vec<&CStr>;
/// Arguments passed to the binary for mounting
fn mount_args(&self) -> Vec<&CStr>;
}
/// Manages a block device for filesystem operations
pub struct Filesystem<FSC: FSConfig> {
device: fio::NodeSynchronousProxy,
config: FSC,
instance: Option<FSInstance>,
}
impl<FSC: FSConfig> Filesystem<FSC> {
/// Manage a filesystem on a device at the given path. The device is not formatted, mounted, or
/// modified at this point.
pub fn from_path(device_path: &str, config: FSC) -> Result<Self, Error> {
let (client_end, server_end) = zx::Channel::create()?;
fdio::service_connect(device_path, server_end)
.context("could not connect to block device")?;
Self::from_channel(client_end, config)
}
/// Manage a filesystem on a device at the given channel. The device is not formatted, mounted,
/// or modified at this point.
pub fn from_channel(client_end: zx::Channel, config: FSC) -> Result<Self, Error> {
let device = fio::NodeSynchronousProxy::new(client_end);
Ok(Self { device, config, instance: None })
}
/// Returns a channel to the block device.
fn get_channel(&mut self) -> Result<zx::Channel, Error> {
let (channel, server) = zx::Channel::create()?;
let () = self
.device
.clone(fio::OpenFlags::CLONE_SAME_RIGHTS, fidl::endpoints::ServerEnd::new(server))?;
Ok(channel)
}
/// Mount the provided block device and bind it to the provided mount_point in the default
/// namespace. The filesystem can't already be mounted, and the mount will fail if the provided
/// mount path doesn't already exist. The path is relative to the root of the default namespace,
/// and can't contain any '.' or '..' entries.
pub fn mount(&mut self, mount_point: &str) -> Result<(), Error> {
if self.instance.is_some() {
return Err(format_err!("cannot mount. filesystem is already mounted"));
}
let block_device = self.get_channel()?;
let mut args = vec![self.config.binary_path()];
args.append(&mut self.config.generic_args());
args.push(cstr!("mount"));
args.append(&mut self.config.mount_args());
self.instance = Some(FSInstance::mount(block_device, args, mount_point)?);
Ok(())
}
/// Format the associated device with a fresh filesystem. It must not be mounted.
pub fn format(&mut self) -> Result<(), Error> {
if self.instance.is_some() {
return Err(format_err!("cannot format! filesystem is mounted"));
}
let block_device = self.get_channel()?;
let mut args = vec![self.config.binary_path()];
args.append(&mut self.config.generic_args());
args.push(cstr!("mkfs"));
args.append(&mut self.config.format_args());
run_command_and_wait_for_clean_exit(args, block_device).context("failed to format device")
}
/// Run fsck on the filesystem partition. Returns Ok(()) if fsck succeeds, or the associated
/// error if it doesn't. Will fail if run on a mounted partition.
pub fn fsck(&mut self) -> Result<(), Error> {
if self.instance.is_some() {
return Err(format_err!("cannot fsck! filesystem is mounted"));
}
let block_device = self.get_channel()?;
let mut args = vec![self.config.binary_path()];
args.append(&mut self.config.generic_args());
args.push(cstr!("fsck"));
run_command_and_wait_for_clean_exit(args, block_device).context("failed to fsck device")
}
/// Unmount the filesystem partition. The partition must already be mounted.
pub fn unmount(&mut self) -> Result<(), Error> {
if let Some(instance) = self.instance.take() {
instance.unmount()
} else {
Err(format_err!("cannot unmount. filesystem is not mounted"))
}
}
/// Get `FileSystemInfo` struct from which one can find out things like
/// free space, used space, block size, etc.
pub fn query_filesystem(&self) -> Result<Box<fio::FilesystemInfo>, Error> {
if let Some(instance) = &self.instance {
instance.query_filesystem()
} else {
Err(format_err!("cannot query filesystem. filesystem is not mounted"))
}
}
/// Terminate the filesystem process and force unmount the mount point
pub fn kill(&mut self) -> Result<(), Error> {
if let Some(instance) = self.instance.take() {
instance.kill()
} else {
Err(format_err!("cannot kill. filesystem is not mounted"))
}
}
}
impl<FSC: FSConfig> Drop for Filesystem<FSC> {
fn drop(&mut self) {
if self.instance.is_some() {
// Unmount if possible.
let _ = self.unmount();
}
}
}
///
/// FILESYSTEMS
///
/// Layout of blobs in blobfs
#[derive(Clone)]
pub enum BlobLayout {
/// Merkle tree is stored in a separate block. This is deprecated and used only on Astro
/// devices (it takes more space).
DeprecatedPadded,
/// Merkle tree is appended to the last block of data
Compact,
}
/// Compression used for blobs in blobfs
#[derive(Clone)]
pub enum BlobCompression {
ZSTD,
ZSTDSeekable,
ZSTDChunked,
Uncompressed,
}
/// Eviction policy used for blobs in blobfs
#[derive(Clone)]
pub enum BlobEvictionPolicy {
NeverEvict,
EvictImmediately,
}
/// Blobfs Filesystem Configuration
/// If fields are None or false, they will not be set in arguments.
#[derive(Clone, Default)]
pub struct Blobfs {
pub verbose: bool,
pub readonly: bool,
pub metrics: bool,
pub blob_deprecated_padded_format: bool,
pub blob_compression: Option<BlobCompression>,
pub blob_eviction_policy: Option<BlobEvictionPolicy>,
}
impl Blobfs {
/// Manages a block device at a given path using
/// the default configuration.
pub fn new(path: &str) -> Result<Filesystem<Self>, Error> {
Filesystem::from_path(path, Self::default())
}
/// Manages a block device at a given channel using
/// the default configuration.
pub fn from_channel(channel: zx::Channel) -> Result<Filesystem<Self>, Error> {
Filesystem::from_channel(channel, Self::default())
}
}
impl FSConfig for Blobfs {
fn binary_path(&self) -> &CStr {
cstr!("/pkg/bin/blobfs")
}
fn generic_args(&self) -> Vec<&CStr> {
let mut args = vec![];
if self.verbose {
args.push(cstr!("--verbose"));
}
args
}
fn format_args(&self) -> Vec<&CStr> {
let mut args = vec![];
if self.blob_deprecated_padded_format {
args.push(cstr!("--deprecated_padded_format"));
}
args
}
fn mount_args(&self) -> Vec<&CStr> {
let mut args = vec![];
if self.readonly {
args.push(cstr!("--readonly"));
}
if self.metrics {
args.push(cstr!("--metrics"));
}
if let Some(compression) = &self.blob_compression {
args.push(cstr!("--compression"));
args.push(match compression {
BlobCompression::ZSTD => cstr!("ZSTD"),
BlobCompression::ZSTDSeekable => cstr!("ZSTD_SEEKABLE"),
BlobCompression::ZSTDChunked => cstr!("ZSTD_CHUNKED"),
BlobCompression::Uncompressed => cstr!("UNCOMPRESSED"),
});
}
if let Some(eviction_policy) = &self.blob_eviction_policy {
args.push(cstr!("--eviction_policy"));
args.push(match eviction_policy {
BlobEvictionPolicy::NeverEvict => cstr!("NEVER_EVICT"),
BlobEvictionPolicy::EvictImmediately => cstr!("EVICT_IMMEDIATELY"),
})
}
args
}
}
/// Minfs Filesystem Configuration
/// If fields are None or false, they will not be set in arguments.
#[derive(Clone, Default)]
pub struct Minfs {
// TODO(xbhatnag): Add support for fvm_data_slices
pub verbose: bool,
pub readonly: bool,
pub metrics: bool,
pub fsck_after_every_transaction: bool,
}
impl Minfs {
/// Manages a block device at a given path using
/// the default configuration.
pub fn new(path: &str) -> Result<Filesystem<Self>, Error> {
Filesystem::from_path(path, Self::default())
}
/// Manages a block device at a given channel using
/// the default configuration.
pub fn from_channel(channel: zx::Channel) -> Result<Filesystem<Self>, Error> {
Filesystem::from_channel(channel, Self::default())
}
}
impl FSConfig for Minfs {
fn binary_path(&self) -> &CStr {
cstr!("/pkg/bin/minfs")
}
fn generic_args(&self) -> Vec<&CStr> {
let mut args = vec![];
if self.verbose {
args.push(cstr!("--verbose"));
}
args
}
fn format_args(&self) -> Vec<&CStr> {
vec![]
}
fn mount_args(&self) -> Vec<&CStr> {
let mut args = vec![];
if self.readonly {
args.push(cstr!("--readonly"));
}
if self.metrics {
args.push(cstr!("--metrics"));
}
if self.fsck_after_every_transaction {
args.push(cstr!("--fsck_after_every_transaction"));
}
args
}
}
/// Fxfs Filesystem Configuration
/// If fields are None or false, they will not be set in arguments.
#[derive(Clone, Default)]
pub struct Fxfs {
pub verbose: bool,
pub readonly: bool,
}
impl Fxfs {
/// Manages a block device at a given path using
/// the default configuration.
pub fn new(path: &str) -> Result<Filesystem<Self>, Error> {
Filesystem::from_path(path, Self::default())
}
/// Manages a block device at a given channel using
/// the default configuration.
pub fn from_channel(channel: zx::Channel) -> Result<Filesystem<Self>, Error> {
Filesystem::from_channel(channel, Self::default())
}
}
impl FSConfig for Fxfs {
fn binary_path(&self) -> &CStr {
cstr!("/pkg/bin/fxfs")
}
fn generic_args(&self) -> Vec<&CStr> {
let mut args = vec![];
if self.verbose {
args.push(cstr!("--verbose"));
}
args
}
fn format_args(&self) -> Vec<&CStr> {
vec![]
}
fn mount_args(&self) -> Vec<&CStr> {
let mut args = vec![];
if self.readonly {
args.push(cstr!("--readonly"));
}
args
}
}
/// Factoryfs Filesystem Configuration
/// If fields are None or false, they will not be set in arguments.
#[derive(Clone, Default)]
pub struct Factoryfs {
pub verbose: bool,
pub metrics: bool,
}
impl Factoryfs {
/// Manages a block device at a given path using
/// the default configuration.
pub fn new(path: &str) -> Result<Filesystem<Self>, Error> {
Filesystem::from_path(path, Self::default())
}
/// Manages a block device at a given channel using
/// the default configuration.
pub fn from_channel(channel: zx::Channel) -> Result<Filesystem<Self>, Error> {
Filesystem::from_channel(channel, Self::default())
}
}
impl FSConfig for Factoryfs {
fn binary_path(&self) -> &CStr {
cstr!("/pkg/bin/factoryfs")
}
fn generic_args(&self) -> Vec<&CStr> {
let mut args = vec![];
if self.verbose {
args.push(cstr!("--verbose"));
}
args
}
fn format_args(&self) -> Vec<&CStr> {
vec![]
}
fn mount_args(&self) -> Vec<&CStr> {
let mut args = vec![];
if self.metrics {
args.push(cstr!("--metrics"));
}
args
}
}
#[cfg(test)]
mod tests {
use {
super::{BlobCompression, BlobEvictionPolicy, Blobfs, Factoryfs, Filesystem, Minfs},
fuchsia_zircon::HandleBased,
ramdevice_client::RamdiskClient,
std::io::{Read, Seek, Write},
};
fn ramdisk(block_size: u64) -> RamdiskClient {
ramdevice_client::wait_for_device(
"/dev/sys/platform/00:00:2d/ramctl",
std::time::Duration::from_secs(30),
)
.unwrap();
RamdiskClient::create(block_size, 1 << 16).unwrap()
}
fn blobfs(ramdisk: &RamdiskClient) -> Filesystem<Blobfs> {
let device = ramdisk.open().unwrap();
Blobfs::from_channel(device).unwrap()
}
#[test]
fn blobfs_custom_config() {
let block_size = 512;
let mount_point = "/test-fs-root";
let ramdisk = ramdisk(block_size);
let device = ramdisk.open().unwrap();
let config = Blobfs {
verbose: true,
metrics: true,
readonly: true,
blob_deprecated_padded_format: false,
blob_compression: Some(BlobCompression::Uncompressed),
blob_eviction_policy: Some(BlobEvictionPolicy::EvictImmediately),
};
let mut blobfs = Filesystem::from_channel(device, config).unwrap();
blobfs.format().expect("failed to format blobfs");
blobfs.fsck().expect("failed to fsck blobfs");
blobfs.mount(mount_point).expect("failed to mount blobfs");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
#[test]
fn blobfs_format_fsck_success() {
let block_size = 512;
let ramdisk = ramdisk(block_size);
let mut blobfs = blobfs(&ramdisk);
blobfs.format().expect("failed to format blobfs");
blobfs.fsck().expect("failed to fsck blobfs");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
#[test]
fn blobfs_format_fsck_error() {
let block_size = 512;
let ramdisk = ramdisk(block_size);
let mut blobfs = blobfs(&ramdisk);
blobfs.format().expect("failed to format blobfs");
// force fsck to fail by stomping all over one of blobfs's metadata blocks after formatting
// TODO(fxbug.dev/35860): corrupt something other than the superblock
let device_channel = ramdisk.open().expect("failed to get channel to device");
let mut file = fdio::create_fd::<std::fs::File>(device_channel.into_handle())
.expect("failed to convert to file descriptor");
        let bytes: Vec<u8> = std::iter::repeat(0xff).take(block_size as usize).collect();
        file.write_all(&bytes).expect("failed to write to device");
blobfs.fsck().expect_err("fsck succeeded when it shouldn't have");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
#[test]
fn blobfs_format_mount_write_query_remount_read_unmount() {
let block_size = 512;
let mount_point = "/test-fs-root";
let ramdisk = ramdisk(block_size);
let mut blobfs = blobfs(&ramdisk);
blobfs.format().expect("failed to format blobfs");
blobfs.mount(mount_point).expect("failed to mount blobfs the first time");
// snapshot of FilesystemInfo
let fs_info1 =
blobfs.query_filesystem().expect("failed to query filesystem info after first mount");
// pre-generated merkle test fixture data
let merkle = "be901a14ec42ee0a8ee220eb119294cdd40d26d573139ee3d51e4430e7d08c28";
let content = String::from("test content").into_bytes();
let path = format!("{}/{}", mount_point, merkle);
{
let mut test_file = std::fs::File::create(&path).expect("failed to create test file");
test_file.set_len(content.len() as u64).expect("failed to truncate file");
test_file.write_all(&content).expect("failed to write to test file");
}
// check against the snapshot FilesystemInfo
let fs_info2 =
blobfs.query_filesystem().expect("failed to query filesystem info after write");
assert_eq!(
fs_info2.used_bytes - fs_info1.used_bytes,
fs_info2.block_size as u64 // assuming content < 8K
);
blobfs.unmount().expect("failed to unmount blobfs the first time");
blobfs
.query_filesystem()
.expect_err("filesystem query on an unmounted filesystem didn't fail");
blobfs.mount(mount_point).expect("failed to mount blobfs the second time");
{
let mut test_file = std::fs::File::open(&path).expect("failed to open test file");
let mut read_content = Vec::new();
test_file.read_to_end(&mut read_content).expect("failed to read from test file");
assert_eq!(content, read_content);
}
// once more check against the snapshot FilesystemInfo
let fs_info3 =
blobfs.query_filesystem().expect("failed to query filesystem info after read");
assert_eq!(
fs_info3.used_bytes - fs_info1.used_bytes,
fs_info3.block_size as u64 // assuming content < 8K
);
blobfs.unmount().expect("failed to unmount blobfs the second time");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
fn minfs(ramdisk: &RamdiskClient) -> Filesystem<Minfs> {
let device = ramdisk.open().unwrap();
Minfs::from_channel(device).unwrap()
}
#[test]
fn minfs_custom_config() {
let block_size = 512;
let mount_point = "/test-fs-root";
let ramdisk = ramdisk(block_size);
let device = ramdisk.open().unwrap();
let config = Minfs {
verbose: true,
metrics: true,
readonly: true,
fsck_after_every_transaction: true,
};
let mut minfs = Filesystem::from_channel(device, config).unwrap();
minfs.format().expect("failed to format minfs");
minfs.fsck().expect("failed to fsck minfs");
minfs.mount(mount_point).expect("failed to mount minfs");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
#[test]
fn minfs_format_fsck_success() {
let block_size = 8192;
let ramdisk = ramdisk(block_size);
let mut minfs = minfs(&ramdisk);
minfs.format().expect("failed to format minfs");
minfs.fsck().expect("failed to fsck minfs");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
#[test]
fn minfs_format_fsck_error() {
let block_size = 8192;
let ramdisk = ramdisk(block_size);
let mut minfs = minfs(&ramdisk);
minfs.format().expect("failed to format minfs");
// force fsck to fail by stomping all over one of minfs's metadata blocks after formatting
let device_channel = ramdisk.open().expect("failed to get channel to device");
let mut file = fdio::create_fd::<std::fs::File>(device_channel.into_handle())
.expect("failed to convert to file descriptor");
        // when minfs isn't on an fvm, its bitmap is located at the 8th block.
// TODO(fxbug.dev/35861): parse the superblock for this offset and the block size.
let bitmap_block_offset = 8;
let bitmap_offset = block_size * bitmap_block_offset;
        let stomping_bytes: Vec<u8> =
            std::iter::repeat(0xff).take(block_size as usize).collect();
let actual_offset =
file.seek(std::io::SeekFrom::Start(bitmap_offset)).expect("failed to seek to bitmap");
assert_eq!(actual_offset, bitmap_offset);
        file.write_all(&stomping_bytes).expect("failed to write to device");
minfs.fsck().expect_err("fsck succeeded when it shouldn't have");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
#[test]
fn minfs_format_mount_write_query_remount_read_unmount() {
let block_size = 8192;
let mount_point = "/test-fs-root";
let ramdisk = ramdisk(block_size);
let mut minfs = minfs(&ramdisk);
minfs.format().expect("failed to format minfs");
minfs.mount(mount_point).expect("failed to mount minfs the first time");
// snapshot of FilesystemInfo
let fs_info1 =
minfs.query_filesystem().expect("failed to query filesystem info after first mount");
let filename = "test_file";
let content = String::from("test content").into_bytes();
let path = format!("{}/{}", mount_point, filename);
{
let mut test_file = std::fs::File::create(&path).expect("failed to create test file");
test_file.write_all(&content).expect("failed to write to test file");
}
// check against the snapshot FilesystemInfo
let fs_info2 =
minfs.query_filesystem().expect("failed to query filesystem info after write");
assert_eq!(
fs_info2.used_bytes - fs_info1.used_bytes,
fs_info2.block_size as u64 // assuming content < 8K
);
minfs.unmount().expect("failed to unmount minfs the first time");
minfs
.query_filesystem()
.expect_err("filesystem query on an unmounted filesystem didn't fail");
minfs.mount(mount_point).expect("failed to mount minfs the second time");
{
let mut test_file = std::fs::File::open(&path).expect("failed to open test file");
let mut read_content = Vec::new();
test_file.read_to_end(&mut read_content).expect("failed to read from test file");
assert_eq!(content, read_content);
}
// once more check against the snapshot FilesystemInfo
let fs_info3 =
minfs.query_filesystem().expect("failed to query filesystem info after read");
assert_eq!(
fs_info3.used_bytes - fs_info1.used_bytes,
fs_info3.block_size as u64 // assuming content < 8K
);
minfs.unmount().expect("failed to unmount minfs the second time");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
fn factoryfs(ramdisk: &RamdiskClient) -> Filesystem<Factoryfs> {
let device = ramdisk.open().unwrap();
Factoryfs::from_channel(device).unwrap()
}
#[test]
fn factoryfs_custom_config() {
let block_size = 512;
let mount_point = "/test-fs-root";
let ramdisk = ramdisk(block_size);
let device = ramdisk.open().unwrap();
let config = Factoryfs { verbose: true, metrics: true };
let mut factoryfs = Filesystem::from_channel(device, config).unwrap();
factoryfs.format().expect("failed to format factoryfs");
factoryfs.fsck().expect("failed to fsck factoryfs");
factoryfs.mount(mount_point).expect("failed to mount factoryfs");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
#[test]
fn factoryfs_format_fsck_success() {
let block_size = 512;
let ramdisk = ramdisk(block_size);
let mut factoryfs = factoryfs(&ramdisk);
factoryfs.format().expect("failed to format factoryfs");
factoryfs.fsck().expect("failed to fsck factoryfs");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
#[test]
fn factoryfs_format_mount_unmount() {
let block_size = 512;
let mount_point = "/test-fs-root";
let ramdisk = ramdisk(block_size);
let mut factoryfs = factoryfs(&ramdisk);
factoryfs.format().expect("failed to format factoryfs");
factoryfs.mount(mount_point).expect("failed to mount factoryfs");
factoryfs.unmount().expect("failed to unmount factoryfs");
ramdisk.destroy().expect("failed to destroy ramdisk");
}
}
| 34.457173 | 100 | 0.618712 |
ab0dabe20b0aa7016a2d2a73a677e02429bfc441
| 7,719 |
use rayon::prelude::*;
use rug::{float::Special, integer::Order, ops::Pow, Float, Integer};
use sha3::{Digest, Sha3_256};
/// Result of a simulation.
pub struct Result {
/// Amount of simulated rounds.
pub rounds: u64,
/// Number of wins for each validator.
pub wins: Vec<u64>,
/// Lowest weight winning a block.
pub min_win_weight: Float,
/// Highest weight winning a block.
pub max_win_weight: Float,
    /// Sum of all winning weights (used to compute the average).
pub sum_win_weight: Float,
    /// Maximum number of shards on which the same validator won at the same height.
pub max_win_across_shards: u64,
}
impl Result {
    /// Create a default result with the given validator count and float precision.
pub fn new(validators: usize, precision: u32) -> Self {
Self {
rounds: 0,
wins: vec![0; validators],
min_win_weight: Float::with_val(precision, Special::Infinity),
max_win_weight: Float::with_val(precision, Special::NegInfinity),
sum_win_weight: Float::with_val(precision, 0),
max_win_across_shards: 0,
}
}
    /// Merge two results together.
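    ///
    /// Merging is associative and `Result::new(..)` acts as an identity
    /// (zero counts, +infinity min, -infinity max), which is what makes it
    /// safe to use as the combining operator of the parallel `reduce` calls
    /// in `Config::simulate_epoch` and `Config::simulate_full`.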
pub fn merge(mut a: Self, b: Self) -> Self {
a.rounds += b.rounds;
a.min_win_weight.min_mut(&b.min_win_weight);
a.max_win_weight.max_mut(&b.max_win_weight);
a.sum_win_weight += b.sum_win_weight;
for (wa, wb) in a.wins.iter_mut().zip(b.wins.iter()) {
*wa += wb;
}
if b.max_win_across_shards > a.max_win_across_shards {
a.max_win_across_shards = b.max_win_across_shards;
}
a
}
    /// Display the results in a human-readable format.
pub fn display(&self, powers: &[Float], top_amount: usize, precision: u32) {
let win_rates: Vec<_> = self
.wins
.iter()
.map(|w| Float::with_val(precision, w) / Float::with_val(precision, self.rounds))
.collect();
println!("Results (top {} validators) :", top_amount);
println!("power win rate wins diff");
for i in 0..top_amount {
let diff = Float::with_val(precision, &win_rates[i] - &powers[i]);
println!(
"{:0.8} {:0.8} {:>8} {:+0.8}",
powers[i].to_f64(),
win_rates[i].to_f64(),
self.wins[i],
diff.to_f64()
);
}
println!();
println!("blocks: {}", self.rounds);
println!("max multi shard win : {}", self.max_win_across_shards);
println!("min winner score : {:0.8}", self.min_win_weight.to_f64());
println!("max winner score : {:0.8}", self.max_win_weight.to_f64());
println!(
"avr winner score : {:0.8}",
Float::with_val(
precision,
&self.sum_win_weight / Float::with_val(precision, self.rounds)
)
.to_f64()
);
}
}
/// Configuration of the simulation.
pub struct Config<'a, W, P>
where
    // Weight formula (seed, power, height, shard, validator, precision)
W: Sync + Fn(&[u8], &Float, u64, u64, u64, u32) -> Float,
P: Sync + Fn(),
{
    /// List of validator powers.
/// They should sum up to 1.
pub powers: &'a [Float],
/// Weight formula.
pub weight: &'a W,
    /// Callback triggered each time a block height has been simulated across all shards.
/// Mainly used for progress monitoring.
pub progress: P,
/// Amount of validators.
pub validators: usize,
/// Amount of shards.
pub shards: u64,
/// Amount of epochs.
pub epochs: u64,
/// Amount of blocks per epoch.
pub blocks_per_epoch: u64,
/// Float precision.
pub precision: u32,
}
impl<'a, W, P> Config<'a, W, P>
where
W: Sync + Fn(&[u8], &Float, u64, u64, u64, u32) -> Float,
P: Sync + Fn(),
{
/// Simulate the POS algorithm on all shards for the same height.
pub fn simulate_height(&self, seed: &[u8], height: u64) -> Result {
let mut shards_wins = vec![0; self.validators];
let mut result = Result::new(self.validators, self.precision);
for shard in 0..self.shards {
let mut winner = 0;
let mut winner_weight = Float::with_val(self.precision, Special::NegInfinity);
for (validator, power) in self.powers.iter().enumerate() {
let weight =
(self.weight)(seed, power, height, shard, validator as u64, self.precision);
if weight > winner_weight {
winner = validator;
winner_weight = weight;
}
}
result.wins[winner] += 1;
shards_wins[winner] += 1;
result.min_win_weight.min_mut(&winner_weight);
result.max_win_weight.max_mut(&winner_weight);
result.sum_win_weight += winner_weight;
}
let max_wins = *shards_wins.iter().max().unwrap();
if max_wins > result.max_win_across_shards {
result.max_win_across_shards = max_wins;
}
result.rounds = self.shards;
result
}
fn seed(epoch: u64) -> [u8; 32] {
let mut hasher = Sha3_256::new();
hasher.input(b"seed");
hasher.input(epoch.to_be_bytes());
hasher.result().into()
}
/// Simulate the POS algorithm on all shards for all blocks in given epoch.
pub fn simulate_epoch(&self, epoch: u64) -> Result {
let seed = Self::seed(epoch);
(0..self.blocks_per_epoch)
.into_par_iter()
.map(|h| self.simulate_height(&seed, h))
.inspect(|_| (self.progress)())
.reduce(
|| Result::new(self.validators, self.precision),
Result::merge,
)
}
    /// Simulate the POS algorithm on all shards for all blocks.
pub fn simulate_full(&self) -> Result {
(0..self.epochs)
.into_par_iter()
.map(|e| self.simulate_epoch(e))
.reduce(
|| Result::new(self.validators, self.precision),
Result::merge,
)
}
}
/// Compute a "random number".
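///
/// The value is deterministic for a given (seed, height, shard, validator)
/// tuple: the SHA3-256 digest is reinterpreted as a 256-bit little-endian
/// integer, so the result lies in [0, 2^256).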
pub fn random(seed: &[u8], height: u64, shard: u64, validator: u64, precision: u32) -> Float {
// Generate "random" number.
let mut hasher = Sha3_256::new();
hasher.input(seed);
hasher.input(shard.to_be_bytes());
hasher.input(height.to_be_bytes());
hasher.input(validator.to_be_bytes());
let hash = hasher.result();
let hash = Integer::from_digits(&hash, Order::Lsf);
Float::with_val(precision, hash)
}
/// Weight formula using a single exp.
pub fn weight_exp(
seed: &[u8],
power: &Float,
height: u64,
shard: u64,
validator: u64,
precision: u32,
) -> Float {
let rand = random(seed, height, shard, validator, precision);
    // Transform the number into the interval [0, 1].
let hash_max = Float::with_val(precision, 2).pow(256);
let rand: Float = rand / hash_max;
// Compute weight.
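    // Editorial note: raising a uniform draw r in [0, 1] to the power 1/p is
    // the classic weighted-lottery construction -- across validators, the one
    // with the largest r^(1/p) wins with probability proportional to its
    // power p.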
rand.pow(Float::with_val(precision, 1 / power))
}
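/// Weight formula using logarithms (a log-space variant of `weight_exp`).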
pub fn weight_log(
seed: &[u8],
power: &Float,
height: u64,
shard: u64,
validator: u64,
precision: u32,
) -> Float {
let rand = random(seed, height, shard, validator, precision);
// Compute weight.
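    // Editorial note: (ln_r - ln_max) / (power * ln_d) equals
    // log_5((rand / hash_max)^(1 / power)), i.e. a monotone transform of
    // `weight_exp`, so the per-shard winner is identical for both formulas.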
let ln_r = rand.ln();
let hash_max: Float = Float::with_val(precision, 2).pow(256);
let ln_max = hash_max.clone().ln();
let ln_d = Float::with_val(precision, 5).ln();
(ln_r - ln_max) / (power * ln_d) // + hash_max
}
| 31.378049 | 96 | 0.566913 |
28c82201e2e5005c3812a4e64b1b9b003c210f1b
| 4,108 |
use std::cell::{RefCell, UnsafeCell};
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use tokio::sync::RwLock;
use crate::app::channel::consumer::{BaseConsumerStrategy, Consumer, ConsumerWaker};
use crate::app::channel::storage::VecStorage;
use crate::app::channel::UnsafeSync;
use super::consumer::ConsumerStrategy;
use super::storage::ChannelStorage;
use crate::app::channel::producer::Producer;
pub struct Channel {
inner: Arc<UnsafeSync<UnsafeCell<Inner>>>,
}
pub(crate) struct Inner {
pub storage: Option<Rc<RefCell<dyn ChannelStorage>>>,
pub consumer_strategy: Option<Rc<RefCell<dyn ConsumerStrategy>>>,
pub consumer_group_handlers: RwLock<HashMap<u128, Arc<ConsumerGroupHandler>>>,
}
unsafe impl Send for Inner {}
unsafe impl Sync for Inner {}
unsafe impl Send for UnsafeSync<UnsafeCell<Inner>> {}
unsafe impl Sync for UnsafeSync<UnsafeCell<Inner>> {}
impl Channel {
pub fn new(app: &mut crate::app::App) -> Self {
let inner = Inner::new(app);
Channel {
inner: Arc::new(UnsafeSync::new(UnsafeCell::new(inner))),
}
}
pub fn consumer(&self, consumer_id: u128) -> Consumer {
unsafe {
let pointer: &mut Inner = &mut *self.inner.get();
Consumer::new(
consumer_id,
futures::executor::block_on(pointer.consumer_group_handler(consumer_id)),
)
}
}
pub fn producer(&self) -> Producer {
Producer::new(self.inner.clone())
}
}
impl Inner {
pub fn new(app: &mut crate::app::App) -> Self {
let mut inner = Inner {
storage: None,
consumer_strategy: None,
consumer_group_handlers: RwLock::new(HashMap::new()),
};
let storage = Rc::new(RefCell::new(VecStorage::new(app, &mut inner)));
inner.storage = Some(storage);
let consumer_strategy = Rc::new(RefCell::new(BaseConsumerStrategy::new(app, &mut inner)));
inner.consumer_strategy = Some(consumer_strategy);
inner
}
pub fn produce(&mut self, data: &mut Vec<u32>) {
self.consumer_strategy.as_ref().unwrap().borrow_mut().produce(data);
let guard = futures::executor::block_on(self.consumer_group_handlers.read());
for (_, consumer_group_handler) in guard.iter() {
consumer_group_handler.waker.wake();
}
}
#[allow(dead_code)]
pub async fn consume(&mut self, offset: usize, count: usize) -> Option<Vec<u32>> {
        self.consumer_strategy.as_ref().unwrap().borrow().consume(offset, count)
}
async fn consumer_group_handler(&self, consumer_id: u128) -> Arc<ConsumerGroupHandler> {
let guard = self.consumer_group_handlers.read().await;
if let Some(consumer_group_handler) = guard.get(&consumer_id) {
return consumer_group_handler.clone();
}
drop(guard);
let handler = Arc::new(ConsumerGroupHandler {
offset: AtomicUsize::new(0),
consumer_strategy: self.consumer_strategy.as_ref().unwrap().clone(),
waker: Arc::new(ConsumerWaker::new()),
});
        let mut guard = self.consumer_group_handlers.write().await;
        // Re-check under the write lock: another task may have inserted a
        // handler for this consumer while the read lock was released.
        guard.entry(consumer_id).or_insert(handler).clone()
}
}
pub(crate) struct ConsumerGroupHandler {
offset: AtomicUsize,
consumer_strategy: Rc<RefCell<dyn ConsumerStrategy>>,
waker: Arc<ConsumerWaker>,
}
unsafe impl Send for ConsumerGroupHandler {}
unsafe impl Sync for ConsumerGroupHandler {}
impl ConsumerGroupHandler {
pub fn waker(&self) -> Arc<ConsumerWaker> {
self.waker.clone()
}
pub async fn consume(&self, count: usize) -> Option<Vec<u32>> {
let current_offset = self.offset.load(Ordering::Relaxed);
let result = self.consumer_strategy.borrow().consume(current_offset, count);
if let Some(data) = &result {
self.offset.store(current_offset + data.len(), Ordering::Relaxed);
}
result
}
}
| 29.553957 | 98 | 0.639971 |
01336f6e27c5ec38f44b5efb458a9b5e6cd4c9b2
| 4,268 |
use druid::widget::prelude::*;
use druid::widget::{Button, Controller, Flex, Label, Spinner};
use druid::{SingleUse, Widget, WidgetExt};
use scribl_widget::ModalHost;
use crate::{CurrentAction, EditorState};
pub fn make_unsaved_changes_alert() -> impl Widget<EditorState> {
let close =
Button::new("Close without saving").on_click(|ctx, data: &mut EditorState, _env| {
data.action = CurrentAction::WaitingToExit;
ctx.submit_command(ModalHost::DISMISS_MODAL);
ctx.submit_command(druid::commands::CLOSE_WINDOW);
});
let cancel = Button::new("Cancel").on_click(|ctx, _data, _env| {
ctx.submit_command(ModalHost::DISMISS_MODAL);
});
let save = Button::dynamic(|data: &EditorState, _| {
if data.save_path.is_some() {
"Save".to_owned()
} else {
"Save as".to_owned()
}
})
.on_click(|ctx, data, _env| {
ctx.submit_command(ModalHost::DISMISS_MODAL);
if data.save_path.is_some() {
ctx.submit_command(druid::commands::SAVE_FILE);
} else {
ctx.submit_command(
druid::commands::SHOW_SAVE_PANEL.with(crate::menus::save_dialog_options()),
);
}
data.action = CurrentAction::WaitingToExit;
ctx.submit_command(
ModalHost::SHOW_MODAL.with(SingleUse::new(Box::new(make_waiting_to_exit_alert()))),
);
});
let button_row = Flex::row()
.with_child(close)
.with_spacer(5.0)
.with_child(cancel)
.with_spacer(5.0)
.with_child(save);
let label = Label::dynamic(|data: &EditorState, _| {
if let Some(file_name) = data
.save_path
.as_ref()
.and_then(|p| p.file_name())
.map(|f| f.to_string_lossy())
{
format!("\"{}\" has unsaved changes!", file_name)
} else {
"Your untitled animation has unsaved changes!".to_owned()
}
});
Flex::column()
.with_child(label)
.with_spacer(15.0)
.with_child(button_row)
.padding(10.0)
.background(druid::theme::BACKGROUND_LIGHT)
.border(druid::theme::FOREGROUND_DARK, 1.0)
}
/// This controller gets instantiated when we're planning to close a window. Its job is to sit and
/// wait until any saves and encodes in progress are finished. When they are, it sends a
/// CLOSE_WINDOW command.
struct Waiter {}
impl<W: Widget<EditorState>> Controller<EditorState, W> for Waiter {
fn update(
&mut self,
child: &mut W,
ctx: &mut UpdateCtx,
old_data: &EditorState,
data: &EditorState,
env: &Env,
) {
if data.status.in_progress.saving.is_none() && data.status.in_progress.encoding.is_none() {
ctx.submit_command(druid::commands::CLOSE_WINDOW);
}
child.update(ctx, old_data, data, env);
}
fn lifecycle(
&mut self,
child: &mut W,
ctx: &mut LifeCycleCtx,
ev: &LifeCycle,
data: &EditorState,
env: &Env,
) {
// We check for termination in lifecycle as well as update, because it's possible that
// the condition was triggered before we were instantiated, in which case we'll get a
// lifecycle event when we're added to the widget tree but we won't get any updates.
if data.status.in_progress.saving.is_none() && data.status.in_progress.encoding.is_none() {
ctx.submit_command(druid::commands::CLOSE_WINDOW);
}
child.lifecycle(ctx, ev, data, env);
}
}
pub fn make_waiting_to_exit_alert() -> impl Widget<EditorState> {
let label = Label::dynamic(|data: &EditorState, _env| {
if let Some(progress) = data.status.in_progress.encoding {
format!("Encoding (frame {} of {})...", progress.0, progress.1)
} else {
"Saving...".to_owned()
}
});
let spinner = Spinner::new();
Flex::column()
.with_child(label)
.with_spacer(15.0)
.with_child(spinner)
.padding(10.0)
.background(druid::theme::BACKGROUND_LIGHT)
.border(druid::theme::FOREGROUND_DARK, 1.0)
.controller(Waiter {})
}
| 33.085271 | 99 | 0.594658 |
fcd13a40fe333277087e51081e294a0063d6c983
| 4,228 |
use itertools::Itertools;
use kuchiki::{traits::*, NodeRef};
use crate::errors::PaperoniError;
use crate::moz_readability::{MetaData, Readability};
/// A tuple of the url and an Option of the resource's MIME type
pub type ResourceInfo = (String, Option<String>);
pub struct Article {
node_ref_opt: Option<NodeRef>,
pub img_urls: Vec<ResourceInfo>,
readability: Readability,
pub url: String,
}
impl Article {
/// Create a new instance of an HTML extractor given an HTML string
pub fn from_html(html_str: &str, url: &str) -> Self {
Self {
node_ref_opt: None,
img_urls: Vec::new(),
readability: Readability::new(html_str),
url: url.to_string(),
}
}
/// Locates and extracts the HTML in a document which is determined to be
/// the source of the content
pub fn extract_content(&mut self) -> Result<(), PaperoniError> {
self.readability.parse(&self.url)?;
if let Some(article_node_ref) = &self.readability.article_node {
let template = r#"
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="stylesheet.css" type="text/css"></link>
</head>
<body>
</body>
</html>
"#;
let doc = kuchiki::parse_html().one(template);
let body = doc.select_first("body").unwrap();
body.as_node().append(article_node_ref.clone());
self.node_ref_opt = Some(doc);
}
Ok(())
}
/// Traverses the DOM tree of the content and retrieves the IMG URLs
pub fn extract_img_urls(&mut self) {
if let Some(content_ref) = &self.node_ref_opt {
self.img_urls = content_ref
.select("img")
.unwrap()
.filter_map(|img_ref| {
let attrs = img_ref.attributes.borrow();
attrs
.get("src")
.filter(|val| !(val.is_empty() || val.starts_with("data:image")))
.map(ToString::to_string)
})
.unique()
.map(|val| (val, None))
.collect();
}
}
    /// Returns the extracted article [NodeRef]. It should only be called *AFTER* `extract_content` has succeeded.
pub fn node_ref(&self) -> &NodeRef {
self.node_ref_opt.as_ref().expect(
"Article node doesn't exist. This may be because the document has not been parsed",
)
}
pub fn metadata(&self) -> &MetaData {
&self.readability.metadata
}
}
#[cfg(test)]
mod test {
use super::*;
const TEST_HTML: &'static str = r#"
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="description" content="A sample document">
<meta name="keywords" content="test,Rust">
<meta name="author" content="Paperoni">
<title>Testing Paperoni</title>
</head>
<body>
<header>
<!-- Unimportant information -->
<h1>Testing Paperoni</h1>
</header>
<article>
<h1>Starting out</h1>
<p>Some Lorem Ipsum text here</p>
<p>Observe this picture</p>
<img src="./img.jpg" alt="Random image">
<img src="data:image/png;base64,lJGWEIUQOIQWIDYVIVEDYFOUYQFWD">
</article>
<footer>
<p>Made in HTML</p>
</footer>
</body>
</html>
"#;
#[test]
fn test_extract_img_urls() {
let mut article = Article::from_html(TEST_HTML, "http://example.com/");
article
.extract_content()
.expect("Article extraction failed unexpectedly");
article.extract_img_urls();
assert!(article.img_urls.len() > 0);
assert_eq!(
vec![("http://example.com/img.jpg".to_string(), None)],
article.img_urls
);
}
}
| 32.775194 | 95 | 0.508751 |
eb7a3a7aeb2aa20ecce0e0163d6725ccff770086
| 1,157 |
// Copyright 2018-2021 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::error::Error;
use std::fmt;
#[derive(Debug)]
pub struct ServiceConnectionAgentError(pub String);
impl Error for ServiceConnectionAgentError {}
impl fmt::Display for ServiceConnectionAgentError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0)
}
}
#[derive(Debug)]
pub struct ServiceConnectionError(pub String);
impl Error for ServiceConnectionError {}
impl fmt::Display for ServiceConnectionError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0)
}
}
| 29.666667 | 75 | 0.724287 |
f8f2053d059798dc5c566399f62efee71af3ffdd
| 459 |
use ross_dsl::Parser;
fn main() {
let text = "
const device_address = 0x0003~u16;
const receiver_address = 0x000b~u16; // old value: 0x000a~u16;
// This is a comment
send BUTTON_PRESSED_EVENT_CODE from device_address to receiver_address; // This is also a comment
";
match Parser::parse(text) {
Ok(event_processors) => println!("{:?}", event_processors),
Err(err) => println!("{}", err),
}
}
| 27 | 105 | 0.607843 |
14f03232427c35dfd9aff304a79fc4f9781300cb
| 62 |
//! Extra floating-point number types.
mod f16;
pub use f16::*;
| 10.333333 | 34 | 0.66129 |
010ed3ab4f2c17c97c57b7dd4deacddbe279f120
| 4,490 |
use crate::*;
use truck_meshalgo::prelude::*;
/// Wasm wrapper for `PolygonMesh`.
#[wasm_bindgen]
#[derive(Clone, Debug, Into, From, Deref, DerefMut)]
pub struct PolygonMesh(truck_meshalgo::prelude::PolygonMesh);
impl IntoWasm for truck_meshalgo::prelude::PolygonMesh {
type WasmWrapper = PolygonMesh;
}
/// STL Type
#[wasm_bindgen]
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub enum STLType {
/// Determine stl type automatically.
///
/// **Reading**: if the first 5 bytes are..
/// - "solid" => ascii format
/// - otherwise => binary format
///
/// **Writing**: always binary format.
Automatic,
/// ascii format
ASCII,
/// binary format
Binary,
}
impl From<STLType> for stl::STLType {
fn from(stl_type: STLType) -> stl::STLType {
match stl_type {
STLType::Automatic => stl::STLType::Automatic,
STLType::ASCII => stl::STLType::ASCII,
STLType::Binary => stl::STLType::Binary,
}
}
}
/// Buffer for rendering polygon
#[wasm_bindgen]
#[derive(Debug, Clone, Default)]
pub struct PolygonBuffer {
vertices: Vec<f32>,
indices: Vec<u32>,
}
#[wasm_bindgen]
impl PolygonMesh {
/// input from obj format
#[inline(always)]
pub fn from_obj(data: &[u8]) -> Option<PolygonMesh> {
obj::read::<&[u8]>(data)
.map_err(|e| eprintln!("{}", e))
.ok()
.map(|mesh| mesh.into_wasm())
}
/// input from STL format
#[inline(always)]
pub fn from_stl(data: &[u8], stl_type: STLType) -> Option<PolygonMesh> {
stl::read::<&[u8]>(data, stl_type.into())
.map_err(|e| eprintln!("{}", e))
.ok()
.map(|mesh| mesh.into_wasm())
}
/// output obj format
#[inline(always)]
pub fn to_obj(&self) -> Option<Vec<u8>> {
let mut res = Vec::new();
obj::write(&self.0, &mut res)
.map_err(|e| eprintln!("{}", e))
.ok()?;
Some(res)
}
/// output stl format
#[inline(always)]
pub fn to_stl(&self, stl_type: STLType) -> Option<Vec<u8>> {
let mut res = Vec::new();
stl::write(&self.0, &mut res, stl_type.into())
.map_err(|e| eprintln!("{}", e))
.ok()?;
Some(res)
}
/// Returns polygon buffer
#[inline(always)]
pub fn to_buffer(&self) -> PolygonBuffer {
let exp = self.0.expands(|attr| {
let position = attr.position;
let uv_coord = attr.uv_coord.unwrap_or_else(Vector2::zero);
let normal = attr.normal.unwrap_or_else(Vector3::zero);
[
position[0] as f32,
position[1] as f32,
position[2] as f32,
uv_coord[0] as f32,
uv_coord[1] as f32,
normal[0] as f32,
normal[1] as f32,
normal[2] as f32,
]
});
PolygonBuffer {
vertices: exp.attributes().iter().flatten().copied().collect(),
indices: exp
.faces()
.triangle_iter()
.flatten()
.map(|x| x as u32)
.collect(),
}
}
/// meshing shell
#[inline(always)]
pub fn from_shell(shell: Shell, tol: f64) -> Option<PolygonMesh> { shell.to_polygon(tol) }
/// meshing solid
#[inline(always)]
pub fn from_solid(solid: Solid, tol: f64) -> Option<PolygonMesh> { solid.to_polygon(tol) }
    /// Returns the bounding box
#[inline(always)]
pub fn bounding_box(&self) -> Vec<f64> {
let bdd = self.0.bounding_box();
let min = bdd.min();
let max = bdd.max();
vec![min[0], min[1], min[2], max[0], max[1], max[2]]
}
}
#[wasm_bindgen]
impl PolygonBuffer {
/// vertex buffer. One attribute contains `position: [f32; 3]`, `uv_coord: [f32; 2]` and `normal: [f32; 3]`.
#[inline(always)]
pub fn vertex_buffer(&self) -> Vec<f32> { self.vertices.clone() }
#[inline(always)]
/// the length (bytes) of vertex buffer. (Num of attributes) * 8 components * 4 bytes.
pub fn vertex_buffer_size(&self) -> usize { self.vertices.len() * 4 }
/// index buffer. `u32`.
#[inline(always)]
pub fn index_buffer(&self) -> Vec<u32> { self.indices.clone() }
/// the length (bytes) of index buffer. (Num of triangles) * 3 vertices * 4 bytes.
#[inline(always)]
pub fn index_buffer_size(&self) -> usize { self.indices.len() * 4 }
}
| 30.965517 | 112 | 0.545657 |
0eb32e12ddd8abf9ad45f1dd09c79cec15580a15
| 6,489 |
//! HTTP client interceptor API.
//!
//! This module provides the core types and functions for defining and working
//! with interceptors. Interceptors are handlers that augment HTTP client
//! functionality by decorating HTTP calls with custom logic.
//!
//! Known issues:
//!
//! - [`from_fn`] doesn't work as desired. The trait bounds are too ambiguous
//! for the compiler to infer for closures, and since the return type is
//! generic over a lifetime, there's no way to give the return type the
//! correct name using current Rust syntax.
//! - [`InterceptorObj`] wraps the returned future in an extra box.
//! - If an interceptor returns a custom error, it is stringified and wrapped in
//! `Error::Curl`. We should introduce a new error variant that boxes the
//! error and also records the type of the interceptor that created the error
//! for visibility. But we can't add a new variant right now without a BC
//! break. See [#182](https://github.com/sagebind/isahc/issues/182).
//! - Automatic redirect following currently bypasses interceptors for
//! subsequent requests. This will be fixed when redirect handling is
//! rewritten as an interceptor itself. See
//! [#232](https://github.com/sagebind/isahc/issues/232).
//!
//! # Availability
//!
//! This module is only available when the
//! [`unstable-interceptors`](../index.html#unstable-interceptors) feature is
//! enabled.
use crate::body::AsyncBody;
use http::{Request, Response};
use std::{error::Error, fmt, future::Future, pin::Pin};
mod context;
mod obj;
pub use self::context::Context;
pub(crate) use self::{context::Invoke, obj::InterceptorObj};
type InterceptorResult<E> = Result<Response<AsyncBody>, E>;
/// Defines an inline interceptor using a closure-like syntax.
///
/// Closures are not supported due to a limitation in Rust's type inference.
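///
/// # Example
///
/// A hypothetical use (an illustrative sketch; the logging body and the
/// `ctx.send` round-trip shown here are assumptions, not taken from the
/// crate's own docs):
///
/// ```ignore
/// let log_uri = interceptor!(request, ctx, {
///     println!("sending request to {}", request.uri());
///     ctx.send(request).await
/// });
/// ```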
#[cfg(feature = "unstable-interceptors")]
#[macro_export]
macro_rules! interceptor {
($request:ident, $ctx:ident, $body:expr) => {{
async fn interceptor(
mut $request: $crate::http::Request<$crate::AsyncBody>,
$ctx: $crate::interceptor::Context<'_>,
) -> Result<$crate::http::Response<$crate::AsyncBody>, $crate::Error> {
(move || async move { $body })().await.map_err(Into::into)
}
$crate::interceptor::from_fn(interceptor)
}};
}
/// Base trait for interceptors.
///
/// Since clients may be used to send requests concurrently, all interceptors
/// must be synchronized and must be able to account for multiple requests being
/// made in parallel.
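///
/// A hand-rolled implementation might look like the following sketch (the
/// logging behavior and the `ctx.send` forwarding call are illustrative
/// assumptions):
///
/// ```ignore
/// struct LogInterceptor;
///
/// impl Interceptor for LogInterceptor {
///     type Err = crate::Error;
///
///     fn intercept<'a>(
///         &'a self,
///         request: Request<AsyncBody>,
///         ctx: Context<'a>,
///     ) -> InterceptorFuture<'a, Self::Err> {
///         Box::pin(async move {
///             // Log the outgoing request, then forward it down the chain.
///             println!("--> {} {}", request.method(), request.uri());
///             ctx.send(request).await
///         })
///     }
/// }
/// ```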
pub trait Interceptor: Send + Sync {
/// The type of error returned by this interceptor.
type Err: Error + Send + Sync + 'static;
/// Intercept a request, returning a response.
///
/// The returned future is allowed to borrow the interceptor for the
/// duration of its execution.
fn intercept<'a>(
&'a self,
request: Request<AsyncBody>,
ctx: Context<'a>,
) -> InterceptorFuture<'a, Self::Err>;
}
/// The type of future returned by an interceptor.
pub type InterceptorFuture<'a, E> = Pin<Box<dyn Future<Output = InterceptorResult<E>> + Send + 'a>>;
/// Creates an interceptor from an arbitrary closure or function.
pub fn from_fn<F, E>(f: F) -> InterceptorFn<F>
where
F: for<'a> private::AsyncFn2<Request<AsyncBody>, Context<'a>, Output = InterceptorResult<E>>
+ Send
+ Sync
+ 'static,
E: Error + Send + Sync + 'static,
{
InterceptorFn(f)
}
/// An interceptor created from an arbitrary closure or function. See
/// [`from_fn`] for details.
pub struct InterceptorFn<F>(F);
impl<E, F> Interceptor for InterceptorFn<F>
where
E: Error + Send + Sync + 'static,
F: for<'a> private::AsyncFn2<Request<AsyncBody>, Context<'a>, Output = InterceptorResult<E>>
+ Send
+ Sync
+ 'static,
{
type Err = E;
fn intercept<'a>(
&self,
request: Request<AsyncBody>,
ctx: Context<'a>,
) -> InterceptorFuture<'a, Self::Err> {
Box::pin(self.0.call(request, ctx))
}
}
impl<F: fmt::Debug> fmt::Debug for InterceptorFn<F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
// Workaround for https://github.com/rust-lang/rust/issues/51004
#[allow(unreachable_pub)]
mod private {
use std::future::Future;
macro_rules! impl_async_fn {
($(($FnOnce:ident, $FnMut:ident, $Fn:ident, ($($arg:ident: $arg_ty:ident,)*)),)*) => {
$(
pub trait $FnOnce<$($arg_ty,)*> {
type Output;
type Future: Future<Output = Self::Output> + Send;
fn call_once(self, $($arg: $arg_ty,)*) -> Self::Future;
}
pub trait $FnMut<$($arg_ty,)*>: $FnOnce<$($arg_ty,)*> {
fn call_mut(&mut self, $($arg: $arg_ty,)*) -> Self::Future;
}
pub trait $Fn<$($arg_ty,)*>: $FnMut<$($arg_ty,)*> {
fn call(&self, $($arg: $arg_ty,)*) -> Self::Future;
}
impl<$($arg_ty,)* F, Fut> $FnOnce<$($arg_ty,)*> for F
where
F: FnOnce($($arg_ty,)*) -> Fut,
Fut: Future + Send,
{
type Output = Fut::Output;
type Future = Fut;
fn call_once(self, $($arg: $arg_ty,)*) -> Self::Future {
self($($arg,)*)
}
}
impl<$($arg_ty,)* F, Fut> $FnMut<$($arg_ty,)*> for F
where
F: FnMut($($arg_ty,)*) -> Fut,
Fut: Future + Send,
{
fn call_mut(&mut self, $($arg: $arg_ty,)*) -> Self::Future {
self($($arg,)*)
}
}
impl<$($arg_ty,)* F, Fut> $Fn<$($arg_ty,)*> for F
where
F: Fn($($arg_ty,)*) -> Fut,
Fut: Future + Send,
{
fn call(&self, $($arg: $arg_ty,)*) -> Self::Future {
self($($arg,)*)
}
}
)*
}
}
impl_async_fn! {
(AsyncFnOnce0, AsyncFnMut0, AsyncFn0, ()),
(AsyncFnOnce1, AsyncFnMut1, AsyncFn1, (a0:A0, )),
(AsyncFnOnce2, AsyncFnMut2, AsyncFn2, (a0:A0, a1:A1, )),
}
}
| 35.850829 | 100 | 0.564186 |
fcbff59dec0070bf94d40df9ec157d47528e83b2
| 9,523 |
#![allow(missing_docs, nonstandard_style)]
use crate::ffi::{OsStr, OsString};
use crate::io::ErrorKind;
use crate::os::windows::ffi::{OsStrExt, OsStringExt};
use crate::path::PathBuf;
use crate::time::Duration;
pub use self::rand::hashmap_random_keys;
pub use libc::strlen;
#[macro_use]
pub mod compat;
pub mod alloc;
pub mod args;
pub mod c;
pub mod cmath;
pub mod condvar;
pub mod env;
pub mod ext;
pub mod fs;
pub mod handle;
pub mod io;
pub mod memchr;
pub mod mutex;
pub mod net;
pub mod os;
pub mod os_str;
pub mod path;
pub mod pipe;
pub mod process;
pub mod rand;
pub mod rwlock;
pub mod thread;
pub mod thread_local_dtor;
pub mod thread_local_key;
pub mod thread_parker;
pub mod time;
cfg_if::cfg_if! {
if #[cfg(not(target_vendor = "uwp"))] {
pub mod stdio;
pub mod stack_overflow;
} else {
pub mod stdio_uwp;
pub mod stack_overflow_uwp;
pub use self::stdio_uwp as stdio;
pub use self::stack_overflow_uwp as stack_overflow;
}
}
#[cfg(not(test))]
pub fn init() {}
pub fn decode_error_kind(errno: i32) -> ErrorKind {
match errno as c::DWORD {
c::ERROR_ACCESS_DENIED => return ErrorKind::PermissionDenied,
c::ERROR_ALREADY_EXISTS => return ErrorKind::AlreadyExists,
c::ERROR_FILE_EXISTS => return ErrorKind::AlreadyExists,
c::ERROR_BROKEN_PIPE => return ErrorKind::BrokenPipe,
c::ERROR_FILE_NOT_FOUND => return ErrorKind::NotFound,
c::ERROR_PATH_NOT_FOUND => return ErrorKind::NotFound,
c::ERROR_NO_DATA => return ErrorKind::BrokenPipe,
c::ERROR_INVALID_PARAMETER => return ErrorKind::InvalidInput,
c::ERROR_SEM_TIMEOUT
| c::WAIT_TIMEOUT
| c::ERROR_DRIVER_CANCEL_TIMEOUT
| c::ERROR_OPERATION_ABORTED
| c::ERROR_SERVICE_REQUEST_TIMEOUT
| c::ERROR_COUNTER_TIMEOUT
| c::ERROR_TIMEOUT
| c::ERROR_RESOURCE_CALL_TIMED_OUT
| c::ERROR_CTX_MODEM_RESPONSE_TIMEOUT
| c::ERROR_CTX_CLIENT_QUERY_TIMEOUT
| c::FRS_ERR_SYSVOL_POPULATE_TIMEOUT
| c::ERROR_DS_TIMELIMIT_EXCEEDED
| c::DNS_ERROR_RECORD_TIMED_OUT
| c::ERROR_IPSEC_IKE_TIMED_OUT
| c::ERROR_RUNLEVEL_SWITCH_TIMEOUT
| c::ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT => return ErrorKind::TimedOut,
_ => {}
}
match errno {
c::WSAEACCES => ErrorKind::PermissionDenied,
c::WSAEADDRINUSE => ErrorKind::AddrInUse,
c::WSAEADDRNOTAVAIL => ErrorKind::AddrNotAvailable,
c::WSAECONNABORTED => ErrorKind::ConnectionAborted,
c::WSAECONNREFUSED => ErrorKind::ConnectionRefused,
c::WSAECONNRESET => ErrorKind::ConnectionReset,
c::WSAEINVAL => ErrorKind::InvalidInput,
c::WSAENOTCONN => ErrorKind::NotConnected,
c::WSAEWOULDBLOCK => ErrorKind::WouldBlock,
c::WSAETIMEDOUT => ErrorKind::TimedOut,
_ => ErrorKind::Other,
}
}
pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {
let ptr = haystack.as_ptr();
let mut start = &haystack[..];
    // For performance reasons, unroll the loop eight times.
while start.len() >= 8 {
macro_rules! if_return {
($($n:literal,)+) => {
$(
if start[$n] == needle {
return Some((&start[$n] as *const u16 as usize - ptr as usize) / 2);
}
)+
}
}
if_return!(0, 1, 2, 3, 4, 5, 6, 7,);
start = &start[8..];
}
for c in start {
if *c == needle {
return Some((c as *const u16 as usize - ptr as usize) / 2);
}
}
None
}
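// Converts an `OsStr` into a NUL-terminated UTF-16 buffer suitable for
// wide-character ("W") WinAPI calls. For example, "C:\\foo" becomes the
// UTF-16 code units of the path followed by a trailing 0, while any input
// containing an interior NUL is rejected with `InvalidInput` instead of
// being silently truncated at the NUL.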
pub fn to_u16s<S: AsRef<OsStr>>(s: S) -> crate::io::Result<Vec<u16>> {
fn inner(s: &OsStr) -> crate::io::Result<Vec<u16>> {
let mut maybe_result: Vec<u16> = s.encode_wide().collect();
if unrolled_find_u16s(0, &maybe_result).is_some() {
return Err(crate::io::Error::new(
ErrorKind::InvalidInput,
"strings passed to WinAPI cannot contain NULs",
));
}
maybe_result.push(0);
Ok(maybe_result)
}
inner(s.as_ref())
}
// Many Windows APIs follow a pattern of where we hand a buffer and then they
// will report back to us how large the buffer should be or how many bytes
// currently reside in the buffer. This function is an abstraction over these
// functions by making them easier to call.
//
// The first callback, `f1`, is yielded a (pointer, len) pair which can be
// passed to a syscall. The `ptr` is valid for `len` items (u16 in this case).
// The closure is expected to return what the syscall returns which will be
// interpreted by this function to determine if the syscall needs to be invoked
// again (with more buffer space).
//
// Once the syscall has completed (errors bail out early) the second closure is
// yielded the data which has been read from the syscall. The return value
// from this closure is then the return value of the function.
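//
// A typical call site looks like the following sketch (the specific API,
// `GetModuleFileNameW`, is an illustrative assumption): the syscall reports
// how many u16s it wrote, and `fill_utf16_buf` grows the buffer and retries
// on ERROR_INSUFFICIENT_BUFFER.
//
//     fill_utf16_buf(
//         |buf, sz| unsafe { c::GetModuleFileNameW(ptr::null_mut(), buf, sz) },
//         os2path,
//     )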
fn fill_utf16_buf<F1, F2, T>(mut f1: F1, f2: F2) -> crate::io::Result<T>
where
F1: FnMut(*mut u16, c::DWORD) -> c::DWORD,
F2: FnOnce(&[u16]) -> T,
{
// Start off with a stack buf but then spill over to the heap if we end up
// needing more space.
let mut stack_buf = [0u16; 512];
let mut heap_buf = Vec::new();
unsafe {
let mut n = stack_buf.len();
loop {
let buf = if n <= stack_buf.len() {
&mut stack_buf[..]
} else {
let extra = n - heap_buf.len();
heap_buf.reserve(extra);
heap_buf.set_len(n);
&mut heap_buf[..]
};
// This function is typically called on windows API functions which
// will return the correct length of the string, but these functions
            // also return `0` on error. In some cases, however, the
// returned "correct length" may actually be 0!
//
// To handle this case we call `SetLastError` to reset it to 0 and
// then check it again if we get the "0 error value". If the "last
// error" is still 0 then we interpret it as a 0 length buffer and
// not an actual error.
c::SetLastError(0);
let k = match f1(buf.as_mut_ptr(), n as c::DWORD) {
0 if c::GetLastError() == 0 => 0,
0 => return Err(crate::io::Error::last_os_error()),
n => n,
} as usize;
if k == n && c::GetLastError() == c::ERROR_INSUFFICIENT_BUFFER {
n *= 2;
} else if k >= n {
n = k;
} else {
return Ok(f2(&buf[..k]));
}
}
}
}
fn os2path(s: &[u16]) -> PathBuf {
PathBuf::from(OsString::from_wide(s))
}
pub fn truncate_utf16_at_nul(v: &[u16]) -> &[u16] {
match unrolled_find_u16s(0, v) {
// don't include the 0
Some(i) => &v[..i],
None => v,
}
}
pub trait IsZero {
fn is_zero(&self) -> bool;
}
macro_rules! impl_is_zero {
($($t:ident)*) => ($(impl IsZero for $t {
fn is_zero(&self) -> bool {
*self == 0
}
})*)
}
impl_is_zero! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
pub fn cvt<I: IsZero>(i: I) -> crate::io::Result<I> {
if i.is_zero() { Err(crate::io::Error::last_os_error()) } else { Ok(i) }
}
pub fn dur2timeout(dur: Duration) -> c::DWORD {
// Note that a duration is a (u64, u32) (seconds, nanoseconds) pair, and the
// timeouts in windows APIs are typically u32 milliseconds. To translate, we
// have two pieces to take care of:
//
// * Nanosecond precision is rounded up
// * Greater than u32::MAX milliseconds (50 days) is rounded up to INFINITE
// (never time out).
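    //
    // For example, a duration of 1 second and 1 nanosecond maps to 1001
    // milliseconds: the stray nanosecond rounds up to a whole millisecond
    // instead of being dropped, and anything above u32::MAX milliseconds
    // saturates to INFINITE.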
dur.as_secs()
.checked_mul(1000)
.and_then(|ms| ms.checked_add((dur.subsec_nanos() as u64) / 1_000_000))
.and_then(|ms| ms.checked_add(if dur.subsec_nanos() % 1_000_000 > 0 { 1 } else { 0 }))
.map(|ms| if ms > <c::DWORD>::MAX as u64 { c::INFINITE } else { ms as c::DWORD })
.unwrap_or(c::INFINITE)
}
/// Use `__fastfail` to abort the process
///
/// This is the same implementation as in libpanic_abort's `__rust_start_panic`. See
/// that function for more information on `__fastfail`
#[allow(unreachable_code)]
pub fn abort_internal() -> ! {
const FAST_FAIL_FATAL_APP_EXIT: usize = 7;
unsafe {
cfg_if::cfg_if! {
if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
asm!("int $$0x29", in("ecx") FAST_FAIL_FATAL_APP_EXIT);
crate::intrinsics::unreachable();
} else if #[cfg(all(target_arch = "arm", target_feature = "thumb-mode"))] {
asm!(".inst 0xDEFB", in("r0") FAST_FAIL_FATAL_APP_EXIT);
crate::intrinsics::unreachable();
} else if #[cfg(target_arch = "aarch64")] {
asm!("brk 0xF003", in("x0") FAST_FAIL_FATAL_APP_EXIT);
crate::intrinsics::unreachable();
}
}
}
crate::intrinsics::abort();
}
cfg_if::cfg_if! {
if #[cfg(target_vendor = "uwp")] {
#[link(name = "ws2_32")]
// For BCryptGenRandom
#[link(name = "bcrypt")]
extern "C" {}
} else {
#[link(name = "advapi32")]
#[link(name = "ws2_32")]
#[link(name = "userenv")]
extern "C" {}
}
}
| 33.065972 | 94 | 0.58826 |
1eb4449bc80a4e626f61ddcb1d0fa4159eea3c8a
| 294 |
/// Alias type for Hasher used to shard data in Arcon
///
/// Arcon uses MurmurHash3
#[cfg(feature = "hasher")]
pub type KeyHasher = mur3::Hasher32;
/// Helper function to create [KeyHasher]
#[cfg(feature = "hasher")]
#[inline]
pub fn key_hasher() -> KeyHasher {
KeyHasher::with_seed(0)
}
| 22.615385 | 53 | 0.680272 |
7240a3ffcd5cfbbc842f6f3f88797a863e2223a9
| 5,412 |
use crate::core::ribosome::error::RibosomeResult;
use crate::core::{
ribosome::{CallContext, RibosomeT},
state::metadata::LinkMetaKey,
};
use holochain_p2p::actor::GetLinksOptions;
use holochain_zome_types::link::LinkDetails;
use holochain_zome_types::GetLinkDetailsInput;
use holochain_zome_types::GetLinkDetailsOutput;
use std::sync::Arc;
#[allow(clippy::extra_unused_lifetimes)]
pub fn get_link_details<'a>(
ribosome: Arc<impl RibosomeT>,
call_context: Arc<CallContext>,
input: GetLinkDetailsInput,
) -> RibosomeResult<GetLinkDetailsOutput> {
let (base_address, tag) = input.into_inner();
// Get zome id
let zome_id = ribosome.zome_name_to_id(&call_context.zome_name)?;
// Get the network from the context
let network = call_context.host_access.network().clone();
tokio_safe_block_on::tokio_safe_block_forever_on(async move {
// Create the key
let key = match tag.as_ref() {
Some(tag) => LinkMetaKey::BaseZomeTag(&base_address, zome_id, tag),
None => LinkMetaKey::BaseZome(&base_address, zome_id),
};
// Get the links from the dht
let link_details = LinkDetails::from(
call_context
.host_access
.workspace()
.write()
.await
.cascade(network)
.get_link_details(&key, GetLinksOptions::default())
.await?,
);
Ok(GetLinkDetailsOutput::new(link_details))
})
}
#[cfg(test)]
#[cfg(feature = "slow_tests")]
pub mod slow_tests {
use crate::fixt::ZomeCallHostAccessFixturator;
use ::fixt::prelude::*;
use holochain_wasm_test_utils::TestWasm;
use holochain_zome_types::element::SignedHeaderHashed;
use holochain_zome_types::Header;
use test_wasm_common::*;
#[tokio::test(threaded_scheduler)]
async fn ribosome_entry_hash_path_children_details() {
let test_env = holochain_state::test_utils::test_cell_env();
let env = test_env.env();
let mut workspace =
crate::core::workflow::CallZomeWorkspace::new(env.clone().into()).unwrap();
// commits fail validation if we don't do genesis
crate::core::workflow::fake_genesis(&mut workspace.source_chain)
.await
.unwrap();
let workspace_lock = crate::core::workflow::CallZomeWorkspaceLock::new(workspace);
let mut host_access = fixt!(ZomeCallHostAccess);
host_access.workspace = workspace_lock;
// ensure foo.bar twice to ensure idempotency
let _: () = crate::call_test_ribosome!(
host_access,
TestWasm::HashPath,
"ensure",
TestString::from("foo.bar".to_string())
);
let _: () = crate::call_test_ribosome!(
host_access,
TestWasm::HashPath,
"ensure",
TestString::from("foo.bar".to_string())
);
// ensure foo.baz
let _: () = crate::call_test_ribosome!(
host_access,
TestWasm::HashPath,
"ensure",
TestString::from("foo.baz".to_string())
);
let exists_output: TestBool = crate::call_test_ribosome!(
host_access,
TestWasm::HashPath,
"exists",
TestString::from("foo".to_string())
);
        assert_eq!(TestBool(true), exists_output);
let _foo_bar: holo_hash::EntryHash = crate::call_test_ribosome!(
host_access,
TestWasm::HashPath,
"hash",
TestString::from("foo.bar".to_string())
);
let _foo_baz: holo_hash::EntryHash = crate::call_test_ribosome!(
host_access,
TestWasm::HashPath,
"hash",
TestString::from("foo.baz".to_string())
);
let children_details_output: holochain_zome_types::link::LinkDetails = crate::call_test_ribosome!(
host_access,
TestWasm::HashPath,
"children_details",
TestString::from("foo".to_string())
);
let link_details = children_details_output.into_inner();
let to_remove: SignedHeaderHashed = (link_details[0]).0.clone();
let to_remove_hash = to_remove.as_hash().clone();
let _remove_hash: holo_hash::HeaderHash = crate::call_test_ribosome!(
host_access,
TestWasm::HashPath,
"delete_link",
to_remove_hash
);
let children_details_output_2: holochain_zome_types::link::LinkDetails = crate::call_test_ribosome!(
host_access,
TestWasm::HashPath,
"children_details",
TestString::from("foo".to_string())
);
let children_details_output_2_vec = children_details_output_2.into_inner();
assert_eq!(2, children_details_output_2_vec.len());
let mut remove_happened = false;
for (_, removes) in children_details_output_2_vec {
            if !removes.is_empty() {
                remove_happened = true;
                let link_add_address =
                    unwrap_to::unwrap_to!(removes[0].header() => Header::DeleteLink)
                        .link_add_address
                        .clone();
                assert_eq!(link_add_address, to_remove_hash);
            }
}
assert!(remove_happened);
}
}
| 32.407186 | 108 | 0.599778 |
abcabc68f961bf962b6bf5d1584bf31ad0ffcd8c
| 18,254 |
//! 2D Sprite Rendering implementation details.
use ron::de::from_bytes as from_ron_bytes;
use serde::{Deserialize, Serialize};
use crate::{error, types::Texture};
use amethyst_assets::{Asset, Format, Handle};
use amethyst_core::ecs::prelude::{Component, DenseVecStorage};
use amethyst_error::Error;
pub mod prefab;
/// An asset handle to sprite sheet metadata.
pub type SpriteSheetHandle = Handle<SpriteSheet>;
/// Meta data for a sprite sheet texture.
///
/// Contains a handle to the texture and the sprite coordinates on the texture.
#[derive(Clone, Debug, PartialEq)]
pub struct SpriteSheet {
/// `Texture` handle of the spritesheet texture
pub texture: Handle<Texture>,
/// A list of sprites in this sprite sheet.
pub sprites: Vec<Sprite>,
}
impl Asset for SpriteSheet {
const NAME: &'static str = "renderer::SpriteSheet";
type Data = Self;
type HandleStorage = DenseVecStorage<Handle<Self>>;
}
/// Dimensions and texture coordinates of each sprite in a sprite sheet.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sprite {
/// Pixel width of the sprite
pub width: f32,
/// Pixel height of the sprite
pub height: f32,
/// Number of pixels to shift the sprite to the left and down relative to the entity
pub offsets: [f32; 2],
/// Texture coordinates of the sprite
pub tex_coords: TextureCoordinates,
}
/// Texture coordinates of the sprite
///
/// The coordinates should be normalized to a value between 0.0 and 1.0:
///
/// * X axis: 0.0 is the left side and 1.0 is the right side.
/// * Y axis: 0.0 is the bottom and 1.0 is the top.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TextureCoordinates {
/// Normalized left x coordinate
pub left: f32,
/// Normalized right x coordinate
pub right: f32,
/// Normalized bottom y coordinate
pub bottom: f32,
/// Normalized top y coordinate
pub top: f32,
}
impl Sprite {
/// Creates a `Sprite` from pixel values.
///
/// This function expects pixel coordinates -- starting from the top left of the image. X
/// increases to the right, Y increases downwards. Texture coordinates are calculated from the
/// pixel values.
///
/// # Parameters
///
/// * `image_w`: Width of the full sprite sheet.
/// * `image_h`: Height of the full sprite sheet.
/// * `sprite_w`: Width of the sprite.
/// * `sprite_h`: Height of the sprite.
/// * `pixel_left`: Pixel X coordinate of the left side of the sprite.
/// * `pixel_top`: Pixel Y coordinate of the top of the sprite.
/// * `offsets`: Number of pixels to shift the sprite to the left and down relative to the
/// entity.
pub fn from_pixel_values(
image_w: u32,
image_h: u32,
sprite_w: u32,
sprite_h: u32,
pixel_left: u32,
pixel_top: u32,
offsets: [f32; 2],
flip_horizontal: bool,
flip_vertical: bool,
) -> Sprite {
let image_w = image_w as f32;
let image_h = image_h as f32;
let offsets = [offsets[0] as f32, offsets[1] as f32];
let pixel_right = (pixel_left + sprite_w) as f32;
let pixel_bottom = (pixel_top + sprite_h) as f32;
let pixel_left = pixel_left as f32;
let pixel_top = pixel_top as f32;
// Texture coordinates are expressed as fractions of the position on the image.
//
// For pixel perfect result, the sprite border must be rendered exactly at
// screen pixel border or use nearest-neighbor sampling.
// <http://www.mindcontrol.org/~hplus/graphics/opengl-pixel-perfect.html>
// NOTE: Maybe we should provide an option to round coordinates from `Transform`
// to nearest integer in `DrawFlat2D` pass before rendering.
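        //
        // Worked example (mirrored by the unit test at the bottom of this
        // file): a 10x20 sprite at pixel (0, 20) on a 30x40 image yields
        // left = 0/30, right = 10/30, top = 20/40 and bottom = 40/40.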
let left = (pixel_left) / image_w;
let right = (pixel_right) / image_w;
let top = (pixel_top) / image_h;
let bottom = (pixel_bottom) / image_h;
let (left, right) = if flip_horizontal {
(right, left)
} else {
(left, right)
};
let (top, bottom) = if flip_vertical {
(bottom, top)
} else {
(top, bottom)
};
let tex_coords = TextureCoordinates {
left,
right,
top,
bottom,
};
Sprite {
width: sprite_w as f32,
height: sprite_h as f32,
offsets,
tex_coords,
}
}
}
impl From<((f32, f32), [f32; 4])> for Sprite {
fn from((dimensions, tex_coords): ((f32, f32), [f32; 4])) -> Self {
Self::from((dimensions, [0.0; 2], tex_coords))
}
}
impl From<((f32, f32), [f32; 2], [f32; 4])> for Sprite {
fn from(((width, height), offsets, tex_coords): ((f32, f32), [f32; 2], [f32; 4])) -> Self {
Sprite {
width,
height,
offsets,
tex_coords: TextureCoordinates::from(tex_coords),
}
}
}
impl From<((f32, f32), (f32, f32))> for TextureCoordinates {
fn from(((left, right), (bottom, top)): ((f32, f32), (f32, f32))) -> Self {
TextureCoordinates {
left,
right,
bottom,
top,
}
}
}
impl From<[f32; 4]> for TextureCoordinates {
fn from(uv: [f32; 4]) -> Self {
TextureCoordinates {
left: uv[0],
right: uv[1],
bottom: uv[2],
top: uv[3],
}
}
}
/// Information for rendering a sprite.
///
/// Instead of using a `Mesh` on a `DrawFlat` render pass, we can use a simpler set of shaders to
/// render textures to quads. This struct carries the information necessary for the draw2dflat pass.
#[derive(Clone, Debug, PartialEq)]
pub struct SpriteRender {
/// Handle to the sprite sheet of the sprite
pub sprite_sheet: Handle<SpriteSheet>,
/// Index of the sprite on the sprite sheet
pub sprite_number: usize,
}
impl Component for SpriteRender {
type Storage = DenseVecStorage<Self>;
}
/// Represents one sprite in `SpriteList`.
/// Positions originate in the top-left corner (bitmap image convention).
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct SpritePosition {
/// Horizontal position of the sprite in the sprite sheet
pub x: u32,
/// Vertical position of the sprite in the sprite sheet
pub y: u32,
/// Width of the sprite
pub width: u32,
/// Height of the sprite
pub height: u32,
/// Number of pixels to shift the sprite to the left and down relative to the entity holding it
#[serde(default = "default_offsets")]
pub offsets: Option<[f32; 2]>,
/// Flip the sprite horizontally during rendering
#[serde(default = "default_flip")]
pub flip_horizontal: bool,
/// Flip the sprite vertically during rendering
#[serde(default = "default_flip")]
pub flip_vertical: bool,
}
fn default_offsets() -> Option<[f32; 2]> {
None
}
fn default_flip() -> bool {
false
}
/// `SpriteList` controls how a sprite list is generated when using `Sprites::List` in a
/// `SpriteSheetPrefab`.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct SpriteList {
/// Width of the texture in pixels.
pub texture_width: u32,
/// Height of the texture in pixels.
pub texture_height: u32,
/// Description of the sprites
pub sprites: Vec<SpritePosition>,
}
/// `SpriteGrid` controls how a sprite grid is generated when using `Sprites::Grid` in a
/// `SpriteSheetPrefab`.
///
/// The number of columns in the grid must always be provided, and one of the other fields must also
/// be provided. The grid will be laid out row-major, starting with the sprite in the upper-left
/// corner and ending with the sprite in the lower-right corner. For example, a grid with 2 rows
/// and 4 columns will have the order below for the sprites.
///
/// ```text
/// |---|---|---|---|
/// | 0 | 1 | 2 | 3 |
/// |---|---|---|---|
/// | 4 | 5 | 6 | 7 |
/// |---|---|---|---|
/// ```
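///
/// For example (an illustrative sketch): with a 64x32 texture, `columns: 4`
/// and `cell_size: Some((16, 16))`, the derived values are `rows = 2` and
/// `sprite_count = 8`, each sprite covering one 16x16 cell.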
#[derive(Clone, Debug, Deserialize, Serialize, Default)]
pub struct SpriteGrid {
/// Width of the texture in pixels.
pub texture_width: u32,
/// Height of the texture in pixels.
pub texture_height: u32,
/// Specifies the number of columns in the spritesheet, this value must always be given.
pub columns: u32,
/// Specifies the number of rows in the spritesheet. If this is not given it will be calculated
/// using either `sprite_count` (`sprite_count / columns`), or `cell_size` (`sheet_size / cell_size`).
pub rows: Option<u32>,
/// Specifies the number of sprites in the spritesheet. If this is not given it will be
/// calculated using `rows` (`columns * rows`).
pub sprite_count: Option<u32>,
/// Specifies the size of the individual sprites in the spritesheet in pixels. If this is not
/// given it will be calculated using the spritesheet size, `columns` and `rows`.
/// Tuple order is `(width, height)`.
pub cell_size: Option<(u32, u32)>,
/// Specifies the position of the grid on a texture. If this is not given it will be set to (0, 0).
/// Positions originate in the top-left corner (bitmap image convention).
pub position: Option<(u32, u32)>,
}
/// Defined the sprites that are part of a `SpriteSheetPrefab`.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Sprites {
/// A list of sprites
List(SpriteList),
/// Generate a grid sprite list, see `SpriteGrid` for more information.
Grid(SpriteGrid),
}
impl Sprites {
fn build_sprites(&self) -> Vec<Sprite> {
match self {
Sprites::List(list) => list.build_sprites(),
Sprites::Grid(grid) => grid.build_sprites(),
}
}
}
impl SpriteList {
/// Creates a `Vec<Sprite>` from `SpriteList`.
pub fn build_sprites(&self) -> Vec<Sprite> {
self.sprites
.iter()
.map(|pos| {
Sprite::from_pixel_values(
self.texture_width,
self.texture_height,
pos.width,
pos.height,
pos.x,
pos.y,
pos.offsets.unwrap_or([0.0; 2]),
pos.flip_horizontal,
pos.flip_vertical,
)
})
.collect()
}
}
impl SpriteGrid {
/// The width of the part of the texture that the sprites reside on
fn sheet_width(&self) -> u32 {
self.texture_width - self.position().0
}
/// The height of the part of the texture that the sprites reside on
fn sheet_height(&self) -> u32 {
self.texture_height - self.position().1
}
fn rows(&self) -> u32 {
self.rows.unwrap_or_else(|| {
self.sprite_count
.map(|c| {
                    if c % self.columns == 0 {
                        c / self.columns
                    } else {
                        c / self.columns + 1
                    }
                })
                .or_else(|| self.cell_size.map(|(_, y)| self.sheet_height() / y))
.unwrap_or(1)
})
}
fn sprite_count(&self) -> u32 {
self.sprite_count
.unwrap_or_else(|| self.columns * self.rows())
}
fn cell_size(&self) -> (u32, u32) {
self.cell_size.unwrap_or_else(|| {
(
                self.sheet_width() / self.columns,
                self.sheet_height() / self.rows(),
)
})
}
fn position(&self) -> (u32, u32) {
self.position.unwrap_or((0, 0))
}
/// Creates a `Vec<Sprite>` from `SpriteGrid`.
pub fn build_sprites(&self) -> Vec<Sprite> {
let rows = self.rows();
let sprite_count = self.sprite_count();
let cell_size = self.cell_size();
let position = self.position();
if (self.columns * cell_size.0) > self.sheet_width() {
log::warn!(
"Grid spritesheet contains more columns than can fit in the given width: {} * {} > {} - {}",
self.columns,
cell_size.0,
self.texture_width,
position.0
);
}
if (rows * cell_size.1) > self.sheet_height() {
log::warn!(
"Grid spritesheet contains more rows than can fit in the given height: {} * {} > {} - {}",
rows,
cell_size.1,
self.texture_height,
position.1
);
}
(0..sprite_count)
.map(|cell| {
let row = cell / self.columns;
let column = cell - (row * self.columns);
let x = column * cell_size.0 + position.0;
let y = row * cell_size.1 + position.1;
Sprite::from_pixel_values(
self.texture_width,
self.texture_height,
cell_size.0,
cell_size.1,
x,
y,
[0.0; 2],
false,
false,
)
})
.collect()
}
}
/// Allows loading of sprite sheets in RON format.
///
/// This format allows you to conveniently load a sprite sheet from a RON file.
///
/// Example:
/// ```text,ignore
/// (
/// // Width of the texture
/// texture_width: 48,
/// // Height of the texture
/// texture_height: 16,
/// // List of sprites the sheet holds
/// sprites: [
/// (
/// // Horizontal position of the sprite in the sprite sheet
/// x: 0,
/// // Vertical position of the sprite in the sprite sheet
/// y: 0,
/// // Width of the sprite
/// width: 16,
/// // Height of the sprite
/// height: 16,
/// // Number of pixels to shift the sprite to the left and down relative to the entity holding it when rendering
/// offsets: (0.0, 0.0), // This is optional and defaults to (0.0, 0.0)
/// ),
/// (
/// x: 16,
/// y: 0,
/// width: 32,
/// height: 16,
/// ),
/// ],
/// )
/// ```
///
/// Such a spritesheet description can be loaded using a `Loader` by passing it the handle of the corresponding loaded texture.
/// ```rust,no_run
/// # use amethyst_assets::{Loader, AssetStorage};
/// # use amethyst_rendy::{sprite::{SpriteSheetFormat, SpriteSheet}, Texture, formats::texture::ImageFormat};
/// #
/// # fn load_sprite_sheet() {
/// # let world = amethyst_core::ecs::World::new(); // Normally, you would use Amethyst's world
/// # let loader = world.read_resource::<Loader>();
/// # let spritesheet_storage = world.read_resource::<AssetStorage<SpriteSheet>>();
/// # let texture_storage = world.read_resource::<AssetStorage<Texture>>();
/// let texture_handle = loader.load(
/// "my_texture.png",
/// ImageFormat(Default::default()),
/// (),
/// &texture_storage,
/// );
/// let spritesheet_handle = loader.load(
/// "my_spritesheet.ron",
/// SpriteSheetFormat(texture_handle),
/// (),
/// &spritesheet_storage,
/// );
/// # }
/// ```
#[derive(Clone, Debug)]
pub struct SpriteSheetFormat(pub Handle<Texture>);
impl Format<SpriteSheet> for SpriteSheetFormat {
fn name(&self) -> &'static str {
"SPRITE_SHEET"
}
fn import_simple(&self, bytes: Vec<u8>) -> Result<SpriteSheet, Error> {
let sprite_list: SpriteList =
from_ron_bytes(&bytes).map_err(|_| error::Error::LoadSpritesheetError)?;
Ok(SpriteSheet {
texture: self.0.clone(),
sprites: sprite_list.build_sprites(),
})
}
}
#[cfg(test)]
mod test {
use super::{Sprite, TextureCoordinates};
#[test]
fn texture_coordinates_from_tuple_maps_fields_correctly() {
assert_eq!(
TextureCoordinates {
left: 0.,
right: 0.5,
bottom: 0.75,
top: 1.0,
},
((0.0, 0.5), (0.75, 1.0)).into()
);
}
#[test]
fn texture_coordinates_from_slice_maps_fields_correctly() {
assert_eq!(
TextureCoordinates {
left: 0.,
right: 0.5,
bottom: 0.75,
top: 1.0,
},
[0.0, 0.5, 0.75, 1.0].into()
);
}
#[test]
fn sprite_from_tuple_maps_fields_correctly() {
assert_eq!(
Sprite {
width: 10.,
height: 40.,
offsets: [5., 20.],
tex_coords: TextureCoordinates {
left: 0.,
right: 0.5,
bottom: 0.75,
top: 1.0,
},
},
((10., 40.), [5., 20.], [0.0, 0.5, 0.75, 1.0]).into()
);
}
#[test]
fn sprite_offsets_default_to_zero() {
assert_eq!(
Sprite {
width: 10.,
height: 40.,
offsets: [0., 0.],
tex_coords: TextureCoordinates {
left: 0.,
right: 0.5,
bottom: 0.75,
top: 1.0,
},
},
((10., 40.), [0.0, 0.5, 0.75, 1.0]).into()
);
}
#[test]
fn sprite_from_pixel_values_calculates_pixel_perfect_coordinates() {
let image_w = 30;
let image_h = 40;
let sprite_w = 10;
let sprite_h = 20;
let pixel_left = 0;
let pixel_top = 20;
let offsets = [-5.0, -10.0];
assert_eq!(
Sprite::from((
(10., 20.), // Sprite w and h
[-5., -10.], // Offsets
[0., 10. / 30., 1., 20. / 40.], // Texture coordinates
)),
Sprite::from_pixel_values(
image_w, image_h, sprite_w, sprite_h, pixel_left, pixel_top, offsets, false, false
)
);
}
}
| 31.968476 | 127 | 0.550455 |
62bbe5fb53f10f179c7b5305f28769092601cc36
| 114 |
struct S { field: i32 }
fn main() {
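    // The parentheses around the struct literal are required here: a bare
    // struct literal is not allowed in match scrutinee position.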
    match (S { field: 0 }) {
        S { field: 0 } => {},
_ => {}
}
}
| 16.285714 | 27 | 0.359649 |
39e1fda50a718e78d8dab1c230b98d2cbc3b73d9
| 9,562 |
// Copyright 2018-2021 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::traits::{
clear_packed_root,
clear_spread_root_opt,
pull_packed_root_opt,
pull_spread_root_opt,
push_packed_root_opt,
push_spread_root_opt,
ExtKeyPtr,
KeyPtr,
PackedLayout,
SpreadLayout,
};
use core::{
cell::Cell,
fmt,
fmt::Debug,
};
use ink_prelude::vec::Vec;
use ink_primitives::Key;
/// The entry of a single cached value of a lazy storage data structure.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct StorageEntry<T> {
/// The value or `None` if the value has been removed.
value: Option<T>,
    /// This is [`EntryState::Mutated`] if the value has been mutated and
    /// needs to be synchronized with the contract storage. If it is
/// [`EntryState::Preserved`] the value from the contract storage has been
/// preserved and does not need to be synchronized.
state: Cell<EntryState>,
}
impl<T> Debug for StorageEntry<T>
where
T: Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Entry")
.field("value", &self.value)
.field("state", &self.state.get())
.finish()
}
}
#[test]
fn debug_impl_works() {
let e1 = <StorageEntry<i32>>::new(None, EntryState::Preserved);
assert_eq!(
format!("{:?}", &e1),
"Entry { value: None, state: Preserved }",
);
let e2 = StorageEntry::new(Some(42), EntryState::Mutated);
assert_eq!(
format!("{:?}", &e2),
"Entry { value: Some(42), state: Mutated }",
);
}
/// The state of the entry.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum EntryState {
/// The entry's value must be synchronized with the contract storage.
Mutated,
/// The entry's value preserved the value from the contract storage.
Preserved,
}
impl EntryState {
/// Returns `true` if the entry state is mutated.
pub fn is_mutated(self) -> bool {
match self {
EntryState::Mutated => true,
EntryState::Preserved => false,
}
}
/// Returns `true` if the entry state is preserved.
pub fn is_preserved(self) -> bool {
!self.is_mutated()
}
}
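#[test]
fn entry_state_predicates_work() {
    // A minimal sketch of a test for the predicates above, using only the
    // two `EntryState` variants defined in this file.
    assert!(EntryState::Mutated.is_mutated());
    assert!(!EntryState::Mutated.is_preserved());
    assert!(EntryState::Preserved.is_preserved());
    assert!(!EntryState::Preserved.is_mutated());
}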
impl<T> SpreadLayout for StorageEntry<T>
where
T: SpreadLayout,
{
const FOOTPRINT: u64 = <T as SpreadLayout>::FOOTPRINT;
fn pull_spread(ptr: &mut KeyPtr) -> Self {
let root_key = ExtKeyPtr::next_for::<Self>(ptr);
Self::pull_spread_root(root_key)
}
fn push_spread(&self, ptr: &mut KeyPtr) {
let root_key = ExtKeyPtr::next_for::<Self>(ptr);
self.push_spread_root(root_key)
}
fn clear_spread(&self, ptr: &mut KeyPtr) {
let root_key = ExtKeyPtr::next_for::<Self>(ptr);
self.clear_spread_root(root_key)
}
}
impl<T> scale::Encode for StorageEntry<T>
where
T: scale::Encode,
{
#[inline]
fn size_hint(&self) -> usize {
<Option<T> as scale::Encode>::size_hint(&self.value)
}
#[inline]
fn encode_to<O: scale::Output + ?Sized>(&self, dest: &mut O) {
<Option<T> as scale::Encode>::encode_to(&self.value, dest)
}
#[inline]
fn encode(&self) -> Vec<u8> {
<Option<T> as scale::Encode>::encode(&self.value)
}
#[inline]
fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
<Option<T> as scale::Encode>::using_encoded(&self.value, f)
}
}
impl<T> scale::Decode for StorageEntry<T>
where
T: scale::Decode,
{
fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {
Ok(Self::new(
<Option<T> as scale::Decode>::decode(input)?,
EntryState::Preserved,
))
}
}
impl<T> PackedLayout for StorageEntry<T>
where
T: PackedLayout,
{
#[inline]
fn pull_packed(&mut self, at: &Key) {
PackedLayout::pull_packed(&mut self.value, at)
}
#[inline]
fn push_packed(&self, at: &Key) {
PackedLayout::push_packed(&self.value, at)
}
#[inline]
fn clear_packed(&self, at: &Key) {
PackedLayout::clear_packed(&self.value, at)
}
}
impl<T> StorageEntry<T>
where
T: SpreadLayout,
{
/// Pulls the entity from the underlying associated storage as a `SpreadLayout`
/// storage layout representation.
///
/// # Note
///
/// Mainly used by lazy storage abstractions that only allow operating on
/// packed storage entities such as [`LazyCell`][`crate::lazy::LazyCell`].
pub fn pull_spread_root(root_key: &Key) -> Self {
Self::new(pull_spread_root_opt::<T>(&root_key), EntryState::Preserved)
}
/// Pushes the underlying associated data to the contract storage using
/// the `SpreadLayout` storage layout.
///
/// # Note
///
/// Mainly used by lazy storage abstractions that only allow operating on
/// packed storage entities such as [`LazyCell`][`crate::lazy::LazyCell`].
pub fn push_spread_root(&self, root_key: &Key) {
let old_state = self.replace_state(EntryState::Preserved);
if old_state.is_mutated() {
push_spread_root_opt::<T>(self.value().into(), &root_key);
}
}
/// Clears the underlying associated storage as `SpreadLayout` storage layout representation.
///
/// # Note
///
/// Mainly used by lazy storage abstractions that only allow operating on
/// packed storage entities such as [`LazyCell`][`crate::lazy::LazyCell`].
pub fn clear_spread_root(&self, root_key: &Key) {
clear_spread_root_opt::<T, _>(&root_key, || self.value().into());
}
}
impl<T> StorageEntry<T>
where
T: PackedLayout,
{
/// Pulls the entity from the underlying associated storage as packed representation.
///
/// # Note
///
/// Mainly used by lazy storage abstractions that only allow operating on
/// packed storage entities such as [`LazyIndexMap`][`crate::lazy::LazyIndexMap`] or
/// [`LazyArray`][`crate::lazy::LazyArray`].
pub fn pull_packed_root(root_key: &Key) -> Self {
Self::new(pull_packed_root_opt::<T>(root_key), EntryState::Preserved)
}
/// Pushes the underlying associated storage as packed representation.
///
/// # Note
///
/// Mainly used by lazy storage abstractions that only allow operating on
/// packed storage entities such as [`LazyIndexMap`][`crate::lazy::LazyIndexMap`]
/// or [`LazyArray`][`crate::lazy::LazyArray`].
pub fn push_packed_root(&self, root_key: &Key) {
let old_state = self.replace_state(EntryState::Preserved);
if old_state.is_mutated() {
push_packed_root_opt::<T>(self.value().into(), &root_key);
}
}
/// Clears the underlying associated storage as packed representation.
///
/// # Note
///
/// Mainly used by lazy storage abstractions that only allow operating on
/// packed storage entities such as [`LazyIndexMap`][`crate::lazy::LazyIndexMap`]
/// or [`LazyArray`][`crate::lazy::LazyArray`].
pub fn clear_packed_root(&self, root_key: &Key) {
clear_packed_root::<Option<T>>(self.value(), &root_key);
}
}
impl<T> StorageEntry<T> {
/// Creates a new entry with the value and state.
pub fn new(value: Option<T>, state: EntryState) -> Self {
Self {
value,
state: Cell::new(state),
}
}
/// Replaces the current entry state with the new state and returns it.
pub fn replace_state(&self, new_state: EntryState) -> EntryState {
// The implementation of `Cell::set` uses `Cell::replace` so instead
// of offering both APIs we simply opted to offer just the more general
// replace API for `Entry`.
self.state.replace(new_state)
}
/// Returns a shared reference to the value of the entry.
pub fn value(&self) -> &Option<T> {
&self.value
}
/// Returns an exclusive reference to the entry value.
///
/// # Note
///
    /// This marks the entry as mutated if the entry was occupied, since the
    /// caller could potentially change the returned value.
pub fn value_mut(&mut self) -> &mut Option<T> {
if self.value.is_some() {
self.state.set(EntryState::Mutated);
}
&mut self.value
}
/// Converts the entry into its value.
pub fn into_value(self) -> Option<T> {
self.value
}
/// Puts the new value into the entry and returns the old value.
///
/// # Note
///
    /// This marks the entry as mutated as long as at least one of `old_value`
    /// and `new_value` is `Some`.
pub fn put(&mut self, new_value: Option<T>) -> Option<T> {
let new_value_is_some = new_value.is_some();
let old_value = core::mem::replace(&mut self.value, new_value);
if old_value.is_some() || new_value_is_some {
self.state.set(EntryState::Mutated);
}
old_value
}
}
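#[test]
fn put_marks_entry_as_mutated() {
    // A minimal sketch exercising the `put` semantics documented above; it
    // relies only on the public `StorageEntry` API from this file.
    let mut entry = StorageEntry::new(Some(1), EntryState::Preserved);
    assert_eq!(entry.put(Some(2)), Some(1));
    assert!(entry.replace_state(EntryState::Preserved).is_mutated());
    // Replacing `None` with `None` must not mark the entry as mutated.
    let mut empty = <StorageEntry<i32>>::new(None, EntryState::Preserved);
    assert_eq!(empty.put(None), None);
    assert!(empty.replace_state(EntryState::Preserved).is_preserved());
}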
| 30.647436 | 97 | 0.623196 |
91f86faf4935c43be9ecaf4fb9f522028c5575b7
| 724 |
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::account_config::resources::{
ChildVASP, Credential, DesignatedDealer, DesignatedDealerPreburns, ParentVASP,
};
use serde::{Deserialize, Serialize};
/// An enum that captures the collection of role-specific resources stored under each account type
#[derive(Debug, Serialize, Deserialize)]
pub enum AccountRole {
ParentVASP {
vasp: ParentVASP,
credential: Credential,
},
ChildVASP(ChildVASP),
DesignatedDealer {
dd_credential: Credential,
preburn_balances: DesignatedDealerPreburns,
designated_dealer: DesignatedDealer,
},
Unknown,
// TODO: add other roles
}
| 28.96 | 97 | 0.712707 |
6a72d0629c64e1727fa8171d396583d7de942449
| 4,692 |
#[macro_export]
macro_rules! caml_ffi {
($code:tt) => {
let mut caml_frame = $crate::core::memory::caml_local_roots.clone();
$code;
return
};
($code:tt => $result:expr) => {
let mut caml_frame = $crate::core::memory::caml_local_roots;
$code;
return $crate::core::mlvalues::Value::from($result);
}
}
#[macro_export]
/// Registers OCaml parameters with the GC
macro_rules! caml_param {
(@step $idx:expr, $caml_roots:ident,) => {
$caml_roots.ntables = $idx;
};
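    // Recursively install each parameter into the next free slot of the
    // roots table, bumping the slot index by one per parameter.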
(@step $idx:expr, $caml_roots:ident, $param:expr, $($tail:expr,)*) => {
$caml_roots.tables[$idx] = &mut $param;
caml_param!(@step $idx + 1usize, $caml_roots, $($tail,)*);
};
($($n:expr),*) => {
let mut caml_roots: $crate::core::memory::CamlRootsBlock = ::std::default::Default::default();
caml_roots.next = $crate::core::memory::caml_local_roots;
$crate::core::memory::caml_local_roots = (&mut caml_roots) as *mut $crate::core::memory::CamlRootsBlock;
caml_roots.nitems = 1; // this is = N when CAMLxparamN is used
caml_param!(@step 0usize, caml_roots, $($n,)*);
}
}
/// Initializes and registers the given identifier(s) as a local value with the OCaml runtime.
///
/// ## Original C code
///
/// ```c
/// #define CAMLlocal1(x) \
/// value x = Val_unit; \
/// CAMLxparam1 (x)
/// ```
///
#[macro_export]
macro_rules! caml_local {
($($local:ident),*) => {
$(let mut $local = $crate::value::Value::new($crate::core::mlvalues::UNIT);)*
caml_param!($($local.0),*);
}
}
#[macro_export]
/// Defines an OCaml FFI body, including any locals, as well as a return if provided; it is up to you to define the parameters.
macro_rules! caml_body {
(||, <$($local:ident),*>, $code:block) => {
let caml_frame = $crate::core::memory::caml_local_roots;
caml_local!($($local),*);
{
$code;
}
$crate::core::memory::caml_local_roots = caml_frame;
};
(|$($param:ident),*|, @code $code:block) => {
let caml_frame = $crate::core::memory::caml_local_roots;
caml_param!($($param),*);
{
$(let mut $param = $crate::value::Value::new($param);
{
let _ = $param;
})*
$code;
}
$crate::core::memory::caml_local_roots = caml_frame;
};
(|$($param:ident),*|, <$($local:ident),*>, $code:block) => {
let caml_frame = $crate::core::memory::caml_local_roots;
caml_param!($($param),*);
caml_local!($($local),*);
{
$(let mut $param = $crate::value::Value::new($param);
{
let _ = $param;
})*
$code;
}
$crate::core::memory::caml_local_roots = caml_frame;
}
}
#[macro_export]
/// Defines an external Rust function for FFI use by an OCaml program, with automatic `CAMLparam`, `CAMLlocal`, and `CAMLreturn` inserted for you.
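///
/// A hypothetical usage sketch (the names `ml_identity`, `value` and `result`
/// are illustrative only, not part of this crate):
///
/// ```rust,ignore
/// caml!(ml_identity, |value|, <result>, {
///     result = value;
/// } -> result);
/// ```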
macro_rules! caml {
($name:ident, |$($param:ident),*|, <$($local:ident),*>, $code:block -> $retval:ident) => {
#[allow(unused_mut)]
#[no_mangle]
pub unsafe extern fn $name ($(mut $param: $crate::core::mlvalues::Value,)*) -> $crate::core::mlvalues::Value {
caml_body!(|$($param),*|, <$($local),*>, $code);
return $crate::core::mlvalues::Value::from($retval)
}
};
($name:ident, |$($param:ident),*|, $code:block) => {
#[allow(unused_mut)]
#[no_mangle]
pub unsafe extern fn $name ($(mut $param: $crate::core::mlvalues::Value,)*) {
caml_body!(|$($param),*|, @code $code);
return;
}
};
($name:ident, |$($param:ident),*|, $code:block -> $retval:ident) => {
#[allow(unused_mut)]
#[no_mangle]
pub unsafe extern fn $name ($(mut $param: $crate::core::mlvalues::Value,)*) -> $crate::core::mlvalues::Value {
caml_body!(|$($param),*|, @code $code);
return $crate::core::mlvalues::Value::from($retval);
}
};
}
#[macro_export]
/// Create an OCaml tuple
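///
/// A minimal usage sketch (assumes the arguments provide the `to_value()`
/// conversion invoked by the expansion):
///
/// ```rust,ignore
/// let my_tuple = tuple!(1, 2, 3);
/// ```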
macro_rules! tuple {
($($x:expr),*) => {
$crate::Tuple::from(&[$($x.to_value(),)*]).into()
}
}
#[macro_export]
/// Create an OCaml array
macro_rules! array {
($($x:expr),*) => {
$crate::Array::from(&[$($x.to_value(),)*]).into()
}
}
#[macro_export]
/// Create an OCaml list
macro_rules! list {
($($x:expr),*) => {
$crate::List::from(&[$($x.to_value(),)*]).into()
}
}
| 29.88535 | 146 | 0.531756 |
0a1810ab69f0294285efacec9d28460c063aea83
| 56,979 |
//! Searches, processes and uploads debug information files (DIFs). See
//! `DifUpload` for more information.
use std::collections::{BTreeMap, BTreeSet};
use std::ffi::{OsStr, OsString};
use std::fmt;
use std::fs::{self, File};
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::iter::IntoIterator;
use std::mem::transmute;
use std::ops::Deref;
use std::path::{Component, Path, PathBuf};
use std::process::Command;
use std::slice::{Chunks, Iter};
use std::str;
use std::thread;
use console::style;
use failure::{bail, err_msg, Error, SyncFailure};
use indicatif::HumanBytes;
use log::{debug, info, warn};
use sha1::Digest;
use symbolic::common::{ByteView, DebugId, SelfCell, Uuid};
use symbolic::debuginfo::{
sourcebundle::SourceBundleWriter, Archive, FileEntry, FileFormat, Object,
};
use walkdir::WalkDir;
use which::which;
use zip::{write::FileOptions, ZipArchive, ZipWriter};
use crate::api::{
Api, ChunkUploadCapability, ChunkUploadOptions, ChunkedDifRequest, ChunkedFileState,
};
use crate::config::Config;
use crate::constants::DEFAULT_MAX_DIF_SIZE;
use crate::utils::chunks::{
upload_chunks, BatchedSliceExt, Chunk, ItemSize, ASSEMBLE_POLL_INTERVAL,
};
use crate::utils::dif::DifFeatures;
use crate::utils::fs::{get_sha1_checksum, get_sha1_checksums, TempDir, TempFile};
use crate::utils::progress::{ProgressBar, ProgressStyle};
use crate::utils::ui::{copy_with_progress, make_byte_progress_bar};
/// A debug info file on the server.
pub use crate::api::DebugInfoFile;
/// Fallback maximum number of chunks in a batch for the legacy upload.
static MAX_CHUNKS: u64 = 64;
/// An iterator over chunks of data in a `ChunkedDifMatch` object.
///
/// This struct is returned by `ChunkedDifMatch::chunks`.
struct DifChunks<'a> {
checksums: Iter<'a, Digest>,
iter: Chunks<'a, u8>,
}
impl<'a> Iterator for DifChunks<'a> {
type Item = Chunk<'a>;
fn next(&mut self) -> Option<Self::Item> {
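        // Zip the precomputed chunk checksums with the raw data chunks;
        // iteration ends as soon as either side is exhausted.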
match (self.checksums.next(), self.iter.next()) {
(Some(checksum), Some(data)) => Some(Chunk((*checksum, data))),
(_, _) => None,
}
}
}
/// Contains backing data for a `DifMatch`.
///
/// This can be used to store the actual data that a `FatObject` might be
/// relying upon, such as temporary files or extracted archives. It will be
/// disposed along with a `DifMatch` once it is dropped.
#[derive(Debug)]
enum DifBacking {
Temp(TempFile),
}
/// A handle to a debug information file found by `DifUpload`.
///
/// It contains a `FatObject` giving access to the metadata and contents of the
/// debug information file. `DifMatch::attachments` may contain supplemental
/// files used to further process this file, such as dSYM PLists.
struct DifMatch<'data> {
_backing: Option<DifBacking>,
object: SelfCell<ByteView<'data>, Object<'data>>,
name: String,
debug_id: Option<DebugId>,
attachments: Option<BTreeMap<String, ByteView<'static>>>,
}
impl<'data> DifMatch<'data> {
fn from_temp<S>(temp_file: TempFile, name: S) -> Result<Self, Error>
where
S: Into<String>,
{
let buffer = ByteView::open(temp_file.path()).map_err(SyncFailure::new)?;
Ok(DifMatch {
_backing: Some(DifBacking::Temp(temp_file)),
object: SelfCell::try_new(buffer, |b| Object::parse(unsafe { &*b }))?,
name: name.into(),
debug_id: None,
attachments: None,
})
}
/// Moves the specified temporary debug file to a safe location and assumes
/// ownership. The file will be deleted in the file system when this
/// `DifMatch` is dropped.
///
/// The path must point to a `FatObject` containing exactly one `Object`.
fn take_temp<P, S>(path: P, name: S) -> Result<Self, Error>
where
P: AsRef<Path>,
S: Into<String>,
{
let temp_file = TempFile::take(path)?;
Self::from_temp(temp_file, name)
}
/// Returns the parsed `Object` of this DIF.
pub fn object(&self) -> &Object<'_> {
self.object.get()
}
/// Returns the raw binary data of this DIF.
pub fn data(&self) -> &[u8] {
self.object().data()
}
    /// Returns the size of this DIF in bytes.
pub fn size(&self) -> u64 {
self.data().len() as u64
}
/// Returns the path of this DIF relative to the search origin.
pub fn path(&self) -> &str {
&self.name
}
/// Returns the name of this DIF, including its file extension.
pub fn file_name(&self) -> &str {
Path::new(self.path())
.file_name()
.and_then(OsStr::to_str)
.unwrap_or("Generic")
}
/// Returns attachments of this DIF, if any.
pub fn attachments(&self) -> Option<&BTreeMap<String, ByteView<'static>>> {
self.attachments.as_ref()
}
/// Determines whether this file needs resolution of hidden symbols.
pub fn needs_symbol_map(&self) -> bool {
// XCode release archives and dSYM bundles downloaded from iTunes
// Connect contain Swift library symbols. These have caused various
// issues in the past, so we ignore them for now. In particular, there
// are never any BCSymbolMaps generated for them and the DBGOriginalUUID
// in the plist is the UUID of the original dsym file.
//
// We *might* have to locate the original library in the Xcode
// distribution, then build a new non-fat dSYM file from it and patch
        // the UUID.
if self.file_name().starts_with("libswift") {
return false;
}
match self.object() {
Object::MachO(ref macho) => macho.requires_symbolmap(),
_ => false,
}
}
}
impl<'data> fmt::Debug for DifMatch<'data> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DifMatch")
.field("object", &self.object())
.field("name", &self.name)
.finish()
}
}
/// A `DifMatch` with computed SHA1 checksum.
#[derive(Debug)]
struct HashedDifMatch<'data> {
inner: DifMatch<'data>,
checksum: Digest,
}
impl<'data> HashedDifMatch<'data> {
/// Calculates the SHA1 checksum for the given DIF.
fn from(inner: DifMatch<'data>) -> Result<Self, Error> {
let checksum = get_sha1_checksum(inner.data())?;
Ok(HashedDifMatch { inner, checksum })
}
/// Returns the SHA1 checksum of this DIF.
fn checksum(&self) -> Digest {
self.checksum
}
}
impl<'data> Deref for HashedDifMatch<'data> {
type Target = DifMatch<'data>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'data> ItemSize for HashedDifMatch<'data> {
fn size(&self) -> u64 {
self.deref().size()
}
}
/// A chunked `DifMatch` with computed SHA1 checksums.
#[derive(Debug)]
struct ChunkedDifMatch<'data> {
inner: HashedDifMatch<'data>,
chunks: Vec<Digest>,
chunk_size: u64,
}
impl<'data> ChunkedDifMatch<'data> {
/// Slices the DIF into chunks of `chunk_size` bytes each, and computes SHA1
/// checksums for every chunk as well as the entire DIF.
pub fn from(inner: DifMatch<'data>, chunk_size: u64) -> Result<Self, Error> {
let (checksum, chunks) = get_sha1_checksums(inner.data(), chunk_size)?;
Ok(ChunkedDifMatch {
inner: HashedDifMatch { inner, checksum },
chunks,
chunk_size,
})
}
/// Returns an iterator over all chunk checksums.
pub fn checksums(&self) -> Iter<'_, Digest> {
self.chunks.iter()
}
/// Returns an iterator over all `DifChunk`s.
pub fn chunks(&self) -> DifChunks<'_> {
DifChunks {
checksums: self.checksums(),
iter: self.data().chunks(self.chunk_size as usize),
}
}
/// Creates a tuple which can be collected into a `ChunkedDifRequest`.
pub fn to_assemble(&self) -> (Digest, ChunkedDifRequest<'_>) {
(
self.checksum(),
ChunkedDifRequest {
name: self.file_name(),
debug_id: self.debug_id,
chunks: &self.chunks,
},
)
}
}
impl<'data> Deref for ChunkedDifMatch<'data> {
type Target = HashedDifMatch<'data>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'data> ItemSize for ChunkedDifMatch<'data> {
fn size(&self) -> u64 {
self.deref().size()
}
}
type ZipFileArchive = ZipArchive<BufReader<File>>;
/// A handle to the source of a potential `DifMatch` used inside `search_difs`.
///
/// The primary use of this handle is to resolve files relative to the debug
/// information file and store them in `DifMatch::attachments`. These could be
/// companion files or metadata files needed to process the DIFs in sentry-cli,
/// or later even on Sentry.
#[derive(Debug)]
enum DifSource<'a> {
/// A file located in the file system
FileSystem(&'a Path),
/// An entry in a ZIP file
Zip(&'a mut ZipFileArchive, &'a str),
}
impl<'a> DifSource<'a> {
    /// Resolves a file relative to the directory of `base`, stripping off the
/// file name.
fn get_relative_fs(base: &Path, path: &Path) -> Option<ByteView<'static>> {
// Use parent() to get to the directory and then move relative from
        // there. ByteView will internally canonicalize the path and resolve
// symlinks.
base.parent()
.and_then(|p| ByteView::open(p.join(path)).ok())
}
    /// Extracts a file relative to the directory of `name`, stripping off the
/// file name.
fn get_relative_zip(
zip: &mut ZipFileArchive,
name: &str,
path: &Path,
) -> Option<ByteView<'static>> {
// There is no built-in utility that normalizes paths without access to
// the file system. We start by removing the file name from the given
// path and then start to manually resolve the path components to a
// final path.
let mut zip_path = PathBuf::from(name);
zip_path.pop();
for component in path.components() {
match component {
Component::ParentDir => {
zip_path.pop();
}
Component::Normal(p) => {
zip_path.push(p);
}
_ => {
// `Component::CurDir` leaves the path as-is, and the
// remaining `Component::RootDir` and `Component::Prefix` do
// not make sense in ZIP files.
}
}
}
zip_path
.to_str()
.and_then(|name| zip.by_name(name).ok())
.and_then(|f| ByteView::read(f).ok())
}
/// Resolves a file relative to this source and reads it into a `ByteView`.
///
/// The target is always resolved relative to the directory of the source,
/// excluding its file name. The path "../changed" relative to a source
/// pointing to "path/to/file" will resolve in "path/changed".
///
/// The returned ByteView will allow random-access to the data until it is
/// disposed. If the source points to a ZIP file, the target is fully read
/// into a memory buffer. See `ByteView::from_reader` for more information.
pub fn get_relative<P>(&mut self, path: P) -> Option<ByteView<'static>>
where
P: AsRef<Path>,
{
match *self {
DifSource::FileSystem(base) => Self::get_relative_fs(base, path.as_ref()),
DifSource::Zip(ref mut zip, name) => Self::get_relative_zip(*zip, name, path.as_ref()),
}
}
}
/// Information returned by `assemble_difs` containing flat lists of incomplete
/// DIFs and their missing chunks.
type MissingDifsInfo<'data, 'm> = (Vec<&'m ChunkedDifMatch<'data>>, Vec<Chunk<'m>>);
/// Verifies that the given path contains a ZIP file and opens it.
fn try_open_zip<P>(path: P) -> Result<Option<ZipFileArchive>, Error>
where
P: AsRef<Path>,
{
if path.as_ref().extension() != Some("zip".as_ref()) {
return Ok(None);
}
let mut magic: [u8; 2] = [0; 2];
let mut file = File::open(path)?;
if file.read_exact(&mut magic).is_err() {
// Catch empty or single-character files
return Ok(None);
}
file.seek(SeekFrom::Start(0))?;
Ok(match &magic {
b"PK" => Some(ZipArchive::new(BufReader::new(file))?),
_ => None,
})
}
/// Searches the given ZIP for potential DIFs and passes them to the callback.
///
/// To avoid unnecessary file operations, the file extension is already checked
/// for every entry before opening it.
///
/// This function will not recurse into ZIPs contained in this ZIP.
fn walk_difs_zip<F>(mut zip: ZipFileArchive, options: &DifUpload, mut func: F) -> Result<(), Error>
where
F: FnMut(DifSource<'_>, String, ByteView<'static>) -> Result<(), Error>,
{
for index in 0..zip.len() {
let (name, buffer) = {
let zip_file = zip.by_index(index)?;
let name = zip_file.name().to_string();
if !options.valid_extension(Path::new(&name).extension()) {
continue;
}
(name, ByteView::read(zip_file).map_err(SyncFailure::new)?)
};
func(DifSource::Zip(&mut zip, &name), name.clone(), buffer)?;
}
Ok(())
}
/// Recursively searches the given location for potential DIFs and passes them
/// to the callback.
///
/// If `DifUpload::allow_zips` is set, then this function will attempt to open
/// the ZIP and search it for DIFs as well, without recursing further into
/// nested ZIPs.
///
/// To avoid unnecessary file operations, the file extension is already checked
/// for every entry before opening it.
fn walk_difs_directory<F, P>(location: P, options: &DifUpload, mut func: F) -> Result<(), Error>
where
P: AsRef<Path>,
F: FnMut(DifSource<'_>, String, ByteView<'static>) -> Result<(), Error>,
{
let location = location.as_ref();
let directory = if location.is_dir() {
location
} else {
location.parent().unwrap_or_else(|| Path::new(""))
};
debug!("searching location {}", location.display());
for entry in WalkDir::new(location).into_iter().filter_map(Result::ok) {
if !entry.metadata()?.is_file() {
// Walkdir recurses automatically into folders
continue;
}
let path = entry.path();
        // Only attempt to open ZIP archives when `allow_zips` has not been
        // deactivated for this upload.
        let zip_result = if options.zips_allowed {
            try_open_zip(path)
        } else {
            Ok(None)
        };
        match zip_result {
Ok(Some(zip)) => {
debug!("searching zip archive {}", path.display());
walk_difs_zip(zip, options, &mut func)?;
debug!("finished zip archive {}", path.display());
continue;
}
Err(e) => {
debug!("skipping zip archive {}", path.display());
debug!("error: {}", e);
continue;
}
Ok(None) => {
// this is not a zip archive
}
}
if !options.valid_extension(path.extension()) {
continue;
}
let buffer = ByteView::open(path).map_err(SyncFailure::new)?;
let name = path
.strip_prefix(directory)
.unwrap()
.to_string_lossy()
.into_owned();
func(DifSource::FileSystem(path), name, buffer)?;
}
debug!("finished location {}", directory.display());
Ok(())
}
/// Searches for mapping PLists next to the given `source`. It returns a mapping
/// of PList name to owning buffer of the file's contents. This function should
/// only be called for dSYMs.
fn find_uuid_plists(
object: &Object<'_>,
source: &mut DifSource<'_>,
) -> Option<BTreeMap<String, ByteView<'static>>> {
let uuid = object.debug_id().uuid();
if uuid.is_nil() {
return None;
}
// When uploading an XCode build archive to iTunes Connect, Apple will
// re-build the app for different architectures, causing new UUIDs in the
// final bundle. To allow mapping back to the original symbols, it adds
// PList files in the `Resources` folder (one level above the binary) that
// contains the original UUID, one for each object contained in the fat
// object.
//
// The folder structure looks like this:
//
// App.dSYM
// ├─ Info.plist
// └─ Resources
// ├─ 1B205CD0-67D0-4D69-A0FA-C6BDDDB2A609.plist
// ├─ 1C228684-3EE5-472B-AB8D-29B3FBF63A70.plist
// └─ DWARF
// └─ App
let plist_name = format!("{:X}.plist", uuid.to_hyphenated_ref());
let plist = match source.get_relative(format!("../{}", &plist_name)) {
Some(plist) => plist,
None => return None,
};
let mut plists = BTreeMap::new();
plists.insert(plist_name, plist);
Some(plists)
}
/// Patch debug identifiers for PDBs where the corresponding PE specifies a different age.
fn fix_pdb_ages(difs: &mut [DifMatch<'_>], age_overrides: &BTreeMap<Uuid, u32>) {
for dif in difs {
if dif.object().file_format() != FileFormat::Pdb {
continue;
}
let debug_id = dif.object().debug_id();
let age = match age_overrides.get(&debug_id.uuid()) {
Some(age) => *age,
None => continue,
};
if age == debug_id.appendix() {
continue;
}
log::debug!(
"overriding age for {} ({} -> {})",
dif.name,
debug_id.appendix(),
age
);
dif.debug_id = Some(DebugId::from_parts(debug_id.uuid(), age));
}
}
/// Searches matching debug information files.
fn search_difs(options: &DifUpload) -> Result<Vec<DifMatch<'static>>, Error> {
let progress_style = ProgressStyle::default_spinner().template(
"{spinner} Searching for debug symbol files...\
\n found {prefix:.yellow} {msg:.dim}",
);
let progress = ProgressBar::new_spinner();
progress.enable_steady_tick(100);
progress.set_style(progress_style);
let mut age_overrides = BTreeMap::new();
let mut collected = Vec::new();
for base_path in &options.paths {
if base_path == Path::new("") {
warn!(
"Skipping uploading from an empty path (\"\"). \
Maybe you expanded an empty shell variable?"
);
continue;
}
walk_difs_directory(base_path, options, |mut source, name, buffer| {
progress.set_message(&name);
// Try to parse a potential object file. If this is not possible,
// then we're not dealing with an object file, thus silently
// skipping it.
let format = Archive::peek(&buffer);
// Override this behavior for PE files. Their debug identifier is
// needed in case PDBs should be uploaded to fix an eventual age
// mismatch
let should_override_age =
format == FileFormat::Pe && options.valid_format(FileFormat::Pdb);
if !should_override_age && !options.valid_format(format) {
return Ok(());
}
debug!("trying to parse dif {}", name);
let archive = match Archive::parse(&buffer) {
Ok(archive) => archive,
Err(e) => {
warn!("Skipping invalid debug file {}: {}", name, e);
return Ok(());
}
};
// Each `FatObject` might contain multiple matching objects, each of
// which needs to retain a reference to the original fat file. We
            // create a shared instance here and clone it into `DifMatch`es
// below.
for object in archive.objects() {
// Silently skip all objects that we cannot process. This can
// happen due to invalid object files, which we then just
// discard rather than stopping the scan.
let object = match object {
Ok(object) => object,
Err(_) => continue,
};
                // Objects without a debug id will be skipped altogether. While frames
// during symbolication might be lacking debug identifiers,
// Sentry requires object files to have one during upload.
let id = object.debug_id();
if id.is_nil() {
continue;
}
// Store a mapping of "age" values for all encountered PE files,
// regardless of whether they will be uploaded. This is used later
// to fix up PDB files.
if should_override_age {
age_overrides.insert(id.uuid(), id.appendix());
// Skip if this object was only retained for the PDB override.
if !options.valid_format(format) {
continue;
}
}
// We can only process objects with features, such as a symbol
// table or debug information. If this object has no features,
// Sentry cannot process it and so we skip the upload. If object
// features were specified, this will skip all other objects.
if !options.valid_features(&object) {
continue;
}
// Skip this object if we're only looking for certain IDs.
if !options.valid_id(id) {
continue;
}
// Skip this entire file if it exceeds the maximum allowed file size.
let file_size = object.data().len() as u64;
if file_size > options.max_file_size {
warn!(
"Skipping debug file since it exceeds {}: {} ({})",
HumanBytes(options.max_file_size),
name,
HumanBytes(file_size),
);
break;
}
// Invoke logic to retrieve attachments specific to the kind
// of object file. These are used for processing. Since only
                // dSYMs require processing currently, all other kinds are
// skipped.
let attachments = match object.file_format() {
FileFormat::MachO => find_uuid_plists(&object, &mut source),
_ => None,
};
// We retain the buffer and the borrowed object in a new SelfCell. This is
// incredibly unsafe, but in our case it is fine, since the SelfCell owns the same
// buffer that was used to retrieve the object.
let cell = unsafe { SelfCell::from_raw(buffer.clone(), transmute(object)) };
collected.push(DifMatch {
_backing: None,
object: cell,
name: name.clone(),
debug_id: None,
attachments,
});
progress.set_prefix(&collected.len().to_string());
}
Ok(())
})?;
}
if !age_overrides.is_empty() {
fix_pdb_ages(&mut collected, &age_overrides);
}
progress.finish_and_clear();
println!(
"{} Found {} debug information {}",
style(">").dim(),
style(collected.len()).yellow(),
match collected.len() {
1 => "file",
_ => "files",
}
);
Ok(collected)
}
/// Resolves BCSymbolMaps and replaces hidden symbols in a `DifMatch` using
/// `dsymutil`. If successful, this will return a new `DifMatch` based on a
/// temporary file. The original dSYM is not touched.
///
/// Note that this process copies the file to a temporary location and might
/// incur significant I/O for larger debug files.
fn resolve_hidden_symbols<'a>(dif: DifMatch<'a>, symbol_map: &Path) -> Result<DifMatch<'a>, Error> {
if dif.attachments.is_none() {
println!(
"{} {}: Could not locate UUID mapping for {}",
style(">").dim(),
style("Warning").red(),
style(dif.file_name()).yellow(),
);
return Ok(dif);
}
// We need to rebuild the Resources folder of a dSYM structure in a temp
// directory that is guaranteed to be deleted after this operation. The
// Info.plist is not needed for this operation:
// Resources
// ├─ 1B205CD0-67D0-4D69-A0FA-C6BDDDB2A609.plist
// ├─ 1C228684-3EE5-472B-AB8D-29B3FBF63A70.plist
// └─ DWARF
// └─ ObjectFile
let temp_dir = TempDir::create()?;
fs::create_dir_all(temp_dir.path().join("DWARF"))?;
// Copy the object file binary
let temp_path = temp_dir.path().join("DWARF").join(dif.file_name());
let mut temp_file = File::create(&temp_path)?;
temp_file.write_all(dif.data())?;
temp_file.sync_data()?;
// Copy the UUID plists
for (name, view) in dif.attachments().unwrap() {
let mut plist = File::create(temp_dir.path().join(name))?;
plist.write_all(&view)?;
plist.sync_data()?;
}
let output = Command::new("dsymutil")
.arg("-symbol-map")
.arg(symbol_map)
.arg(&temp_path)
.output()?;
if !output.status.success() {
if let Ok(error) = str::from_utf8(&output.stderr) {
bail!("Could not resolve BCSymbolMaps: {}", error);
} else {
bail!("Could not resolve BCSymbolMaps due to an unknown error");
}
}
// Take ownership of the modified (fat) object file and move it somewhere
// else so it is safe to delete the temp directory.
DifMatch::take_temp(temp_path, dif.path())
}
/// Runs all `DifMatch` objects through the provided callback and displays a
/// progress bar while doing so.
///
/// ```
/// prepare_difs(processed, HashedDifMatch::from)?
/// ```
fn prepare_difs<'data, F, T>(items: Vec<DifMatch<'data>>, mut func: F) -> Result<Vec<T>, Error>
where
F: FnMut(DifMatch<'data>) -> Result<T, Error>,
{
let progress_style = ProgressStyle::default_bar().template(
"{prefix:.dim} Preparing for upload... {msg:.dim}\
\n{wide_bar} {pos}/{len}",
);
let progress = ProgressBar::new(items.len() as u64);
progress.set_style(progress_style);
progress.set_prefix(">");
let mut calculated = Vec::new();
for item in items {
progress.inc(1);
progress.set_message(item.path());
calculated.push(func(item)?);
}
progress.finish_and_clear();
println!(
"{} Prepared debug information {} for upload",
style(">").dim(),
match calculated.len() {
1 => "file",
_ => "files",
}
);
Ok(calculated)
}
/// Resolves BCSymbolMaps for all debug files with hidden symbols. All other
/// files are not touched. Note that this only applies to Apple dSYMs.
///
/// If there are debug files with hidden symbols but no `symbol_map` path is
/// given, a warning is emitted.
fn process_symbol_maps<'a>(
difs: Vec<DifMatch<'a>>,
symbol_map: Option<&Path>,
) -> Result<Vec<DifMatch<'a>>, Error> {
let (with_hidden, mut without_hidden): (Vec<_>, _) =
difs.into_iter().partition(DifMatch::needs_symbol_map);
if with_hidden.is_empty() {
return Ok(without_hidden);
}
let symbol_map = match symbol_map {
Some(path) => path,
_ => {
println!(
"{} {}: Found {} symbol files with hidden symbols (need BCSymbolMaps)",
style(">").dim(),
style("Warning").red(),
style(with_hidden.len()).yellow()
);
without_hidden.extend(with_hidden);
return Ok(without_hidden);
}
};
let len = with_hidden.len();
let progress_style = ProgressStyle::default_bar().template(
"{prefix:.dim} Resolving BCSymbolMaps... {msg:.dim}\
\n{wide_bar} {pos}/{len}",
);
let progress = ProgressBar::new(len as u64);
progress.set_style(progress_style);
progress.set_prefix(">");
for dif in with_hidden {
progress.inc(1);
progress.set_message(dif.path());
without_hidden.push(resolve_hidden_symbols(dif, symbol_map)?);
}
progress.finish_and_clear();
println!(
"{} Resolved BCSymbolMaps for {} debug information {}",
style(">").dim(),
style(len).yellow(),
match len {
1 => "file",
_ => "files",
}
);
Ok(without_hidden)
}
/// Default filter function to skip over bad sources we do not want to include.
pub fn filter_bad_sources(entry: &FileEntry) -> bool {
if entry.name_str().ends_with(".pch") {
// always ignore pch files
false
} else if let Ok(meta) = fs::metadata(&entry.abs_path_str()) {
// ignore files larger than 1MB
meta.len() < 1_000_000
} else {
// if a file metadata could not be read it will be skipped later.
true
}
}
/// Creates standalone source bundles for all debug files that contain debug
/// information but no embedded source code.
///
/// Files whose objects already ship embedded sources are skipped, since a
/// separate bundle would only duplicate information in Sentry.
fn create_source_bundles<'a>(difs: &[DifMatch<'a>]) -> Result<Vec<DifMatch<'a>>, Error> {
let mut source_bundles = Vec::new();
let progress_style = ProgressStyle::default_bar().template(
"{prefix:.dim} Resolving source code... {msg:.dim}\
\n{wide_bar} {pos}/{len}",
);
let progress = ProgressBar::new(difs.len() as u64);
progress.set_style(progress_style);
progress.set_prefix(">");
for dif in difs {
progress.inc(1);
progress.set_message(dif.path());
let object = dif.object();
if object.has_sources() {
// Do not create standalone source bundles if the original object already contains
// source code. This would just store duplicate information in Sentry.
continue;
}
let temp_file = TempFile::create()?;
let writer = SourceBundleWriter::start(BufWriter::new(temp_file.open()?))?;
        // Resolve source files from the object and write their contents into the archive. Skip
        // uploading this bundle if no source could be written. This can happen if there is no file
        // or line information in the object file, or if none of the files could be resolved.
let written =
writer.write_object_with_filter(object, dif.file_name(), filter_bad_sources)?;
if !written {
continue;
}
let mut source_bundle = DifMatch::from_temp(temp_file, dif.path())?;
source_bundle.debug_id = dif.debug_id;
source_bundles.push(source_bundle);
}
let len = source_bundles.len();
progress.finish_and_clear();
println!(
"{} Resolved source code for {} debug information {}",
style(">").dim(),
style(len).yellow(),
match len {
1 => "file",
_ => "files",
}
);
Ok(source_bundles)
}
/// Calls the assemble endpoint and returns the state for every `DifMatch` along
/// with info on missing chunks.
///
/// The returned value contains separate vectors for incomplete DIFs and
/// missing chunks for convenience.
fn try_assemble_difs<'data, 'm>(
difs: &'m [ChunkedDifMatch<'data>],
options: &DifUpload,
) -> Result<MissingDifsInfo<'data, 'm>, Error> {
let api = Api::current();
let request = difs.iter().map(ChunkedDifMatch::to_assemble).collect();
let response = api.assemble_difs(&options.org, &options.project, &request)?;
// We map all DIFs by their checksum, so we can access them faster when
// iterating through the server response below. Since the caller will invoke
// this function multiple times (most likely twice), this operation is
// performed twice with the same data. While this is redundant, it is also
// fast enough and keeping it here makes the `try_assemble_difs` interface
// nicer.
let difs_by_checksum = difs
.iter()
.map(|m| (m.checksum, m))
.collect::<BTreeMap<_, _>>();
let mut difs = Vec::new();
let mut chunks = Vec::new();
for (checksum, ref file_response) in response {
let chunked_match = *difs_by_checksum
.get(&checksum)
.ok_or_else(|| err_msg("Server returned unexpected checksum"))?;
match file_response.state {
ChunkedFileState::Error => {
// One of the files could not be uploaded properly and resulted
// in an error. We include this file in the return value so that
// it shows up in the final report.
difs.push(chunked_match);
}
ChunkedFileState::Assembling => {
// This file is currently assembling. The caller will have to poll this file later
// until it either resolves or errors.
difs.push(chunked_match);
}
ChunkedFileState::NotFound => {
// Assembling for one of the files has not started because some
// (or all) of its chunks have not been found. We report its
// missing chunks to the caller and then continue. The caller
// will have to call `try_assemble_difs` again after uploading
// them.
let mut missing_chunks = chunked_match
.chunks()
.filter(|&Chunk((c, _))| file_response.missing_chunks.contains(&c))
.peekable();
// Usually every file that is NotFound should also contain a set
// of missing chunks. However, if we tried to upload an empty
// file or the server returns an invalid response, we need to
// make sure that this match is not included in the missing
// difs.
if missing_chunks.peek().is_some() {
difs.push(chunked_match);
}
chunks.extend(missing_chunks);
}
_ => {
// This file has already finished. No action required anymore.
}
}
}
Ok((difs, chunks))
}
/// Concurrently uploads chunks specified in `missing_info` in batches. The
/// batch size and number of concurrent requests is controlled by
/// `chunk_options`.
///
/// This function blocks until all chunks have been uploaded.
fn upload_missing_chunks(
missing_info: &MissingDifsInfo<'_, '_>,
chunk_options: &ChunkUploadOptions,
) -> Result<(), Error> {
let &(ref difs, ref chunks) = missing_info;
// Chunks might be empty if errors occurred in a previous upload. We do
// not need to render a progress bar or perform an upload in this case.
if chunks.is_empty() {
return Ok(());
}
let progress_style = ProgressStyle::default_bar().template(&format!(
"{} Uploading {} missing debug information file{}...\
\n{{wide_bar}} {{bytes}}/{{total_bytes}} ({{eta}})",
style(">").dim(),
style(difs.len().to_string()).yellow(),
if difs.len() == 1 { "" } else { "s" }
));
upload_chunks(chunks, chunk_options, progress_style)?;
println!(
"{} Uploaded {} missing debug information {}",
style(">").dim(),
style(difs.len().to_string()).yellow(),
match difs.len() {
1 => "file",
_ => "files",
}
);
Ok(())
}
/// Renders the given detail string to the command line. If the `detail` is
/// either missing or empty, the optional fallback will be used.
fn render_detail(detail: &Option<String>, fallback: Option<&str>) {
let mut string = match *detail {
Some(ref string) => string.as_str(),
None => "",
};
if string.is_empty() {
if let Some(fallback) = fallback {
string = fallback;
}
}
for line in string.lines() {
if !line.is_empty() {
println!(" {}", style(line).dim());
}
}
}
/// Polls the assemble endpoint until all DIFs have either completed or errored. Returns a list of
/// `DebugInfoFile`s that have been created successfully and also prints a summary to the user.
///
/// This function assumes that all chunks have been uploaded successfully. If there are still
/// missing chunks in the assemble response, this likely indicates a bug in the server.
fn poll_dif_assemble(
difs: &[&ChunkedDifMatch<'_>],
options: &DifUpload,
) -> Result<(Vec<DebugInfoFile>, bool), Error> {
let progress_style = ProgressStyle::default_bar().template(
"{prefix:.dim} Processing files...\
\n{wide_bar} {pos}/{len}",
);
let api = Api::current();
let progress = ProgressBar::new(difs.len() as u64);
progress.set_style(progress_style);
progress.set_prefix(">");
let request = difs.iter().map(|d| d.to_assemble()).collect();
let response = loop {
let response = api.assemble_difs(&options.org, &options.project, &request)?;
let chunks_missing = response
.values()
.any(|r| r.state == ChunkedFileState::NotFound);
if chunks_missing {
return Err(err_msg(
"Some uploaded files are now missing on the server. Please retry by running \
`sentry-cli upload-dif` again. If this problem persists, please report a bug.",
));
}
// Poll until there is a response, unless the user has specified to skip polling. In
// that case, we return the potentially partial response from the server. This might
// still contain a cached error.
if !options.wait {
break response;
}
let pending = response
.iter()
.filter(|&(_, r)| r.state.is_pending())
.count();
progress.set_position((difs.len() - pending) as u64);
if pending == 0 {
break response;
}
thread::sleep(ASSEMBLE_POLL_INTERVAL);
};
progress.finish_and_clear();
if response.values().any(|r| r.state.is_pending()) {
println!("{} File upload complete:\n", style(">").dim());
} else {
println!("{} File processing complete:\n", style(">").dim());
}
let (mut successes, errors): (Vec<_>, _) = response
.into_iter()
.partition(|&(_, ref r)| !r.state.is_err());
// Print a summary of all successes first, so that errors show up at the
// bottom for the user
successes.sort_by_key(|&(_, ref success)| {
success
.dif
.as_ref()
.map(|x| x.object_name.as_str())
.unwrap_or("")
.to_owned()
});
let difs_by_checksum: BTreeMap<_, _> = difs.iter().map(|m| (m.checksum, m)).collect();
for &(checksum, ref success) in &successes {
// Silently skip all OK entries without a "dif" record since the server
// will always return one.
if let Some(ref dif) = success.dif {
// Files that have completed processing will contain a `dif` record
// returned by the server. Use this to show detailed information.
println!(
" {:>7} {} ({}; {}{})",
style("OK").green(),
style(&dif.id()).dim(),
dif.object_name,
dif.cpu_name,
dif.data
.kind
.map(|c| format!(" {:#}", c))
.unwrap_or_default()
);
render_detail(&success.detail, None);
} else if let Some(dif) = difs_by_checksum.get(&checksum) {
// If we skip waiting for the server to finish processing, there
// are pending entries. We only expect results that have been
// uploaded in the first place, so we can skip everything else.
let object = dif.object.get();
let kind = match object.kind() {
symbolic::debuginfo::ObjectKind::None => String::new(),
k => format!(" {:#}", k),
};
println!(
" {:>7} {} ({}; {}{})",
style("PENDING").yellow(),
style(object.debug_id()).dim(),
dif.name,
object.arch().name(),
kind,
);
}
// All other entries will be in the `errors` list.
}
// Print a summary of all errors at the bottom.
let mut errored = vec![];
for (checksum, error) in errors {
let dif = difs_by_checksum
.get(&checksum)
.ok_or_else(|| err_msg("Server returned unexpected checksum"))?;
errored.push((dif, error));
}
errored.sort_by_key(|x| x.0.file_name());
let has_errors = !errored.is_empty();
for (dif, error) in errored {
let fallback = match error.state {
ChunkedFileState::NotFound => Some("The file could not be saved"),
_ => Some("An unknown error occurred"),
};
println!(" {:>7} {}", style("ERROR").red(), dif.file_name());
render_detail(&error.detail, fallback);
}
// Return only successful uploads
Ok((
successes.into_iter().filter_map(|(_, r)| r.dif).collect(),
has_errors,
))
}
/// Uploads debug info files using the chunk-upload endpoint.
fn upload_difs_chunked(
options: &DifUpload,
chunk_options: &ChunkUploadOptions,
) -> Result<(Vec<DebugInfoFile>, bool), Error> {
// Search for debug files in the file system and ZIPs
let found = search_difs(options)?;
if found.is_empty() {
println!("{} No debug information files found", style(">").dim());
return Ok(Default::default());
}
// Try to resolve BCSymbolMaps
let symbol_map = options.symbol_map.as_deref();
let mut processed = process_symbol_maps(found, symbol_map)?;
// Resolve source code context if specified
if options.include_sources {
let source_bundles = create_source_bundles(&processed)?;
processed.extend(source_bundles);
}
// Calculate checksums and chunks
let chunked = prepare_difs(processed, |m| {
ChunkedDifMatch::from(m, chunk_options.chunk_size)
})?;
// Upload missing chunks to the server and remember incomplete difs
let missing_info = try_assemble_difs(&chunked, options)?;
upload_missing_chunks(&missing_info, chunk_options)?;
// Only if DIFs were missing, poll until assembling is complete
let (missing_difs, _) = missing_info;
if !missing_difs.is_empty() {
poll_dif_assemble(&missing_difs, options)
} else {
println!(
"{} Nothing to upload, all files are on the server",
style(">").dim()
);
Ok((Default::default(), false))
}
}
/// Returns debug files missing on the server.
fn get_missing_difs<'data>(
objects: Vec<HashedDifMatch<'data>>,
options: &DifUpload,
) -> Result<Vec<HashedDifMatch<'data>>, Error> {
info!(
"Checking for missing debug information files: {:#?}",
&objects
);
let api = Api::current();
let missing_checksums = {
let checksums = objects.iter().map(HashedDifMatch::checksum);
api.find_missing_dif_checksums(&options.org, &options.project, checksums)?
};
let missing = objects
.into_iter()
.filter(|sym| missing_checksums.contains(&sym.checksum()))
.collect();
info!("Missing debug information files: {:#?}", &missing);
Ok(missing)
}
/// Compresses the given batch into a ZIP archive.
fn create_batch_archive(difs: &[HashedDifMatch<'_>]) -> Result<TempFile, Error> {
let total_bytes = difs.iter().map(ItemSize::size).sum();
let pb = make_byte_progress_bar(total_bytes);
let tf = TempFile::create()?;
{
let mut zip = ZipWriter::new(tf.open()?);
for symbol in difs {
zip.start_file(symbol.file_name(), FileOptions::default())?;
copy_with_progress(&pb, &mut symbol.data(), &mut zip)?;
}
}
pb.finish_and_clear();
Ok(tf)
}
/// Uploads the given DIFs to the server in batched ZIP archives.
fn upload_in_batches(
objects: &[HashedDifMatch<'_>],
options: &DifUpload,
) -> Result<Vec<DebugInfoFile>, Error> {
let api = Api::current();
let max_size = Config::current().get_max_dif_archive_size()?;
let mut dsyms = Vec::new();
for (i, (batch, _)) in objects.batches(max_size, MAX_CHUNKS).enumerate() {
println!("\n{}", style(format!("Batch {}", i + 1)).bold());
println!(
"{} Compressing {} debug symbol files",
style(">").dim(),
style(batch.len()).yellow()
);
let archive = create_batch_archive(&batch)?;
println!("{} Uploading debug symbol files", style(">").dim());
dsyms.extend(api.upload_dif_archive(&options.org, &options.project, archive.path())?);
}
Ok(dsyms)
}
/// Uploads debug info files using the legacy endpoint.
fn upload_difs_batched(options: &DifUpload) -> Result<Vec<DebugInfoFile>, Error> {
// Search for debug files in the file system and ZIPs
let found = search_difs(options)?;
if found.is_empty() {
println!("{} No debug information files found", style(">").dim());
return Ok(Default::default());
}
// Try to resolve BCSymbolMaps
let symbol_map = options.symbol_map.as_deref();
let processed = process_symbol_maps(found, symbol_map)?;
// Calculate checksums
let hashed = prepare_difs(processed, HashedDifMatch::from)?;
// Check which files are missing on the server
let missing = get_missing_difs(hashed, options)?;
if missing.is_empty() {
println!(
"{} Nothing to upload, all files are on the server",
style(">").dim()
);
println!("{} Nothing to upload", style(">").dim());
return Ok(Default::default());
}
// Upload missing DIFs in batches
let uploaded = upload_in_batches(&missing, options)?;
if !uploaded.is_empty() {
println!("{} File upload complete:\n", style(">").dim());
for dif in &uploaded {
println!(
" {} ({}; {})",
style(&dif.id()).dim(),
&dif.object_name,
dif.cpu_name
);
}
}
Ok(uploaded)
}
/// Searches, processes and uploads debug information files (DIFs).
///
/// This struct is created with the `DifUpload::new` function. Then, set
/// search parameters and start the upload via `DifUpload::upload`.
///
/// ```
/// use utils::dif_upload::DifUpload;
///
/// DifUpload::new("org".into(), "project".into())
/// .search_path(".")
/// .upload()?;
/// ```
///
/// The upload tries to perform a chunked upload by requesting the new
/// `chunk-upload/` endpoint. If chunk uploads are disabled or the server does
/// not support them yet, it falls back to the legacy `files/dsyms/` endpoint.
///
/// The uploader will walk the given `paths` in the file system recursively and
/// search for DIFs. If `allow_zips` is not deactivated, it will also open ZIP
/// files and search there.
///
/// By default, all supported object files will be included. To customize this,
/// use the `filter_id`, `filter_kind`, `filter_class` and `filter_extension`
/// methods.
///
/// If `symbol_map` is set and Apple dSYMs with hidden symbols are found, the
/// uploader will first try to locate BCSymbolMaps and generate new dSYMs with
/// resolved symbols.
#[derive(Debug, Default)]
pub struct DifUpload {
org: String,
project: String,
paths: Vec<PathBuf>,
ids: BTreeSet<DebugId>,
formats: BTreeSet<FileFormat>,
features: DifFeatures,
extensions: BTreeSet<OsString>,
symbol_map: Option<PathBuf>,
zips_allowed: bool,
max_file_size: u64,
pdbs_allowed: bool,
sources_allowed: bool,
include_sources: bool,
wait: bool,
}
impl DifUpload {
/// Creates a new `DifUpload` with default parameters.
///
/// To use it, also add paths using `DifUpload::search_path`. It will scan
/// the paths and contained ZIPs for all supported object files and upload
/// them.
///
/// Use `DifUpload::symbol_map` to configure a location of BCSymbolMap files
/// to resolve hidden symbols in dSYMs obtained from iTunes Connect.
///
/// ```
/// use utils::dif_upload::DifUpload;
///
/// DifUpload::new("org", "project")
/// .search_path(".")
/// .upload()?;
/// ```
pub fn new(org: String, project: String) -> Self {
DifUpload {
org,
project,
paths: Vec::new(),
ids: BTreeSet::new(),
formats: BTreeSet::new(),
features: DifFeatures::all(),
extensions: BTreeSet::new(),
symbol_map: None,
zips_allowed: true,
max_file_size: DEFAULT_MAX_DIF_SIZE,
pdbs_allowed: false,
sources_allowed: false,
include_sources: false,
wait: false,
}
}
/// Adds a path to search for debug information files.
pub fn search_path<P>(&mut self, path: P) -> &mut Self
where
P: Into<PathBuf>,
{
self.paths.push(path.into());
self
}
/// Adds paths to search for debug information files.
pub fn search_paths<I>(&mut self, paths: I) -> &mut Self
where
I: IntoIterator,
I::Item: Into<PathBuf>,
{
for path in paths {
self.paths.push(path.into())
}
self
}
/// Add a `DebugId` to filter for.
///
/// By default, all DebugIds will be included.
pub fn filter_id<I>(&mut self, id: I) -> &mut Self
where
I: Into<DebugId>,
{
self.ids.insert(id.into());
self
}
/// Add `DebugId`s to filter for.
///
/// By default, all DebugIds will be included. If `ids` is empty, this will
/// not be changed.
pub fn filter_ids<I>(&mut self, ids: I) -> &mut Self
where
I: IntoIterator,
I::Item: Into<DebugId>,
{
for id in ids {
self.ids.insert(id.into());
}
self
}
    /// Add a `FileFormat` to filter for.
///
/// By default, all object formats will be included.
pub fn filter_format(&mut self, format: FileFormat) -> &mut Self {
self.formats.insert(format);
self
}
/// Add `FileFormat`s to filter for.
///
/// By default, all object formats will be included. If `formats` is empty, this
/// will not be changed.
pub fn filter_formats<I>(&mut self, formats: I) -> &mut Self
where
I: IntoIterator<Item = FileFormat>,
{
self.formats.extend(formats);
self
}
    /// Set the `DifFeatures` to filter for.
///
/// By default, all object features will be included.
pub fn filter_features(&mut self, features: DifFeatures) -> &mut Self {
self.features = features;
self
}
/// Add a file extension to filter for.
///
/// By default, all file extensions will be included.
pub fn filter_extension<S>(&mut self, extension: S) -> &mut Self
where
S: Into<OsString>,
{
self.extensions.insert(extension.into());
self
}
    /// Add file extensions to filter for.
///
/// By default, all file extensions will be included.
pub fn filter_extensions<I>(&mut self, extensions: I) -> &mut Self
where
I: IntoIterator,
I::Item: Into<OsString>,
{
for extension in extensions {
self.extensions.insert(extension.into());
}
self
}
/// Set a path containing BCSymbolMaps to resolve hidden symbols in dSYMs
/// obtained from iTunes Connect. This requires the `dsymutil` command.
///
/// By default, hidden symbol resolution will be skipped.
pub fn symbol_map<P>(&mut self, path: P) -> Result<&mut Self, Error>
where
P: Into<PathBuf>,
{
which("dsymutil").map_err(|_| err_msg("Command `dsymutil` not found"))?;
self.symbol_map = Some(path.into());
Ok(self)
}
/// Set whether opening and searching ZIPs for debug information files is
/// allowed or not.
///
/// Defaults to `true`.
pub fn allow_zips(&mut self, allow: bool) -> &mut Self {
self.zips_allowed = allow;
self
}
/// Set whether source files should be resolved during the scan process and
/// uploaded as a separate archive.
///
/// Defaults to `false`.
pub fn include_sources(&mut self, include: bool) -> &mut Self {
self.include_sources = include;
self
}
/// Set whether the upload should wait for the server to complete processing
/// files or exit immediately after the upload.
///
/// Defaults to `false`.
pub fn wait(&mut self, wait: bool) -> &mut Self {
self.wait = wait;
self
}
/// Performs the search for DIFs and uploads them.
///
/// ```
/// use utils::dif_upload::DifUpload;
///
/// DifUpload::new("org", "project")
/// .search_path(".")
/// .upload()?;
/// ```
///
    /// The `Ok` part of the return value is `(files, has_errors)`. The
    /// latter can be used to signal a failure state from the upload.
pub fn upload(&mut self) -> Result<(Vec<DebugInfoFile>, bool), Error> {
if self.paths.is_empty() {
println!("{}: No paths were provided.", style("Warning").yellow());
return Ok(Default::default());
}
let api = Api::current();
if let Some(ref chunk_options) = api.get_chunk_upload_options(&self.org)? {
if chunk_options.max_file_size > 0 {
self.max_file_size = chunk_options.max_file_size;
}
self.pdbs_allowed = chunk_options.supports(ChunkUploadCapability::Pdbs);
self.sources_allowed = chunk_options.supports(ChunkUploadCapability::Sources);
if chunk_options.supports(ChunkUploadCapability::DebugFiles) {
self.validate_capabilities();
return upload_difs_chunked(self, chunk_options);
}
}
self.validate_capabilities();
Ok((upload_difs_batched(self)?, false))
}
/// Validate that the server supports all requested capabilities.
fn validate_capabilities(&mut self) {
// Checks whether source bundles are *explicitly* requested on the command line.
if (self.formats.contains(&FileFormat::SourceBundle) || self.include_sources)
&& !self.sources_allowed
{
warn!("Source uploads are not supported by the configured Sentry server");
self.include_sources = false;
}
// Checks whether PDBs or PEs were *explicitly* requested on the command line.
if (self.formats.contains(&FileFormat::Pdb) || self.formats.contains(&FileFormat::Pe))
&& !self.pdbs_allowed
{
warn!("PDBs and PEs are not supported by the configured Sentry server");
// This is validated additionally in .valid_format()
}
}
/// Determines if this `DebugId` matches the search criteria.
fn valid_id(&self, id: DebugId) -> bool {
self.ids.is_empty() || self.ids.contains(&id)
}
/// Determines if this file extension matches the search criteria.
fn valid_extension(&self, ext: Option<&OsStr>) -> bool {
self.extensions.is_empty() || ext.map_or(false, |e| self.extensions.contains(e))
}
/// Determines if this `FileFormat` matches the search criteria.
fn valid_format(&self, format: FileFormat) -> bool {
match format {
FileFormat::Unknown => false,
FileFormat::Pdb | FileFormat::Pe if !self.pdbs_allowed => false,
FileFormat::SourceBundle if !self.sources_allowed => false,
format => self.formats.is_empty() || self.formats.contains(&format),
}
}
/// Determines if the given `Object` matches the features search criteria.
fn valid_features(&self, object: &Object<'_>) -> bool {
self.features.symtab && object.has_symbols()
|| self.features.debug && object.has_debug_info()
|| self.features.unwind && object.has_unwind_info()
|| self.features.sources && object.has_sources()
}
}
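// A minimal illustrative test sketch (not part of the original file): empty
// filter sets act as wildcards, so the private `valid_*` helpers accept
// everything until at least one filter has been registered. Assumes that
// `DebugId` implements `Default`.
#[cfg(test)]
mod dif_upload_filter_tests {
    use super::*;
    use std::ffi::OsStr;

    #[test]
    fn empty_filters_accept_everything() {
        let upload = DifUpload::new("org".into(), "project".into());
        // No ids registered: every `DebugId` passes.
        assert!(upload.valid_id(DebugId::default()));
        // No extensions registered: any extension (or none) passes.
        assert!(upload.valid_extension(Some(OsStr::new("dSYM"))));
        assert!(upload.valid_extension(None));
    }
}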
| 33.835511 | 100 | 0.586374 |
cc1d37493e790e98a4766c5a6c707aa0a62a7499
| 5,796 |
use std::{borrow::Cow, future::Future};
use hyper::upgrade::OnUpgrade;
use tokio_tungstenite::tungstenite::protocol::Role;
use super::{utils::sign, WebSocketStream};
use crate::{
error::ErrorBodyHasBeenTaken,
http::{
header::{self, HeaderValue},
Method, StatusCode,
},
Body, Error, FromRequest, IntoResponse, Request, Response, Result,
};
/// An extractor that can accept websocket connections.
pub struct WebSocket {
key: HeaderValue,
on_upgrade: OnUpgrade,
protocols: Option<Box<[Cow<'static, str>]>>,
sec_websocket_protocol: Option<HeaderValue>,
}
#[async_trait::async_trait]
impl<'a> FromRequest<'a> for WebSocket {
async fn from_request(req: &'a Request, body: &mut Option<Body>) -> Result<Self> {
        // Reject anything that is not a well-formed websocket upgrade request.
        if req.method() != Method::GET
            || req.headers().get(header::CONNECTION) != Some(&HeaderValue::from_static("upgrade"))
            || req.headers().get(header::UPGRADE) != Some(&HeaderValue::from_static("websocket"))
            || req.headers().get(header::SEC_WEBSOCKET_VERSION)
                != Some(&HeaderValue::from_static("13"))
{
return Err(Error::bad_request(anyhow::anyhow!("bad request")));
}
let key = req
.headers()
.get(header::SEC_WEBSOCKET_KEY)
.cloned()
.ok_or_else(|| Error::bad_request(anyhow::anyhow!("bad request")))?;
let sec_websocket_protocol = req.headers().get(header::SEC_WEBSOCKET_PROTOCOL).cloned();
let hyper_req = {
let mut hyper_req = hyper::Request::default();
*hyper_req.method_mut() = req.method().clone();
*hyper_req.uri_mut() = req.uri().clone();
*hyper_req.version_mut() = req.version();
*hyper_req.headers_mut() = req.headers().clone();
*hyper_req.body_mut() = body.take().ok_or(ErrorBodyHasBeenTaken)?.0;
hyper_req
};
let on_upgrade = hyper::upgrade::on(hyper_req);
Ok(Self {
key,
on_upgrade,
protocols: None,
sec_websocket_protocol,
})
}
}
impl WebSocket {
/// Set the known protocols.
///
    /// If the protocol name specified by the `Sec-WebSocket-Protocol` header
    /// matches any of them, the upgrade response will include a
    /// `Sec-WebSocket-Protocol` header with the selected protocol name.
///
/// ```
/// use futures_util::{SinkExt, StreamExt};
/// use poem::{get, handler, route, web::websocket::WebSocket, IntoResponse};
///
/// #[handler]
/// async fn index(ws: WebSocket) -> impl IntoResponse {
/// ws.protocols(vec!["graphql-rs", "graphql-transport-ws"])
/// .on_upgrade(|socket| async move {
/// // ...
/// })
/// }
///
/// let app = route().at("/", get(index));
/// ```
#[must_use]
pub fn protocols<I>(mut self, protocols: I) -> Self
where
I: IntoIterator,
I::Item: Into<Cow<'static, str>>,
{
self.protocols = Some(
protocols
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
);
self
}
/// Finalize upgrading the connection and call the provided `callback` with
/// the stream.
///
/// Note that the return value of this function must be returned from the
/// handler.
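    ///
    /// # Example
    ///
    /// A minimal echo sketch (illustrative; it assumes the `Stream`/`Sink`
    /// message API that the doc example for [`WebSocket::protocols`] also
    /// relies on):
    ///
    /// ```
    /// use futures_util::{SinkExt, StreamExt};
    /// use poem::{handler, web::websocket::WebSocket, IntoResponse};
    ///
    /// #[handler]
    /// async fn echo(ws: WebSocket) -> impl IntoResponse {
    ///     ws.on_upgrade(|mut socket| async move {
    ///         while let Some(Ok(msg)) = socket.next().await {
    ///             if socket.send(msg).await.is_err() {
    ///                 break;
    ///             }
    ///         }
    ///     })
    /// }
    /// ```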
#[must_use]
pub fn on_upgrade<F, Fut>(self, callback: F) -> impl IntoResponse
where
F: Fn(WebSocketStream) -> Fut + Send + Sync + 'static,
Fut: Future + Send + 'static,
{
WebSocketUpgraded {
websocket: self,
callback,
}
}
}
struct WebSocketUpgraded<F> {
websocket: WebSocket,
callback: F,
}
impl<F, Fut> IntoResponse for WebSocketUpgraded<F>
where
F: Fn(WebSocketStream) -> Fut + Send + Sync + 'static,
Fut: Future + Send + 'static,
{
fn into_response(self) -> Result<Response> {
// check requested protocols
let protocol = self
.websocket
.sec_websocket_protocol
.as_ref()
.and_then(|req_protocols| {
let req_protocols = req_protocols.to_str().ok()?;
let protocols = self.websocket.protocols.as_ref()?;
req_protocols
.split(',')
.map(|req_p| req_p.trim())
.find(|req_p| protocols.iter().any(|p| p == req_p))
});
let protocol = match protocol {
Some(protocol) => Some(
protocol
.parse::<HeaderValue>()
.map_err(|_| Error::bad_request(anyhow::anyhow!("bad request")))?,
),
None => None,
};
let mut builder = Response::builder()
.status(StatusCode::SWITCHING_PROTOCOLS)
.header(header::CONNECTION, "upgrade")
.header(header::UPGRADE, "websocket")
.header(
header::SEC_WEBSOCKET_ACCEPT,
sign(self.websocket.key.as_bytes()),
);
if let Some(protocol) = protocol {
builder = builder.header(header::SEC_WEBSOCKET_PROTOCOL, protocol);
}
let resp = builder.body(Body::empty())?;
tokio::spawn(async move {
let upgraded = match self.websocket.on_upgrade.await {
Ok(upgraded) => upgraded,
Err(_) => return,
};
let stream =
tokio_tungstenite::WebSocketStream::from_raw_socket(upgraded, Role::Server, None)
.await;
(self.callback)(WebSocketStream::new(stream)).await;
});
Ok(resp)
}
}
| 31.32973 | 98 | 0.545204 |
386d2042fb6e9abfd6bc0bbbe61168db106c1c60
| 3,137 |
#![cfg_attr(not(feature = "std"), no_std)]
use band_bridge;
#[cfg(feature = "std")]
use borsh::BorshDeserialize;
use frame_support::{decl_error, decl_event, decl_module, decl_storage, dispatch};
use frame_system::{self as system};
use sp_std::prelude::*;
/// The pallet's configuration trait.
pub trait Trait: system::Trait + band_bridge::Trait {
// Add other types and constants required to configure this pallet.
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
// This pallet's storage items.
decl_storage! {
trait Store for Module<T: Trait> as SimplePriceDB {
BlockIDToPrice get(fn simple_map): map hasher(blake2_128_concat) T::BlockNumber => u64;
}
}
// The pallet's events
decl_event!(
pub enum Event<T>
where
AccountId = <T as system::Trait>::AccountId,
BlockNumber = <T as system::Trait>::BlockNumber,
{
SetPriceAtBlock(u64, BlockNumber),
DecodeFail(AccountId),
}
);
// The pallet's errors
decl_error! {
pub enum Error for Module<T: Trait> {
        /// Failed to decode the Borsh-encoded price data
BorshDecodeFail,
/// Value reached maximum and cannot be incremented further
StorageOverflow,
        /// Proof verification failed
VerificationFail,
}
}
// Define struct Price
#[cfg(feature = "std")]
#[derive(BorshDeserialize)]
struct Price {
px: u64,
}
// The pallet's dispatchable functions.
decl_module! {
/// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initializing errors
// this includes information about your errors in the node's metadata.
// it is needed only if you are using errors in your pallet
type Error = Error<T>;
// Initializing events
// this is needed only if you are using events in your pallet
fn deposit_event() = default;
#[weight = frame_support::weights::SimpleDispatchInfo::default()]
pub fn set_price(_origin, proof_data: Vec<u8>) -> dispatch::DispatchResult {
// Call Bridge contract to verify proof
let res_opt = <band_bridge::Module<T>>::verify_proof(proof_data.clone());
match res_opt {
Some(res) => {
#[cfg(feature = "std")]
let price = Price::try_from_slice(&res).map_err(|_| Error::<T>::BorshDecodeFail)?;
// Call the `system` pallet to get the current block number
let current_block = <system::Module<T>>::block_number();
// Update key-value
#[cfg(feature = "std")]
<BlockIDToPrice<T>>::insert(
                        &current_block,
&price.px
);
// Here we are raising the SetPriceAtBlock event
#[cfg(feature = "std")]
Self::deposit_event(RawEvent::SetPriceAtBlock(price.px, current_block));
Ok(())
},
None => Err(Error::<T>::VerificationFail)?,
}
}
}
}
| 31.37 | 102 | 0.583041 |
e613745b2e2c6578495ad46faa8ab29d3adc4b5e
| 224 |
use encoding_rs::SHIFT_JIS;
pub fn to_utf8_string(v: &[u8]) -> String {
    // `decode` returns `(text, encoding, had_errors)`; try Shift_JIS first.
    let decoded = SHIFT_JIS.decode(v);
    if !decoded.2 {
        // The bytes decoded cleanly as Shift_JIS.
        return decoded.0.to_string();
    }
    // Otherwise assume the input was already UTF-8 (panics if it is not).
    String::from_utf8(v.to_owned()).unwrap()
}
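// A minimal test sketch (not in the original file); the Shift_JIS byte
// sequence below encodes "テスト", and plain ASCII decodes unchanged.
#[cfg(test)]
mod tests {
    use super::to_utf8_string;

    #[test]
    fn decodes_shift_jis_and_passes_ascii_through() {
        assert_eq!(to_utf8_string(&[0x83, 0x65, 0x83, 0x58, 0x83, 0x67]), "テスト");
        assert_eq!(to_utf8_string(b"hello"), "hello");
    }
}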
| 20.363636 | 44 | 0.625 |
08ca8c55e4ec6e3c2c5e17f4da7d2dd85c197d4b
| 5,165 |
// بِسْمِ اللَّهِ الرَّحْمَنِ الرَّحِيم ("In the name of Allah, the Most Gracious, the Most Merciful")
// This file is part of Setheum.
// Copyright (C) 2019-2021 Setheum Labs.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Mocks for the evm-accounts module.
#![cfg(test)]
use super::*;
use frame_support::{construct_runtime, parameter_types};
use orml_traits::parameter_type_with_key;
use primitives::{Amount, Balance, CurrencyId, TokenSymbol};
use sp_core::{crypto::AccountId32, H256};
use sp_io::hashing::keccak_256;
use sp_runtime::{testing::Header, traits::IdentityLookup};
pub type AccountId = AccountId32;
pub type BlockNumber = u64;
pub const ALICE: AccountId = AccountId32::new([0u8; 32]);
pub const BOB: AccountId = AccountId32::new([1u8; 32]);
mod evm_accounts {
pub use super::super::*;
}
parameter_types! {
pub const BlockHashCount: u64 = 250;
}
impl frame_system::Config for Runtime {
type Origin = Origin;
type Index = u64;
type BlockNumber = BlockNumber;
type Call = Call;
type Hash = H256;
type Hashing = ::sp_runtime::traits::BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type BlockWeights = ();
type BlockLength = ();
type Version = ();
type PalletInfo = PalletInfo;
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type DbWeight = ();
type BaseCallFilter = ();
type SystemWeightInfo = ();
type SS58Prefix = ();
type OnSetCode = ();
}
parameter_types! {
pub const ExistentialDeposit: u64 = 1;
}
impl pallet_balances::Config for Runtime {
type Balance = Balance;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = frame_system::Pallet<Runtime>;
type MaxLocks = ();
type MaxReserves = ();
type ReserveIdentifier = [u8; 8];
type WeightInfo = ();
}
parameter_type_with_key! {
pub ExistentialDeposits: |_currency_id: CurrencyId| -> Balance {
Default::default()
};
}
impl orml_tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = CurrencyId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
type MaxLocks = ();
type DustRemovalWhitelist = ();
}
parameter_types! {
pub const GetNativeCurrencyId: CurrencyId = CurrencyId::Token(TokenSymbol::SETM);
}
impl orml_currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency = AdaptedBasicCurrency;
type GetNativeCurrencyId = GetNativeCurrencyId;
type WeightInfo = ();
}
pub type AdaptedBasicCurrency = orml_currencies::BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>;
impl Config for Runtime {
type Event = Event;
type Currency = Balances;
type AddressMapping = EvmAddressMapping<Runtime>;
type TransferAll = Currencies;
type WeightInfo = ();
}
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Pallet, Call, Storage, Config, Event<T>},
EvmAccountsModule: evm_accounts::{Pallet, Call, Storage, Event<T>},
Tokens: orml_tokens::{Pallet, Storage, Event<T>, Config<T>},
Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
Currencies: orml_currencies::{Pallet, Call, Event<T>},
}
);
pub struct ExtBuilder();
impl Default for ExtBuilder {
fn default() -> Self {
Self()
}
}
impl ExtBuilder {
pub fn build(self) -> sp_io::TestExternalities {
let mut t = frame_system::GenesisConfig::default()
.build_storage::<Runtime>()
.unwrap();
pallet_balances::GenesisConfig::<Runtime> {
balances: vec![(bob_account_id(), 100000)],
}
.assimilate_storage(&mut t)
.unwrap();
let mut ext = sp_io::TestExternalities::new(t);
ext.execute_with(|| System::set_block_number(1));
ext
}
}
pub fn alice() -> secp256k1::SecretKey {
secp256k1::SecretKey::parse(&keccak_256(b"Alice")).unwrap()
}
pub fn bob() -> secp256k1::SecretKey {
secp256k1::SecretKey::parse(&keccak_256(b"Bob")).unwrap()
}
pub fn bob_account_id() -> AccountId {
let address = EvmAccountsModule::eth_address(&bob());
let mut data = [0u8; 32];
data[0..4].copy_from_slice(b"evm:");
data[4..24].copy_from_slice(&address[..]);
AccountId32::from(Into::<[u8; 32]>::into(data))
}
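// An illustrative test sketch (assumed, not part of the original mock): the
// genesis config in `ExtBuilder::build` seeds Bob's account with 100000
// units, observable through the usual `free_balance` accessor of
// `pallet_balances`.
#[test]
fn ext_builder_seeds_bob_balance() {
    ExtBuilder::default().build().execute_with(|| {
        assert_eq!(Balances::free_balance(&bob_account_id()), 100000);
    });
}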
| 27.918919 | 110 | 0.722943 |
e9f10f500f444898c8c1dec923b4bba748b95d1b
| 3,818 |
use bio::io::bed;
use rust_htslib::bam::IndexedReader;
use std::sync::{Arc, Mutex};
use rust_htslib::prelude::*;
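/// Counts the reads in `idxr` whose shifted start position falls inside the
/// BED record `rec`. Alignment starts are offset by +4 bp on the forward
/// strand and -4 bp on the reverse strand (see the strand check below).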
pub fn get_count_in_region(idxr: &Arc<Mutex<IndexedReader>>, rec: &bed::Record) -> u32 {
use rust_htslib::bam;
let chrom_as_bytes = rec.chrom().as_bytes();
let mut idxr = idxr.lock().unwrap();
let tid = idxr.header().tid(chrom_as_bytes).unwrap();
idxr.fetch(tid, rec.start() as u32, rec.end() as u32).unwrap();
let mut bam_rec = bam::Record::new();
let mut count = 0;
while let Ok(_r) = idxr.read(&mut bam_rec) {
let pos = if bam_rec.is_reverse() {
bam_rec.pos() - 4
} else {
            bam_rec.pos() + 4 // BAM records are 0-based like BED; shift the cut site +4 (forward) / -4 (reverse)
//TODO: check on left-rightness of bam records
//TODO: check if bam/bed zero base assumptions are correct
} as u32;
if pos >= rec.start() as u32 && pos <= rec.end() as u32 {
count += 1;
}
}
    count
}
pub fn counts(bed_path: &str, bams: &Vec<&str>, p: usize) {
//&Vec<&str>
use rayon::prelude::*;
use csv;
use rust_htslib::bam::IndexedReader;
use super::regions::expand_region;
use super::regions;
use indicatif::{ProgressBar, ProgressStyle};
use rayon::ThreadPoolBuilder;
use std::io;
use bio::io::bed;
use ndarray::prelude::*;
let mut reader = bed::Reader::from_file(bed_path).unwrap();
// vector of regions with expanded coords
let recs: Vec<bed::Record> = reader.records()
.map(|a| a.unwrap())
.map(|a| expand_region(a, -5, 5)) // 5 both sides
.collect();
let n_row = recs.len() as u64;
let n_col = bams.len() as u64;
ThreadPoolBuilder::new().num_threads(p).build_global().unwrap();
//------------------------------------
// BASIC PARALLEL VERSION
/*let mut cuts_vec: Vec<u32> = Vec::new();
for bam in bams {
let pb = ProgressBar::new(n_row);
pb.set_style(ProgressStyle::default_bar()
.template("[{eta_precise}] {bar:40.red/blue} {pos:>7}/{len:7} {msg}")
.progress_chars("$$-"));
let idxr = Arc::new(Mutex::new(IndexedReader::from_path(bam).unwrap()));
let cuts: Vec<u32> = recs.par_iter()
.map(|a| { pb.inc(1); get_count_in_region(&idxr, &a)})
.collect();
cuts_vec.extend(cuts);
pb.finish_with_message("Cash Money!!");
}
let arr = Array::from_shape_vec((n_col as usize, n_row as usize), cuts_vec).unwrap()
.reversed_axes();*/
//------------------------------------
// MEGA PARALLEL
let pb = ProgressBar::new(n_row * n_col);
pb.set_style(ProgressStyle::default_bar()
.template("Counting transposition events... {bar:40.blue/red} {percent}% {msg}")
.progress_chars("##-"));
// TODO make this a flat matrix and elim the flattening below
let cuts_vec: Vec<Vec<u32>> = bams.par_iter().map(|bam| {
let idxr = Arc::new(Mutex::new(IndexedReader::from_path(bam).unwrap()));
let cuts: Vec<u32> = recs.par_iter()
.map(|a| { pb.inc(1); get_count_in_region(&idxr, &a)})
.collect();
cuts
}).collect();
pb.finish_with_message("Complete.");
let cuts_vec_flat: Vec<u32> = cuts_vec.iter()
.flat_map(|a| a.iter())
.cloned()
.collect();
let arr = Array::from_shape_vec((n_col as usize, n_row as usize), cuts_vec_flat).unwrap()
.reversed_axes();
//------------------------------------
let mut csv_header = vec!["region"];
csv_header.append(&mut bams.clone());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(&csv_header).unwrap();
for (i, c) in recs.iter().enumerate() {
wtr.write_field(regions::region_as_string(c)).unwrap();
//&recs[i]
wtr.serialize(arr.row(i).to_vec()).unwrap();
}
wtr.flush().unwrap();
}
| 26.331034 | 93 | 0.5901 |
e242c5f1bc809129dee1cb44fa91e7181bee1d9b
| 835 |
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ip.rsh"
// #pragma rs_fp_relaxed
static float bright = 1.2439024f;
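// Note: 1.2439024f == 255.f / (255.f - 50.f), i.e. what the commented-out
// setBright() below would compute for v = 50.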
/*
void setBright(float v) {
bright = 255.f / (255.f - v);
}
*/
float4 RS_KERNEL exposure(float4 in)
{
return in * bright;
}
| 25.30303 | 75 | 0.71018 |
91c1789a2ffb1b592da5b6c06132962fa074aed1
| 11,445 |
use crate::utils;
use crate::utils::paths;
use crate::utils::sugg::Sugg;
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind, Mutability, Param, Pat, PatKind, Path, PathSegment, QPath};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty::{self, subst::GenericArgKind};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::Ident;
declare_clippy_lint! {
/// **What it does:**
/// Detects uses of `Vec::sort_by` passing in a closure
/// which compares the two arguments, either directly or indirectly.
///
/// **Why is this bad?**
/// It is more clear to use `Vec::sort_by_key` (or `Vec::sort` if
/// possible) than to use `Vec::sort_by` and a more complicated
/// closure.
///
/// **Known problems:**
/// If the suggested `Vec::sort_by_key` uses Reverse and it isn't already
/// imported by a use statement, then it will need to be added manually.
///
/// **Example:**
///
/// ```rust
/// # struct A;
/// # impl A { fn foo(&self) {} }
/// # let mut vec: Vec<A> = Vec::new();
/// vec.sort_by(|a, b| a.foo().cmp(&b.foo()));
/// ```
/// Use instead:
/// ```rust
/// # struct A;
/// # impl A { fn foo(&self) {} }
/// # let mut vec: Vec<A> = Vec::new();
/// vec.sort_by_key(|a| a.foo());
/// ```
pub UNNECESSARY_SORT_BY,
complexity,
"Use of `Vec::sort_by` when `Vec::sort_by_key` or `Vec::sort` would be clearer"
}
declare_lint_pass!(UnnecessarySortBy => [UNNECESSARY_SORT_BY]);
enum LintTrigger {
Sort(SortDetection),
SortByKey(SortByKeyDetection),
}
struct SortDetection {
vec_name: String,
unstable: bool,
}
struct SortByKeyDetection {
vec_name: String,
closure_arg: String,
closure_body: String,
reverse: bool,
unstable: bool,
}
/// Detect if the two expressions are mirrored (identical, except one
/// contains `a_ident` where the other contains `b_ident`)
fn mirrored_exprs(
cx: &LateContext<'_>,
a_expr: &Expr<'_>,
a_ident: &Ident,
b_expr: &Expr<'_>,
b_ident: &Ident,
) -> bool {
match (&a_expr.kind, &b_expr.kind) {
// Two boxes with mirrored contents
(ExprKind::Box(left_expr), ExprKind::Box(right_expr)) => {
mirrored_exprs(cx, left_expr, a_ident, right_expr, b_ident)
},
// Two arrays with mirrored contents
(ExprKind::Array(left_exprs), ExprKind::Array(right_exprs)) => left_exprs
.iter()
.zip(right_exprs.iter())
.all(|(left, right)| mirrored_exprs(cx, left, a_ident, right, b_ident)),
// The two exprs are function calls.
// Check to see that the function itself and its arguments are mirrored
(ExprKind::Call(left_expr, left_args), ExprKind::Call(right_expr, right_args)) => {
mirrored_exprs(cx, left_expr, a_ident, right_expr, b_ident)
&& left_args
.iter()
.zip(right_args.iter())
.all(|(left, right)| mirrored_exprs(cx, left, a_ident, right, b_ident))
},
// The two exprs are method calls.
// Check to see that the function is the same and the arguments are mirrored
// This is enough because the receiver of the method is listed in the arguments
(
ExprKind::MethodCall(left_segment, _, left_args, _),
ExprKind::MethodCall(right_segment, _, right_args, _),
) => {
left_segment.ident == right_segment.ident
&& left_args
.iter()
.zip(right_args.iter())
.all(|(left, right)| mirrored_exprs(cx, left, a_ident, right, b_ident))
},
// Two tuples with mirrored contents
(ExprKind::Tup(left_exprs), ExprKind::Tup(right_exprs)) => left_exprs
.iter()
.zip(right_exprs.iter())
.all(|(left, right)| mirrored_exprs(cx, left, a_ident, right, b_ident)),
// Two binary ops, which are the same operation and which have mirrored arguments
(ExprKind::Binary(left_op, left_left, left_right), ExprKind::Binary(right_op, right_left, right_right)) => {
left_op.node == right_op.node
&& mirrored_exprs(cx, left_left, a_ident, right_left, b_ident)
&& mirrored_exprs(cx, left_right, a_ident, right_right, b_ident)
},
// Two unary ops, which are the same operation and which have the same argument
(ExprKind::Unary(left_op, left_expr), ExprKind::Unary(right_op, right_expr)) => {
left_op == right_op && mirrored_exprs(cx, left_expr, a_ident, right_expr, b_ident)
},
// The two exprs are literals of some kind
(ExprKind::Lit(left_lit), ExprKind::Lit(right_lit)) => left_lit.node == right_lit.node,
(ExprKind::Cast(left, _), ExprKind::Cast(right, _)) => mirrored_exprs(cx, left, a_ident, right, b_ident),
(ExprKind::DropTemps(left_block), ExprKind::DropTemps(right_block)) => {
mirrored_exprs(cx, left_block, a_ident, right_block, b_ident)
},
(ExprKind::Field(left_expr, left_ident), ExprKind::Field(right_expr, right_ident)) => {
            left_ident.name == right_ident.name && mirrored_exprs(cx, left_expr, a_ident, right_expr, b_ident)
},
// Two paths: either one is a and the other is b, or they're identical to each other
(
ExprKind::Path(QPath::Resolved(
_,
Path {
segments: left_segments,
..
},
)),
ExprKind::Path(QPath::Resolved(
_,
Path {
segments: right_segments,
..
},
)),
) => {
(left_segments
.iter()
.zip(right_segments.iter())
.all(|(left, right)| left.ident == right.ident)
&& left_segments
.iter()
.all(|seg| &seg.ident != a_ident && &seg.ident != b_ident))
|| (left_segments.len() == 1
&& &left_segments[0].ident == a_ident
&& right_segments.len() == 1
&& &right_segments[0].ident == b_ident)
},
// Matching expressions, but one or both is borrowed
(
ExprKind::AddrOf(left_kind, Mutability::Not, left_expr),
ExprKind::AddrOf(right_kind, Mutability::Not, right_expr),
) => left_kind == right_kind && mirrored_exprs(cx, left_expr, a_ident, right_expr, b_ident),
(_, ExprKind::AddrOf(_, Mutability::Not, right_expr)) => {
mirrored_exprs(cx, a_expr, a_ident, right_expr, b_ident)
},
(ExprKind::AddrOf(_, Mutability::Not, left_expr), _) => mirrored_exprs(cx, left_expr, a_ident, b_expr, b_ident),
_ => false,
}
    }
}
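// Illustrative example: in `vec.sort_by(|a, b| a.foo().cmp(&b.foo()))`, the
// subexpressions `a.foo()` and `b.foo()` are mirrored with respect to the
// closure arguments, which is what lets `detect_lint` below suggest
// `vec.sort_by_key(|a| a.foo())` instead.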
fn detect_lint(cx: &LateContext<'_>, expr: &Expr<'_>) -> Option<LintTrigger> {
if_chain! {
if let ExprKind::MethodCall(name_ident, _, args, _) = &expr.kind;
if let name = name_ident.ident.name.to_ident_string();
if name == "sort_by" || name == "sort_unstable_by";
if let [vec, Expr { kind: ExprKind::Closure(_, _, closure_body_id, _, _), .. }] = args;
if utils::match_type(cx, &cx.tables().expr_ty(vec), &paths::VEC);
if let closure_body = cx.tcx.hir().body(*closure_body_id);
if let &[
Param { pat: Pat { kind: PatKind::Binding(_, _, left_ident, _), .. }, ..},
Param { pat: Pat { kind: PatKind::Binding(_, _, right_ident, _), .. }, .. }
] = &closure_body.params;
if let ExprKind::MethodCall(method_path, _, [ref left_expr, ref right_expr], _) = &closure_body.value.kind;
if method_path.ident.name.to_ident_string() == "cmp";
then {
let (closure_body, closure_arg, reverse) = if mirrored_exprs(
&cx,
&left_expr,
&left_ident,
&right_expr,
&right_ident
) {
(Sugg::hir(cx, &left_expr, "..").to_string(), left_ident.name.to_string(), false)
} else if mirrored_exprs(&cx, &left_expr, &right_ident, &right_expr, &left_ident) {
(Sugg::hir(cx, &left_expr, "..").to_string(), right_ident.name.to_string(), true)
} else {
return None;
};
let vec_name = Sugg::hir(cx, &args[0], "..").to_string();
let unstable = name == "sort_unstable_by";
if_chain! {
if let ExprKind::Path(QPath::Resolved(_, Path {
segments: [PathSegment { ident: left_name, .. }], ..
})) = &left_expr.kind;
if left_name == left_ident;
then {
return Some(LintTrigger::Sort(SortDetection { vec_name, unstable }))
} else {
if !key_returns_borrow(cx, left_expr) {
return Some(LintTrigger::SortByKey(SortByKeyDetection {
vec_name,
unstable,
closure_arg,
closure_body,
reverse
}))
}
}
}
}
}
None
}
fn key_returns_borrow(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
if let Some(def_id) = utils::fn_def_id(cx, expr) {
let output = cx.tcx.fn_sig(def_id).output();
let ty = output.skip_binder();
return matches!(ty.kind, ty::Ref(..))
|| ty.walk().any(|arg| matches!(arg.unpack(), GenericArgKind::Lifetime(_)));
}
false
}
impl LateLintPass<'_> for UnnecessarySortBy {
fn check_expr(&mut self, cx: &LateContext<'_>, expr: &Expr<'_>) {
match detect_lint(cx, expr) {
Some(LintTrigger::SortByKey(trigger)) => utils::span_lint_and_sugg(
cx,
UNNECESSARY_SORT_BY,
expr.span,
"use Vec::sort_by_key here instead",
"try",
format!(
"{}.sort{}_by_key(|&{}| {})",
trigger.vec_name,
if trigger.unstable { "_unstable" } else { "" },
trigger.closure_arg,
if trigger.reverse {
format!("Reverse({})", trigger.closure_body)
} else {
trigger.closure_body.to_string()
},
),
if trigger.reverse {
Applicability::MaybeIncorrect
} else {
Applicability::MachineApplicable
},
),
Some(LintTrigger::Sort(trigger)) => utils::span_lint_and_sugg(
cx,
UNNECESSARY_SORT_BY,
expr.span,
"use Vec::sort here instead",
"try",
format!(
"{}.sort{}()",
trigger.vec_name,
if trigger.unstable { "_unstable" } else { "" },
),
Applicability::MachineApplicable,
),
None => {},
}
}
}
| 40.441696 | 120 | 0.533246 |
cc2ea3ac91323cc1f7d0ca6920d03af5bed22738
| 1,286 |
use crate::common::ast;
use crate::common::tld::tld_kind;
/// Declaration
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone)]
pub struct TopLevelDecl {
pub kind: tld_kind::TLDKind,
}
impl TopLevelDecl {
pub fn new(k: tld_kind::TLDKind) -> Self {
Self { kind: k }
}
pub fn new_alias(src_type: &str) -> Self {
Self::new(tld_kind::TLDKind::ALIAS {
src_type: src_type.to_string(),
})
}
pub fn new_const(type_name: &str, expr: String) -> Self {
Self::new(tld_kind::TLDKind::CONST {
type_name: type_name.to_string(),
expr,
})
}
pub fn new_function_from_ast(fn_ty: ast::FunctionTypeDef) -> Self {
Self::new(tld_kind::TLDKind::FN {
return_type: fn_ty.return_type,
args: fn_ty.args,
})
}
pub fn new_struct_from_ast(st_ty: ast::StructDef) -> Self {
Self::new(tld_kind::TLDKind::STRUCT {
members: st_ty.members,
})
}
pub fn new_enum(en_ty: ast::EnumDef) -> Self {
Self::new(tld_kind::TLDKind::ENUM {
variants: en_ty
.variants
.iter()
.map(|(name, variant)| (name.to_string(), variant.tag))
.collect(),
})
}
}
| 26.244898 | 71 | 0.540435 |
f8fa2c5f177ea937108c948b92025ea93d1af353
| 1,650 |
#![feature(const_raw_ptr_deref)]
#![feature(const_ptr_offset_from)]
#![feature(core_intrinsics)]
use std::intrinsics::ptr_offset_from;
#[repr(C)]
struct Struct {
data: u8,
field: u8,
}
pub const DIFFERENT_ALLOC: usize = {
let uninit = std::mem::MaybeUninit::<Struct>::uninit();
let base_ptr: *const Struct = &uninit as *const _ as *const Struct;
let uninit2 = std::mem::MaybeUninit::<Struct>::uninit();
let field_ptr: *const Struct = &uninit2 as *const _ as *const Struct;
let offset = unsafe { ptr_offset_from(field_ptr, base_ptr) }; //~ERROR evaluation of constant value failed
//~| cannot compute offset of pointers into different allocations.
offset as usize
};
pub const NOT_PTR: usize = {
unsafe { (42 as *const u8).offset_from(&5u8) as usize }
};
pub const NOT_MULTIPLE_OF_SIZE: isize = {
let data = [5u8, 6, 7];
let base_ptr = data.as_ptr();
let field_ptr = &data[1] as *const u8 as *const u16;
unsafe { ptr_offset_from(field_ptr, base_ptr as *const u16) } //~ERROR evaluation of constant value failed
//~| 1_isize cannot be divided by 2_isize without remainder
};
pub const OFFSET_FROM_NULL: isize = {
let ptr = 0 as *const u8;
unsafe { ptr_offset_from(ptr, ptr) } //~ERROR evaluation of constant value failed
//~| null pointer is not a valid pointer
};
pub const DIFFERENT_INT: isize = { // offset_from with two different integers: like DIFFERENT_ALLOC
let ptr1 = 8 as *const u8;
let ptr2 = 16 as *const u8;
unsafe { ptr_offset_from(ptr2, ptr1) } //~ERROR any use of this value will cause an error
//~| WARN previously accepted
};
fn main() {}
| 33.673469 | 110 | 0.684242 |
5005801d5bb08ca31ac19b7881e3e7e6fa711c11
| 43,573 |
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Integer trait and functions.
//!
//! ## Compatibility
//!
//! The `num-integer` crate is tested for rustc 1.8 and greater.
#![doc(html_root_url = "https://docs.rs/num-integer/0.1")]
#![no_std]
#[cfg(feature = "std")]
extern crate std;
extern crate num_traits as traits;
use core::mem;
use core::ops::Add;
use traits::{Num, Signed, Zero};
mod roots;
pub use roots::Roots;
pub use roots::{cbrt, nth_root, sqrt};
mod average;
pub use average::Average;
pub use average::{average_ceil, average_floor};
pub trait Integer: Sized + Num + PartialOrd + Ord + Eq {
/// Floored integer division.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert!(( 8).div_floor(& 3) == 2);
/// assert!(( 8).div_floor(&-3) == -3);
/// assert!((-8).div_floor(& 3) == -3);
/// assert!((-8).div_floor(&-3) == 2);
///
/// assert!(( 1).div_floor(& 2) == 0);
/// assert!(( 1).div_floor(&-2) == -1);
/// assert!((-1).div_floor(& 2) == -1);
/// assert!((-1).div_floor(&-2) == 0);
/// ~~~
fn div_floor(&self, other: &Self) -> Self;
/// Floored integer modulo, satisfying:
///
/// ~~~
/// # use num_integer::Integer;
/// # let n = 1; let d = 1;
/// assert!(n.div_floor(&d) * d + n.mod_floor(&d) == n)
/// ~~~
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert!(( 8).mod_floor(& 3) == 2);
/// assert!(( 8).mod_floor(&-3) == -1);
/// assert!((-8).mod_floor(& 3) == 1);
/// assert!((-8).mod_floor(&-3) == -2);
///
/// assert!(( 1).mod_floor(& 2) == 1);
/// assert!(( 1).mod_floor(&-2) == -1);
/// assert!((-1).mod_floor(& 2) == 1);
/// assert!((-1).mod_floor(&-2) == -1);
/// ~~~
fn mod_floor(&self, other: &Self) -> Self;
/// Ceiled integer division.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(( 8).div_ceil( &3), 3);
/// assert_eq!(( 8).div_ceil(&-3), -2);
/// assert_eq!((-8).div_ceil( &3), -2);
/// assert_eq!((-8).div_ceil(&-3), 3);
///
/// assert_eq!(( 1).div_ceil( &2), 1);
/// assert_eq!(( 1).div_ceil(&-2), 0);
/// assert_eq!((-1).div_ceil( &2), 0);
/// assert_eq!((-1).div_ceil(&-2), 1);
/// ~~~
fn div_ceil(&self, other: &Self) -> Self {
let (q, r) = self.div_mod_floor(other);
if r.is_zero() {
q
} else {
q + Self::one()
}
}
/// Greatest Common Divisor (GCD).
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(6.gcd(&8), 2);
/// assert_eq!(7.gcd(&3), 1);
/// ~~~
fn gcd(&self, other: &Self) -> Self;
/// Lowest Common Multiple (LCM).
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(7.lcm(&3), 21);
/// assert_eq!(2.lcm(&4), 4);
/// assert_eq!(0.lcm(&0), 0);
/// ~~~
fn lcm(&self, other: &Self) -> Self;
/// Greatest Common Divisor (GCD) and
/// Lowest Common Multiple (LCM) together.
///
/// Potentially more efficient than calling `gcd` and `lcm`
/// individually for identical inputs.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(10.gcd_lcm(&4), (2, 20));
/// assert_eq!(8.gcd_lcm(&9), (1, 72));
/// ~~~
#[inline]
fn gcd_lcm(&self, other: &Self) -> (Self, Self) {
(self.gcd(other), self.lcm(other))
}
/// Greatest common divisor and Bézout coefficients.
///
/// # Examples
///
/// ~~~
/// # extern crate num_integer;
/// # extern crate num_traits;
/// # fn main() {
/// # use num_integer::{ExtendedGcd, Integer};
/// # use num_traits::NumAssign;
/// fn check<A: Copy + Integer + NumAssign>(a: A, b: A) -> bool {
/// let ExtendedGcd { gcd, x, y, .. } = a.extended_gcd(&b);
/// gcd == x * a + y * b
/// }
/// assert!(check(10isize, 4isize));
/// assert!(check(8isize, 9isize));
/// # }
/// ~~~
#[inline]
fn extended_gcd(&self, other: &Self) -> ExtendedGcd<Self>
where
Self: Clone,
{
let mut s = (Self::zero(), Self::one());
let mut t = (Self::one(), Self::zero());
let mut r = (other.clone(), self.clone());
while !r.0.is_zero() {
let q = r.1.clone() / r.0.clone();
let f = |mut r: (Self, Self)| {
mem::swap(&mut r.0, &mut r.1);
r.0 = r.0 - q.clone() * r.1.clone();
r
};
r = f(r);
s = f(s);
t = f(t);
}
if r.1 >= Self::zero() {
ExtendedGcd {
gcd: r.1,
x: s.1,
y: t.1,
}
} else {
ExtendedGcd {
gcd: Self::zero() - r.1,
x: Self::zero() - s.1,
y: Self::zero() - t.1,
}
}
}
/// Greatest common divisor, least common multiple, and Bézout coefficients.
#[inline]
fn extended_gcd_lcm(&self, other: &Self) -> (ExtendedGcd<Self>, Self)
where
Self: Clone + Signed,
{
(self.extended_gcd(other), self.lcm(other))
}
/// Deprecated, use `is_multiple_of` instead.
fn divides(&self, other: &Self) -> bool;
/// Returns `true` if `self` is a multiple of `other`.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(9.is_multiple_of(&3), true);
/// assert_eq!(3.is_multiple_of(&9), false);
/// ~~~
fn is_multiple_of(&self, other: &Self) -> bool;
/// Returns `true` if the number is even.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(3.is_even(), false);
/// assert_eq!(4.is_even(), true);
/// ~~~
fn is_even(&self) -> bool;
/// Returns `true` if the number is odd.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(3.is_odd(), true);
/// assert_eq!(4.is_odd(), false);
/// ~~~
fn is_odd(&self) -> bool;
/// Simultaneous truncated integer division and modulus.
/// Returns `(quotient, remainder)`.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(( 8).div_rem( &3), ( 2, 2));
/// assert_eq!(( 8).div_rem(&-3), (-2, 2));
/// assert_eq!((-8).div_rem( &3), (-2, -2));
/// assert_eq!((-8).div_rem(&-3), ( 2, -2));
///
/// assert_eq!(( 1).div_rem( &2), ( 0, 1));
/// assert_eq!(( 1).div_rem(&-2), ( 0, 1));
/// assert_eq!((-1).div_rem( &2), ( 0, -1));
/// assert_eq!((-1).div_rem(&-2), ( 0, -1));
/// ~~~
fn div_rem(&self, other: &Self) -> (Self, Self);
/// Simultaneous floored integer division and modulus.
/// Returns `(quotient, remainder)`.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(( 8).div_mod_floor( &3), ( 2, 2));
/// assert_eq!(( 8).div_mod_floor(&-3), (-3, -1));
/// assert_eq!((-8).div_mod_floor( &3), (-3, 1));
/// assert_eq!((-8).div_mod_floor(&-3), ( 2, -2));
///
/// assert_eq!(( 1).div_mod_floor( &2), ( 0, 1));
/// assert_eq!(( 1).div_mod_floor(&-2), (-1, -1));
/// assert_eq!((-1).div_mod_floor( &2), (-1, 1));
/// assert_eq!((-1).div_mod_floor(&-2), ( 0, -1));
/// ~~~
fn div_mod_floor(&self, other: &Self) -> (Self, Self) {
(self.div_floor(other), self.mod_floor(other))
}
/// Rounds up to nearest multiple of argument.
///
/// # Notes
///
/// For signed types, `a.next_multiple_of(b) = a.prev_multiple_of(b.neg())`.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(( 16).next_multiple_of(& 8), 16);
/// assert_eq!(( 23).next_multiple_of(& 8), 24);
/// assert_eq!(( 16).next_multiple_of(&-8), 16);
/// assert_eq!(( 23).next_multiple_of(&-8), 16);
/// assert_eq!((-16).next_multiple_of(& 8), -16);
/// assert_eq!((-23).next_multiple_of(& 8), -16);
/// assert_eq!((-16).next_multiple_of(&-8), -16);
/// assert_eq!((-23).next_multiple_of(&-8), -24);
/// ~~~
#[inline]
fn next_multiple_of(&self, other: &Self) -> Self
where
Self: Clone,
{
let m = self.mod_floor(other);
self.clone()
+ if m.is_zero() {
Self::zero()
} else {
other.clone() - m
}
}
/// Rounds down to nearest multiple of argument.
///
/// # Notes
///
/// For signed types, `a.prev_multiple_of(b) = a.next_multiple_of(b.neg())`.
///
/// # Examples
///
/// ~~~
/// # use num_integer::Integer;
/// assert_eq!(( 16).prev_multiple_of(& 8), 16);
/// assert_eq!(( 23).prev_multiple_of(& 8), 16);
/// assert_eq!(( 16).prev_multiple_of(&-8), 16);
/// assert_eq!(( 23).prev_multiple_of(&-8), 24);
/// assert_eq!((-16).prev_multiple_of(& 8), -16);
/// assert_eq!((-23).prev_multiple_of(& 8), -24);
/// assert_eq!((-16).prev_multiple_of(&-8), -16);
/// assert_eq!((-23).prev_multiple_of(&-8), -16);
/// ~~~
#[inline]
fn prev_multiple_of(&self, other: &Self) -> Self
where
Self: Clone,
{
self.clone() - self.mod_floor(other)
}
}
/// Greatest common divisor and Bézout coefficients
///
/// ```no_build
/// let e = isize::extended_gcd(a, b);
/// assert_eq!(e.gcd, e.x*a + e.y*b);
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ExtendedGcd<A> {
pub gcd: A,
pub x: A,
pub y: A,
}
/// Simultaneous integer division and modulus
#[inline]
pub fn div_rem<T: Integer>(x: T, y: T) -> (T, T) {
x.div_rem(&y)
}
/// Floored integer division
#[inline]
pub fn div_floor<T: Integer>(x: T, y: T) -> T {
x.div_floor(&y)
}
/// Floored integer modulus
#[inline]
pub fn mod_floor<T: Integer>(x: T, y: T) -> T {
x.mod_floor(&y)
}
/// Simultaneous floored integer division and modulus
#[inline]
pub fn div_mod_floor<T: Integer>(x: T, y: T) -> (T, T) {
x.div_mod_floor(&y)
}
/// Ceiled integer division
#[inline]
pub fn div_ceil<T: Integer>(x: T, y: T) -> T {
x.div_ceil(&y)
}
/// Calculates the Greatest Common Divisor (GCD) of the number and `other`. The
/// result is always non-negative.
#[inline(always)]
pub fn gcd<T: Integer>(x: T, y: T) -> T {
x.gcd(&y)
}
/// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
#[inline(always)]
pub fn lcm<T: Integer>(x: T, y: T) -> T {
x.lcm(&y)
}
/// Calculates the Greatest Common Divisor (GCD) and
/// Lowest Common Multiple (LCM) of the number and `other`.
#[inline(always)]
pub fn gcd_lcm<T: Integer>(x: T, y: T) -> (T, T) {
x.gcd_lcm(&y)
}
macro_rules! impl_integer_for_isize {
($T:ty, $test_mod:ident) => {
impl Integer for $T {
/// Floored integer division
#[inline]
fn div_floor(&self, other: &Self) -> Self {
// Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
// December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
let (d, r) = self.div_rem(other);
if (r > 0 && *other < 0) || (r < 0 && *other > 0) {
d - 1
} else {
d
}
}
/// Floored integer modulo
#[inline]
fn mod_floor(&self, other: &Self) -> Self {
// Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
// December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
let r = *self % *other;
if (r > 0 && *other < 0) || (r < 0 && *other > 0) {
r + *other
} else {
r
}
}
/// Calculates `div_floor` and `mod_floor` simultaneously
#[inline]
fn div_mod_floor(&self, other: &Self) -> (Self, Self) {
// Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
// December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
let (d, r) = self.div_rem(other);
if (r > 0 && *other < 0) || (r < 0 && *other > 0) {
(d - 1, r + *other)
} else {
(d, r)
}
}
#[inline]
fn div_ceil(&self, other: &Self) -> Self {
let (d, r) = self.div_rem(other);
if (r > 0 && *other > 0) || (r < 0 && *other < 0) {
d + 1
} else {
d
}
}
/// Calculates the Greatest Common Divisor (GCD) of the number and
/// `other`. The result is always non-negative.
#[inline]
fn gcd(&self, other: &Self) -> Self {
// Use Stein's algorithm
let mut m = *self;
let mut n = *other;
if m == 0 || n == 0 {
return (m | n).abs();
}
// find common factors of 2
let shift = (m | n).trailing_zeros();
// The algorithm needs positive numbers, but the minimum value
// can't be represented as a positive one.
// It's also a power of two, so the gcd can be
// calculated by bitshifting in that case
// Assuming two's complement, the number created by the shift
// is positive for all numbers except gcd = abs(min value)
// The call to .abs() causes a panic in debug mode
if m == Self::min_value() || n == Self::min_value() {
return (1 << shift).abs();
}
// guaranteed to be positive now, rest like unsigned algorithm
m = m.abs();
n = n.abs();
// divide n and m by 2 until odd
m >>= m.trailing_zeros();
n >>= n.trailing_zeros();
while m != n {
if m > n {
m -= n;
m >>= m.trailing_zeros();
} else {
n -= m;
n >>= n.trailing_zeros();
}
}
m << shift
}
#[inline]
fn extended_gcd_lcm(&self, other: &Self) -> (ExtendedGcd<Self>, Self) {
let egcd = self.extended_gcd(other);
// should not have to recalculate abs
let lcm = if egcd.gcd.is_zero() {
Self::zero()
} else {
(*self * (*other / egcd.gcd)).abs()
};
(egcd, lcm)
}
/// Calculates the Lowest Common Multiple (LCM) of the number and
/// `other`.
#[inline]
fn lcm(&self, other: &Self) -> Self {
self.gcd_lcm(other).1
}
/// Calculates the Greatest Common Divisor (GCD) and
/// Lowest Common Multiple (LCM) of the number and `other`.
#[inline]
fn gcd_lcm(&self, other: &Self) -> (Self, Self) {
if self.is_zero() && other.is_zero() {
return (Self::zero(), Self::zero());
}
let gcd = self.gcd(other);
// should not have to recalculate abs
let lcm = (*self * (*other / gcd)).abs();
(gcd, lcm)
}
/// Deprecated, use `is_multiple_of` instead.
#[inline]
fn divides(&self, other: &Self) -> bool {
self.is_multiple_of(other)
}
/// Returns `true` if the number is a multiple of `other`.
#[inline]
fn is_multiple_of(&self, other: &Self) -> bool {
if other.is_zero() {
return self.is_zero();
}
*self % *other == 0
}
/// Returns `true` if the number is divisible by `2`
#[inline]
fn is_even(&self) -> bool {
(*self) & 1 == 0
}
/// Returns `true` if the number is not divisible by `2`
#[inline]
fn is_odd(&self) -> bool {
!self.is_even()
}
/// Simultaneous truncated integer division and modulus.
#[inline]
fn div_rem(&self, other: &Self) -> (Self, Self) {
(*self / *other, *self % *other)
}
/// Rounds up to nearest multiple of argument.
#[inline]
fn next_multiple_of(&self, other: &Self) -> Self {
// Avoid the overflow of `MIN % -1`
if *other == -1 {
return *self;
}
let m = Integer::mod_floor(self, other);
*self + if m == 0 { 0 } else { other - m }
}
/// Rounds down to nearest multiple of argument.
#[inline]
fn prev_multiple_of(&self, other: &Self) -> Self {
// Avoid the overflow of `MIN % -1`
if *other == -1 {
return *self;
}
*self - Integer::mod_floor(self, other)
}
}
#[cfg(test)]
mod $test_mod {
use core::mem;
use Integer;
/// Checks that the division rule holds for:
///
/// - `n`: numerator (dividend)
/// - `d`: denominator (divisor)
/// - `qr`: quotient and remainder
#[cfg(test)]
fn test_division_rule((n, d): ($T, $T), (q, r): ($T, $T)) {
assert_eq!(d * q + r, n);
}
#[test]
fn test_div_rem() {
fn test_nd_dr(nd: ($T, $T), qr: ($T, $T)) {
let (n, d) = nd;
let separate_div_rem = (n / d, n % d);
let combined_div_rem = n.div_rem(&d);
assert_eq!(separate_div_rem, qr);
assert_eq!(combined_div_rem, qr);
test_division_rule(nd, separate_div_rem);
test_division_rule(nd, combined_div_rem);
}
test_nd_dr((8, 3), (2, 2));
test_nd_dr((8, -3), (-2, 2));
test_nd_dr((-8, 3), (-2, -2));
test_nd_dr((-8, -3), (2, -2));
test_nd_dr((1, 2), (0, 1));
test_nd_dr((1, -2), (0, 1));
test_nd_dr((-1, 2), (0, -1));
test_nd_dr((-1, -2), (0, -1));
}
#[test]
fn test_div_mod_floor() {
fn test_nd_dm(nd: ($T, $T), dm: ($T, $T)) {
let (n, d) = nd;
let separate_div_mod_floor =
(Integer::div_floor(&n, &d), Integer::mod_floor(&n, &d));
let combined_div_mod_floor = Integer::div_mod_floor(&n, &d);
assert_eq!(separate_div_mod_floor, dm);
assert_eq!(combined_div_mod_floor, dm);
test_division_rule(nd, separate_div_mod_floor);
test_division_rule(nd, combined_div_mod_floor);
}
test_nd_dm((8, 3), (2, 2));
test_nd_dm((8, -3), (-3, -1));
test_nd_dm((-8, 3), (-3, 1));
test_nd_dm((-8, -3), (2, -2));
test_nd_dm((1, 2), (0, 1));
test_nd_dm((1, -2), (-1, -1));
test_nd_dm((-1, 2), (-1, 1));
test_nd_dm((-1, -2), (0, -1));
}
#[test]
fn test_gcd() {
assert_eq!((10 as $T).gcd(&2), 2 as $T);
assert_eq!((10 as $T).gcd(&3), 1 as $T);
assert_eq!((0 as $T).gcd(&3), 3 as $T);
assert_eq!((3 as $T).gcd(&3), 3 as $T);
assert_eq!((56 as $T).gcd(&42), 14 as $T);
assert_eq!((3 as $T).gcd(&-3), 3 as $T);
assert_eq!((-6 as $T).gcd(&3), 3 as $T);
assert_eq!((-4 as $T).gcd(&-2), 2 as $T);
}
#[test]
fn test_gcd_cmp_with_euclidean() {
fn euclidean_gcd(mut m: $T, mut n: $T) -> $T {
while m != 0 {
mem::swap(&mut m, &mut n);
m %= n;
}
n.abs()
}
// gcd(-128, b) = 128 is not representable as positive value
// for i8
for i in -127..127 {
for j in -127..127 {
assert_eq!(euclidean_gcd(i, j), i.gcd(&j));
}
}
// last value
// FIXME: Use inclusive ranges for above loop when implemented
let i = 127;
for j in -127..127 {
assert_eq!(euclidean_gcd(i, j), i.gcd(&j));
}
assert_eq!(127.gcd(&127), 127);
}
#[test]
fn test_gcd_min_val() {
let min = <$T>::min_value();
let max = <$T>::max_value();
let max_pow2 = max / 2 + 1;
assert_eq!(min.gcd(&max), 1 as $T);
assert_eq!(max.gcd(&min), 1 as $T);
assert_eq!(min.gcd(&max_pow2), max_pow2);
assert_eq!(max_pow2.gcd(&min), max_pow2);
assert_eq!(min.gcd(&42), 2 as $T);
assert_eq!((42 as $T).gcd(&min), 2 as $T);
}
#[test]
#[should_panic]
fn test_gcd_min_val_min_val() {
let min = <$T>::min_value();
assert!(min.gcd(&min) >= 0);
}
#[test]
#[should_panic]
fn test_gcd_min_val_0() {
let min = <$T>::min_value();
assert!(min.gcd(&0) >= 0);
}
#[test]
#[should_panic]
fn test_gcd_0_min_val() {
let min = <$T>::min_value();
assert!((0 as $T).gcd(&min) >= 0);
}
#[test]
fn test_lcm() {
assert_eq!((1 as $T).lcm(&0), 0 as $T);
assert_eq!((0 as $T).lcm(&1), 0 as $T);
assert_eq!((1 as $T).lcm(&1), 1 as $T);
assert_eq!((-1 as $T).lcm(&1), 1 as $T);
assert_eq!((1 as $T).lcm(&-1), 1 as $T);
assert_eq!((-1 as $T).lcm(&-1), 1 as $T);
assert_eq!((8 as $T).lcm(&9), 72 as $T);
assert_eq!((11 as $T).lcm(&5), 55 as $T);
}
#[test]
fn test_gcd_lcm() {
use core::iter::once;
for i in once(0)
.chain((1..).take(127).flat_map(|a| once(a).chain(once(-a))))
.chain(once(-128))
{
for j in once(0)
.chain((1..).take(127).flat_map(|a| once(a).chain(once(-a))))
.chain(once(-128))
{
assert_eq!(i.gcd_lcm(&j), (i.gcd(&j), i.lcm(&j)));
}
}
}
#[test]
fn test_extended_gcd_lcm() {
use core::fmt::Debug;
use traits::NumAssign;
use ExtendedGcd;
fn check<A: Copy + Debug + Integer + NumAssign>(a: A, b: A) {
let ExtendedGcd { gcd, x, y, .. } = a.extended_gcd(&b);
assert_eq!(gcd, x * a + y * b);
}
use core::iter::once;
for i in once(0)
.chain((1..).take(127).flat_map(|a| once(a).chain(once(-a))))
.chain(once(-128))
{
for j in once(0)
.chain((1..).take(127).flat_map(|a| once(a).chain(once(-a))))
.chain(once(-128))
{
check(i, j);
let (ExtendedGcd { gcd, .. }, lcm) = i.extended_gcd_lcm(&j);
assert_eq!((gcd, lcm), (i.gcd(&j), i.lcm(&j)));
}
}
}
#[test]
fn test_even() {
assert_eq!((-4 as $T).is_even(), true);
assert_eq!((-3 as $T).is_even(), false);
assert_eq!((-2 as $T).is_even(), true);
assert_eq!((-1 as $T).is_even(), false);
assert_eq!((0 as $T).is_even(), true);
assert_eq!((1 as $T).is_even(), false);
assert_eq!((2 as $T).is_even(), true);
assert_eq!((3 as $T).is_even(), false);
assert_eq!((4 as $T).is_even(), true);
}
#[test]
fn test_odd() {
assert_eq!((-4 as $T).is_odd(), false);
assert_eq!((-3 as $T).is_odd(), true);
assert_eq!((-2 as $T).is_odd(), false);
assert_eq!((-1 as $T).is_odd(), true);
assert_eq!((0 as $T).is_odd(), false);
assert_eq!((1 as $T).is_odd(), true);
assert_eq!((2 as $T).is_odd(), false);
assert_eq!((3 as $T).is_odd(), true);
assert_eq!((4 as $T).is_odd(), false);
}
#[test]
fn test_multiple_of_one_limits() {
for x in &[<$T>::min_value(), <$T>::max_value()] {
for one in &[1, -1] {
assert_eq!(Integer::next_multiple_of(x, one), *x);
assert_eq!(Integer::prev_multiple_of(x, one), *x);
}
}
}
}
};
}
impl_integer_for_isize!(i8, test_integer_i8);
impl_integer_for_isize!(i16, test_integer_i16);
impl_integer_for_isize!(i32, test_integer_i32);
impl_integer_for_isize!(i64, test_integer_i64);
impl_integer_for_isize!(isize, test_integer_isize);
#[cfg(has_i128)]
impl_integer_for_isize!(i128, test_integer_i128);
macro_rules! impl_integer_for_usize {
($T:ty, $test_mod:ident) => {
impl Integer for $T {
/// Unsigned integer division. Returns the same result as `div` (`/`).
#[inline]
fn div_floor(&self, other: &Self) -> Self {
*self / *other
}
/// Unsigned integer modulo operation. Returns the same result as `rem` (`%`).
#[inline]
fn mod_floor(&self, other: &Self) -> Self {
*self % *other
}
#[inline]
fn div_ceil(&self, other: &Self) -> Self {
*self / *other + (0 != *self % *other) as Self
}
/// Calculates the Greatest Common Divisor (GCD) of the number and `other`
#[inline]
fn gcd(&self, other: &Self) -> Self {
// Use Stein's algorithm
let mut m = *self;
let mut n = *other;
if m == 0 || n == 0 {
return m | n;
}
// find common factors of 2
let shift = (m | n).trailing_zeros();
// divide n and m by 2 until odd
m >>= m.trailing_zeros();
n >>= n.trailing_zeros();
while m != n {
if m > n {
m -= n;
m >>= m.trailing_zeros();
} else {
n -= m;
n >>= n.trailing_zeros();
}
}
m << shift
}
#[inline]
fn extended_gcd_lcm(&self, other: &Self) -> (ExtendedGcd<Self>, Self) {
let egcd = self.extended_gcd(other);
// should not have to recalculate abs
let lcm = if egcd.gcd.is_zero() {
Self::zero()
} else {
*self * (*other / egcd.gcd)
};
(egcd, lcm)
}
/// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
#[inline]
fn lcm(&self, other: &Self) -> Self {
self.gcd_lcm(other).1
}
/// Calculates the Greatest Common Divisor (GCD) and
/// Lowest Common Multiple (LCM) of the number and `other`.
#[inline]
fn gcd_lcm(&self, other: &Self) -> (Self, Self) {
if self.is_zero() && other.is_zero() {
return (Self::zero(), Self::zero());
}
let gcd = self.gcd(other);
let lcm = *self * (*other / gcd);
(gcd, lcm)
}
/// Deprecated, use `is_multiple_of` instead.
#[inline]
fn divides(&self, other: &Self) -> bool {
self.is_multiple_of(other)
}
/// Returns `true` if the number is a multiple of `other`.
#[inline]
fn is_multiple_of(&self, other: &Self) -> bool {
if other.is_zero() {
return self.is_zero();
}
*self % *other == 0
}
/// Returns `true` if the number is divisible by `2`.
#[inline]
fn is_even(&self) -> bool {
*self % 2 == 0
}
/// Returns `true` if the number is not divisible by `2`.
#[inline]
fn is_odd(&self) -> bool {
!self.is_even()
}
/// Simultaneous truncated integer division and modulus.
#[inline]
fn div_rem(&self, other: &Self) -> (Self, Self) {
(*self / *other, *self % *other)
}
}
#[cfg(test)]
mod $test_mod {
use core::mem;
use Integer;
#[test]
fn test_div_mod_floor() {
assert_eq!(<$T as Integer>::div_floor(&10, &3), 3 as $T);
assert_eq!(<$T as Integer>::mod_floor(&10, &3), 1 as $T);
assert_eq!(<$T as Integer>::div_mod_floor(&10, &3), (3 as $T, 1 as $T));
assert_eq!(<$T as Integer>::div_floor(&5, &5), 1 as $T);
assert_eq!(<$T as Integer>::mod_floor(&5, &5), 0 as $T);
assert_eq!(<$T as Integer>::div_mod_floor(&5, &5), (1 as $T, 0 as $T));
assert_eq!(<$T as Integer>::div_floor(&3, &7), 0 as $T);
assert_eq!(<$T as Integer>::div_floor(&3, &7), 0 as $T);
assert_eq!(<$T as Integer>::mod_floor(&3, &7), 3 as $T);
assert_eq!(<$T as Integer>::div_mod_floor(&3, &7), (0 as $T, 3 as $T));
}
#[test]
fn test_gcd() {
assert_eq!((10 as $T).gcd(&2), 2 as $T);
assert_eq!((10 as $T).gcd(&3), 1 as $T);
assert_eq!((0 as $T).gcd(&3), 3 as $T);
assert_eq!((3 as $T).gcd(&3), 3 as $T);
assert_eq!((56 as $T).gcd(&42), 14 as $T);
}
#[test]
fn test_gcd_cmp_with_euclidean() {
fn euclidean_gcd(mut m: $T, mut n: $T) -> $T {
while m != 0 {
mem::swap(&mut m, &mut n);
m %= n;
}
n
}
for i in 0..255 {
for j in 0..255 {
assert_eq!(euclidean_gcd(i, j), i.gcd(&j));
}
}
// last value
// FIXME: Use inclusive ranges for above loop when implemented
let i = 255;
for j in 0..255 {
assert_eq!(euclidean_gcd(i, j), i.gcd(&j));
}
assert_eq!(255.gcd(&255), 255);
}
#[test]
fn test_lcm() {
assert_eq!((1 as $T).lcm(&0), 0 as $T);
assert_eq!((0 as $T).lcm(&1), 0 as $T);
assert_eq!((1 as $T).lcm(&1), 1 as $T);
assert_eq!((8 as $T).lcm(&9), 72 as $T);
assert_eq!((11 as $T).lcm(&5), 55 as $T);
assert_eq!((15 as $T).lcm(&17), 255 as $T);
}
#[test]
fn test_gcd_lcm() {
for i in (0..).take(256) {
for j in (0..).take(256) {
assert_eq!(i.gcd_lcm(&j), (i.gcd(&j), i.lcm(&j)));
}
}
}
#[test]
fn test_is_multiple_of() {
assert!((0 as $T).is_multiple_of(&(0 as $T)));
assert!((6 as $T).is_multiple_of(&(6 as $T)));
assert!((6 as $T).is_multiple_of(&(3 as $T)));
assert!((6 as $T).is_multiple_of(&(1 as $T)));
assert!(!(42 as $T).is_multiple_of(&(5 as $T)));
assert!(!(5 as $T).is_multiple_of(&(3 as $T)));
assert!(!(42 as $T).is_multiple_of(&(0 as $T)));
}
#[test]
fn test_even() {
assert_eq!((0 as $T).is_even(), true);
assert_eq!((1 as $T).is_even(), false);
assert_eq!((2 as $T).is_even(), true);
assert_eq!((3 as $T).is_even(), false);
assert_eq!((4 as $T).is_even(), true);
}
#[test]
fn test_odd() {
assert_eq!((0 as $T).is_odd(), false);
assert_eq!((1 as $T).is_odd(), true);
assert_eq!((2 as $T).is_odd(), false);
assert_eq!((3 as $T).is_odd(), true);
assert_eq!((4 as $T).is_odd(), false);
}
}
};
}
impl_integer_for_usize!(u8, test_integer_u8);
impl_integer_for_usize!(u16, test_integer_u16);
impl_integer_for_usize!(u32, test_integer_u32);
impl_integer_for_usize!(u64, test_integer_u64);
impl_integer_for_usize!(usize, test_integer_usize);
#[cfg(has_i128)]
impl_integer_for_usize!(u128, test_integer_u128);
/// An iterator over binomial coefficients.
pub struct IterBinomial<T> {
a: T,
n: T,
k: T,
}
impl<T> IterBinomial<T>
where
T: Integer,
{
/// For a given n, iterate over all binomial coefficients binomial(n, k), for k=0...n.
///
/// Note that this might overflow, depending on `T`. For the primitive
/// integer types, the following n are the largest ones for which there will
/// be no overflow:
///
/// type | n
/// -----|---
/// u8 | 10
/// i8 | 9
/// u16 | 18
/// i16 | 17
/// u32 | 34
/// i32 | 33
/// u64 | 67
/// i64 | 66
///
/// For larger n, `T` should be a bigint type.
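    ///
    /// # Example
    ///
    /// A minimal illustrative use (assuming this type is exported at the crate
    /// root, as in the `num-integer` crate):
    ///
    /// ```
    /// use num_integer::IterBinomial;
    ///
    /// // Row n = 4 of Pascal's triangle: 1, 4, 6, 4, 1.
    /// let row: Vec<u32> = IterBinomial::new(4u32).collect();
    /// assert_eq!(row, vec![1, 4, 6, 4, 1]);
    /// ```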
pub fn new(n: T) -> IterBinomial<T> {
IterBinomial {
k: T::zero(),
a: T::one(),
n: n,
}
}
}
impl<T> Iterator for IterBinomial<T>
where
T: Integer + Clone,
{
type Item = T;
fn next(&mut self) -> Option<T> {
if self.k > self.n {
return None;
}
self.a = if !self.k.is_zero() {
multiply_and_divide(
self.a.clone(),
self.n.clone() - self.k.clone() + T::one(),
self.k.clone(),
)
} else {
T::one()
};
self.k = self.k.clone() + T::one();
Some(self.a.clone())
}
}
/// Calculate r * a / b, avoiding overflows and fractions.
///
/// Assumes that b divides r * a evenly.
fn multiply_and_divide<T: Integer + Clone>(r: T, a: T, b: T) -> T {
// See http://blog.plover.com/math/choose-2.html for the idea.
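    // e.g. r = 6, a = 10, b = 4: g = gcd(6, 4) = 2, so 6/2 * (10 / (4/2)) = 3 * 5 = 15 = 6*10/4.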
let g = gcd(r.clone(), b.clone());
r / g.clone() * (a / (b / g))
}
/// Calculate the binomial coefficient.
///
/// Note that this might overflow, depending on `T`. For the primitive integer
/// types, the following n are the largest ones possible such that there will
/// be no overflow for any k:
///
/// type | n
/// -----|---
/// u8 | 10
/// i8 | 9
/// u16 | 18
/// i16 | 17
/// u32 | 34
/// i32 | 33
/// u64 | 67
/// i64 | 66
///
/// For larger n, consider using a bigint type for `T`.
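///
/// # Example
///
/// An illustrative call (assuming this function is exported at the crate root,
/// as in the `num-integer` crate):
///
/// ```
/// use num_integer::binomial;
///
/// // "5 choose 2" is 10.
/// assert_eq!(binomial(5u32, 2u32), 10);
/// ```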
pub fn binomial<T: Integer + Clone>(mut n: T, k: T) -> T {
// See http://blog.plover.com/math/choose.html for the idea.
if k > n {
return T::zero();
}
if k > n.clone() - k.clone() {
return binomial(n.clone(), n - k);
}
let mut r = T::one();
let mut d = T::one();
loop {
if d > k {
break;
}
r = multiply_and_divide(r, n.clone(), d.clone());
n = n - T::one();
d = d + T::one();
}
r
}
/// Calculate the multinomial coefficient.
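///
/// e.g. `multinomial(&[2u32, 1, 2])` is `30`, matching `5! / (2! * 1! * 2!)`.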
pub fn multinomial<T: Integer + Clone>(k: &[T]) -> T
where
for<'a> T: Add<&'a T, Output = T>,
{
let mut r = T::one();
let mut p = T::zero();
for i in k {
p = p + i;
r = r * binomial(p.clone(), i.clone());
}
r
}
#[test]
fn test_lcm_overflow() {
macro_rules! check {
($t:ty, $x:expr, $y:expr, $r:expr) => {{
let x: $t = $x;
let y: $t = $y;
let o = x.checked_mul(y);
assert!(
o.is_none(),
"sanity checking that {} input {} * {} overflows",
stringify!($t),
x,
y
);
assert_eq!(x.lcm(&y), $r);
assert_eq!(y.lcm(&x), $r);
}};
}
// Original bug (Issue #166)
check!(i64, 46656000000000000, 600, 46656000000000000);
check!(i8, 0x40, 0x04, 0x40);
check!(u8, 0x80, 0x02, 0x80);
check!(i16, 0x40_00, 0x04, 0x40_00);
check!(u16, 0x80_00, 0x02, 0x80_00);
check!(i32, 0x4000_0000, 0x04, 0x4000_0000);
check!(u32, 0x8000_0000, 0x02, 0x8000_0000);
check!(i64, 0x4000_0000_0000_0000, 0x04, 0x4000_0000_0000_0000);
check!(u64, 0x8000_0000_0000_0000, 0x02, 0x8000_0000_0000_0000);
}
#[test]
fn test_iter_binomial() {
macro_rules! check_simple {
($t:ty) => {{
let n: $t = 3;
let expected = [1, 3, 3, 1];
for (b, &e) in IterBinomial::new(n).zip(&expected) {
assert_eq!(b, e);
}
}};
}
check_simple!(u8);
check_simple!(i8);
check_simple!(u16);
check_simple!(i16);
check_simple!(u32);
check_simple!(i32);
check_simple!(u64);
check_simple!(i64);
macro_rules! check_binomial {
($t:ty, $n:expr) => {{
let n: $t = $n;
let mut k: $t = 0;
for b in IterBinomial::new(n) {
assert_eq!(b, binomial(n, k));
k += 1;
}
}};
}
// Check the largest n for which there is no overflow.
check_binomial!(u8, 10);
check_binomial!(i8, 9);
check_binomial!(u16, 18);
check_binomial!(i16, 17);
check_binomial!(u32, 34);
check_binomial!(i32, 33);
check_binomial!(u64, 67);
check_binomial!(i64, 66);
}
#[test]
fn test_binomial() {
macro_rules! check {
($t:ty, $x:expr, $y:expr, $r:expr) => {{
let x: $t = $x;
let y: $t = $y;
let expected: $t = $r;
assert_eq!(binomial(x, y), expected);
if y <= x {
assert_eq!(binomial(x, x - y), expected);
}
}};
}
check!(u8, 9, 4, 126);
check!(u8, 0, 0, 1);
check!(u8, 2, 3, 0);
check!(i8, 9, 4, 126);
check!(i8, 0, 0, 1);
check!(i8, 2, 3, 0);
check!(u16, 100, 2, 4950);
check!(u16, 14, 4, 1001);
check!(u16, 0, 0, 1);
check!(u16, 2, 3, 0);
check!(i16, 100, 2, 4950);
check!(i16, 14, 4, 1001);
check!(i16, 0, 0, 1);
check!(i16, 2, 3, 0);
check!(u32, 100, 2, 4950);
check!(u32, 35, 11, 417225900);
check!(u32, 14, 4, 1001);
check!(u32, 0, 0, 1);
check!(u32, 2, 3, 0);
check!(i32, 100, 2, 4950);
check!(i32, 35, 11, 417225900);
check!(i32, 14, 4, 1001);
check!(i32, 0, 0, 1);
check!(i32, 2, 3, 0);
check!(u64, 100, 2, 4950);
check!(u64, 35, 11, 417225900);
check!(u64, 14, 4, 1001);
check!(u64, 0, 0, 1);
check!(u64, 2, 3, 0);
check!(i64, 100, 2, 4950);
check!(i64, 35, 11, 417225900);
check!(i64, 14, 4, 1001);
check!(i64, 0, 0, 1);
check!(i64, 2, 3, 0);
}
#[test]
fn test_multinomial() {
macro_rules! check_binomial {
($t:ty, $k:expr) => {{
let n: $t = $k.iter().fold(0, |acc, &x| acc + x);
let k: &[$t] = $k;
assert_eq!(k.len(), 2);
assert_eq!(multinomial(k), binomial(n, k[0]));
}};
}
check_binomial!(u8, &[4, 5]);
check_binomial!(i8, &[4, 5]);
check_binomial!(u16, &[2, 98]);
check_binomial!(u16, &[4, 10]);
check_binomial!(i16, &[2, 98]);
check_binomial!(i16, &[4, 10]);
check_binomial!(u32, &[2, 98]);
check_binomial!(u32, &[11, 24]);
check_binomial!(u32, &[4, 10]);
check_binomial!(i32, &[2, 98]);
check_binomial!(i32, &[11, 24]);
check_binomial!(i32, &[4, 10]);
check_binomial!(u64, &[2, 98]);
check_binomial!(u64, &[11, 24]);
check_binomial!(u64, &[4, 10]);
check_binomial!(i64, &[2, 98]);
check_binomial!(i64, &[11, 24]);
check_binomial!(i64, &[4, 10]);
macro_rules! check_multinomial {
($t:ty, $k:expr, $r:expr) => {{
let k: &[$t] = $k;
let expected: $t = $r;
assert_eq!(multinomial(k), expected);
}};
}
check_multinomial!(u8, &[2, 1, 2], 30);
check_multinomial!(u8, &[2, 3, 0], 10);
check_multinomial!(i8, &[2, 1, 2], 30);
check_multinomial!(i8, &[2, 3, 0], 10);
check_multinomial!(u16, &[2, 1, 2], 30);
check_multinomial!(u16, &[2, 3, 0], 10);
check_multinomial!(i16, &[2, 1, 2], 30);
check_multinomial!(i16, &[2, 3, 0], 10);
check_multinomial!(u32, &[2, 1, 2], 30);
check_multinomial!(u32, &[2, 3, 0], 10);
check_multinomial!(i32, &[2, 1, 2], 30);
check_multinomial!(i32, &[2, 3, 0], 10);
check_multinomial!(u64, &[2, 1, 2], 30);
check_multinomial!(u64, &[2, 3, 0], 10);
check_multinomial!(i64, &[2, 1, 2], 30);
check_multinomial!(i64, &[2, 3, 0], 10);
check_multinomial!(u64, &[], 1);
check_multinomial!(u64, &[0], 1);
check_multinomial!(u64, &[12345], 1);
}
| 31.415285 | 98 | 0.444794 |
5b8450b807aa054d92d41f3418334202ac7af04e
| 3,897 |
use std::sync::{Arc, Mutex};
use arrow::array::StringArray;
use arrow::datatypes::SchemaRef;
use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;
use datafusion::execution::context::ExecutionContext;
use datafusion::sql::parser::Statement as DFStatement;
use sqlparser::ast::{ColumnDef, DataType as SQLDataType, Ident};
use sqlparser::ast::{ObjectName, SetExpr, Statement as SQLStatement, Value, Expr as SQLExpr};
use crate::core::core_def::StmtCacheDef;
use crate::core::global_context::GlobalContext;
use crate::core::output::{CoreOutput, ResultSet, StmtPrepare};
use crate::core::session_context::SessionContext;
use crate::core::stmt_context::StmtContext;
use crate::meta::meta_def::{SparrowColumnDef, TableDef};
use crate::meta::meta_util;
use crate::mysql::error::MysqlResult;
use crate::mysql::metadata::Column;
use crate::util::convert::ToObjectName;
pub struct ComStmtPrepare {
global_context: Arc<Mutex<GlobalContext>>,
session_context: SessionContext,
execution_context: ExecutionContext,
}
impl ComStmtPrepare {
pub fn new(
global_context: Arc<Mutex<GlobalContext>>,
session_context: SessionContext,
execution_context: ExecutionContext,
) -> Self {
Self {
global_context,
session_context,
execution_context,
}
}
pub async fn execute(&mut self, stmt_context: &mut StmtContext, df_statements: Vec<DFStatement>) -> MysqlResult<StmtPrepare> {
let mut stmt_cache = StmtCacheDef::new(df_statements.clone());
let schema_name = "schema".to_object_name();
let table_name = "table".to_object_name();
let mut params = vec![];
        // Walk the parsed statements; for an INSERT ... VALUES, synthesize one
        // placeholder parameter column per string literal found in the value rows.
        for sql_statement in df_statements.clone() {
match sql_statement.clone() {
DFStatement::Statement(statement) => match statement {
SQLStatement::Insert { source, .. } => match &source.body {
SetExpr::Values(values) => {
for row in values.0.iter() {
for v in row {
match v {
SQLExpr::Value(Value::OnlyString(_)) => {
let column_def = ColumnDef {
name: "name".into(),
data_type: SQLDataType::Char(Some(100)),
collation: None,
options: vec![],
};
let sparrow_column_def =
SparrowColumnDef::new(1, 1, column_def);
let column = Column::new(
schema_name.clone(),
table_name.clone(),
&sparrow_column_def,
);
params.push(column);
}
_ => {},
}
}
}
}
_ => {}
},
_ => {}
},
_ => {}
};
}
stmt_cache.set_num_params(params.len());
stmt_context.add_stmt(stmt_cache);
let stmt_id = stmt_context.stmt_id;
let stmt_prepare = StmtPrepare::new(stmt_id, vec![], params);
Ok(stmt_prepare)
}
}
| 40.59375 | 131 | 0.467026 |
1eb77c531835a165f898ba04d35ab50b64ae05bb
| 118 |
#[macro_export]
macro_rules! implicit_hasher_fn {
() => {
pub fn f(input: &HashMap<u32, u32>) {}
};
}
| 16.857143 | 46 | 0.550847 |
f58367f3eecdf256cfc74d0725acbeaa256d9d5d
| 1,874 |
// Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
use std::net::SocketAddr;
use common_exception::ErrorCode;
use common_exception::Result;
use common_runtime::tokio;
use metrics::counter;
use warp::http::Uri;
use warp::hyper::Client;
use crate::metrics::MetricService;
pub static METRIC_TEST: &str = "metrics.test";
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_metrics_server() -> Result<()> {
let mut service = MetricService::create();
let listening = "0.0.0.0:0".parse::<SocketAddr>()?;
let listening = service.start(listening).await?;
assert_eq!(do_get(listening).await?.find("metrics_test 1"), None);
counter!(METRIC_TEST, 1);
assert!(do_get(listening).await?.find("metrics_test 1").is_some());
Ok(())
}
async fn do_get(address: SocketAddr) -> Result<String> {
let uri = match format!("http://{}", address).parse::<Uri>() {
Ok(uri) => uri,
Err(error) => {
return Err(ErrorCode::LogicalError(format!(
"Cannot parse uri {}",
error
)))
}
};
let client = Client::new();
match client.get(uri).await {
Err(error) => Err(ErrorCode::LogicalError(format!(
"Cannot request uri {}",
error
))),
Ok(mut response) => match warp::hyper::body::to_bytes(response.body_mut()).await {
Err(error) => Err(ErrorCode::LogicalError(format!(
"Cannot parse response body {}",
error
))),
Ok(body) => match std::str::from_utf8(body.as_ref()) {
Ok(str) => Ok(str.to_string()),
Err(error) => Err(ErrorCode::LogicalError(format!(
"Cannot from utf8 {}",
error
))),
},
},
}
}
| 30.225806 | 90 | 0.561366 |
1c75636cb052d39304e06224a520c1b7eecfa5a0
| 10,435 |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A module for working with borrowed data.
#![stable(feature = "rust1", since = "1.0.0")]
use core::clone::Clone;
use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
use core::convert::AsRef;
use core::hash::{Hash, Hasher};
use core::marker::Sized;
use core::ops::Deref;
use core::option::Option;
use fmt;
use alloc::{boxed, rc, arc};
use self::Cow::*;
/// A trait for borrowing data.
///
/// In general, there may be several ways to "borrow" a piece of data. The
/// typical ways of borrowing a type `T` are `&T` (a shared borrow) and `&mut T`
/// (a mutable borrow). But types like `Vec<T>` provide additional kinds of
/// borrows: the borrowed slices `&[T]` and `&mut [T]`.
///
/// When writing generic code, it is often desirable to abstract over all ways
/// of borrowing data from a given type. That is the role of the `Borrow`
/// trait: if `T: Borrow<U>`, then `&U` can be borrowed from `&T`. A given
/// type can be borrowed as multiple different types. In particular, `Vec<T>:
/// Borrow<Vec<T>>` and `Vec<T>: Borrow<[T]>`.
///
/// `Borrow` is very similar to, but different than, `AsRef`. See
/// [the book][book] for more.
///
/// [book]: ../../book/borrow-and-asref.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Borrow<Borrowed: ?Sized> {
/// Immutably borrows from an owned value.
///
/// # Examples
///
/// ```
/// use std::borrow::Borrow;
///
/// fn check<T: Borrow<str>>(s: T) {
/// assert_eq!("Hello", s.borrow());
/// }
///
/// let s = "Hello".to_string();
///
/// check(s);
///
/// let s = "Hello";
///
/// check(s);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn borrow(&self) -> &Borrowed;
}
/// A trait for mutably borrowing data.
///
/// Similar to `Borrow`, but for mutable borrows.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait BorrowMut<Borrowed: ?Sized> : Borrow<Borrowed> {
/// Mutably borrows from an owned value.
///
/// # Examples
///
/// ```
/// use std::borrow::BorrowMut;
///
/// fn check<T: BorrowMut<[i32]>>(mut v: T) {
/// assert_eq!(&mut [1, 2, 3], v.borrow_mut());
/// }
///
/// let v = vec![1, 2, 3];
///
/// check(v);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn borrow_mut(&mut self) -> &mut Borrowed;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for T {
fn borrow(&self) -> &T { self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> BorrowMut<T> for T {
fn borrow_mut(&mut self) -> &mut T { self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Borrow<T> for &'a T {
fn borrow(&self) -> &T { &**self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Borrow<T> for &'a mut T {
fn borrow(&self) -> &T { &**self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> BorrowMut<T> for &'a mut T {
fn borrow_mut(&mut self) -> &mut T { &mut **self }
}
impl<T: ?Sized> Borrow<T> for boxed::Box<T> {
fn borrow(&self) -> &T { &**self }
}
impl<T: ?Sized> BorrowMut<T> for boxed::Box<T> {
fn borrow_mut(&mut self) -> &mut T { &mut **self }
}
impl<T: ?Sized> Borrow<T> for rc::Rc<T> {
fn borrow(&self) -> &T { &**self }
}
impl<T: ?Sized> Borrow<T> for arc::Arc<T> {
fn borrow(&self) -> &T { &**self }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B> where B: ToOwned, <B as ToOwned>::Owned: 'a {
fn borrow(&self) -> &B {
&**self
}
}
/// A generalization of `Clone` to borrowed data.
///
/// Some types make it possible to go from borrowed to owned, usually by
/// implementing the `Clone` trait. But `Clone` works only for going from `&T`
/// to `T`. The `ToOwned` trait generalizes `Clone` to construct owned data
/// from any borrow of a given type.
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ToOwned {
#[stable(feature = "rust1", since = "1.0.0")]
type Owned: Borrow<Self>;
/// Creates owned data from borrowed data, usually by cloning.
#[stable(feature = "rust1", since = "1.0.0")]
fn to_owned(&self) -> Self::Owned;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ToOwned for T where T: Clone {
type Owned = T;
fn to_owned(&self) -> T { self.clone() }
}
/// A clone-on-write smart pointer.
///
/// The type `Cow` is a smart pointer providing clone-on-write functionality: it
/// can enclose and provide immutable access to borrowed data, and clone the
/// data lazily when mutation or ownership is required. The type is designed to
/// work with general borrowed data via the `Borrow` trait.
///
/// `Cow` implements `Deref`, which means that you can call
/// non-mutating methods directly on the data it encloses. If mutation
/// is desired, `to_mut` will obtain a mutable reference to an owned
/// value, cloning if necessary.
///
/// # Examples
///
/// ```
/// use std::borrow::Cow;
///
/// fn abs_all(input: &mut Cow<[i32]>) {
/// for i in 0..input.len() {
/// let v = input[i];
/// if v < 0 {
/// // clones into a vector the first time (if not already owned)
/// input.to_mut()[i] = -v;
/// }
/// }
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Cow<'a, B: ?Sized + 'a> where B: ToOwned {
/// Borrowed data.
#[stable(feature = "rust1", since = "1.0.0")]
Borrowed(&'a B),
/// Owned data.
#[stable(feature = "rust1", since = "1.0.0")]
Owned(<B as ToOwned>::Owned)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> Clone for Cow<'a, B> where B: ToOwned {
fn clone(&self) -> Cow<'a, B> {
match *self {
Borrowed(b) => Borrowed(b),
Owned(ref o) => {
let b: &B = o.borrow();
Owned(b.to_owned())
},
}
}
}
impl<'a, B: ?Sized> Cow<'a, B> where B: ToOwned {
/// Acquires a mutable reference to the owned form of the data.
///
/// Clones the data if it is not already owned.
///
/// # Examples
///
/// ```
/// use std::borrow::Cow;
///
/// let mut cow: Cow<[_]> = Cow::Owned(vec![1, 2, 3]);
///
/// let hello = cow.to_mut();
///
/// assert_eq!(hello, &[1, 2, 3]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned {
match *self {
Borrowed(borrowed) => {
*self = Owned(borrowed.to_owned());
self.to_mut()
}
Owned(ref mut owned) => owned
}
}
/// Extracts the owned data.
///
/// Clones the data if it is not already owned.
///
/// # Examples
///
/// ```
/// use std::borrow::Cow;
///
/// let cow: Cow<[_]> = Cow::Owned(vec![1, 2, 3]);
///
/// let hello = cow.into_owned();
///
/// assert_eq!(vec![1, 2, 3], hello);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_owned(self) -> <B as ToOwned>::Owned {
match self {
Borrowed(borrowed) => borrowed.to_owned(),
Owned(owned) => owned
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> Deref for Cow<'a, B> where B: ToOwned {
type Target = B;
fn deref(&self) -> &B {
match *self {
Borrowed(borrowed) => borrowed,
Owned(ref owned) => owned.borrow()
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> Eq for Cow<'a, B> where B: Eq + ToOwned {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> Ord for Cow<'a, B> where B: Ord + ToOwned {
#[inline]
fn cmp(&self, other: &Cow<'a, B>) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, 'b, B: ?Sized, C: ?Sized> PartialEq<Cow<'b, C>> for Cow<'a, B> where
B: PartialEq<C> + ToOwned, C: ToOwned,
{
#[inline]
fn eq(&self, other: &Cow<'b, C>) -> bool {
PartialEq::eq(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> PartialOrd for Cow<'a, B> where B: PartialOrd + ToOwned,
{
#[inline]
fn partial_cmp(&self, other: &Cow<'a, B>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> fmt::Debug for Cow<'a, B> where
B: fmt::Debug + ToOwned,
<B as ToOwned>::Owned: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Borrowed(ref b) => fmt::Debug::fmt(b, f),
Owned(ref o) => fmt::Debug::fmt(o, f),
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> fmt::Display for Cow<'a, B> where
B: fmt::Display + ToOwned,
<B as ToOwned>::Owned: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Borrowed(ref b) => fmt::Display::fmt(b, f),
Owned(ref o) => fmt::Display::fmt(o, f),
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> Hash for Cow<'a, B> where B: Hash + ToOwned
{
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&**self, state)
}
}
/// Trait for moving into a `Cow`.
#[unstable(feature = "into_cow", reason = "may be replaced by `convert::Into`")]
pub trait IntoCow<'a, B: ?Sized> where B: ToOwned {
/// Moves `self` into `Cow`
fn into_cow(self) -> Cow<'a, B>;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> IntoCow<'a, B> for Cow<'a, B> where B: ToOwned {
fn into_cow(self) -> Cow<'a, B> {
self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized + ToOwned> AsRef<T> for Cow<'a, T> {
fn as_ref(&self) -> &T {
self
}
}
| 28.746556 | 90 | 0.547676 |
3379ab53c3fb1fde1631fd7df7073ae3110a1502
| 3,001 |
use crate::init;
use chrono::prelude::*;
use lightspeed_core::error::LightSpeedError;
use lightspeed_core::model::language::Language;
use lightspeed_hash::dto::{ValidationCodeRequestDto, VerifyValidationCodeRequestDto};
#[tokio::test]
async fn should_generate_validation_code() -> Result<(), LightSpeedError> {
// Arrange
let hash_module = init().await;
let validation_code_service = &hash_module.validation_code_service;
let validation_code_validity_seconds = 100;
let validation_code_request = ValidationCodeRequestDto {
to_be_validated: "123456789".to_owned(),
language: Some(Language::IT),
code: format!("{}", Utc::now().timestamp_millis()),
validation_code_validity_seconds,
};
// Act
let validation_code_response = validation_code_service.generate_validation_code(validation_code_request.clone())?;
assert_eq!(validation_code_request.to_be_validated, validation_code_response.to_be_validated);
assert_eq!(
validation_code_response.created_ts_seconds + validation_code_validity_seconds,
validation_code_response.expiration_ts_seconds
);
// Verify valid code
let verify_code_request = VerifyValidationCodeRequestDto {
data: validation_code_response.clone(),
code: validation_code_request.code.clone(),
};
let verify_code_response = validation_code_service.verify_validation_code(verify_code_request.clone())?;
assert!(verify_code_response.code_valid);
assert_eq!(verify_code_request.data.to_be_validated, verify_code_response.to_be_validated);
// Use bad code
let mut bad_verify_code = verify_code_request.clone();
bad_verify_code.code = "abced".to_owned();
assert!(!validation_code_service.verify_validation_code(bad_verify_code.clone()).unwrap().code_valid);
// tampered to_be_validated data
let mut bad_verify_code = verify_code_request.clone();
bad_verify_code.data.to_be_validated = "2233223322".to_owned();
assert!(!validation_code_service.verify_validation_code(bad_verify_code.clone()).unwrap().code_valid);
// tampered created_ts_seconds
let mut bad_verify_code = verify_code_request.clone();
bad_verify_code.data.created_ts_seconds = verify_code_request.data.created_ts_seconds + 1;
assert!(!validation_code_service.verify_validation_code(bad_verify_code.clone()).unwrap().code_valid);
// tampered expiration_ts_seconds
let mut bad_verify_code = verify_code_request.clone();
bad_verify_code.data.expiration_ts_seconds = verify_code_request.data.expiration_ts_seconds + 1;
assert!(!validation_code_service.verify_validation_code(bad_verify_code.clone()).unwrap().code_valid);
// tampered token_hash number
let mut bad_verify_code = verify_code_request.clone();
bad_verify_code.data.token_hash = format!("{}1", verify_code_request.data.token_hash);
assert!(!validation_code_service.verify_validation_code(bad_verify_code.clone()).unwrap().code_valid);
Ok(())
}
| 44.791045 | 118 | 0.767744 |
48d70355ab6661505ee1338f85693155b10f14e6
| 1,507 |
extern crate image;
fn main() {
    let width: u32 = 800;
    let height: u32 = 600;
    let mut pixels = vec![0u8; (width * height * 4) as usize];
    unsafe {
        render(&mut pixels, width, height, 200);
}
image::save_buffer("image.png", &pixels, width, height, image::ColorType::Rgba8).unwrap()
}
const MAX_ITERATIONS: i32 = 60;
const RADIUS: f64 = 0.7885;
// Render one frame of a Julia set using the escape-time algorithm; `angle` picks
// the constant c = RADIUS * (cos a + i sin a) on a circle in the complex plane.
pub unsafe fn render(pixels: &mut [u8], width: u32, height: u32, angle: i32) {
let total = width * height;
let mid_x: f64 = width as f64 / 2.0;
let mid_y: f64 = height as f64 / 2.0;
let a: f64 = (angle as f64 % 360.0).to_radians();
let c_x: f64 = RADIUS * a.cos();
let c_y: f64 = RADIUS * a.sin();
let val: f64 = 255 as f64 / (MAX_ITERATIONS as f64);
    for idx in 0..total {
        let x = (idx % width) as i32;
        let y = (idx / width) as i32;
        // Map the pixel to a starting point z in roughly [-2, 2] x [-2, 2].
        let mut zx: f64 = (2 * x - width as i32) as f64 / mid_x;
        let mut zy: f64 = (2 * y - height as i32) as f64 / mid_y;
        // Iterate z <- z^2 + c until |z| exceeds 2 or the iteration budget runs out.
        let mut i = 0;
while i < MAX_ITERATIONS && (zx * zx + zy * zy) <= 4.0 {
let tmp_zx: f64 = zx * zx - zy * zy + c_x;
zy = 2.0 * zx * zy + c_y;
zx = tmp_zx;
i += 1;
}
let p_idx = 4 * idx as usize;
pixels[p_idx] = (x * 255 / width as i32) as u8;
pixels[p_idx + 1] = (val * i as f64) as u8;
pixels[p_idx + 2] = (y * 255 / height as i32) as u8;
pixels[p_idx + 3] = 255;
}
}
| 30.755102 | 93 | 0.523557 |
010530a6f76ad14d062a88c421d9098a1cff52db
| 1,116 |
use font_kit::family_name::FamilyName;
use font_kit::handle::Handle;
use font_kit::properties::Properties;
use font_kit::source::SystemSource;
pub fn load(name: &str) -> Option<&'static [u8]> {
let mut families = Vec::new();
for family in name.split(',') {
let family = family.replace('\'', "");
let family = family.trim();
families.push(match family {
"serif" => FamilyName::Serif,
"sans-serif" => FamilyName::SansSerif,
"monospace" => FamilyName::Monospace,
"cursive" => FamilyName::Cursive,
"fantasy" => FamilyName::Fantasy,
_ => FamilyName::Title(family.to_string()),
});
}
let properties = Properties::default();
let source = SystemSource::new();
let handle = source.select_best_match(&families, &properties).unwrap();
if let Handle::Path { ref path, .. } = handle {
        let contents = std::fs::read(path).unwrap();
        // Leak the buffer so the font bytes live for the rest of the program,
        // which is what gives us the promised `&'static [u8]`.
        let contents = Box::new(contents);
        let contents = Box::leak(contents);
} else {
None
}
}
| 31.885714 | 75 | 0.583333 |
116e3547c3b40876671c603b25977ff4cb5d6e68
| 197 |
use dusk_engine::game::{GameWindowBuilder, Game};
fn main() {
let mut game = Game::new(GameWindowBuilder::new()
.size(600, 400)
.title("Some game"));
game.run();
}
| 21.888889 | 54 | 0.568528 |
ef688a6c434d3363e89e5fc841a7fb148c306ad7
| 8,142 |
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use move_binary_format::{compatibility::Compatibility, normalized::Module, CompiledModule};
use move_command_line_common::files::{
extension_equals, find_filenames, MOVE_COMPILED_EXTENSION, MOVE_ERROR_DESC_EXTENSION,
};
use move_core_types::language_storage::ModuleId;
use move_lang::compiled_unit::{CompiledUnit, NamedCompiledModule};
use move_package::{BuildConfig, ModelConfig};
use std::{
collections::BTreeMap,
fs::{create_dir_all, remove_dir_all, File},
io::Read,
path::{Path, PathBuf},
};
use structopt::*;
/// Options to configure the generation of a release.
#[derive(Debug, StructOpt, Clone)]
#[structopt(
name = "Diem Frameworks",
about = "Release CLI for Diem frameworks",
author = "The Diem Core Contributors",
rename_all = "kebab-case"
)]
pub struct ReleaseOptions {
#[structopt(long = "no-check-linking-layout-compatibility")]
pub check_layout_compatibility: bool,
#[structopt(long = "no-build-docs")]
pub build_docs: bool,
#[structopt(long = "with-diagram")]
pub with_diagram: bool,
#[structopt(long = "no-script-builder")]
pub script_builder: bool,
#[structopt(long = "no-script-abi")]
pub script_abis: bool,
#[structopt(long = "no-errmap")]
pub errmap: bool,
#[structopt(long = "package", default_value = "DPN", parse(from_os_str))]
pub package: PathBuf,
#[structopt(long = "output", default_value = "current", parse(from_os_str))]
pub output: PathBuf,
}
impl Default for ReleaseOptions {
fn default() -> Self {
Self {
build_docs: true,
package: PathBuf::from("DPN"),
check_layout_compatibility: false,
with_diagram: false,
script_abis: true,
script_builder: true,
errmap: true,
output: PathBuf::from("current"),
}
}
}
impl ReleaseOptions {
pub fn create_release(&self) {
let output_path = self
.package
.join("releases")
.join("artifacts")
.join(&self.output);
let mut old_module_apis = None;
if !self.check_layout_compatibility {
old_module_apis = extract_old_apis(&output_path);
}
if output_path.exists() {
std::fs::remove_dir_all(&output_path).unwrap();
}
std::fs::create_dir_all(output_path.parent().unwrap()).unwrap();
let build_config = move_package::BuildConfig {
generate_docs: !self.build_docs,
generate_abis: !self.script_abis,
install_dir: Some(output_path.clone()),
..Default::default()
};
let package_path = Path::new(std::env!("CARGO_MANIFEST_DIR")).join(&self.package);
let compiled_package = build_config
.clone()
.compile_package(&package_path, &mut std::io::stdout())
.unwrap();
if !self.check_layout_compatibility {
println!("Checking layout compatibility");
if let Some(old_module_apis) = old_module_apis {
let new_modules = compiled_package
.transitive_compiled_units()
.into_iter()
.filter_map(|unit| match unit {
CompiledUnit::Module(NamedCompiledModule { module, .. }) => Some(module),
CompiledUnit::Script(_) => None,
});
check_api_compatibility(&old_module_apis, new_modules);
}
}
if !self.errmap {
println!("Generating error map");
generate_error_map(&package_path, &output_path, build_config)
}
if !self.script_builder {
println!("Generating script builders");
generate_script_builder(
&output_path.join("transaction_script_builder.rs"),
&[&output_path, Path::new("DPN/releases/legacy/script_abis")],
)
}
}
}
fn recreate_dir(dir_path: impl AsRef<Path>) {
let dir_path = dir_path.as_ref();
remove_dir_all(&dir_path).unwrap_or(());
create_dir_all(&dir_path).unwrap();
}
fn generate_error_map(package_path: &Path, output_path: &Path, build_config: BuildConfig) {
let mut errmap_path = output_path
.join("error_description")
.join("error_description");
errmap_path.set_extension(MOVE_ERROR_DESC_EXTENSION);
recreate_dir(&errmap_path.parent().unwrap());
let errmap_options = move_errmapgen::ErrmapOptions {
output_file: errmap_path.to_string_lossy().to_string(),
..Default::default()
};
let model = build_config
.move_model_for_package(
package_path,
ModelConfig {
target_filter: None,
all_files_as_targets: true,
},
)
.unwrap();
let mut emapgen = move_errmapgen::ErrmapGen::new(&model, &errmap_options);
emapgen.gen();
emapgen.save_result();
}
fn generate_script_builder(output_path: impl AsRef<Path>, abi_paths: &[&Path]) {
let output_path = output_path.as_ref();
let abis: Vec<_> = abi_paths
.iter()
.flat_map(|path| {
transaction_builder_generator::read_abis(&[path])
.unwrap_or_else(|_| panic!("Failed to read ABIs at {}", path.to_string_lossy()))
})
.collect();
{
let mut file = std::fs::File::create(output_path)
.expect("Failed to open file for Rust script build generation");
transaction_builder_generator::rust::output(&mut file, &abis, /* local types */ true)
.expect("Failed to generate Rust builders for Diem");
}
std::process::Command::new("rustfmt")
.arg("--config")
.arg("imports_granularity=crate")
.arg(output_path)
.status()
.expect("Failed to run rustfmt on generated code");
}
fn extract_old_apis(package_path: impl AsRef<Path>) -> Option<BTreeMap<ModuleId, Module>> {
let modules_path = package_path.as_ref();
if !modules_path.is_dir() {
eprintln!(
"Warning: failed to extract old module APIs -- path \"{}\" is not a directory",
modules_path.to_string_lossy()
);
return None;
}
let mut old_module_apis = BTreeMap::new();
let files = find_filenames(&[modules_path], |p| {
extension_equals(p, MOVE_COMPILED_EXTENSION)
})
.unwrap();
for f in files {
let mut bytes = Vec::new();
File::open(f)
.expect("Failed to open module bytecode file")
.read_to_end(&mut bytes)
.expect("Failed to read module bytecode file");
let m = CompiledModule::deserialize(&bytes).expect("Failed to deserialize module bytecode");
old_module_apis.insert(m.self_id(), Module::new(&m));
}
Some(old_module_apis)
}
fn check_api_compatibility<I>(old: &BTreeMap<ModuleId, Module>, new: I)
where
I: IntoIterator<Item = CompiledModule>,
{
let mut is_linking_layout_compatible = true;
for module in new.into_iter() {
// extract new linking/layout API and check compatibility with old
let new_module_id = module.self_id();
if let Some(old_api) = old.get(&new_module_id) {
let new_api = Module::new(&module);
let compatibility = Compatibility::check(old_api, &new_api);
if is_linking_layout_compatible && !compatibility.is_fully_compatible() {
println!("Found linking/layout-incompatible change:");
is_linking_layout_compatible = false
}
if !compatibility.struct_and_function_linking {
eprintln!("Linking API for structs/functions of module {} has changed. Need to redeploy all dependent modules.", new_module_id.name())
}
if !compatibility.struct_layout {
eprintln!("Layout API for structs of module {} has changed. Need to do a data migration of published structs", new_module_id.name())
}
}
}
}
| 34.944206 | 150 | 0.614591 |
0a02998e5a6702a77021839d6cd3148fd0371731
| 6,663 |
use std::{convert::TryFrom, sync::Arc};
use futures::FutureExt;
use indoc::indoc;
use serde::{Deserialize, Serialize};
use tower::ServiceBuilder;
use vector_core::config::proxy::ProxyConfig;
use super::{
service::LogApiRetry,
sink::{DatadogLogsJsonEncoding, LogSinkBuilder},
};
use crate::{
config::{AcknowledgementsConfig, GenerateConfig, Input, SinkConfig, SinkContext},
http::HttpClient,
sinks::{
datadog::{get_api_validate_endpoint, healthcheck, logs::service::LogApiService, Region},
util::{
encoding::EncodingConfigFixed, service::ServiceBuilderExt, BatchConfig, Compression,
SinkBatchSettings, TowerRequestConfig,
},
Healthcheck, VectorSink,
},
tls::{MaybeTlsSettings, TlsEnableableConfig},
};
// The Datadog API has a hard limit of 5MB for uncompressed payloads. Above this
// threshold the API will toss results. We previously serialized Events as they
// came in -- a very CPU intensive process -- and to avoid that we only batch up
// to 750KB below the max and then build our payloads. This does mean that in
// some situations we'll kick out over-large payloads -- for instance, a string
// of escaped double-quotes -- but we believe this should be very rare in
// practice.
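// Concretely: BATCH_GOAL_BYTES = MAX_PAYLOAD_BYTES - 750_000 = 4_250_000.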
pub const MAX_PAYLOAD_BYTES: usize = 5_000_000;
pub const BATCH_GOAL_BYTES: usize = 4_250_000;
pub const BATCH_MAX_EVENTS: usize = 1_000;
pub const BATCH_DEFAULT_TIMEOUT_SECS: f64 = 5.0;
#[derive(Clone, Copy, Debug, Default)]
pub struct DatadogLogsDefaultBatchSettings;
impl SinkBatchSettings for DatadogLogsDefaultBatchSettings {
const MAX_EVENTS: Option<usize> = Some(BATCH_MAX_EVENTS);
const MAX_BYTES: Option<usize> = Some(BATCH_GOAL_BYTES);
const TIMEOUT_SECS: f64 = BATCH_DEFAULT_TIMEOUT_SECS;
}
#[derive(Deserialize, Serialize, Default, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub(crate) struct DatadogLogsConfig {
pub(crate) endpoint: Option<String>,
// Deprecated, replaced by the site option
pub region: Option<Region>,
pub site: Option<String>,
// Deprecated name
#[serde(alias = "api_key")]
pub default_api_key: String,
#[serde(
skip_serializing_if = "crate::serde::skip_serializing_if_default",
default
)]
pub encoding: EncodingConfigFixed<DatadogLogsJsonEncoding>,
pub tls: Option<TlsEnableableConfig>,
#[serde(default)]
pub compression: Option<Compression>,
#[serde(default)]
pub batch: BatchConfig<DatadogLogsDefaultBatchSettings>,
#[serde(default)]
pub request: TowerRequestConfig,
#[serde(
default,
deserialize_with = "crate::serde::bool_or_struct",
skip_serializing_if = "crate::serde::skip_serializing_if_default"
)]
pub acknowledgements: AcknowledgementsConfig,
}
impl GenerateConfig for DatadogLogsConfig {
fn generate_config() -> toml::Value {
toml::from_str(indoc! {r#"
default_api_key = "${DATADOG_API_KEY_ENV_VAR}"
"#})
.unwrap()
}
}
impl DatadogLogsConfig {
// TODO: We should probably hoist this type of base URI generation so that all DD sinks can
// utilize it, since it all follows the same pattern.
fn get_uri(&self) -> http::Uri {
let endpoint = self
.endpoint
.clone()
.or_else(|| {
self.site
.as_ref()
.map(|s| format!("https://http-intake.logs.{}/api/v2/logs", s))
})
.unwrap_or_else(|| match self.region {
Some(Region::Eu) => "https://http-intake.logs.datadoghq.eu/api/v2/logs".to_string(),
None | Some(Region::Us) => {
"https://http-intake.logs.datadoghq.com/api/v2/logs".to_string()
}
});
http::Uri::try_from(endpoint).expect("URI not valid")
}
}
impl DatadogLogsConfig {
pub fn build_processor(
&self,
client: HttpClient,
cx: SinkContext,
) -> crate::Result<VectorSink> {
let default_api_key: Arc<str> = Arc::from(self.default_api_key.clone().as_str());
let request_limits = self.request.unwrap_with(&Default::default());
// We forcefully cap the provided batch configuration to the size/log line limits imposed by
// the Datadog Logs API, but we still allow them to be lowered if need be.
let batch = self
.batch
.validate()?
.limit_max_bytes(BATCH_GOAL_BYTES)?
.limit_max_events(BATCH_MAX_EVENTS)?
.into_batcher_settings()?;
let service = ServiceBuilder::new()
.settings(request_limits, LogApiRetry)
.service(LogApiService::new(
client,
self.get_uri(),
cx.globals.enterprise,
));
let sink = LogSinkBuilder::new(service, cx, default_api_key, batch)
.encoding(self.encoding.clone())
.compression(self.compression.unwrap_or_default())
.build();
Ok(VectorSink::from_event_streamsink(sink))
}
pub fn build_healthcheck(&self, client: HttpClient) -> crate::Result<Healthcheck> {
let validate_endpoint =
get_api_validate_endpoint(self.endpoint.as_ref(), self.site.as_ref(), self.region)?;
Ok(healthcheck(client, validate_endpoint, self.default_api_key.clone()).boxed())
}
pub fn create_client(&self, proxy: &ProxyConfig) -> crate::Result<HttpClient> {
let tls_settings = MaybeTlsSettings::from_config(
&Some(
self.tls
.clone()
.unwrap_or_else(TlsEnableableConfig::enabled),
),
false,
)?;
Ok(HttpClient::new(tls_settings, proxy)?)
}
}
#[async_trait::async_trait]
#[typetag::serde(name = "datadog_logs")]
impl SinkConfig for DatadogLogsConfig {
async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> {
let client = self.create_client(&cx.proxy)?;
let healthcheck = self.build_healthcheck(client.clone())?;
let sink = self.build_processor(client, cx)?;
Ok((sink, healthcheck))
}
fn input(&self) -> Input {
Input::log()
}
fn sink_type(&self) -> &'static str {
"datadog_logs"
}
fn acknowledgements(&self) -> Option<&AcknowledgementsConfig> {
Some(&self.acknowledgements)
}
}
#[cfg(test)]
mod test {
use crate::sinks::datadog::logs::DatadogLogsConfig;
#[test]
fn generate_config() {
crate::test_util::test_generate_config::<DatadogLogsConfig>();
}
}
| 33.822335 | 100 | 0.639652 |
5d3f22c981717456c1fb5e5118156550022f384a
| 2,676 |
//! MP3 specific items
mod constants;
pub(crate) mod header;
mod properties;
mod read;
pub(crate) mod write;
pub use header::{ChannelMode, Layer, MpegVersion};
pub use properties::Mp3Properties;
#[cfg(feature = "ape")]
use crate::ape::tag::ape_tag::ApeTag;
use crate::error::Result;
#[cfg(feature = "id3v1")]
use crate::id3::v1::tag::Id3v1Tag;
#[cfg(feature = "id3v2")]
use crate::id3::v2::tag::Id3v2Tag;
use crate::tag_utils::tag_methods;
use crate::types::file::{AudioFile, FileType, TaggedFile};
use crate::types::properties::FileProperties;
use crate::types::tag::{Tag, TagType};
use std::io::{Read, Seek};
/// An MP3 file
#[derive(Default)]
pub struct Mp3File {
#[cfg(feature = "id3v2")]
/// An ID3v2 tag
pub(crate) id3v2_tag: Option<Id3v2Tag>,
#[cfg(feature = "id3v1")]
/// An ID3v1 tag
pub(crate) id3v1_tag: Option<Id3v1Tag>,
#[cfg(feature = "ape")]
/// An APEv1/v2 tag
pub(crate) ape_tag: Option<ApeTag>,
/// The file's audio properties
pub(crate) properties: Mp3Properties,
pub(super) first_frame_offset: Option<u64>,
pub(super) last_frame_offset: u64,
}
impl From<Mp3File> for TaggedFile {
#[allow(clippy::vec_init_then_push, unused_mut)]
fn from(input: Mp3File) -> Self {
let mut tags = Vec::<Option<Tag>>::with_capacity(3);
#[cfg(feature = "id3v2")]
tags.push(input.id3v2_tag.map(Into::into));
#[cfg(feature = "id3v1")]
tags.push(input.id3v1_tag.map(Into::into));
#[cfg(feature = "ape")]
tags.push(input.ape_tag.map(Into::into));
Self {
ty: FileType::MP3,
properties: FileProperties::from(input.properties),
tags: tags.into_iter().flatten().collect(),
}
}
}
impl AudioFile for Mp3File {
type Properties = Mp3Properties;
fn read_from<R>(reader: &mut R, read_properties: bool) -> Result<Self>
where
R: Read + Seek,
{
read::read_from(reader, true, read_properties)
}
fn properties(&self) -> &Self::Properties {
&self.properties
}
	fn contains_tag(&self) -> bool {
		#[cfg(feature = "id3v2")]
		if self.id3v2_tag.is_some() {
			return true;
		}
		#[cfg(feature = "id3v1")]
		if self.id3v1_tag.is_some() {
			return true;
		}
		#[cfg(feature = "ape")]
		if self.ape_tag.is_some() {
			return true;
		}
		false
	}
fn contains_tag_type(&self, tag_type: &TagType) -> bool {
match tag_type {
#[cfg(feature = "ape")]
TagType::Ape => self.ape_tag.is_some(),
#[cfg(feature = "id3v2")]
TagType::Id3v2 => self.id3v2_tag.is_some(),
#[cfg(feature = "id3v1")]
TagType::Id3v1 => self.id3v1_tag.is_some(),
_ => false,
}
}
}
impl Mp3File {
tag_methods! {
#[cfg(feature = "id3v2")];
id3v2_tag, Id3v2Tag;
#[cfg(feature = "id3v1")];
id3v1_tag, Id3v1Tag;
#[cfg(feature = "ape")];
ape_tag, ApeTag
}
}
| 23.892857 | 71 | 0.666667 |
769d49d8b10975abc3af4e760a8814dba10bb87a
| 916 |
#[test]
fn dijkstra() {
#[allow(dead_code)]
mod example {
include!("../../examples/dijkstra.rs");
pub fn test() {
match run() {
Ok(()) => (),
Err(msg) => panic!("{}", msg),
}
}
}
example::test();
}
#[test]
fn parser() {
#[allow(dead_code)]
mod example {
include!("../../examples/parser.rs");
pub fn test() {
match run() {
Ok(()) => (),
Err(msg) => panic!("{}", msg),
}
}
}
example::test();
}
#[cfg(feature = "gpl")]
#[test]
fn exploration() {
#[allow(dead_code)]
mod example {
include!("../../examples/exploration.rs");
pub fn test() {
match run() {
Ok(()) => (),
Err(msg) => panic!("{}", msg),
}
}
}
example::test();
}
| 17.615385 | 50 | 0.366812 |
d78f01e25bb6dd6b4a2538b12db0c4cccd418fea
| 801 |
// Aldaron's Device Interface / Clock
// Copyright (c) 2017 Jeron Lau <[email protected]>
// Licensed under the MIT LICENSE
//
// src/clock.rs
use std::time::Instant;
/// Clock represents the state of a Real-Time-Clock (RTC). You can use it to
/// make animations, time operations, or determine the date and time (TODO).
pub struct Clock {
time: Instant,
}
impl Clock {
/// Get the current state of the Real-Time-Clock (RTC).
pub fn new() -> Clock {
Clock { time: Instant::now() }
}
/// Get the number of seconds since self was initialized with
/// `Clock::new()`.
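	///
	/// # Example (illustrative)
	///
	/// ```ignore
	/// let clock = Clock::new();
	/// // ... do some work ...
	/// let seconds_elapsed: f32 = clock.since();
	/// ```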
pub fn since(&self) -> f32 {
let duration = self.time.elapsed();
let nanos : f32 = duration.subsec_nanos() as f32
/ 1_000_000_000.0;
let secs : f32 = duration.as_secs() as f32;
return secs + nanos;
}
}
| 25.83871 | 77 | 0.669164 |
1474aa4115408e9f55d92352ced0aca567f45195
| 6,390 |
// This file is part of magic-ring-buffer. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/magic-ring-buffer/master/COPYRIGHT. No part of magic-ring-buffer, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2019 The developers of magic-ring-buffer. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/magic-ring-buffer/master/COPYRIGHT.
/// A magic ring buffer (also known as virtual ring buffer, VRB, or a mirrored ring buffer).
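///
/// A minimal usage sketch (illustrative only; constructing `DefaultHugePageSizes`
/// is platform-specific and elided here, and error handling is omitted):
///
/// ```ignore
/// let ring = MagicRingBuffer::allocate(&defaults, NonZeroU64::new(64 * 1024).unwrap(), 0)?;
/// ring.write_some_data(5, |buffer| buffer.copy_from_slice(b"hello"));
/// let more_to_read = ring.single_reader_read_some_data::<(), _>(|buffer| (buffer.len(), Ok(())))?;
/// ```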
#[derive(Debug)]
pub struct MagicRingBuffer
{
writer_offset: CompareExchangeOnlyEverIncreasesMonotonicallyOffset,
unread_offset: CompareExchangeOnlyEverIncreasesMonotonicallyOffset,
read_offset: CompareExchangeOnlyEverIncreasesMonotonicallyOffset,
mirrored_memory_map: MirroredMemoryMap,
}
impl MagicRingBuffer
{
/// Creates a new instance.
///
/// Rounds `preferred_buffer_size` to page size.
#[inline(always)]
pub fn allocate(defaults: &DefaultHugePageSizes, preferred_buffer_size: NonZeroU64, inclusive_maximum_bytes_wasted: u64) -> Result<Self, MirroredMemoryMapCreationError>
{
Ok
(
Self
{
writer_offset: CompareExchangeOnlyEverIncreasesMonotonicallyOffset::default(),
unread_offset: CompareExchangeOnlyEverIncreasesMonotonicallyOffset::default(),
read_offset: CompareExchangeOnlyEverIncreasesMonotonicallyOffset::default(),
mirrored_memory_map: MirroredMemoryMap::new(defaults, preferred_buffer_size, inclusive_maximum_bytes_wasted)?,
}
)
}
	/// In a recovery scenario, we can potentially (a) re-read a message and (b) lose all messages written but not committed between `unread_offset` and `writer_offset`.
#[inline(always)]
pub fn recovery_if_using_persistent_memory(&self)
{
self.writer_offset.set(self.unread_offset.get())
}
/// The logic in `writer` must not panic; if it does, then the entire queue is effectively corrupt and irrecoverable.
#[inline(always)]
pub fn write_some_data(&self, amount_we_want_to_write: usize, writer: impl FnOnce(&mut [u8]))
{
let amount_we_want_to_write = Size::from(amount_we_want_to_write);
debug_assert!(amount_we_want_to_write <= self.unmirrored_buffer_size(), "Can not write amounts large than then ring buffer's size");
// Get a new offset to write to.
let (current_writer_state_write_offset, next_writer_state_write_offset) = self.writer_offset.fetch_add(amount_we_want_to_write);
// We exit this loop when the reader has made enough forward progress to free up space to accommodate our write (and any predecessors on other threads).
let mut current_unread_offset = loop
{
let (current_unread_offset, _current_read_offset, unread) = self.current_unread_offset_and_current_read_offset_and_unread();
// This value decrements or stays the same with every loop iteration; it can never increase.
let total_size_required_for_writes_in_progress = next_writer_state_write_offset - current_unread_offset;
let available_for_writes = self.unmirrored_buffer_size() - unread;
debug_assert!(available_for_writes <= self.unmirrored_buffer_size());
if likely!(available_for_writes >= total_size_required_for_writes_in_progress)
{
break current_unread_offset
}
busy_wait_spin_loop_hint();
};
// Write data.
writer(self.write_to_buffer(current_writer_state_write_offset, amount_we_want_to_write));
// Serialize order of writers so that they only commit their writes in ascending order with no 'holes', ie later before earlier.
loop
{
current_unread_offset = match self.unread_offset.try_to_update(current_unread_offset, current_writer_state_write_offset)
{
Ok(()) => break,
Err(was_reader_state) => was_reader_state,
};
busy_wait_spin_loop_hint();
}
}
/// Read data, assuming a single reader is active.
///
/// This is *NOT* enforced.
///
/// Returns true if there is more data to read.
#[inline(always)]
pub fn single_reader_read_some_data<E, Reader: FnOnce(&mut [u8]) -> (usize, Result<(), E>)>(&self, reader: Reader) -> Result<bool, E>
{
let (_current_unread_offset, current_read_offset, unread) = self.current_unread_offset_and_current_read_offset_and_unread();
let (actually_read, outcome) = reader(self.read_from_buffer(current_read_offset, unread));
let actually_read = Size::from(actually_read);
let updated_read_offset = current_read_offset + actually_read;
self.read_offset.set(updated_read_offset);
match outcome
{
Err(error) => Err(error),
Ok(()) =>
{
let (_current_unread_offset, _current_read_offset, unread) = self.current_unread_offset_and_current_read_offset_and_unread();
Ok(unread != Size::default())
}
}
}
	// Multiple readers can be implemented using a mutual exclusion lock.
	// An open question: could an `unwriter_offset`, analogous to the offset used to
	// linearize writers, be used to linearize readers instead of a lock?
#[inline(always)]
fn unmirrored_buffer_size(&self) -> Size
{
self.mirrored_memory_map.buffer_size()
}
#[inline(always)]
fn current_unread_offset_and_current_read_offset_and_unread(&self) -> (OnlyEverIncreasesMonotonicallyOffset, OnlyEverIncreasesMonotonicallyOffset, Size)
{
let current_unread_offset = self.unread_offset.get();
let current_read_offset = self.read_offset.get();
debug_assert!(current_unread_offset >= current_read_offset);
let unread = current_unread_offset - current_read_offset;
(current_unread_offset, current_read_offset, unread)
}
#[inline(always)]
fn real_pointer(&self, offset: OnlyEverIncreasesMonotonicallyOffset) -> *mut u8
{
self.mirrored_memory_map.pointer(offset)
}
#[inline(always)]
fn write_to_buffer(&self, current_writer_state_write_offset: OnlyEverIncreasesMonotonicallyOffset, amount_we_want_to_write: Size) -> &mut [u8]
{
let write_pointer = self.real_pointer(current_writer_state_write_offset);
unsafe { from_raw_parts_mut(write_pointer, amount_we_want_to_write.into()) }
}
#[inline(always)]
fn read_from_buffer(&self, current_read_offset: OnlyEverIncreasesMonotonicallyOffset, unread: Size) -> &mut [u8]
{
let read_pointer = self.real_pointer(current_read_offset);
unsafe { from_raw_parts_mut(read_pointer, unread.into()) }
}
}
| 41.493506 | 406 | 0.777621 |
90aa029fff66799d4f20e8c9a398210295617d4a
| 1,496 |
use std::cell::RefCell;
use std::rc::Rc;
use protocol::traits::executor::contract::AccountContract;
use protocol::types::{Address, AssetID, Balance, CarryingAsset};
use crate::native_contract::NativeAccountContract;
use crate::tests::{create_state_adapter, mock_invoke_context};
#[test]
fn test_account_contract() {
let state = Rc::new(RefCell::new(create_state_adapter()));
let mut account = NativeAccountContract::new(state);
let asset =
AssetID::from_hex("0000000000000000000000000000000000000000000000000000000000000003")
.unwrap();
let fee_asset =
AssetID::from_hex("0000000000000000000000000000000000000000000000000000000000000004")
.unwrap();
let user1 = Address::from_hex("100000000000000000000000000000000000000001").unwrap();
let user2 = Address::from_hex("100000000000000000000000000000000000000002").unwrap();
account
.add_balance(&asset, &user1, 10000u64.into())
.unwrap();
let carrying_asset = CarryingAsset {
asset_id: asset.clone(),
amount: 1000u64.into(),
};
let ctx = mock_invoke_context(user1.clone(), Some(carrying_asset), 0, 1_000_000, fee_asset);
account.transfer(Rc::clone(&ctx), &user2).unwrap();
let user1_balance = account.get_balance(&asset, &user1).unwrap();
assert_eq!(user1_balance, Balance::from(9000u64));
let user2_balance = account.get_balance(&asset, &user2).unwrap();
assert_eq!(user2_balance, Balance::from(1000u64));
}
| 39.368421 | 96 | 0.71123 |
ff3c4395d1dd096e6b6fd3ffba67278092d79fcd
| 4,393 |
// The From trait is used for value-to-value conversions.
// If From is implemented correctly for a type, the Into trait should work conversely.
// You can read more about it at https://doc.rust-lang.org/std/convert/trait.From.html
#[derive(Debug)]
struct Person {
name: String,
age: usize,
}
// We implement the Default trait to use it as a fallback
// when the provided string is not convertible into a Person object
impl Default for Person {
fn default() -> Person {
Person {
name: String::from("John"),
age: 30,
}
}
}
// Your task is to complete this implementation
// in order for the line `let p = Person::from("Mark,20")` to compile
// Please note that you'll need to parse the age component into a `usize`
// with something like `"4".parse::<usize>()`. The outcome of this needs to
// be handled appropriately.
//
// Steps:
// 1. If the length of the provided string is 0, then return the default of Person
// 2. Split the given string on the commas present in it
// 3. Extract the first element from the split operation and use it as the name
// 4. If the name is empty, then return the default of Person
// 5. Extract the other element from the split operation and parse it into a `usize` as the age
// If while parsing the age, something goes wrong, then return the default of Person
// Otherwise, then return an instantiated Person object with the results
impl From<&str> for Person {
    fn from(s: &str) -> Person {
        if s.is_empty() {
            return Person::default();
        }
        let splits: Vec<&str> = s.split(',').collect();
        // Require exactly two non-empty components: "name,age".
        if splits.len() != 2 || splits[0].is_empty() || splits[1].is_empty() {
            return Person::default();
        }
        // Parse the age; fall back to the default on any parse error.
        match splits[1].parse::<usize>() {
            Ok(age) => Person {
                name: splits[0].to_string(),
                age,
            },
            Err(_) => Person::default(),
        }
    }
}
fn main() {
// Use the `from` function
let p1 = Person::from("Mark,20");
// Since From is implemented for Person, we should be able to use Into
let p2: Person = "Gerald,70".into();
println!("{:?}", p1);
println!("{:?}", p2);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_default() {
// Test that the default person is 30 year old John
let dp = Person::default();
assert_eq!(dp.name, "John");
assert_eq!(dp.age, 30);
}
#[test]
fn test_bad_convert() {
// Test that John is returned when bad string is provided
let p = Person::from("");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_good_convert() {
// Test that "Mark,20" works
let p = Person::from("Mark,20");
assert_eq!(p.name, "Mark");
assert_eq!(p.age, 20);
}
#[test]
fn test_bad_age() {
// Test that "Mark,twenty" will return the default person due to an error in parsing age
let p = Person::from("Mark,twenty");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_comma_and_age() {
let p: Person = Person::from("Mark");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_age() {
let p: Person = Person::from("Mark,");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_name() {
let p: Person = Person::from(",1");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_name_and_age() {
let p: Person = Person::from(",");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_name_and_invalid_age() {
let p: Person = Person::from(",one");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_trailing_comma() {
let p: Person = Person::from("Mike,32,");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_trailing_comma_and_some_string() {
let p: Person = Person::from("Mike,32,man");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
}
| 29.884354 | 96 | 0.572502 |
c1ef732f7fd436addfa8c9e28e8c2b9fae01402f
| 281 |
use std::collections::HashSet;
use attribute::*;
use field_access::*;
#[derive(Debug, Eq, PartialEq)]
pub struct Field {
pub access_flags: HashSet<FieldAccess>,
pub name_index: u16,
pub descriptor_index: u16,
pub attributes: Vec<Attribute>,
}
| 18.733333 | 43 | 0.69395 |
1e7602b566261a468314e38d444a0bb3ca5ee24b
| 2,748 |
use crate::{
guild::Permissions,
id::{RoleId, UserId},
};
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct PermissionOverwrite {
pub allow: Permissions,
pub deny: Permissions,
pub kind: PermissionOverwriteType,
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum PermissionOverwriteType {
Member(UserId),
Role(RoleId),
}
#[cfg(feature = "serde-support")]
mod serde_support {
use super::{PermissionOverwrite, PermissionOverwriteType, Permissions};
use crate::id::{RoleId, UserId};
use serde::{
de::{Deserialize, Deserializer, Error as DeError},
ser::{Serialize, SerializeStruct, Serializer},
};
#[derive(serde::Deserialize, serde::Serialize)]
struct PermissionOverwriteData {
allow: Permissions,
deny: Permissions,
id: String,
#[serde(rename = "type")]
kind: PermissionOverwriteTypeName,
}
#[derive(serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "snake_case")]
enum PermissionOverwriteTypeName {
Member,
Role,
}
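    // For reference, the wire shape these impls read and write looks roughly like
    // (illustrative): {"allow": 0, "deny": 0, "id": "1", "type": "member"}.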
impl<'de> Deserialize<'de> for PermissionOverwrite {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let data = PermissionOverwriteData::deserialize(deserializer)?;
let kind = match data.kind {
PermissionOverwriteTypeName::Member => {
let id = UserId(data.id.parse().map_err(DeError::custom)?);
PermissionOverwriteType::Member(id)
},
PermissionOverwriteTypeName::Role => {
let id = RoleId(data.id.parse().map_err(DeError::custom)?);
PermissionOverwriteType::Role(id)
},
};
Ok(Self {
allow: data.allow,
deny: data.deny,
kind,
})
}
}
impl Serialize for PermissionOverwrite {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let mut state = serializer.serialize_struct("PermissionOverwrite", 4)?;
state.serialize_field("allow", &self.allow.bits())?;
state.serialize_field("deny", &self.deny.bits())?;
match &self.kind {
PermissionOverwriteType::Member(id) => {
state.serialize_field("id", &id)?;
state.serialize_field("type", "member")?;
},
PermissionOverwriteType::Role(id) => {
state.serialize_field("id", &id)?;
state.serialize_field("type", "role")?;
},
}
state.end()
}
}
}
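// Illustrative wire shape handled by the impls above when the "serde-support"
// feature is enabled (field values are placeholders):
//
// { "allow": 1024, "deny": 0, "id": "123456789", "type": "member" }
//
// The `id` string is parsed into a `UserId` or `RoleId` depending on `type`.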
| 30.197802 | 89 | 0.554585 |
d554993b1bcd5ca727667dc2c5c7f891cd4216b1
| 15,795 |
use std::cmp::{max, min};
use glutin::event::ModifiersState;
use alacritty_terminal::grid::BidirectionalIterator;
use alacritty_terminal::index::{Boundary, Direction, Point};
use alacritty_terminal::term::search::{Match, RegexIter, RegexSearch};
use alacritty_terminal::term::{Term, TermMode};
use crate::config::ui_config::{Hint, HintAction};
use crate::config::UiConfig;
use crate::display::content::RegexMatches;
use crate::display::MAX_SEARCH_LINES;
/// Percentage of characters in the hints alphabet used for the last character.
const HINT_SPLIT_PERCENTAGE: f32 = 0.5;
/// Keyboard regex hint state.
pub struct HintState {
/// Hint currently in use.
hint: Option<Hint>,
/// Alphabet for hint labels.
alphabet: String,
/// Visible matches.
matches: RegexMatches,
/// Key label for each visible match.
labels: Vec<Vec<char>>,
/// Keys pressed for hint selection.
keys: Vec<char>,
}
impl HintState {
/// Initialize an inactive hint state.
pub fn new<S: Into<String>>(alphabet: S) -> Self {
Self {
alphabet: alphabet.into(),
hint: Default::default(),
matches: Default::default(),
labels: Default::default(),
keys: Default::default(),
}
}
/// Check if a hint selection is in progress.
pub fn active(&self) -> bool {
self.hint.is_some()
}
/// Start the hint selection process.
pub fn start(&mut self, hint: Hint) {
self.hint = Some(hint);
}
/// Cancel the hint highlighting process.
fn stop(&mut self) {
self.matches.clear();
self.labels.clear();
self.keys.clear();
self.hint = None;
}
/// Update the visible hint matches and key labels.
pub fn update_matches<T>(&mut self, term: &Term<T>) {
let hint = match self.hint.as_mut() {
Some(hint) => hint,
None => return,
};
// Find visible matches.
self.matches.0 = hint.regex.with_compiled(|regex| {
let mut matches = RegexMatches::new(term, regex);
// Apply post-processing and search for sub-matches if necessary.
if hint.post_processing {
matches
.drain(..)
.flat_map(|rm| HintPostProcessor::new(term, regex, rm).collect::<Vec<_>>())
.collect()
} else {
matches.0
}
});
// Cancel highlight with no visible matches.
if self.matches.is_empty() {
self.stop();
return;
}
let mut generator = HintLabels::new(&self.alphabet, HINT_SPLIT_PERCENTAGE);
let match_count = self.matches.len();
let keys_len = self.keys.len();
// Get the label for each match.
self.labels.resize(match_count, Vec::new());
for i in (0..match_count).rev() {
let mut label = generator.next();
if label.len() >= keys_len && label[..keys_len] == self.keys[..] {
self.labels[i] = label.split_off(keys_len);
} else {
self.labels[i] = Vec::new();
}
}
}
/// Handle keyboard input during hint selection.
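    ///
    /// Returns `Some(HintMatch)` once the typed characters uniquely identify a
    /// label; otherwise the input is recorded (or treated as backspace/cancel)
    /// and `None` is returned.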
pub fn keyboard_input<T>(&mut self, term: &Term<T>, c: char) -> Option<HintMatch> {
match c {
// Use backspace to remove the last character pressed.
'\x08' | '\x1f' => {
self.keys.pop();
},
// Cancel hint highlighting on ESC/Ctrl+c.
'\x1b' | '\x03' => self.stop(),
_ => (),
}
// Update the visible matches.
self.update_matches(term);
let hint = self.hint.as_ref()?;
// Find the last label starting with the input character.
let mut labels = self.labels.iter().enumerate().rev();
let (index, label) = labels.find(|(_, label)| !label.is_empty() && label[0] == c)?;
// Check if the selected label is fully matched.
if label.len() == 1 {
let bounds = self.matches[index].clone();
let action = hint.action.clone();
self.stop();
Some(HintMatch { action, bounds })
} else {
// Store character to preserve the selection.
self.keys.push(c);
None
}
}
/// Hint key labels.
pub fn labels(&self) -> &Vec<Vec<char>> {
&self.labels
}
/// Visible hint regex matches.
pub fn matches(&self) -> &RegexMatches {
&self.matches
}
/// Update the alphabet used for hint labels.
pub fn update_alphabet(&mut self, alphabet: &str) {
if self.alphabet != alphabet {
self.alphabet = alphabet.to_owned();
self.keys.clear();
}
}
}
/// Hint match which was selected by the user.
#[derive(PartialEq, Debug, Clone)]
pub struct HintMatch {
/// Action for handling the text.
pub action: HintAction,
/// Terminal range matching the hint.
pub bounds: Match,
}
/// Generator for creating new hint labels.
struct HintLabels {
/// Full character set available.
alphabet: Vec<char>,
/// Alphabet indices for the next label.
indices: Vec<usize>,
/// Point separating the alphabet's head and tail characters.
///
/// To make identification of the tail character easy, part of the alphabet cannot be used for
/// any other position.
///
/// All characters in the alphabet before this index will be used for the last character, while
/// the rest will be used for everything else.
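    ///
    /// For example (worked from the unit test below), the alphabet `"0123"` with
    /// a split ratio of `0.5` gives `split_point == 1`: `'0'` and `'1'` are used
    /// only for the final character, while `'2'` and `'3'` prefix longer labels.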
split_point: usize,
}
impl HintLabels {
/// Create a new label generator.
///
/// The `split_ratio` should be a number between 0.0 and 1.0 representing the percentage of
/// elements in the alphabet which are reserved for the tail of the hint label.
fn new(alphabet: impl Into<String>, split_ratio: f32) -> Self {
let alphabet: Vec<char> = alphabet.into().chars().collect();
let split_point = ((alphabet.len() - 1) as f32 * split_ratio.min(1.)) as usize;
Self { indices: vec![0], split_point, alphabet }
}
/// Get the characters for the next label.
fn next(&mut self) -> Vec<char> {
let characters = self.indices.iter().rev().map(|index| self.alphabet[*index]).collect();
self.increment();
characters
}
/// Increment the character sequence.
fn increment(&mut self) {
// Increment the last character; if it's not at the split point we're done.
let tail = &mut self.indices[0];
if *tail < self.split_point {
*tail += 1;
return;
}
*tail = 0;
// Increment all other characters in reverse order.
let alphabet_len = self.alphabet.len();
for index in self.indices.iter_mut().skip(1) {
if *index + 1 == alphabet_len {
// Reset character and move to the next if it's already at the limit.
*index = self.split_point + 1;
} else {
// If the character can be incremented, we're done.
*index += 1;
return;
}
}
// Extend the sequence with another character when nothing could be incremented.
self.indices.push(self.split_point + 1);
}
}
/// Check if there is a hint highlighted at the specified point.
pub fn highlighted_at<T>(
term: &Term<T>,
config: &UiConfig,
point: Point,
mouse_mods: ModifiersState,
) -> Option<HintMatch> {
let mouse_mode = term.mode().intersects(TermMode::MOUSE_MODE);
config.hints.enabled.iter().find_map(|hint| {
// Check if all required modifiers are pressed.
let highlight = hint.mouse.map_or(false, |mouse| {
mouse.enabled
&& mouse_mods.contains(mouse.mods.0)
&& (!mouse_mode || mouse_mods.contains(ModifiersState::SHIFT))
});
if !highlight {
return None;
}
hint.regex.with_compiled(|regex| {
// Setup search boundaries.
let mut start = term.line_search_left(point);
start.line = max(start.line, point.line - MAX_SEARCH_LINES);
let mut end = term.line_search_right(point);
end.line = min(end.line, point.line + MAX_SEARCH_LINES);
// Function to verify that the specified point is inside the match.
let at_point = |rm: &Match| *rm.end() >= point && *rm.start() <= point;
// Check if there's any match at the specified point.
let mut iter = RegexIter::new(start, end, Direction::Right, term, regex);
let regex_match = iter.find(at_point)?;
// Apply post-processing and search for sub-matches if necessary.
let regex_match = if hint.post_processing {
HintPostProcessor::new(term, regex, regex_match).find(at_point)
} else {
Some(regex_match)
};
regex_match.map(|bounds| HintMatch { action: hint.action.clone(), bounds })
})
})
}
/// Iterator over all post-processed matches inside an existing hint match.
struct HintPostProcessor<'a, T> {
/// Regex search DFAs.
regex: &'a RegexSearch,
/// Terminal reference.
term: &'a Term<T>,
/// Next hint match in the iterator.
next_match: Option<Match>,
/// Start point for the next search.
start: Point,
/// End point for the hint match iterator.
end: Point,
}
impl<'a, T> HintPostProcessor<'a, T> {
/// Create a new iterator for an unprocessed match.
fn new(term: &'a Term<T>, regex: &'a RegexSearch, regex_match: Match) -> Self {
let mut post_processor = Self {
next_match: None,
start: *regex_match.start(),
end: *regex_match.end(),
term,
regex,
};
// Post-process the first hint match.
post_processor.next_processed_match(regex_match);
post_processor
}
/// Apply some hint post processing heuristics.
///
/// This will check the end of the hint and make it shorter if certain characters are determined
/// to be unlikely to be intentionally part of the hint.
///
/// This is most useful for identifying URLs appropriately.
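    ///
    /// For example (illustrative), a match like `https://example.org/a).` is cut
    /// back before the unbalanced `)`, yielding `https://example.org/a`; a match
    /// like `https://example.org/a.` loses only the trailing `.` delimiter.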
fn hint_post_processing(&self, regex_match: &Match) -> Option<Match> {
let mut iter = self.term.grid().iter_from(*regex_match.start());
let mut c = iter.cell().c;
// Truncate uneven number of brackets.
let end = *regex_match.end();
let mut open_parents = 0;
let mut open_brackets = 0;
loop {
match c {
'(' => open_parents += 1,
'[' => open_brackets += 1,
')' => {
if open_parents == 0 {
iter.prev();
break;
} else {
open_parents -= 1;
}
},
']' => {
if open_brackets == 0 {
iter.prev();
break;
} else {
open_brackets -= 1;
}
},
_ => (),
}
if iter.point() == end {
break;
}
match iter.next() {
Some(indexed) => c = indexed.cell.c,
None => break,
}
}
// Truncate trailing characters which are likely to be delimiters.
let start = *regex_match.start();
while iter.point() != start {
if !matches!(c, '.' | ',' | ':' | ';' | '?' | '!' | '(' | '[' | '\'') {
break;
}
match iter.prev() {
Some(indexed) => c = indexed.cell.c,
None => break,
}
}
if start > iter.point() {
None
} else {
Some(start..=iter.point())
}
}
/// Loop over submatches until a non-empty post-processed match is found.
fn next_processed_match(&mut self, mut regex_match: Match) {
self.next_match = loop {
if let Some(next_match) = self.hint_post_processing(®ex_match) {
self.start = next_match.end().add(self.term, Boundary::Grid, 1);
break Some(next_match);
}
self.start = regex_match.start().add(self.term, Boundary::Grid, 1);
if self.start > self.end {
return;
}
match self.term.regex_search_right(self.regex, self.start, self.end) {
Some(rm) => regex_match = rm,
None => return,
}
};
}
}
impl<'a, T> Iterator for HintPostProcessor<'a, T> {
type Item = Match;
fn next(&mut self) -> Option<Self::Item> {
let next_match = self.next_match.take()?;
if self.start <= self.end {
if let Some(rm) = self.term.regex_search_right(self.regex, self.start, self.end) {
self.next_processed_match(rm);
}
}
Some(next_match)
}
}
#[cfg(test)]
mod tests {
use alacritty_terminal::index::{Column, Line};
use alacritty_terminal::term::test::mock_term;
use super::*;
#[test]
fn hint_label_generation() {
let mut generator = HintLabels::new("0123", 0.5);
assert_eq!(generator.next(), vec!['0']);
assert_eq!(generator.next(), vec!['1']);
assert_eq!(generator.next(), vec!['2', '0']);
assert_eq!(generator.next(), vec!['2', '1']);
assert_eq!(generator.next(), vec!['3', '0']);
assert_eq!(generator.next(), vec!['3', '1']);
assert_eq!(generator.next(), vec!['2', '2', '0']);
assert_eq!(generator.next(), vec!['2', '2', '1']);
assert_eq!(generator.next(), vec!['2', '3', '0']);
assert_eq!(generator.next(), vec!['2', '3', '1']);
assert_eq!(generator.next(), vec!['3', '2', '0']);
assert_eq!(generator.next(), vec!['3', '2', '1']);
assert_eq!(generator.next(), vec!['3', '3', '0']);
assert_eq!(generator.next(), vec!['3', '3', '1']);
assert_eq!(generator.next(), vec!['2', '2', '2', '0']);
assert_eq!(generator.next(), vec!['2', '2', '2', '1']);
assert_eq!(generator.next(), vec!['2', '2', '3', '0']);
assert_eq!(generator.next(), vec!['2', '2', '3', '1']);
assert_eq!(generator.next(), vec!['2', '3', '2', '0']);
assert_eq!(generator.next(), vec!['2', '3', '2', '1']);
assert_eq!(generator.next(), vec!['2', '3', '3', '0']);
assert_eq!(generator.next(), vec!['2', '3', '3', '1']);
assert_eq!(generator.next(), vec!['3', '2', '2', '0']);
assert_eq!(generator.next(), vec!['3', '2', '2', '1']);
assert_eq!(generator.next(), vec!['3', '2', '3', '0']);
assert_eq!(generator.next(), vec!['3', '2', '3', '1']);
assert_eq!(generator.next(), vec!['3', '3', '2', '0']);
assert_eq!(generator.next(), vec!['3', '3', '2', '1']);
assert_eq!(generator.next(), vec!['3', '3', '3', '0']);
assert_eq!(generator.next(), vec!['3', '3', '3', '1']);
}
#[test]
fn closed_bracket_does_not_result_in_infinite_iterator() {
let term = mock_term(" ) ");
let search = RegexSearch::new("[^/ ]").unwrap();
let count = HintPostProcessor::new(
&term,
&search,
Point::new(Line(0), Column(1))..=Point::new(Line(0), Column(1)),
)
.take(1)
.count();
assert_eq!(count, 0);
}
}
| 32.234694 | 100 | 0.535486 |
875c01c9fb321890f18b88397780c389f5970a3e
| 2,502 |
//--------------------------------------------------------------------
// vec2.rs
//--------------------------------------------------------------------
// Provides a two-element vector class
//--------------------------------------------------------------------
use crate::derive_more::*;
use super::*;
use std::cmp::Ordering;
// Vec2
#[derive(Copy, Clone, Add, Sub, Mul, Div, AddAssign, SubAssign, Neg, PartialEq,
MulAssign, DivAssign, From, Into, Display, Constructor)]
#[display(fmt = "({},{})", x, y)]
pub struct Vec2 { pub x: Coord, pub y: Coord }
impl Vec2 {
pub fn zero() -> Vec2 { Vec2 { x: 0.0, y: 0.0 } }
pub fn from_angle(angle: Coord) -> Vec2 { Vec2::new(angle.cos(), angle.sin()) }
pub fn dot(&self, other: Vec2) -> Coord { self.x * other.x + self.y * other.y }
pub fn cross(&self, other: Vec2) -> Coord { self.x * other.y - self.y * other.x }
pub fn length_sq(&self) -> Coord { self.dot(*self) }
pub fn length(&self) -> Coord { self.length_sq().sqrt() }
pub fn normalized(&self) -> Vec2 { *self / self.length() }
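    // rot_scale multiplies the two vectors as complex numbers, (x + iy) * (ox + i*oy):
    // rotating by a unit vector rotates without scaling, which `rotate` relies on.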
pub fn rot_scale(&self, other: Vec2) -> Vec2 {
let x = self.x * other.x - self.y * other.y;
let y = self.x * other.y + self.y * other.x;
Vec2 { x, y }
}
pub fn rotate(&self, other: Vec2) -> Vec2 { self.rot_scale(other.normalized()) }
pub fn rotate_by_angle(&self, angle: Coord) -> Vec2 { self.rot_scale(Vec2::from_angle(angle)) }
pub fn ccw_perpendicular(&self) -> Vec2 { Vec2 { x: -self.y, y: self.x } }
pub fn cw_perpendicular(&self) -> Vec2 { -self.ccw_perpendicular() }
pub fn angle(&self) -> Coord { self.y.atan2(self.x) }
pub fn angle_facing(&self, other: Vec2) -> Coord { (other - *self).angle() }
pub fn angle_between(&self, other: Vec2) -> Coord { self.cross(other).atan2(self.dot(other)) }
pub fn roughly_zero(&self) -> bool { self.length_sq().roughly_zero_squared() }
pub fn roughly_equals(&self, other: Vec2) -> bool { (*self - other).length_sq().roughly_zero_squared() }
}
// Implement scalar * mul as required
impl core::ops::Mul<Vec2> for Coord {
type Output = Vec2;
fn mul(self, rhs: Vec2) -> Vec2 { Vec2::new(self * rhs.x, self * rhs.y) }
}
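// Canonical sweep ordering: ascending y, ties broken by descending x. Panics on
// NaN coordinates (the `partial_cmp` unwraps), which is assumed not to occur here.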
pub fn canonical(a: &Vec2, b: &Vec2) -> Ordering {
if a.y == b.y { b.x.partial_cmp(&a.x).unwrap() }
else { a.y.partial_cmp(&b.y).unwrap() }
}
impl std::fmt::Debug for Vec2 {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self)
}
}
| 38.492308 | 109 | 0.550759 |
8fd15622fdd8b2ef9bc0e4a6f9e93f1c79dc731e
| 6,485 |
use crate::access_control;
use serum_common::pack::Pack;
use serum_lockup::accounts::{Safe, TokenVault, Vesting};
use serum_lockup::error::{LockupError, LockupErrorCode};
use solana_sdk::account_info::{next_account_info, AccountInfo};
use solana_sdk::info;
use solana_sdk::pubkey::Pubkey;
use std::convert::Into;
pub fn handler<'a>(
program_id: &'a Pubkey,
accounts: &'a [AccountInfo<'a>],
amount: u64,
) -> Result<(), LockupError> {
info!("handler: redeem");
let acc_infos = &mut accounts.iter();
let vesting_acc_beneficiary_info = next_account_info(acc_infos)?;
let vesting_acc_info = next_account_info(acc_infos)?;
let beneficiary_token_acc_info = next_account_info(acc_infos)?;
let safe_vault_acc_info = next_account_info(acc_infos)?;
let safe_vault_authority_acc_info = next_account_info(acc_infos)?;
let safe_acc_info = next_account_info(acc_infos)?;
let nft_token_acc_info = next_account_info(acc_infos)?;
let nft_mint_acc_info = next_account_info(acc_infos)?;
let token_program_acc_info = next_account_info(acc_infos)?;
let clock_acc_info = next_account_info(acc_infos)?;
access_control(AccessControlRequest {
program_id,
amount,
vesting_acc_beneficiary_info,
vesting_acc_info,
safe_vault_acc_info,
safe_vault_authority_acc_info,
safe_acc_info,
nft_token_acc_info,
nft_mint_acc_info,
clock_acc_info,
})?;
Vesting::unpack_mut(
&mut vesting_acc_info.try_borrow_mut_data()?,
&mut |vesting_acc: &mut Vesting| {
state_transition(StateTransitionRequest {
amount,
vesting_acc,
accounts,
safe_vault_acc_info,
safe_vault_authority_acc_info,
beneficiary_token_acc_info,
safe_acc_info,
token_program_acc_info,
nft_token_acc_info,
nft_mint_acc_info,
})
.map_err(Into::into)
},
)
    .map_err(LockupError::ProgramError)
}
fn access_control<'a>(req: AccessControlRequest<'a>) -> Result<(), LockupError> {
info!("access-control: redeem");
let AccessControlRequest {
program_id,
amount,
vesting_acc_beneficiary_info,
vesting_acc_info,
safe_vault_acc_info,
safe_vault_authority_acc_info,
safe_acc_info,
nft_token_acc_info,
nft_mint_acc_info,
clock_acc_info,
} = req;
// Beneficiary authorization.
if !vesting_acc_beneficiary_info.is_signer {
return Err(LockupErrorCode::Unauthorized)?;
}
// Account validation.
let _ = access_control::safe(safe_acc_info, program_id)?;
let _ = access_control::vault(
safe_vault_acc_info,
safe_vault_authority_acc_info,
safe_acc_info,
program_id,
)?;
let vesting = access_control::vesting(
program_id,
safe_acc_info.key,
vesting_acc_info,
vesting_acc_beneficiary_info,
)?;
let _ = access_control::locked_token(
nft_token_acc_info,
nft_mint_acc_info,
safe_vault_authority_acc_info.key,
&vesting,
)?;
// Redemption checks.
{
let clock = access_control::clock(clock_acc_info)?;
if !vesting.claimed {
return Err(LockupErrorCode::NotYetClaimed)?;
}
if amount > vesting.available_for_withdrawal(clock.slot) {
return Err(LockupErrorCode::InsufficientWithdrawalBalance)?;
}
}
info!("access-control: success");
Ok(())
}
fn state_transition<'a, 'b>(req: StateTransitionRequest<'a, 'b>) -> Result<(), LockupError> {
info!("state-transition: redeem");
let StateTransitionRequest {
vesting_acc,
amount,
accounts,
safe_vault_acc_info,
safe_vault_authority_acc_info,
beneficiary_token_acc_info,
safe_acc_info,
token_program_acc_info,
nft_token_acc_info,
nft_mint_acc_info,
} = req;
// Remove the withdrawn token from the vesting account.
{
vesting_acc.deduct(amount);
}
// Burn the NFT.
{
info!("burning token receipts");
let burn_instruction = spl_token::instruction::burn(
&spl_token::ID,
nft_token_acc_info.key,
nft_mint_acc_info.key,
&vesting_acc.beneficiary,
&[],
amount,
)?;
solana_sdk::program::invoke_signed(&burn_instruction, &accounts[..], &[])?;
}
// Transfer token from the vault to the user address.
{
info!("invoking token transfer");
let withdraw_instruction = spl_token::instruction::transfer(
&spl_token::ID,
safe_vault_acc_info.key,
beneficiary_token_acc_info.key,
&safe_vault_authority_acc_info.key,
&[],
amount,
)?;
let safe = Safe::unpack(&safe_acc_info.try_borrow_data()?)?;
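        // The vault authority is a program-derived address; signing the CPI with
        // its seeds (safe account key + nonce) authorizes the vault transfer.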
let signer_seeds = TokenVault::signer_seeds(safe_acc_info.key, &safe.nonce);
solana_sdk::program::invoke_signed(
&withdraw_instruction,
&[
safe_vault_acc_info.clone(),
beneficiary_token_acc_info.clone(),
safe_vault_authority_acc_info.clone(),
token_program_acc_info.clone(),
],
&[&signer_seeds],
)?;
}
info!("state-transition: success");
Ok(())
}
struct AccessControlRequest<'a> {
program_id: &'a Pubkey,
amount: u64,
vesting_acc_beneficiary_info: &'a AccountInfo<'a>,
vesting_acc_info: &'a AccountInfo<'a>,
safe_acc_info: &'a AccountInfo<'a>,
safe_vault_acc_info: &'a AccountInfo<'a>,
safe_vault_authority_acc_info: &'a AccountInfo<'a>,
nft_token_acc_info: &'a AccountInfo<'a>,
nft_mint_acc_info: &'a AccountInfo<'a>,
clock_acc_info: &'a AccountInfo<'a>,
}
struct StateTransitionRequest<'a, 'b> {
amount: u64,
vesting_acc: &'b mut Vesting,
accounts: &'a [AccountInfo<'a>],
safe_acc_info: &'a AccountInfo<'a>,
beneficiary_token_acc_info: &'a AccountInfo<'a>,
safe_vault_acc_info: &'a AccountInfo<'a>,
safe_vault_authority_acc_info: &'a AccountInfo<'a>,
token_program_acc_info: &'a AccountInfo<'a>,
nft_token_acc_info: &'a AccountInfo<'a>,
nft_mint_acc_info: &'a AccountInfo<'a>,
}
| 30.162791 | 93 | 0.631149 |
9b943eccf30d9eb992e8be15475e5c3a256c8499
| 525 |
pub mod fast_conventional_config;
mod conventional_commit;
pub use conventional_commit::body::Body as ConventionalBody;
pub use conventional_commit::change::Change as ConventionalChange;
pub use conventional_commit::commit::Commit as ConventionalCommit;
pub use conventional_commit::scope::Scope as ConventionalScope;
pub use conventional_commit::subject::Subject as ConventionalSubject;
pub use git::revision_selection::RevisionSelection as GitRevisionSelection;
pub use git::short_ref::ShortRef as GitShortRef;
mod git;
| 37.5 | 75 | 0.841905 |
3a8e468856c566fb784cc934f309c792b0144cb8
| 22,391 |
use super::EnumKind;
use rustc_hash::FxHashMap;
use swc_atoms::js_word;
use swc_common::{util::move_map::MoveMap, Spanned, DUMMY_SP};
use swc_ecma_ast::*;
use swc_ecma_utils::{ident::IdentLike, member_expr, quote_ident, undefined, ExprFactory, Id};
use swc_ecma_visit::{noop_fold_type, Fold, FoldWith};
/// https://github.com/leonardfactory/babel-plugin-transform-typescript-metadata/blob/master/src/parameter/parameterVisitor.ts
pub(super) struct ParamMetadata;
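// Illustrative effect (assumed from the fold below): a parameter decorator such
// as `constructor(@Inject() svc: Service)` is hoisted onto the class/method as
// `(target, key) => Inject()(target, undefined, 0)`, preserving the parameter
// index; for methods, `key` is forwarded instead of `undefined`.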
impl Fold for ParamMetadata {
noop_fold_type!();
fn fold_class(&mut self, mut cls: Class) -> Class {
cls = cls.fold_children_with(self);
let mut decorators = cls.decorators;
cls.body = cls.body.move_map(|m| match m {
ClassMember::Constructor(mut c) => {
for (idx, param) in c.params.iter_mut().enumerate() {
//
match param {
ParamOrTsParamProp::TsParamProp(p) => {
for decorator in p.decorators.drain(..) {
let new_dec =
self.create_param_decorator(idx, decorator.expr, true);
decorators.push(new_dec);
}
}
ParamOrTsParamProp::Param(param) => {
for decorator in param.decorators.drain(..) {
let new_dec =
self.create_param_decorator(idx, decorator.expr, true);
decorators.push(new_dec);
}
}
}
}
ClassMember::Constructor(c)
}
_ => m,
});
cls.decorators = decorators;
cls
}
fn fold_class_method(&mut self, mut m: ClassMethod) -> ClassMethod {
for (idx, param) in m.function.params.iter_mut().enumerate() {
for decorator in param.decorators.drain(..) {
let new_dec = self.create_param_decorator(idx, decorator.expr, false);
m.function.decorators.push(new_dec);
}
}
m
}
}
impl ParamMetadata {
fn create_param_decorator(
&self,
param_index: usize,
decorator_expr: Box<Expr>,
is_constructor: bool,
) -> Decorator {
Decorator {
span: DUMMY_SP,
expr: Box::new(Expr::Fn(FnExpr {
ident: None,
function: Function {
params: vec![
Param {
span: DUMMY_SP,
decorators: Default::default(),
pat: Pat::Ident(quote_ident!("target").into()),
},
Param {
span: DUMMY_SP,
decorators: Default::default(),
pat: Pat::Ident(quote_ident!("key").into()),
},
],
body: Some(BlockStmt {
span: DUMMY_SP,
stmts: vec![Stmt::Return(ReturnStmt {
span: DUMMY_SP,
arg: Some(Box::new(Expr::Call(CallExpr {
span: DUMMY_SP,
callee: decorator_expr.as_callee(),
args: vec![
quote_ident!("target").as_arg(),
if is_constructor {
quote_ident!("undefined").as_arg()
} else {
quote_ident!("key").as_arg()
},
Lit::Num(Number {
span: DUMMY_SP,
value: param_index as _,
})
.as_arg(),
],
type_args: Default::default(),
}))),
})],
}),
decorators: Default::default(),
span: Default::default(),
is_generator: Default::default(),
is_async: Default::default(),
type_params: Default::default(),
return_type: Default::default(),
},
})),
}
}
}
/// https://github.com/leonardfactory/babel-plugin-transform-typescript-metadata/blob/master/src/metadata/metadataVisitor.ts
pub(super) struct Metadata<'a> {
pub(super) enums: &'a FxHashMap<Id, EnumKind>,
pub(super) class_name: Option<&'a Ident>,
}
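// Illustrative effect (assumed from the folds below): for a decorated class with
// a constructor like `constructor(a: string)`, this pushes decorators equivalent
// to `Reflect.metadata("design:paramtypes", [String])`, guarded at runtime by
// `typeof Reflect !== "undefined" && typeof Reflect.metadata === "function"`.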
impl Fold for Metadata<'_> {
noop_fold_type!();
fn fold_class(&mut self, mut c: Class) -> Class {
c = c.fold_children_with(self);
if c.decorators.is_empty() {
return c;
}
let constructor = c.body.iter().find_map(|m| match m {
ClassMember::Constructor(c) => Some(c),
_ => None,
});
if constructor.is_none() {
return c;
}
{
let dec = self
.create_metadata_design_decorator("design:type", quote_ident!("Function").as_arg());
c.decorators.push(dec);
}
{
let dec = self.create_metadata_design_decorator(
"design:paramtypes",
ArrayLit {
span: DUMMY_SP,
elems: constructor
.as_ref()
.unwrap()
.params
.iter()
.map(|v| match v {
ParamOrTsParamProp::TsParamProp(p) => {
let ann = match &p.param {
TsParamPropParam::Ident(i) => i.type_ann.as_ref(),
TsParamPropParam::Assign(a) => get_type_ann_of_pat(&a.left),
};
Some(serialize_type(self.class_name, ann).as_arg())
}
ParamOrTsParamProp::Param(p) => Some(
serialize_type(self.class_name, get_type_ann_of_pat(&p.pat))
.as_arg(),
),
})
.collect(),
}
.as_arg(),
);
c.decorators.push(dec);
}
c
}
fn fold_class_method(&mut self, mut m: ClassMethod) -> ClassMethod {
if m.function.decorators.is_empty() {
return m;
}
{
let dec = self
.create_metadata_design_decorator("design:type", quote_ident!("Function").as_arg());
m.function.decorators.push(dec);
}
{
let dec = self.create_metadata_design_decorator(
"design:paramtypes",
ArrayLit {
span: DUMMY_SP,
elems: m
.function
.params
.iter()
.map(|v| {
Some(
serialize_type(self.class_name, get_type_ann_of_pat(&v.pat))
.as_arg(),
)
})
.collect(),
}
.as_arg(),
);
m.function.decorators.push(dec);
}
m
}
fn fold_class_prop(&mut self, mut p: ClassProp) -> ClassProp {
if p.decorators.is_empty() {
return p;
}
if p.type_ann.is_none() {
return p;
}
if let Some(name) = p
.type_ann
.as_ref()
.map(|ty| &ty.type_ann)
.map(|type_ann| match &**type_ann {
TsType::TsTypeRef(r) => Some(r),
_ => None,
})
.flatten()
.map(|r| match &r.type_name {
TsEntityName::TsQualifiedName(_) => None,
TsEntityName::Ident(i) => Some(i),
})
.flatten()
{
if let Some(kind) = self.enums.get(&name.to_id()) {
let dec = self.create_metadata_design_decorator(
"design:type",
match kind {
EnumKind::Mixed => quote_ident!("Object").as_arg(),
EnumKind::Str => quote_ident!("String").as_arg(),
EnumKind::Num => quote_ident!("Number").as_arg(),
},
);
p.decorators.push(dec);
return p;
}
}
let dec = self.create_metadata_design_decorator(
"design:type",
serialize_type(self.class_name, p.type_ann.as_ref()).as_arg(),
);
p.decorators.push(dec);
p
}
}
impl Metadata<'_> {
fn create_metadata_design_decorator(&self, design: &str, type_arg: ExprOrSpread) -> Decorator {
Decorator {
span: DUMMY_SP,
expr: Box::new(Expr::Bin(BinExpr {
span: DUMMY_SP,
left: Box::new(Expr::Bin(BinExpr {
span: DUMMY_SP,
left: Box::new(Expr::Bin(BinExpr {
span: DUMMY_SP,
left: Box::new(Expr::Unary(UnaryExpr {
span: DUMMY_SP,
op: op!("typeof"),
arg: Box::new(Expr::Ident(quote_ident!("Reflect"))),
})),
op: op!("!=="),
right: Box::new(Expr::Lit(Lit::Str(Str {
span: DUMMY_SP,
value: "undefined".into(),
has_escape: false,
kind: Default::default(),
}))),
})),
op: op!("&&"),
right: Box::new(Expr::Bin(BinExpr {
span: DUMMY_SP,
left: Box::new(Expr::Unary(UnaryExpr {
span: DUMMY_SP,
op: op!("typeof"),
arg: member_expr!(DUMMY_SP, Reflect.metadata),
})),
op: op!("==="),
right: Box::new(Expr::Lit(Lit::Str(Str {
span: DUMMY_SP,
value: "function".into(),
has_escape: false,
kind: Default::default(),
}))),
})),
})),
op: op!("&&"),
right: Box::new(Expr::Call(CallExpr {
span: DUMMY_SP,
callee: member_expr!(DUMMY_SP, Reflect.metadata).as_callee(),
args: vec![
Str {
span: DUMMY_SP,
value: design.into(),
has_escape: false,
kind: Default::default(),
}
.as_arg(),
type_arg,
],
type_args: Default::default(),
})),
})),
}
}
}
fn serialize_type(class_name: Option<&Ident>, param: Option<&TsTypeAnn>) -> Expr {
fn serialize_type_ref(class_name: &str, ty: &TsTypeRef) -> Expr {
match &ty.type_name {
// We should omit references to self (class) since it will throw a ReferenceError at
// runtime due to babel transpile output.
TsEntityName::Ident(i) if &*i.sym == class_name => {
return quote_ident!("Object").into()
}
_ => {}
}
let member_expr = ts_entity_to_member_expr(&ty.type_name);
fn check_object_existed(expr: Box<Expr>) -> Box<Expr> {
match *expr {
Expr::Member(ref member_expr) => {
let obj_expr = match member_expr.obj {
ExprOrSuper::Expr(ref exp) => exp.clone(),
ExprOrSuper::Super(_) => panic!("Unreachable code path"),
};
Box::new(Expr::Bin(BinExpr {
span: DUMMY_SP,
left: check_object_existed(obj_expr),
op: op!("||"),
right: Box::new(Expr::Bin(BinExpr {
span: DUMMY_SP,
left: Box::new(Expr::Unary(UnaryExpr {
span: DUMMY_SP,
op: op!("typeof"),
arg: expr.clone(),
})),
op: op!("==="),
right: Box::new(Expr::Lit(Lit::Str(Str {
span: DUMMY_SP,
value: "undefined".into(),
has_escape: false,
kind: Default::default(),
}))),
})),
}))
}
_ => Box::new(Expr::Bin(BinExpr {
span: DUMMY_SP,
left: Box::new(Expr::Unary(UnaryExpr {
span: DUMMY_SP,
op: op!("typeof"),
arg: expr.clone(),
})),
op: op!("==="),
right: Box::new(Expr::Lit(Lit::Str(Str {
span: DUMMY_SP,
value: "undefined".into(),
has_escape: false,
kind: Default::default(),
}))),
})),
}
}
        // We don't know whether the type is just a type (interface, etc.) or a
        // concrete value (class, etc.).
        //
        // The `typeof` operator lets us reference the expression even if it is not
        // defined; the fallback is `Object`.
Expr::Cond(CondExpr {
span: DUMMY_SP,
test: check_object_existed(Box::new(member_expr.clone())),
cons: Box::new(quote_ident!("Object").into()),
alt: Box::new(member_expr),
})
}
fn serialize_type_list(class_name: &str, types: &[Box<TsType>]) -> Expr {
let mut u = None;
for ty in types {
// Skip parens if need be
let ty = match &**ty {
TsType::TsParenthesizedType(ty) => &ty.type_ann,
_ => ty,
};
match &**ty {
// Always elide `never` from the union/intersection if possible
TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsNeverKeyword,
..
}) => {
continue;
}
// Elide null and undefined from unions for metadata, just like what we did prior to
// the implementation of strict null checks
TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsNullKeyword,
..
})
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsUndefinedKeyword,
..
}) => {
continue;
}
_ => {}
}
let item = serialize_type_node(class_name, &ty);
// One of the individual is global object, return immediately
match item {
Expr::Ident(Ident {
sym: js_word!("Object"),
..
}) => return item,
_ => {}
}
            // If the union contains a type that is not a `void 0` expression,
            // check whether the common type is an identifier; anything more
            // complex defaults to `Object`.
match &u {
None => {
u = Some(item);
}
Some(prev) => {
// Check for different types
match prev {
Expr::Ident(prev) => match &item {
Expr::Ident(item) if prev.sym == item.sym => {}
_ => return quote_ident!("Object").into(),
},
_ => return quote_ident!("Object").into(),
}
}
}
}
        // Return the common type if one was found; otherwise `void 0`.
        u.unwrap_or_else(|| *undefined(DUMMY_SP))
}
fn serialize_type_node(class_name: &str, ty: &TsType) -> Expr {
let span = ty.span();
match ty {
TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsVoidKeyword,
..
})
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsUndefinedKeyword,
..
})
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsNullKeyword,
..
})
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsNeverKeyword,
..
}) => return *undefined(span),
TsType::TsParenthesizedType(ty) => serialize_type_node(class_name, &*ty.type_ann),
TsType::TsFnOrConstructorType(_) => quote_ident!("Function").into(),
TsType::TsArrayType(_) | TsType::TsTupleType(_) => quote_ident!("Array").into(),
TsType::TsLitType(TsLitType {
lit: TsLit::Bool(..),
..
})
| TsType::TsTypePredicate(_)
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsBooleanKeyword,
..
}) => quote_ident!("Boolean").into(),
ty if is_str(ty) => quote_ident!("String").into(),
TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsObjectKeyword,
..
}) => quote_ident!("Object").into(),
TsType::TsLitType(TsLitType {
lit: TsLit::Number(..),
..
})
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsNumberKeyword,
..
})
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsBigIntKeyword,
..
}) => quote_ident!("Number").into(),
TsType::TsLitType(ty) => {
// TODO: Proper error reporting
panic!("Bad type for decoration: {:?}", ty);
}
TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsSymbolKeyword,
..
}) => quote_ident!("Symbol").into(),
TsType::TsTypeQuery(_)
| TsType::TsTypeOperator(_)
| TsType::TsIndexedAccessType(_)
| TsType::TsTypeLit(_)
| TsType::TsMappedType(_)
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsAnyKeyword,
..
})
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsUnknownKeyword,
..
})
| TsType::TsThisType(..) => quote_ident!("Object").into(),
TsType::TsUnionOrIntersectionType(ty) => match ty {
TsUnionOrIntersectionType::TsUnionType(ty) => {
serialize_type_list(class_name, &ty.types)
}
TsUnionOrIntersectionType::TsIntersectionType(ty) => {
serialize_type_list(class_name, &ty.types)
}
},
TsType::TsConditionalType(ty) => {
serialize_type_list(class_name, &[ty.true_type.clone(), ty.false_type.clone()])
}
TsType::TsTypeRef(ty) => serialize_type_ref(class_name, ty),
_ => panic!("Bad type for decorator: {:?}", ty),
}
}
let param = match param {
Some(v) => &v.type_ann,
None => return *undefined(DUMMY_SP),
};
serialize_type_node(class_name.map(|v| &*v.sym).unwrap_or(""), &**param)
}
fn ts_entity_to_member_expr(type_name: &TsEntityName) -> Expr {
match type_name {
TsEntityName::TsQualifiedName(q) => {
let obj = ts_entity_to_member_expr(&q.left);
Expr::Member(MemberExpr {
span: DUMMY_SP,
obj: obj.as_obj(),
prop: Box::new(Expr::Ident(q.right.clone())),
computed: false,
})
}
TsEntityName::Ident(i) => Expr::Ident(i.clone()),
}
}
fn get_type_ann_of_pat(p: &Pat) -> Option<&TsTypeAnn> {
match p {
Pat::Ident(p) => &p.type_ann,
Pat::Array(p) => &p.type_ann,
Pat::Rest(p) => &p.type_ann,
Pat::Object(p) => &p.type_ann,
Pat::Assign(p) => &p.type_ann,
Pat::Invalid(_) => return None,
Pat::Expr(_) => return None,
}
.as_ref()
}
fn is_str(ty: &TsType) -> bool {
match ty {
TsType::TsLitType(TsLitType {
lit: TsLit::Str(..),
..
})
| TsType::TsKeywordType(TsKeywordType {
kind: TsKeywordTypeKind::TsStringKeyword,
..
}) => true,
TsType::TsUnionOrIntersectionType(TsUnionOrIntersectionType::TsUnionType(u)) => {
u.types.iter().all(|ty| is_str(ty))
}
_ => false,
}
}
| 35.597774 | 126 | 0.420794 |
ef5f538ed9deace6149fffbdf0b6c6c1fec22102
| 3,443 |
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Ping example
//!
//! See ../src/tutorial.rs for a step-by-step guide building the example below.
//!
//! In the first terminal window, run:
//!
//! ```sh
//! cargo run --example ping
//! ```
//!
//! It will print the PeerId and the listening addresses, e.g. `Listening on
//! "/ip4/0.0.0.0/tcp/24915"`
//!
//! In the second terminal window, start a new instance of the example with:
//!
//! ```sh
//! cargo run --example ping -- /ip4/127.0.0.1/tcp/24915
//! ```
//!
//! The two nodes establish a connection, negotiate the ping protocol
//! and begin pinging each other.
use futures::executor::block_on;
use futures::prelude::*;
use libp2p::swarm::{Swarm, SwarmEvent};
use libp2p::{identity, ping, PeerId};
use std::error::Error;
use std::task::Poll;
fn main() -> Result<(), Box<dyn Error>> {
let local_key = identity::Keypair::generate_ed25519();
let local_peer_id = PeerId::from(local_key.public());
println!("Local peer id: {:?}", local_peer_id);
let transport = block_on(libp2p::development_transport(local_key))?;
// Create a ping network behaviour.
//
// For illustrative purposes, the ping protocol is configured to
// keep the connection alive, so a continuous sequence of pings
// can be observed.
let behaviour = ping::Behaviour::new(ping::Config::new().with_keep_alive(true));
let mut swarm = Swarm::new(transport, behaviour, local_peer_id);
// Tell the swarm to listen on all interfaces and a random, OS-assigned
// port.
swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
// Dial the peer identified by the multi-address given as the second
// command-line argument, if any.
if let Some(addr) = std::env::args().nth(1) {
let remote = addr.parse()?;
swarm.dial_addr(remote)?;
println!("Dialed {}", addr)
}
block_on(future::poll_fn(move |cx| loop {
match swarm.poll_next_unpin(cx) {
Poll::Ready(Some(event)) => match event {
SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {:?}", address),
SwarmEvent::Behaviour(event) => println!("{:?}", event),
_ => {}
},
Poll::Ready(None) => return Poll::Ready(()),
Poll::Pending => return Poll::Pending,
}
}));
Ok(())
}
| 37.423913 | 100 | 0.663956 |
dd69fd32cc835b02b8d7cc8a9c53342a84c2c91b
| 1,847 |
#[doc = "Register `threshold_0` reader"]
pub struct R(crate::R<THRESHOLD_0_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<THRESHOLD_0_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<THRESHOLD_0_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<THRESHOLD_0_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `threshold_0` writer"]
pub struct W(crate::W<THRESHOLD_0_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<THRESHOLD_0_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<THRESHOLD_0_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<THRESHOLD_0_SPEC>) -> Self {
W(writer)
}
}
impl W {
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "PRIORITY THRESHOLD Register for hart 0\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [threshold_0](index.html) module"]
pub struct THRESHOLD_0_SPEC;
impl crate::RegisterSpec for THRESHOLD_0_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [threshold_0::R](R) reader structure"]
impl crate::Readable for THRESHOLD_0_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [threshold_0::W](W) writer structure"]
impl crate::Writable for THRESHOLD_0_SPEC {
type Writer = W;
}
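// Illustrative use of the generated API (the peripheral path `pac::PLIC` is an
// assumption; only the raw-bits writer shown above is relied on):
//
// let plic = unsafe { &*pac::PLIC::ptr() };
// plic.threshold_0.write(|w| unsafe { w.bits(0) }); // unmask all priorities > 0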
| 31.844828 | 352 | 0.637791 |
f806f7a79d0ee0e84438502025b9f4dd1e160dbc
| 431 |
pub mod common {
pub static CONF_ENV_VARIABLE: &'static str = "CONF";
pub static STD_LOOP_DELAY: u32 = 10;
pub static NOTARGET_DELAY: u32 = 100;
pub static MIN_COMMAND_POOL_SIZE: usize = 8;
pub static MIN_BUFFER_SIZE: u32 = 2048;
pub static CONNECTION_FINISH_TIMEOUT: u32 = 60; // sec
pub static VERIFICATION_LINE_SIZE: usize = 128;
}
pub mod messages {
    pub static AUTH_FAILED_TMP: &'static str = "Auth failed!";
}
| 30.785714 | 59 | 0.723898 |
9bd4174ee532c66482214a5085ae1584276facd0
| 39,712 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[derive(Debug)]
pub(crate) struct Handle<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
client: aws_smithy_client::Client<C, M, R>,
conf: crate::Config,
}
/// An ergonomic service client for `AWSIoTFleetHub`.
///
/// This client allows ergonomic access to a `AWSIoTFleetHub`-shaped service.
/// Each method corresponds to an endpoint defined in the service's Smithy model,
/// and the request and response shapes are auto-generated from that same model.
///
/// # Using a Client
///
/// Once you have a client set up, you can access the service's endpoints
/// by calling the appropriate method on [`Client`]. Each such method
/// returns a request builder for that endpoint, with methods for setting
/// the various fields of the request. Once your request is complete, use
/// the `send` method to send the request. `send` returns a future, which
/// you then have to `.await` to get the service's response.
///
/// [builder pattern]: https://rust-lang.github.io/api-guidelines/type-safety.html#c-builder
/// [SigV4-signed requests]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
#[derive(std::fmt::Debug)]
pub struct Client<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<Handle<C, M, R>>,
}
impl<C, M, R> std::clone::Clone for Client<C, M, R> {
fn clone(&self) -> Self {
Self {
handle: self.handle.clone(),
}
}
}
#[doc(inline)]
pub use aws_smithy_client::Builder;
impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> {
fn from(client: aws_smithy_client::Client<C, M, R>) -> Self {
Self::with_config(client, crate::Config::builder().build())
}
}
impl<C, M, R> Client<C, M, R> {
/// Creates a client with the given service configuration.
pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self {
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
/// Returns the client's configuration.
pub fn conf(&self) -> &crate::Config {
&self.handle.conf
}
}
impl<C, M, R> Client<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Constructs a fluent builder for the `CreateApplication` operation.
///
/// See [`CreateApplication`](crate::client::fluent_builders::CreateApplication) for more information about the
/// operation and its arguments.
pub fn create_application(&self) -> fluent_builders::CreateApplication<C, M, R> {
fluent_builders::CreateApplication::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DeleteApplication` operation.
///
/// See [`DeleteApplication`](crate::client::fluent_builders::DeleteApplication) for more information about the
/// operation and its arguments.
pub fn delete_application(&self) -> fluent_builders::DeleteApplication<C, M, R> {
fluent_builders::DeleteApplication::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DescribeApplication` operation.
///
/// See [`DescribeApplication`](crate::client::fluent_builders::DescribeApplication) for more information about the
/// operation and its arguments.
pub fn describe_application(&self) -> fluent_builders::DescribeApplication<C, M, R> {
fluent_builders::DescribeApplication::new(self.handle.clone())
}
/// Constructs a fluent builder for the `ListApplications` operation.
///
/// See [`ListApplications`](crate::client::fluent_builders::ListApplications) for more information about the
/// operation and its arguments.
pub fn list_applications(&self) -> fluent_builders::ListApplications<C, M, R> {
fluent_builders::ListApplications::new(self.handle.clone())
}
/// Constructs a fluent builder for the `ListTagsForResource` operation.
///
/// See [`ListTagsForResource`](crate::client::fluent_builders::ListTagsForResource) for more information about the
/// operation and its arguments.
pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource<C, M, R> {
fluent_builders::ListTagsForResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the `TagResource` operation.
///
/// See [`TagResource`](crate::client::fluent_builders::TagResource) for more information about the
/// operation and its arguments.
pub fn tag_resource(&self) -> fluent_builders::TagResource<C, M, R> {
fluent_builders::TagResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the `UntagResource` operation.
///
/// See [`UntagResource`](crate::client::fluent_builders::UntagResource) for more information about the
/// operation and its arguments.
pub fn untag_resource(&self) -> fluent_builders::UntagResource<C, M, R> {
fluent_builders::UntagResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the `UpdateApplication` operation.
///
/// See [`UpdateApplication`](crate::client::fluent_builders::UpdateApplication) for more information about the
/// operation and its arguments.
pub fn update_application(&self) -> fluent_builders::UpdateApplication<C, M, R> {
fluent_builders::UpdateApplication::new(self.handle.clone())
}
}
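// Illustrative call through the fluent API (client construction elided; the
// field values are placeholders):
//
// let resp = client
//     .create_application()
//     .application_name("my-app")
//     .role_arn("arn:aws:iam::123456789012:role/AWSIotFleetHub_example")
//     .send()
//     .await?;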
pub mod fluent_builders {
//!
//! Utilities to ergonomically construct a request to the service.
//!
//! Fluent builders are created through the [`Client`](crate::client::Client) by calling
    //! one of its operation methods. After parameters are set using the builder methods,
//! the `send` method can be called to initiate the request.
//!
/// Fluent builder constructing a request to `CreateApplication`.
///
/// <p>Creates a Fleet Hub for AWS IoT Device Management web application.</p>
/// <note>
/// <p>Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct CreateApplication<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_application_input::Builder,
}
impl<C, M, R> CreateApplication<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateApplication`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateApplicationOutput,
aws_smithy_http::result::SdkError<crate::error::CreateApplicationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateApplicationInputOperationOutputAlias,
crate::output::CreateApplicationOutput,
crate::error::CreateApplicationError,
crate::input::CreateApplicationInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The name of the web application.</p>
pub fn application_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.application_name(inp);
self
}
/// <p>The name of the web application.</p>
pub fn set_application_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_application_name(input);
self
}
/// <p>An optional description of the web application.</p>
pub fn application_description(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.application_description(inp);
self
}
/// <p>An optional description of the web application.</p>
pub fn set_application_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_application_description(input);
self
}
/// <p>A unique case-sensitive identifier that you can provide to ensure the idempotency of the request.
/// Don't reuse this client token if a new idempotent request is required.</p>
pub fn client_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.client_token(inp);
self
}
/// <p>A unique case-sensitive identifier that you can provide to ensure the idempotency of the request.
/// Don't reuse this client token if a new idempotent request is required.</p>
pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_client_token(input);
self
}
/// <p>The ARN of the role that the web application assumes when it interacts with AWS IoT Core.</p>
/// <note>
/// <p>The name of the role must be in the form <code>AWSIotFleetHub_<i>random_string</i>
/// </code>.</p>
/// </note>
pub fn role_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.role_arn(inp);
self
}
/// <p>The ARN of the role that the web application assumes when it interacts with AWS IoT Core.</p>
/// <note>
/// <p>The name of the role must be in the form <code>AWSIotFleetHub_<i>random_string</i>
/// </code>.</p>
/// </note>
pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_role_arn(input);
self
}
/// Adds a key-value pair to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>A set of key/value pairs that you can use to manage the web application resource.</p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k, v);
self
}
/// <p>A set of key/value pairs that you can use to manage the web application resource.</p>
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
/// Fluent builder constructing a request to `DeleteApplication`.
///
/// <p>Deletes a Fleet Hub for AWS IoT Device Management web application.</p>
/// <note>
/// <p>Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct DeleteApplication<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::delete_application_input::Builder,
}
impl<C, M, R> DeleteApplication<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DeleteApplication`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteApplicationOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteApplicationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteApplicationInputOperationOutputAlias,
crate::output::DeleteApplicationOutput,
crate::error::DeleteApplicationError,
crate::input::DeleteApplicationInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The unique Id of the web application.</p>
pub fn application_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.application_id(inp);
self
}
/// <p>The unique Id of the web application.</p>
pub fn set_application_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_application_id(input);
self
}
/// <p>A unique case-sensitive identifier that you can provide to ensure the idempotency of the request.
/// Don't reuse this client token if a new idempotent request is required.</p>
pub fn client_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.client_token(inp);
self
}
/// <p>A unique case-sensitive identifier that you can provide to ensure the idempotency of the request.
/// Don't reuse this client token if a new idempotent request is required.</p>
pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_client_token(input);
self
}
}
/// Fluent builder constructing a request to `DescribeApplication`.
///
/// <p>Gets information about a Fleet Hub for AWS IoT Device Management web application.</p>
/// <note>
/// <p>Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct DescribeApplication<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_application_input::Builder,
}
impl<C, M, R> DescribeApplication<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeApplication`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeApplicationOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeApplicationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeApplicationInputOperationOutputAlias,
crate::output::DescribeApplicationOutput,
crate::error::DescribeApplicationError,
crate::input::DescribeApplicationInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The unique Id of the web application.</p>
pub fn application_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.application_id(inp);
self
}
/// <p>The unique Id of the web application.</p>
pub fn set_application_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_application_id(input);
self
}
}
/// Fluent builder constructing a request to `ListApplications`.
///
/// <p>Gets a list of Fleet Hub for AWS IoT Device Management web applications for the current account.</p>
/// <note>
/// <p>Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct ListApplications<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_applications_input::Builder,
}
impl<C, M, R> ListApplications<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListApplications`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListApplicationsOutput,
aws_smithy_http::result::SdkError<crate::error::ListApplicationsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListApplicationsInputOperationOutputAlias,
crate::output::ListApplicationsOutput,
crate::error::ListApplicationsError,
crate::input::ListApplicationsInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>A token used to get the next set of results.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>A token used to get the next set of results.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
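// A minimal pagination sketch: the `next_token` from each page is fed back
// into the next request until the service stops returning one.
// `list_applications()` is the assumed accessor on the client, and the
// public `next_token` field on the output is likewise an assumption:
//
//     let mut token: Option<String> = None;
//     loop {
//         let mut req = client.list_applications();
//         if let Some(t) = &token {
//             req = req.next_token(t.as_str());
//         }
//         let resp = req.send().await?;
//         // ... consume the page of application summaries here ...
//         token = resp.next_token;
//         if token.is_none() {
//             break;
//         }
//     }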
/// Fluent builder constructing a request to `ListTagsForResource`.
///
/// <p>Lists the tags for the specified resource.</p>
/// <note>
/// <p>Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct ListTagsForResource<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_tags_for_resource_input::Builder,
}
impl<C, M, R> ListTagsForResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListTagsForResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListTagsForResourceInputOperationOutputAlias,
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
crate::input::ListTagsForResourceInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ARN of the resource.</p>
pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(inp);
self
}
/// <p>The ARN of the resource.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
}
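// A minimal usage sketch, with `list_tags_for_resource()` as the assumed
// accessor and the `tags` field on the output likewise assumed; the ARN is
// illustrative:
//
//     let tags = client
//         .list_tags_for_resource()
//         .resource_arn("arn:aws:iotfleethub:us-east-1:123456789012:application/example")
//         .send()
//         .await?
//         .tags;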
/// Fluent builder constructing a request to `TagResource`.
///
/// <p>Adds to or modifies the tags of the specified resource. Tags are metadata which can be used to manage a resource.</p>
/// <note>
/// <p>Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct TagResource<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::tag_resource_input::Builder,
}
impl<C, M, R> TagResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `TagResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::TagResourceOutput,
aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::TagResourceInputOperationOutputAlias,
crate::output::TagResourceOutput,
crate::error::TagResourceError,
crate::input::TagResourceInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ARN of the resource.</p>
pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(inp);
self
}
/// <p>The ARN of the resource.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// Adds a key-value pair to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>The new or modified tags for the resource.</p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k, v);
self
}
/// <p>The new or modified tags for the resource.</p>
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
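// A minimal usage sketch: each `tags(k, v)` call appends one key-value pair,
// while `set_tags` would replace the whole map. `tag_resource()` is the
// assumed accessor; the ARN and tag values are illustrative:
//
//     client
//         .tag_resource()
//         .resource_arn("arn:aws:iotfleethub:us-east-1:123456789012:application/example")
//         .tags("team", "devices")
//         .tags("env", "prod")
//         .send()
//         .await?;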
/// Fluent builder constructing a request to `UntagResource`.
///
/// <p>Removes the specified tags (metadata) from the resource.</p>
/// <note>
/// <p>Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct UntagResource<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::untag_resource_input::Builder,
}
impl<C, M, R> UntagResource<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UntagResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UntagResourceOutput,
aws_smithy_http::result::SdkError<crate::error::UntagResourceError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UntagResourceInputOperationOutputAlias,
crate::output::UntagResourceOutput,
crate::error::UntagResourceError,
crate::input::UntagResourceInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ARN of the resource.</p>
pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(inp);
self
}
/// <p>The ARN of the resource.</p>
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// Appends an item to `tagKeys`.
///
/// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys).
///
/// <p>A list of the keys of the tags to be removed from the resource.</p>
pub fn tag_keys(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.tag_keys(inp);
self
}
/// <p>A list of the keys of the tags to be removed from the resource.</p>
pub fn set_tag_keys(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_tag_keys(input);
self
}
}
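// A minimal usage sketch mirroring `TagResource`: `tag_keys` appends one key
// per call, so removing several tags chains the setter. `untag_resource()` is
// the assumed accessor; the ARN and keys are illustrative:
//
//     client
//         .untag_resource()
//         .resource_arn("arn:aws:iotfleethub:us-east-1:123456789012:application/example")
//         .tag_keys("team")
//         .tag_keys("env")
//         .send()
//         .await?;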
/// Fluent builder constructing a request to `UpdateApplication`.
///
/// <p>Updates information about a Fleet Hub for AWS IoT Device Management web application.</p>
/// <note>
/// <p>Fleet Hub for AWS IoT Device Management is in public preview and is subject to change.</p>
/// </note>
#[derive(std::fmt::Debug)]
pub struct UpdateApplication<
C = aws_smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_application_input::Builder,
}
impl<C, M, R> UpdateApplication<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateApplication`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateApplicationOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateApplicationError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateApplicationInputOperationOutputAlias,
crate::output::UpdateApplicationOutput,
crate::error::UpdateApplicationError,
crate::input::UpdateApplicationInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The unique Id of the web application.</p>
pub fn application_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.application_id(inp);
self
}
/// <p>The unique Id of the web application.</p>
pub fn set_application_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_application_id(input);
self
}
/// <p>The name of the web application.</p>
pub fn application_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.application_name(inp);
self
}
/// <p>The name of the web application.</p>
pub fn set_application_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_application_name(input);
self
}
/// <p>An optional description of the web application.</p>
pub fn application_description(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.application_description(inp);
self
}
/// <p>An optional description of the web application.</p>
pub fn set_application_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_application_description(input);
self
}
/// <p>A unique case-sensitive identifier that you can provide to ensure the idempotency of the request.
/// Don't reuse this client token if a new idempotent request is required.</p>
pub fn client_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.client_token(inp);
self
}
/// <p>A unique case-sensitive identifier that you can provide to ensure the idempotency of the request.
/// Don't reuse this client token if a new idempotent request is required.</p>
pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_client_token(input);
self
}
}
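// A minimal usage sketch: the application id is required, and a fresh client
// token keeps a retried update idempotent. `update_application()` is the
// assumed accessor, the `uuid` crate is an assumed dependency, and the values
// are illustrative:
//
//     client
//         .update_application()
//         .application_id("example-app-id")
//         .application_name("renamed-app")
//         .client_token(uuid::Uuid::new_v4().to_string())
//         .send()
//         .await?;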
}
impl<C> Client<C, aws_hyper::AwsMiddleware, aws_smithy_client::retry::Standard> {
/// Creates a client with the given service config and connector override.
pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
let sleep_impl = conf.sleep_impl.clone();
let mut client = aws_hyper::Client::new(conn)
.with_retry_config(retry_config.into())
.with_timeout_config(timeout_config);
client.set_sleep_impl(sleep_impl);
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
impl
Client<
aws_smithy_client::erase::DynConnector,
aws_hyper::AwsMiddleware,
aws_smithy_client::retry::Standard,
>
{
/// Creates a new client from a shared config.
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn new(config: &aws_types::config::Config) -> Self {
Self::from_conf(config.into())
}
/// Creates a new client from the service [`Config`](crate::Config).
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_conf(conf: crate::Config) -> Self {
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
let sleep_impl = conf.sleep_impl.clone();
let mut client = aws_hyper::Client::https()
.with_retry_config(retry_config.into())
.with_timeout_config(timeout_config);
client.set_sleep_impl(sleep_impl);
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
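// A minimal construction sketch tying the pieces above together: a client is
// typically built from a shared AWS config, after which the fluent builders
// are reachable as methods on it. `aws_config::load_from_env` is assumed to
// be available in the consuming crate:
//
//     let shared_config = aws_config::load_from_env().await;
//     let client = Client::new(&shared_config);
//     let apps = client.list_applications().send().await?;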
| 42.38207 | 128 | 0.591408 |
1669a18aa515bc66cb9ed42071d2dcfac769e958
| 1,152 |
extern crate futures;
mod support;
use futures::stream;
use support::*;
#[test]
fn unfold1() {
let mut stream = stream::unfold(0, |state| {
if state <= 2 {
let res: Result<_,()> = Ok((state * 2, state + 1));
Some(delay_future(res))
} else {
None
}
});
// Creates the future with the closure
// Not ready (delayed future)
sassert_empty(&mut stream);
// future is ready, yields the item
sassert_next(&mut stream, 0);
// Repeat
sassert_empty(&mut stream);
sassert_next(&mut stream, 2);
sassert_empty(&mut stream);
sassert_next(&mut stream, 4);
// no more items
sassert_done(&mut stream);
}
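// For reference, the contract being exercised above: `stream::unfold(seed, f)`
// calls `f(state)` for an optional future; that future resolves to
// `(item, next_state)`, and returning `None` from the closure ends the stream.
// A synchronous sketch of the same state threading (no delayed futures):
//
//     let mut state = 0;
//     while state <= 2 {
//         let _item = state * 2; // items yielded: 0, 2, 4
//         state += 1;
//     }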
#[test]
fn unfold_err1() {
let mut stream = stream::unfold(0, |state| {
if state <= 2 {
Some(Ok((state * 2, state + 1)))
} else {
Some(Err(-1))
}
});
sassert_next(&mut stream, 0);
sassert_next(&mut stream, 2);
sassert_next(&mut stream, 4);
sassert_err(&mut stream, -1);
    // An error was generated by the stream; it will then finish
sassert_done(&mut stream);
}
| 21.735849 | 64 | 0.559896 |
560d71abdc50ab3b3a4bf5d4dbcb41a3d78df16c
| 5,715 |
//! An LMDB-backed trie store.
//!
//! # Usage
//!
//! ```
//! # extern crate casperlabs_engine_storage;
//! # extern crate contract_ffi;
//! # extern crate lmdb;
//! # extern crate engine_shared;
//! # extern crate tempfile;
//! use casperlabs_engine_storage::store::Store;
//! use casperlabs_engine_storage::transaction_source::{Transaction, TransactionSource};
//! use casperlabs_engine_storage::transaction_source::lmdb::LmdbEnvironment;
//! use casperlabs_engine_storage::trie::{Pointer, PointerBlock, Trie};
//! use casperlabs_engine_storage::trie_store::TrieStore;
//! use casperlabs_engine_storage::trie_store::lmdb::LmdbTrieStore;
//! use contract_ffi::bytesrepr::ToBytes;
//! use lmdb::DatabaseFlags;
//! use engine_shared::newtypes::Blake2bHash;
//! use tempfile::tempdir;
//!
//! // Create some leaves
//! let leaf_1 = Trie::Leaf { key: vec![0u8, 0, 0], value: b"val_1".to_vec() };
//! let leaf_2 = Trie::Leaf { key: vec![1u8, 0, 0], value: b"val_2".to_vec() };
//!
//! // Get their hashes
//! let leaf_1_hash = Blake2bHash::new(&leaf_1.to_bytes().unwrap());
//! let leaf_2_hash = Blake2bHash::new(&leaf_2.to_bytes().unwrap());
//!
//! // Create a node
//! let node: Trie<Vec<u8>, Vec<u8>> = {
//! let mut pointer_block = PointerBlock::new();
//! pointer_block[0] = Some(Pointer::LeafPointer(leaf_1_hash));
//! pointer_block[1] = Some(Pointer::LeafPointer(leaf_2_hash));
//! let pointer_block = Box::new(pointer_block);
//! Trie::Node { pointer_block }
//! };
//!
//! // Get its hash
//! let node_hash = Blake2bHash::new(&node.to_bytes().unwrap());
//!
//! // Create the environment and the store. For both the in-memory and
//! // LMDB-backed implementations, the environment is the source of
//! // transactions.
//! let tmp_dir = tempdir().unwrap();
//! let map_size = 4096 * 2560; // map size should be a multiple of OS page size
//! let env = LmdbEnvironment::new(&tmp_dir.path().to_path_buf(), map_size).unwrap();
//! let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap();
//!
//! // First let's create a read-write transaction, persist the values, but
//! // forget to commit the transaction.
//! {
//! // Create a read-write transaction
//! let mut txn = env.create_read_write_txn().unwrap();
//!
//! // Put the values in the store
//! store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap();
//! store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap();
//! store.put(&mut txn, &node_hash, &node).unwrap();
//!
//! // Here we forget to commit the transaction before it goes out of scope
//! }
//!
//! // Now let's check to see if the values were stored
//! {
//! // Create a read transaction
//! let txn = env.create_read_txn().unwrap();
//!
//! // Observe that nothing has been persisted to the store
//! for hash in vec![&leaf_1_hash, &leaf_2_hash, &node_hash].iter() {
//! // We need to use a type annotation here to help the compiler choose
//! // a suitable FromBytes instance
//! let maybe_trie: Option<Trie<Vec<u8>, Vec<u8>>> = store.get(&txn, hash).unwrap();
//! assert!(maybe_trie.is_none());
//! }
//!
//! // Commit the read transaction. Not strictly necessary, but better to be hygienic.
//! txn.commit().unwrap();
//! }
//!
//! // Now let's try that again, remembering to commit the transaction this time
//! {
//! // Create a read-write transaction
//! let mut txn = env.create_read_write_txn().unwrap();
//!
//! // Put the values in the store
//! store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap();
//! store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap();
//! store.put(&mut txn, &node_hash, &node).unwrap();
//!
//! // Commit the transaction.
//! txn.commit().unwrap();
//! }
//!
//! // Now let's check to see if the values were stored again
//! {
//! // Create a read transaction
//! let txn = env.create_read_txn().unwrap();
//!
//! // Get the values in the store
//! assert_eq!(Some(leaf_1), store.get(&txn, &leaf_1_hash).unwrap());
//! assert_eq!(Some(leaf_2), store.get(&txn, &leaf_2_hash).unwrap());
//! assert_eq!(Some(node), store.get(&txn, &node_hash).unwrap());
//!
//! // Commit the read transaction.
//! txn.commit().unwrap();
//! }
//!
//! tmp_dir.close().unwrap();
//! ```
use lmdb::{Database, DatabaseFlags};
use contract_ffi::bytesrepr::{FromBytes, ToBytes};
use engine_shared::newtypes::Blake2bHash;
use crate::store::Store;
use crate::transaction_source::lmdb::LmdbEnvironment;
use crate::trie::Trie;
use crate::trie_store::TrieStore;
use crate::{error, trie_store};
/// An LMDB-backed trie store.
///
/// Wraps [`lmdb::Database`].
#[derive(Debug, Clone)]
pub struct LmdbTrieStore {
db: Database,
}
impl LmdbTrieStore {
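    /// Creates a database in the given environment, deriving its name from
    /// the default trie store name plus the optional `maybe_name` suffix.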
pub fn new(
env: &LmdbEnvironment,
maybe_name: Option<&str>,
flags: DatabaseFlags,
) -> Result<Self, error::Error> {
let name = maybe_name
.map(|name| format!("{}-{}", trie_store::NAME, name))
.unwrap_or_else(|| String::from(trie_store::NAME));
let db = env.env().create_db(Some(&name), flags)?;
Ok(LmdbTrieStore { db })
}
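    /// Opens an existing database in the given environment.
    ///
    /// Unlike [`LmdbTrieStore::new`], the name is passed to LMDB as-is,
    /// without the default trie store name prefix.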
pub fn open(env: &LmdbEnvironment, name: Option<&str>) -> Result<Self, error::Error> {
let db = env.env().open_db(name)?;
Ok(LmdbTrieStore { db })
}
}
impl<K, V> Store<Blake2bHash, Trie<K, V>> for LmdbTrieStore
where
K: ToBytes + FromBytes,
V: ToBytes + FromBytes,
{
type Error = error::Error;
type Handle = Database;
fn handle(&self) -> Self::Handle {
self.db
}
}
impl<K, V> TrieStore<K, V> for LmdbTrieStore
where
K: ToBytes + FromBytes,
V: ToBytes + FromBytes,
{
}
| 33.617647 | 92 | 0.630096 |
f8a877f4ca72ceecda2b4664dde0c2193bebceb0
| 1,149 |
mod servicefactory;
use actix_web::{get, web, App, HttpServer, Responder};
use servicefactory::{ServiceFactory, ServiceFactoryOptions};
#[get("/{id}")]
async fn index(
web::Path(id): web::Path<i32>,
service_factory_options: web::Data<ServiceFactoryOptions<'_>>,
) -> impl Responder {
let mut bll = ServiceFactory::new(service_factory_options.get_ref()).create_bll();
format!("Hello {}! id:{}", bll.get_user_by_id(id).unwrap().name, id)
}
#[get("/{id}/{name}")]
async fn create(
web::Path((id, name)): web::Path<(i32, String)>,
service_factory_options: web::Data<ServiceFactoryOptions<'_>>,
) -> impl Responder {
let mut bll = ServiceFactory::new(service_factory_options.get_ref()).create_bll();
bll.create_user(id, name.clone()).unwrap();
format!("Hello {}! id:{}. Created.", name, id)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
HttpServer::new(|| {
App::new()
.service(index)
.service(create)
.data(ServiceFactoryOptions {
db_name: "data.sqlite",
})
})
.bind("127.0.0.1:8080")?
.run()
.await
}
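// A rough manual check of the two routes above (paths come straight from the
// `#[get(...)]` attributes; the id and name are illustrative): a GET to
// `/1/alice` creates user 1 named "alice" and responds with
// "Hello alice! id:1. Created.", after which a GET to `/1` responds with
// "Hello alice! id:1".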
| 27.357143 | 86 | 0.610966 |
18290ca3fd22850a9f4ef51a325a6197d5118d0a
| 1,968 |
// Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_pipeline_walker() -> anyhow::Result<()> {
use pretty_assertions::assert_eq;
use crate::pipelines::processors::*;
use crate::sql::*;
let ctx = crate::tests::try_create_context()?;
let plan = PlanParser::create(ctx.clone()).build_from_sql(
"select sum(number+1)+2 as sumx from numbers_mt(80000) where (number+1)=4 limit 1"
)?;
let pipeline = PipelineBuilder::create(ctx, plan).build()?;
// PreOrder.
{
let mut actual: Vec<String> = vec![];
pipeline.walk_preorder(|pipe| {
let processor = pipe.processor_by_index(0).clone();
actual.push(processor.name().to_string() + " x " + &*format!("{}", pipe.nums()));
Ok(true)
})?;
let expect = vec![
"LimitTransform x 1".to_string(),
"AggregatorFinalTransform x 1".to_string(),
"MergeProcessor x 1".to_string(),
"AggregatorPartialTransform x 8".to_string(),
"FilterTransform x 8".to_string(),
"SourceTransform x 8".to_string(),
];
assert_eq!(expect, actual);
}
// PostOrder.
{
let mut actual: Vec<String> = vec![];
pipeline.walk_postorder(|pipe| {
let processor = pipe.processor_by_index(0).clone();
actual.push(processor.name().to_string() + " x " + &*format!("{}", pipe.nums()));
Ok(true)
})?;
let expect = vec![
"SourceTransform x 8".to_string(),
"FilterTransform x 8".to_string(),
"AggregatorPartialTransform x 8".to_string(),
"MergeProcessor x 1".to_string(),
"AggregatorFinalTransform x 1".to_string(),
"LimitTransform x 1".to_string(),
];
assert_eq!(expect, actual);
}
Ok(())
}
| 32.8 | 93 | 0.566057 |
d7121ef769078a43e35d0d738369b47024d2f58e
| 800 |
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-pretty
// Testing that the parser for each file tracks its modules
// and paths independently. The load_another_mod module should
// not try to reuse the 'mod_dir_simple' path.
mod mod_dir_simple {
pub mod load_another_mod;
}
pub fn main() {
assert_eq!(mod_dir_simple::load_another_mod::test::foo(), 10);
}
| 33.333333 | 69 | 0.74125 |
d950235c3201e7ac53975a1f9273e6e89e9e5b15
| 21,377 |
//! This module implements the global `Object` object.
//!
//! The `Object` class represents one of JavaScript's data types.
//!
//! It is used to store various keyed collections and more complex entities.
//! Objects can be created using the `Object()` constructor or the
//! object initializer / literal syntax.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-objects
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object
use crate::{
builtins::{
function::Function,
property::Property,
value::{same_value, ResultValue, Value, ValueData},
},
exec::Interpreter,
};
use gc::{unsafe_empty_trace, Finalize, Trace};
use rustc_hash::FxHashMap;
use std::{
borrow::Borrow,
fmt::{self, Debug, Display, Error, Formatter},
ops::Deref,
};
pub use internal_methods_trait::ObjectInternalMethods;
pub use internal_state::{InternalState, InternalStateCell};
pub mod internal_methods_trait;
mod internal_state;
/// Static `prototype`, usually set on constructors as a key to point to their respective prototype object.
pub static PROTOTYPE: &str = "prototype";
/// Static `__proto__`, usually set on Object instances as a key to point to their respective prototype object.
pub static INSTANCE_PROTOTYPE: &str = "__proto__";
/// The internal representation of an JavaScript object.
#[derive(Trace, Finalize, Clone)]
pub struct Object {
/// The type of the object.
pub kind: ObjectKind,
    /// Internal Slots
pub internal_slots: FxHashMap<String, Value>,
/// Properties
pub properties: FxHashMap<String, Property>,
/// Symbol Properties
pub sym_properties: FxHashMap<i32, Property>,
/// Some rust object that stores internal state
pub state: Option<InternalStateCell>,
/// [[Call]]
pub call: Option<Function>,
/// [[Construct]]
pub construct: Option<Function>,
}
impl Debug for Object {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
writeln!(f, "{{")?;
writeln!(f, "\tkind: {}", self.kind)?;
writeln!(f, "\tstate: {:?}", self.state)?;
writeln!(f, "\tcall: {:?}", self.call)?;
writeln!(f, "\tconstruct: {:?}", self.construct)?;
writeln!(f, "\tproperties: {{")?;
for (key, _) in self.properties.iter() {
writeln!(f, "\t\t{}", key)?;
}
writeln!(f, "\t }}")?;
write!(f, "}}")
}
}
impl ObjectInternalMethods for Object {
/// `Object.setPropertyOf(obj, prototype)`
///
/// This method sets the prototype (i.e., the internal `[[Prototype]]` property)
/// of a specified object to another object or `null`.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-ordinary-object-internal-methods-and-internal-slots-setprototypeof-v
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/setPrototypeOf
fn set_prototype_of(&mut self, val: Value) -> bool {
debug_assert!(val.is_object() || val.is_null());
let current = self.get_internal_slot(PROTOTYPE);
if same_value(¤t, &val, false) {
return true;
}
let extensible = self.get_internal_slot("extensible");
if extensible.is_null() {
return false;
}
let mut p = val.clone();
let mut done = false;
while !done {
if p.is_null() {
done = true
} else if same_value(&Value::from(self.clone()), &p, false) {
return false;
} else {
p = p.get_internal_slot(PROTOTYPE);
}
}
self.set_internal_slot(PROTOTYPE, val);
true
}
/// Helper function for property insertion.
fn insert_property(&mut self, name: String, p: Property) {
self.properties.insert(name, p);
}
/// Helper function for property removal.
fn remove_property(&mut self, name: &str) {
self.properties.remove(name);
}
/// Helper function to set an internal slot
fn set_internal_slot(&mut self, name: &str, val: Value) {
self.internal_slots.insert(name.to_string(), val);
}
/// Helper function to get an immutable internal slot or Null
fn get_internal_slot(&self, name: &str) -> Value {
match self.internal_slots.get(name) {
Some(v) => v.clone(),
None => Value::null(),
}
}
/// The specification returns a Property Descriptor or Undefined.
///
    /// These are two separate types, which we can't directly model here, so an
    /// empty `Property` stands in for *undefined*.
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-ordinary-object-internal-methods-and-internal-slots-getownproperty-p
fn get_own_property(&self, prop: &Value) -> Property {
debug_assert!(Property::is_property_key(prop));
// Prop could either be a String or Symbol
match *(*prop) {
ValueData::String(ref st) => {
match self.properties.get(st) {
// If O does not have an own property with key P, return undefined.
// In this case we return a new empty Property
None => Property::default(),
Some(ref v) => {
let mut d = Property::default();
if v.is_data_descriptor() {
d.value = v.value.clone();
d.writable = v.writable;
} else {
debug_assert!(v.is_accessor_descriptor());
d.get = v.get.clone();
d.set = v.set.clone();
}
d.enumerable = v.enumerable;
d.configurable = v.configurable;
d
}
}
}
ValueData::Symbol(ref sym) => {
let sym_id = (**sym)
.borrow()
.get_internal_slot("SymbolData")
.to_string()
.parse::<i32>()
.expect("Could not get Symbol ID");
match self.sym_properties.get(&sym_id) {
// If O does not have an own property with key P, return undefined.
// In this case we return a new empty Property
None => Property::default(),
Some(ref v) => {
let mut d = Property::default();
if v.is_data_descriptor() {
d.value = v.value.clone();
d.writable = v.writable;
} else {
debug_assert!(v.is_accessor_descriptor());
d.get = v.get.clone();
d.set = v.set.clone();
}
d.enumerable = v.enumerable;
d.configurable = v.configurable;
d
}
}
}
_ => Property::default(),
}
}
/// Define an own property.
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-ordinary-object-internal-methods-and-internal-slots-defineownproperty-p-desc
#[allow(clippy::option_unwrap_used)]
fn define_own_property(&mut self, property_key: String, desc: Property) -> bool {
let mut current = self.get_own_property(&Value::from(property_key.to_string()));
let extensible = self.is_extensible();
// https://tc39.es/ecma262/#sec-validateandapplypropertydescriptor
        // There currently isn't a property, let's create a new one
if current.value.is_none() || current.value.as_ref().expect("failed").is_undefined() {
if !extensible {
return false;
}
if desc.value.is_some() && desc.value.clone().unwrap().is_symbol() {
let sym_id = desc
.value
.clone()
.unwrap()
.to_string()
.parse::<i32>()
.expect("parsing failed");
self.sym_properties.insert(sym_id, desc);
} else {
self.properties.insert(property_key, desc);
}
return true;
}
// If every field is absent we don't need to set anything
if desc.is_none() {
return true;
}
// 4
if !current.configurable.unwrap_or(false) {
if desc.configurable.is_some() && desc.configurable.unwrap() {
return false;
}
if desc.enumerable.is_some()
&& (desc.enumerable.as_ref().unwrap() != current.enumerable.as_ref().unwrap())
{
return false;
}
}
// 5
if desc.is_generic_descriptor() {
// 6
} else if current.is_data_descriptor() != desc.is_data_descriptor() {
// a
if !current.configurable.unwrap() {
return false;
}
// b
if current.is_data_descriptor() {
// Convert to accessor
current.value = None;
current.writable = None;
} else {
// c
// convert to data
current.get = None;
current.set = None;
}
if current.value.is_some() && current.value.clone().unwrap().is_symbol() {
let sym_id = current
.value
.clone()
.unwrap()
.to_string()
.parse::<i32>()
.expect("parsing failed");
self.sym_properties.insert(sym_id, current);
} else {
self.properties.insert(property_key.clone(), current);
}
// 7
} else if current.is_data_descriptor() && desc.is_data_descriptor() {
// a
if !current.configurable.unwrap() && !current.writable.unwrap() {
if desc.writable.is_some() && desc.writable.unwrap() {
return false;
}
if desc.value.is_some()
&& !same_value(
&desc.value.clone().unwrap(),
¤t.value.clone().unwrap(),
false,
)
{
return false;
}
return true;
}
// 8
} else {
if !current.configurable.unwrap() {
if desc.set.is_some()
&& !same_value(
&desc.set.clone().unwrap(),
¤t.set.clone().unwrap(),
false,
)
{
return false;
}
if desc.get.is_some()
&& !same_value(
&desc.get.clone().unwrap(),
¤t.get.clone().unwrap(),
false,
)
{
return false;
}
}
return true;
}
// 9
self.properties.insert(property_key, desc);
true
}
}
impl Object {
/// Return a new ObjectData struct, with `kind` set to Ordinary
pub fn default() -> Self {
let mut object = Self {
kind: ObjectKind::Ordinary,
internal_slots: FxHashMap::default(),
properties: FxHashMap::default(),
sym_properties: FxHashMap::default(),
state: None,
call: None,
construct: None,
};
object.set_internal_slot("extensible", Value::from(true));
object
}
/// Return a new ObjectData struct, with `kind` set to Ordinary
pub fn function() -> Self {
let mut object = Self {
kind: ObjectKind::Function,
internal_slots: FxHashMap::default(),
properties: FxHashMap::default(),
sym_properties: FxHashMap::default(),
state: None,
call: None,
construct: None,
};
object.set_internal_slot("extensible", Value::from(true));
object
}
/// ObjectCreate is used to specify the runtime creation of new ordinary objects.
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-objectcreate
// TODO: proto should be a &Value here
pub fn create(proto: Value) -> Self {
let mut obj = Self::default();
obj.internal_slots
.insert(INSTANCE_PROTOTYPE.to_string(), proto);
obj.internal_slots
.insert("extensible".to_string(), Value::from(true));
obj
}
/// Set [[Call]]
pub fn set_call(&mut self, val: Function) {
self.call = Some(val);
}
/// set [[Construct]]
pub fn set_construct(&mut self, val: Function) {
self.construct = Some(val);
}
/// Return a new Boolean object whose `[[BooleanData]]` internal slot is set to argument.
fn from_boolean(argument: &Value) -> Self {
let mut obj = Self {
kind: ObjectKind::Boolean,
internal_slots: FxHashMap::default(),
properties: FxHashMap::default(),
sym_properties: FxHashMap::default(),
state: None,
call: None,
construct: None,
};
obj.internal_slots
.insert("BooleanData".to_string(), argument.clone());
obj
}
/// Return a new `Number` object whose `[[NumberData]]` internal slot is set to argument.
fn from_number(argument: &Value) -> Self {
let mut obj = Self {
kind: ObjectKind::Number,
internal_slots: FxHashMap::default(),
properties: FxHashMap::default(),
sym_properties: FxHashMap::default(),
state: None,
call: None,
construct: None,
};
obj.internal_slots
.insert("NumberData".to_string(), argument.clone());
obj
}
/// Return a new `String` object whose `[[StringData]]` internal slot is set to argument.
fn from_string(argument: &Value) -> Self {
let mut obj = Self {
kind: ObjectKind::String,
internal_slots: FxHashMap::default(),
properties: FxHashMap::default(),
sym_properties: FxHashMap::default(),
state: None,
call: None,
construct: None,
};
obj.internal_slots
.insert("StringData".to_string(), argument.clone());
obj
}
/// Converts the `Value` to an `Object` type.
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-toobject
pub fn from(value: &Value) -> Result<Self, ()> {
match *value.deref().borrow() {
ValueData::Boolean(_) => Ok(Self::from_boolean(value)),
ValueData::Rational(_) => Ok(Self::from_number(value)),
ValueData::String(_) => Ok(Self::from_string(value)),
ValueData::Object(ref obj) => Ok((*obj).deref().borrow().clone()),
_ => Err(()),
}
}
/// It determines if Object is a callable function with a [[Call]] internal method.
///
/// More information:
/// - [EcmaScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-iscallable
pub fn is_callable(&self) -> bool {
self.call.is_some()
}
/// It determines if Object is a function object with a [[Construct]] internal method.
///
/// More information:
/// - [EcmaScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-isconstructor
pub fn is_constructor(&self) -> bool {
self.construct.is_some()
}
}
/// Defines the different types of objects.
#[derive(Finalize, Debug, Copy, Clone, Eq, PartialEq)]
pub enum ObjectKind {
Function,
Array,
String,
Symbol,
Error,
Ordinary,
Boolean,
Number,
}
impl Display for ObjectKind {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
write!(
f,
"{}",
match self {
Self::Function => "Function",
Self::Array => "Array",
Self::String => "String",
Self::Symbol => "Symbol",
Self::Error => "Error",
Self::Ordinary => "Ordinary",
Self::Boolean => "Boolean",
Self::Number => "Number",
}
)
}
}
/// `Trace` implementation for `ObjectKind`.
///
/// This is indeed safe, but we need to mark this as an empty trace because neither
/// `NativeFunctionData` nor `Node` holds any GC'd objects, but Gc doesn't know that. So we need to
/// signal it manually. `rust-gc` does not have a `Trace` implementation for `fn(_, _, _)`.
///
/// <https://github.com/Manishearth/rust-gc/blob/master/gc/src/trace.rs>
/// Waiting on <https://github.com/Manishearth/rust-gc/issues/87> until we can derive Copy
unsafe impl Trace for ObjectKind {
unsafe_empty_trace!();
}
/// Create a new object.
pub fn make_object(_: &mut Value, args: &[Value], ctx: &mut Interpreter) -> ResultValue {
if let Some(arg) = args.get(0) {
if !arg.is_null_or_undefined() {
return Ok(Value::object(Object::from(arg).unwrap()));
}
}
let global = &ctx.realm.global_obj;
let object = Value::new_object(Some(global));
Ok(object)
}
/// Get the `prototype` of an object.
pub fn get_prototype_of(_: &mut Value, args: &[Value], _: &mut Interpreter) -> ResultValue {
let obj = args.get(0).expect("Cannot get object");
Ok(obj.get_field_slice(INSTANCE_PROTOTYPE))
}
/// Set the `prototype` of an object.
pub fn set_prototype_of(_: &mut Value, args: &[Value], _: &mut Interpreter) -> ResultValue {
let obj = args.get(0).expect("Cannot get object").clone();
let proto = args.get(1).expect("Cannot get object").clone();
obj.set_internal_slot(INSTANCE_PROTOTYPE, proto);
Ok(obj)
}
/// Define a property in an object
pub fn define_property(_: &mut Value, args: &[Value], _: &mut Interpreter) -> ResultValue {
let obj = args.get(0).expect("Cannot get object");
let prop = String::from(args.get(1).expect("Cannot get object"));
let desc = Property::from(args.get(2).expect("Cannot get object"));
obj.set_property(prop, desc);
Ok(Value::undefined())
}
/// `Object.prototype.toString()`
///
/// This method returns a string representing the object.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-object.prototype.tostring
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/toString
pub fn to_string(this: &mut Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
Ok(Value::from(this.to_string()))
}
/// `Object.prototype.hasOwnProperty( property )`
///
/// This method returns a boolean indicating whether the object has the specified property
/// as its own property (as opposed to inheriting it).
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-object.prototype.hasownproperty
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/hasOwnProperty
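///
/// For example, in JavaScript terms, `({a: 1}).hasOwnProperty("a")` is `true`,
/// while `({a: 1}).hasOwnProperty("toString")` is `false` because `toString`
/// is inherited from the prototype.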
pub fn has_own_property(this: &mut Value, args: &[Value], _: &mut Interpreter) -> ResultValue {
let prop = if args.is_empty() {
None
} else {
Some(String::from(args.get(0).expect("Cannot get object")))
};
Ok(Value::from(
prop.is_some()
&& this
.get_property(&prop.expect("Cannot get object"))
.is_some(),
))
}
/// Create a new `Object` object.
pub fn create(global: &Value) -> Value {
let prototype = Value::new_object(None);
make_builtin_fn!(has_own_property, named "hasOwnProperty", of prototype);
make_builtin_fn!(to_string, named "toString", of prototype);
let object = make_constructor_fn!(make_object, make_object, global, prototype);
object.set_field_slice("length", Value::from(1));
make_builtin_fn!(set_prototype_of, named "setPrototypeOf", with length 2, of object);
make_builtin_fn!(get_prototype_of, named "getPrototypeOf", with length 1, of object);
make_builtin_fn!(define_property, named "defineProperty", with length 3, of object);
object
}
/// Initialise the `Object` object on the global object.
#[inline]
pub fn init(global: &Value) {
global.set_field_slice("Object", create(global));
}
| 34.2032 | 121 | 0.543341 |
09f4cf374c09e6f6914ed229d506ef5ab17eede3
| 33,634 |
// Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use crate::{
counters,
error::Error,
logging::{LogEntry, LogEvent, LogSchema},
shared_components::SyncState,
};
use aptos_logger::prelude::*;
use aptos_types::{
contract_event::ContractEvent, ledger_info::LedgerInfoWithSignatures,
move_resource::MoveStorage, transaction::TransactionListWithProof,
};
use event_notifications::{EventNotificationSender, EventSubscriptionService};
use executor::components::in_memory_state_calculator::IntoLedgerView;
use executor_types::ChunkExecutorTrait;
use std::sync::Arc;
use storage_interface::DbReader;
/// Proxies interactions with execution and storage for state synchronization
pub trait ExecutorProxyTrait: Send {
/// Sync the local state with the latest in storage.
fn get_local_storage_state(&self) -> Result<SyncState, Error>;
/// Execute and commit a batch of transactions
fn execute_chunk(
&mut self,
txn_list_with_proof: TransactionListWithProof,
verified_target_li: LedgerInfoWithSignatures,
intermediate_end_of_epoch_li: Option<LedgerInfoWithSignatures>,
) -> Result<(), Error>;
/// Gets chunk of transactions given the known version, target version and the max limit.
fn get_chunk(
&self,
known_version: u64,
limit: u64,
target_version: u64,
) -> Result<TransactionListWithProof, Error>;
/// Get the epoch changing ledger info for the given epoch so that we can move to next epoch.
fn get_epoch_change_ledger_info(&self, epoch: u64) -> Result<LedgerInfoWithSignatures, Error>;
/// Get ledger info at an epoch boundary version.
fn get_epoch_ending_ledger_info(&self, version: u64)
-> Result<LedgerInfoWithSignatures, Error>;
/// Returns the ledger's timestamp for the given version in microseconds
fn get_version_timestamp(&self, version: u64) -> Result<u64, Error>;
/// Publishes on-chain event notifications and reconfigurations to subscribed components
fn publish_event_notifications(&mut self, events: Vec<ContractEvent>) -> Result<(), Error>;
}
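// A rough sketch of how a state sync driver is expected to drive this trait
// during catch-up; the surrounding variable names here are assumptions, only
// the trait methods themselves come from the definition above:
//
//     let state = proxy.get_local_storage_state()?;
//     let known_version = state.synced_version();
//     let txns = proxy.get_chunk(known_version, chunk_limit, target_version)?;
//     proxy.execute_chunk(txns, verified_target_li, None)?;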
pub(crate) struct ExecutorProxy<C> {
storage: Arc<dyn DbReader>,
chunk_executor: Arc<C>,
event_subscription_service: EventSubscriptionService,
}
impl<C: ChunkExecutorTrait> ExecutorProxy<C> {
pub(crate) fn new(
storage: Arc<dyn DbReader>,
chunk_executor: Arc<C>,
event_subscription_service: EventSubscriptionService,
) -> Self {
Self {
storage,
chunk_executor,
event_subscription_service,
}
}
}
impl<C: ChunkExecutorTrait> ExecutorProxyTrait for ExecutorProxy<C> {
fn get_local_storage_state(&self) -> Result<SyncState, Error> {
let storage_info = self.storage.get_startup_info().map_err(|error| {
Error::UnexpectedError(format!(
"Failed to get startup info from storage: {}",
error
))
})?;
let storage_info = storage_info
.ok_or_else(|| Error::UnexpectedError("Missing startup info from storage".into()))?;
let current_epoch_state = storage_info.get_epoch_state().clone();
let latest_ledger_info = storage_info.latest_ledger_info.clone();
let synced_trees = storage_info
.into_latest_tree_state()
.into_ledger_view(&self.storage)
.map_err(|error| {
Error::UnexpectedError(format!(
"Failed to construct latest ledger view from storage: {}",
error
))
})?;
Ok(SyncState::new(
latest_ledger_info,
synced_trees,
current_epoch_state,
))
}
fn execute_chunk(
&mut self,
txn_list_with_proof: TransactionListWithProof,
verified_target_li: LedgerInfoWithSignatures,
intermediate_end_of_epoch_li: Option<LedgerInfoWithSignatures>,
) -> Result<(), Error> {
// track chunk execution time
let timer = counters::EXECUTE_CHUNK_DURATION.start_timer();
let (events, _) = self
.chunk_executor
.execute_and_commit_chunk(
txn_list_with_proof,
&verified_target_li,
intermediate_end_of_epoch_li.as_ref(),
)
.map_err(|error| {
Error::UnexpectedError(format!("Execute and commit chunk failed: {}", error))
})?;
timer.stop_and_record();
if let Err(e) = self.publish_event_notifications(events) {
error!(
LogSchema::event_log(LogEntry::Reconfig, LogEvent::Fail).error(&e),
"Failed to publish reconfig updates in execute_chunk"
);
counters::RECONFIG_PUBLISH_COUNT
.with_label_values(&[counters::FAIL_LABEL])
.inc();
}
Ok(())
}
fn get_chunk(
&self,
known_version: u64,
limit: u64,
target_version: u64,
) -> Result<TransactionListWithProof, Error> {
let starting_version = known_version
.checked_add(1)
.ok_or_else(|| Error::IntegerOverflow("Starting version has overflown!".into()))?;
self.storage
.get_transactions(starting_version, limit, target_version, false)
.map_err(|error| {
Error::UnexpectedError(format!("Failed to get transactions from storage {}", error))
})
}
fn get_epoch_change_ledger_info(&self, epoch: u64) -> Result<LedgerInfoWithSignatures, Error> {
let next_epoch = epoch
.checked_add(1)
.ok_or_else(|| Error::IntegerOverflow("Next epoch has overflown!".into()))?;
let mut epoch_ending_ledger_infos = self
.storage
.get_epoch_ending_ledger_infos(epoch, next_epoch)
.map_err(|error| Error::UnexpectedError(error.to_string()))?;
epoch_ending_ledger_infos
.ledger_info_with_sigs
.pop()
.ok_or_else(|| {
Error::UnexpectedError(format!(
"Missing epoch change ledger info for epoch: {:?}",
epoch
))
})
}
fn get_epoch_ending_ledger_info(
&self,
version: u64,
) -> Result<LedgerInfoWithSignatures, Error> {
self.storage
.get_epoch_ending_ledger_info(version)
.map_err(|error| Error::UnexpectedError(error.to_string()))
}
fn get_version_timestamp(&self, version: u64) -> Result<u64, Error> {
self.storage
.get_block_timestamp(version)
.map_err(|error| Error::UnexpectedError(error.to_string()))
}
fn publish_event_notifications(&mut self, events: Vec<ContractEvent>) -> Result<(), Error> {
info!(LogSchema::new(LogEntry::Reconfig).count(events.len()));
let synced_version = (&*self.storage).fetch_synced_version().map_err(|error| {
Error::UnexpectedError(format!("Failed to fetch storage synced version: {}", error))
})?;
if let Err(error) = self
.event_subscription_service
.notify_events(synced_version, events)
{
error!(
LogSchema::event_log(LogEntry::Reconfig, LogEvent::PublishError)
.error(&Error::UnexpectedError(error.to_string())),
);
Err(error.into())
} else {
counters::RECONFIG_PUBLISH_COUNT
.with_label_values(&[counters::SUCCESS_LABEL])
.inc();
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use aptos_crypto::{ed25519::*, PrivateKey, Uniform};
use aptos_infallible::RwLock;
use aptos_transaction_builder::aptos_stdlib;
use aptos_types::{
account_address::AccountAddress,
account_config::aptos_root_address,
block_metadata::BlockMetadata,
contract_event::ContractEvent,
event::EventKey,
ledger_info::LedgerInfoWithSignatures,
move_resource::MoveStorage,
on_chain_config::{
ConsensusConfigV1, OnChainConfig, OnChainConsensusConfig, Version,
ON_CHAIN_CONFIG_REGISTRY,
},
transaction::{Transaction, WriteSetPayload},
};
use aptos_vm::AptosVM;
use aptosdb::AptosDB;
use claim::{assert_err, assert_ok};
use event_notifications::{EventSubscriptionService, ReconfigNotificationListener};
use executor::{block_executor::BlockExecutor, chunk_executor::ChunkExecutor};
use executor_test_helpers::{
bootstrap_genesis, gen_block_id, gen_ledger_info_with_sigs, get_test_signed_transaction,
};
use executor_types::BlockExecutorTrait;
use futures::{future::FutureExt, stream::StreamExt};
use move_deps::move_core_types::language_storage::TypeTag;
use serde::{Deserialize, Serialize};
use storage_interface::DbReaderWriter;
use vm_genesis::TestValidator;
// TODO(joshlind): add unit tests for general executor proxy behaviour!
// TODO(joshlind): add unit tests for subscription events.. seems like these are missing?
#[test]
fn test_pub_sub_validator_set() {
let (validators, mut block_executor, mut executor_proxy, mut reconfig_receiver) =
bootstrap_genesis_and_set_subscription(true);
// Create a dummy prologue transaction that will bump the timer, and update the validator set
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_aptos_version_transaction(0);
// Execute and commit the block
let block = vec![dummy_txn, reconfig_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Publish the on chain config updates
assert_ok!(executor_proxy.publish_event_notifications(reconfig_events));
// Verify reconfig notification is sent
assert!(reconfig_receiver
.select_next_some()
.now_or_never()
.is_some());
}
#[test]
fn test_pub_sub_drop_receiver() {
let (validators, mut block_executor, mut executor_proxy, reconfig_receiver) =
bootstrap_genesis_and_set_subscription(true);
// Create a dummy prologue transaction that will bump the timer, and update the Aptos version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_aptos_version_transaction(0);
// Execute and commit the reconfig block
let block = vec![dummy_txn, reconfig_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Drop the reconfig receiver
drop(reconfig_receiver);
// Verify publishing on-chain config updates fails due to dropped receiver
assert_err!(executor_proxy.publish_event_notifications(reconfig_events));
}
#[test]
fn test_pub_sub_rotate_validator_key() {
let (validators, mut block_executor, mut executor_proxy, mut reconfig_receiver) =
bootstrap_genesis_and_set_subscription(true);
// Create a dummy prologue transaction that will bump the timer, and update the Aptos version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_aptos_version_transaction(0);
// Give the validator some money so it can send a rotation tx and rotate the validator's consensus key.
let money_txn = create_transfer_to_validator_transaction(validator_account, 1);
let rotation_txn = create_consensus_key_rotation_transaction(&validators[0], 0);
// Execute and commit the reconfig block
let block = vec![dummy_txn, reconfig_txn, money_txn, rotation_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Publish the on chain config updates
assert_ok!(executor_proxy.publish_event_notifications(reconfig_events));
// Verify reconfig notification is sent
assert!(reconfig_receiver
.select_next_some()
.now_or_never()
.is_some());
}
#[test]
fn test_pub_sub_no_events() {
let (_validators, _block_executor, mut executor_proxy, mut reconfig_receiver) =
bootstrap_genesis_and_set_subscription(true);
// Publish no events
assert_ok!(executor_proxy.publish_event_notifications(vec![]));
// Verify no reconfig notification is sent
assert!(reconfig_receiver
.select_next_some()
.now_or_never()
.is_none());
}
#[test]
fn test_pub_sub_no_reconfig_events() {
let (_validators, _block_executor, mut executor_proxy, mut reconfig_receiver) =
bootstrap_genesis_and_set_subscription(true);
// Publish no on chain config updates
let event = create_test_event(create_random_event_key());
assert_ok!(executor_proxy.publish_event_notifications(vec![event]));
// Verify no reconfig notification is sent
assert!(reconfig_receiver
.select_next_some()
.now_or_never()
.is_none());
}
#[test]
fn test_pub_sub_event_subscription() {
// Generate a genesis change set
let (genesis, _validators) = vm_genesis::test_genesis_change_set_and_validators(Some(1));
// Create test aptos database
let db_path = aptos_temppath::TempPath::new();
assert_ok!(db_path.create_as_dir());
let (db, db_rw) = DbReaderWriter::wrap(AptosDB::new_for_test(db_path.path()));
        // Bootstrap the genesis transaction
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis));
assert_ok!(bootstrap_genesis::<AptosVM>(&db_rw, &genesis_txn));
// Create an event subscriber
let mut event_subscription_service = EventSubscriptionService::new(
ON_CHAIN_CONFIG_REGISTRY,
Arc::new(RwLock::new(db_rw.clone())),
);
let event_key = create_random_event_key();
let mut event_receiver = event_subscription_service
.subscribe_to_events(vec![event_key])
.unwrap();
// Create an executor proxy
let chunk_executor = Arc::new(ChunkExecutor::<AptosVM>::new(db_rw).unwrap());
let mut executor_proxy = ExecutorProxy::new(db, chunk_executor, event_subscription_service);
// Publish a subscribed event
let event = create_test_event(event_key);
assert_ok!(executor_proxy.publish_event_notifications(vec![event]));
// Verify the event is received
match event_receiver.select_next_some().now_or_never() {
Some(event_notification) => {
assert_eq!(event_notification.version, 0);
assert_eq!(event_notification.subscribed_events.len(), 1);
assert_eq!(*event_notification.subscribed_events[0].key(), event_key);
}
None => {
panic!("Expected an event notification, but None received!");
}
}
}
#[test]
    fn test_pub_sub_aptos_version() {
let (validators, mut block_executor, mut executor_proxy, mut reconfig_receiver) =
bootstrap_genesis_and_set_subscription(true);
// Create a dummy prologue transaction that will bump the timer, and update the Aptos version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let allowlist_txn = create_new_update_aptos_version_transaction(0);
// Execute and commit the reconfig block
let block = vec![dummy_txn, allowlist_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Publish the on chain config updates
assert_ok!(executor_proxy.publish_event_notifications(reconfig_events));
// Verify the correct reconfig notification is sent
let notification = reconfig_receiver.select_next_some().now_or_never().unwrap();
let received_config = notification.on_chain_configs.get::<Version>().unwrap();
assert_eq!(received_config, Version { major: 7 });
}
#[ignore]
#[test]
fn test_pub_sub_with_executor_proxy() {
let (validators, mut block_executor, mut executor_proxy, _reconfig_receiver) =
bootstrap_genesis_and_set_subscription(true);
// Create a dummy prologue transaction that will bump the timer and update the Aptos version
let validator_account = validators[0].data.address;
let dummy_txn_1 = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_aptos_version_transaction(0);
// Execute and commit the reconfig block
let block = vec![dummy_txn_1.clone(), reconfig_txn.clone()];
let (_, ledger_info_epoch_1) = execute_and_commit_block(&mut block_executor, block, 1);
// Give the validator some money so it can send a rotation tx, create another dummy prologue
// to bump the timer and rotate the validator's consensus key.
let money_txn = create_transfer_to_validator_transaction(validator_account, 1);
let dummy_txn_2 = create_dummy_transaction(2, validator_account);
let rotation_txn = create_consensus_key_rotation_transaction(&validators[0], 0);
// Execute and commit the reconfig block
let block = vec![money_txn.clone(), dummy_txn_2.clone(), rotation_txn.clone()];
let (_, ledger_info_epoch_2) = execute_and_commit_block(&mut block_executor, block, 2);
// Grab the first two executed transactions and verify responses
let txns = executor_proxy.get_chunk(0, 2, 2).unwrap();
assert_eq!(txns.transactions, vec![dummy_txn_1, reconfig_txn]);
assert_ok!(executor_proxy.execute_chunk(txns, ledger_info_epoch_1.clone(), None));
assert_eq!(
ledger_info_epoch_1,
executor_proxy.get_epoch_change_ledger_info(1).unwrap()
);
assert_eq!(
ledger_info_epoch_1,
executor_proxy.get_epoch_ending_ledger_info(2).unwrap()
);
// Grab the next two executed transactions (forced by limit) and verify responses
let txns = executor_proxy.get_chunk(2, 2, 5).unwrap();
assert_eq!(txns.transactions, vec![money_txn, dummy_txn_2]);
assert_err!(executor_proxy.get_epoch_ending_ledger_info(4));
// Grab the last transaction and verify responses
let txns = executor_proxy.get_chunk(4, 1, 5).unwrap();
assert_eq!(txns.transactions, vec![rotation_txn]);
assert_ok!(executor_proxy.execute_chunk(txns, ledger_info_epoch_2.clone(), None));
assert_eq!(
ledger_info_epoch_2,
executor_proxy.get_epoch_change_ledger_info(2).unwrap()
);
assert_eq!(
ledger_info_epoch_2,
executor_proxy.get_epoch_ending_ledger_info(5).unwrap()
);
}
#[ignore]
#[test]
fn test_pub_sub_with_executor_sync_state() {
let (validators, mut block_executor, executor_proxy, _reconfig_receiver) =
bootstrap_genesis_and_set_subscription(true);
// Create a dummy prologue transaction that will bump the timer and update the Aptos version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let reconfig_txn = create_new_update_aptos_version_transaction(0);
// Execute and commit the reconfig block
let block = vec![dummy_txn, reconfig_txn];
let _ = execute_and_commit_block(&mut block_executor, block, 1);
// Verify executor proxy sync state
let sync_state = executor_proxy.get_local_storage_state().unwrap();
assert_eq!(sync_state.trusted_epoch(), 2); // 1 reconfiguration has occurred, trusted = next
assert_eq!(sync_state.committed_version(), 2); // 2 transactions have committed
assert_eq!(sync_state.synced_version(), 2); // 2 transactions have synced
// Give the validator some money so it can send a rotation tx, create another dummy prologue
// to bump the timer and rotate the validator's consensus key.
let money_txn = create_transfer_to_validator_transaction(validator_account, 1);
let dummy_txn = create_dummy_transaction(2, validator_account);
let rotation_txn = create_consensus_key_rotation_transaction(&validators[0], 0);
// Execute and commit the reconfig block
let block = vec![money_txn, dummy_txn, rotation_txn];
let _ = execute_and_commit_block(&mut block_executor, block, 2);
// Verify executor proxy sync state
let sync_state = executor_proxy.get_local_storage_state().unwrap();
assert_eq!(sync_state.trusted_epoch(), 3); // 2 reconfigurations have occurred, trusted = next
assert_eq!(sync_state.committed_version(), 5); // 5 transactions have committed
assert_eq!(sync_state.synced_version(), 5); // 5 transactions have synced
}
#[ignore]
#[test]
fn test_pub_sub_consensus_config() {
let (validators, mut block_executor, mut executor_proxy, mut reconfig_receiver) =
bootstrap_genesis_and_set_subscription(false);
// Verify that the initial OnChainConsensusConfig can be fetched from the first notification.
let reconfig_notification = reconfig_receiver.select_next_some().now_or_never().unwrap();
assert_ok!(reconfig_notification
.on_chain_configs
.get::<OnChainConsensusConfig>());
// Create a dummy prologue transaction that will bump the timer, and update the Aptos version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let update_txn = create_new_update_consensus_config_transaction(0);
// Execute and commit the reconfig block
let block = vec![dummy_txn, update_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Publish the on-chain config updates
assert_ok!(executor_proxy.publish_event_notifications(reconfig_events));
// Verify the correct reconfig notification is sent
let reconfig_notification = reconfig_receiver.select_next_some().now_or_never().unwrap();
let received_config = reconfig_notification
.on_chain_configs
.get::<OnChainConsensusConfig>()
.unwrap();
assert_eq!(
received_config,
OnChainConsensusConfig::V1(ConsensusConfigV1 { two_chain: false })
);
}
#[test]
fn test_missing_on_chain_config() {
// Create a test aptos database
let db_path = aptos_temppath::TempPath::new();
db_path.create_as_dir().unwrap();
let (db, db_rw) = DbReaderWriter::wrap(AptosDB::new_for_test(db_path.path()));
// Bootstrap the database with regular genesis
let (genesis, validators) = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis));
assert_ok!(bootstrap_genesis::<AptosVM>(&db_rw, &genesis_txn));
// Create a reconfig subscriber with a custom config registry (including
// a TestOnChainConfig that doesn't exist on-chain).
let mut config_registry = ON_CHAIN_CONFIG_REGISTRY.to_owned();
config_registry.push(TestOnChainConfig::CONFIG_ID);
let mut event_subscription_service =
EventSubscriptionService::new(&config_registry, Arc::new(RwLock::new(db_rw.clone())));
let mut reconfig_receiver = event_subscription_service
.subscribe_to_reconfigurations()
.unwrap();
// Initialize the configs and verify that the node doesn't panic
// (even though it can't find the TestOnChainConfig on the blockchain!).
let storage: Arc<dyn DbReader> = db.clone();
let synced_version = (&*storage).fetch_synced_version().unwrap();
event_subscription_service
.notify_initial_configs(synced_version)
.unwrap();
// Create an executor
let chunk_executor = Arc::new(ChunkExecutor::<AptosVM>::new(db_rw.clone()).unwrap());
let mut executor_proxy = ExecutorProxy::new(db, chunk_executor, event_subscription_service);
// Verify that the initial configs returned to the subscriber don't contain the unknown on-chain config
let payload = reconfig_receiver
.select_next_some()
.now_or_never()
.unwrap()
.on_chain_configs;
assert_ok!(payload.get::<Version>());
assert_err!(payload.get::<TestOnChainConfig>());
// Create a dummy prologue transaction that will bump the timer, and update the Aptos version
let validator_account = validators[0].data.address;
let dummy_txn = create_dummy_transaction(1, validator_account);
let allowlist_txn = create_new_update_aptos_version_transaction(0);
// Execute and commit the reconfig block
let mut block_executor = Box::new(BlockExecutor::<AptosVM>::new(db_rw));
let block = vec![dummy_txn, allowlist_txn];
let (reconfig_events, _) = execute_and_commit_block(&mut block_executor, block, 1);
// Publish the on-chain config updates
assert_ok!(executor_proxy.publish_event_notifications(reconfig_events));
// Verify the reconfig notification still doesn't contain the unknown config
let payload = reconfig_receiver
.select_next_some()
.now_or_never()
.unwrap()
.on_chain_configs;
assert_ok!(payload.get::<Version>());
assert_ok!(payload.get::<OnChainConsensusConfig>());
assert_err!(payload.get::<TestOnChainConfig>());
}
/// Executes a genesis transaction, creates the executor proxy and sets the given reconfig
/// subscription.
fn bootstrap_genesis_and_set_subscription(
verify_initial_config: bool,
) -> (
Vec<TestValidator>,
Box<BlockExecutor<AptosVM>>,
ExecutorProxy<ChunkExecutor<AptosVM>>,
ReconfigNotificationListener,
) {
// Generate a genesis change set
let (genesis, validators) = vm_genesis::test_genesis_change_set_and_validators(Some(1));
// Create test aptos database
let db_path = aptos_temppath::TempPath::new();
assert_ok!(db_path.create_as_dir());
let (db, db_rw) = DbReaderWriter::wrap(AptosDB::new_for_test(db_path.path()));
// Bootstrap the genesis transaction
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis));
assert_ok!(bootstrap_genesis::<AptosVM>(&db_rw, &genesis_txn));
// Create event subscription service and initialize configs
let mut event_subscription_service = EventSubscriptionService::new(
ON_CHAIN_CONFIG_REGISTRY,
Arc::new(RwLock::new(db_rw.clone())),
);
let mut reconfig_receiver = event_subscription_service
.subscribe_to_reconfigurations()
.unwrap();
let storage: Arc<dyn DbReader> = db.clone();
let synced_version = (&*storage).fetch_synced_version().unwrap();
assert_ok!(event_subscription_service.notify_initial_configs(synced_version));
if verify_initial_config {
// Verify initial reconfiguration notification is sent
assert!(
reconfig_receiver
.select_next_some()
.now_or_never()
.is_some(),
"Expected an initial reconfig notification!",
);
}
// Create the executors
let block_executor = Box::new(BlockExecutor::<AptosVM>::new(db_rw.clone()));
let chunk_executor = Arc::new(ChunkExecutor::<AptosVM>::new(db_rw).unwrap());
let executor_proxy = ExecutorProxy::new(db, chunk_executor, event_subscription_service);
(
validators,
block_executor,
executor_proxy,
reconfig_receiver,
)
}
/// Creates a transaction that rotates the consensus key of the given validator account.
fn create_consensus_key_rotation_transaction(
validator: &TestValidator,
sequence_number: u64,
) -> Transaction {
let operator_key = validator.key.clone();
let operator_public_key = operator_key.public_key();
let operator_account = validator.data.operator_address;
let new_consensus_key = Ed25519PrivateKey::generate_for_testing().public_key();
get_test_signed_transaction(
operator_account,
sequence_number,
operator_key,
operator_public_key,
Some(
aptos_stdlib::encode_validator_set_script_set_validator_config_and_reconfigure(
validator.data.address,
new_consensus_key.to_bytes().to_vec(),
Vec::new(),
Vec::new(),
),
),
)
}
/// Creates a dummy transaction (useful for bumping the timer).
fn create_dummy_transaction(index: u8, validator_account: AccountAddress) -> Transaction {
Transaction::BlockMetadata(BlockMetadata::new(
gen_block_id(index),
0,
index as u64,
vec![],
validator_account,
(index as u64 + 1) * 100000010,
))
}
/// Creates a transaction that creates a reconfiguration event by changing the Aptos version
fn create_new_update_aptos_version_transaction(sequence_number: u64) -> Transaction {
let genesis_key = vm_genesis::GENESIS_KEYPAIR.0.clone();
get_test_signed_transaction(
aptos_root_address(),
sequence_number,
genesis_key.clone(),
genesis_key.public_key(),
Some(aptos_stdlib::encode_version_set_version(
7, // version
)),
)
}
fn create_new_update_consensus_config_transaction(sequence_number: u64) -> Transaction {
// Placeholder until supported in the Aptos framework
create_new_update_aptos_version_transaction(sequence_number)
// let genesis_key = vm_genesis::GENESIS_KEYPAIR.0.clone();
// get_test_signed_transaction(
// aptos_root_address(),
// sequence_number,
// genesis_key.clone(),
// genesis_key.public_key(),
// Some(aptos_stdlib::encode_update_aptos_consensus_config_script_function(
// 0,
// bcs::to_bytes(&OnChainConsensusConfig::V1(ConsensusConfigV1 {
// two_chain: false,
// }))
// .unwrap(),
// )),
// )
}
/// Creates a transaction that sends funds to the specified validator account.
fn create_transfer_to_validator_transaction(
validator_account: AccountAddress,
sequence_number: u64,
) -> Transaction {
let genesis_key = vm_genesis::GENESIS_KEYPAIR.0.clone();
get_test_signed_transaction(
aptos_root_address(),
sequence_number,
genesis_key.clone(),
genesis_key.public_key(),
Some(aptos_stdlib::encode_test_coin_transfer(
validator_account,
1_000_000,
)),
)
}
/// Executes and commits a given block that will cause a reconfiguration event.
fn execute_and_commit_block(
block_executor: &mut Box<BlockExecutor<AptosVM>>,
block: Vec<Transaction>,
block_id: u8,
) -> (Vec<ContractEvent>, LedgerInfoWithSignatures) {
let block_hash = gen_block_id(block_id);
// Execute block
let output = block_executor
.execute_block((block_hash, block), block_executor.committed_block_id())
.expect("Failed to execute block!");
assert!(
output.has_reconfiguration(),
"Block execution is missing a reconfiguration!"
);
// Commit block
let ledger_info_with_sigs =
gen_ledger_info_with_sigs(block_id.into(), &output, block_hash, vec![]);
assert_ok!(block_executor.commit_blocks(vec![block_hash], ledger_info_with_sigs.clone()));
(output.reconfig_events().to_vec(), ledger_info_with_sigs)
}
fn create_test_event(event_key: EventKey) -> ContractEvent {
ContractEvent::new(event_key, 0, TypeTag::Bool, bcs::to_bytes(&0).unwrap())
}
fn create_random_event_key() -> EventKey {
EventKey::new_from_address(&AccountAddress::random(), 0)
}
/// Defines a new on-chain config for test purposes.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, PartialOrd, Ord, Serialize)]
pub struct TestOnChainConfig {
pub some_value: u64,
}
impl OnChainConfig for TestOnChainConfig {
const IDENTIFIER: &'static str = "TestOnChainConfig";
}
}
| 41.472256 | 111 | 0.655081 |
d909dc03a1e318b073cc7af709b60fed8733ceed
| 905 |
// Copyright IBM Corporation 2021
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[macro_use] extern crate rocket;
#[get("/rust")]
fn index() -> &'static str {
"This is a rust app!"
}
#[get("/")]
fn hello() -> &'static str {
"Hello, world! This is a rust app."
}
#[launch]
fn rocket() -> _ {
rocket::build()
.mount("/", routes![index])
.mount("/", routes![hello])
}
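// A quick manual check (sketch, assuming Rocket's default port 8000):
//   curl http://localhost:8000/      -> "Hello, world! This is a rust app."
//   curl http://localhost:8000/rust  -> "This is a rust app!"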
| 26.617647 | 75 | 0.670718 |
671e57fba58b48dd95d4e89e17df31e957364abb
| 3,465 |
#[doc = "Register `EVENTS_TXDSENT` reader"]
pub struct R(crate::R<EVENTS_TXDSENT_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<EVENTS_TXDSENT_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<EVENTS_TXDSENT_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<EVENTS_TXDSENT_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `EVENTS_TXDSENT` writer"]
pub struct W(crate::W<EVENTS_TXDSENT_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<EVENTS_TXDSENT_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<EVENTS_TXDSENT_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<EVENTS_TXDSENT_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `EVENTS_TXDSENT` reader - "]
pub struct EVENTS_TXDSENT_R(crate::FieldReader<bool, bool>);
impl EVENTS_TXDSENT_R {
pub(crate) fn new(bits: bool) -> Self {
EVENTS_TXDSENT_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for EVENTS_TXDSENT_R {
type Target = crate::FieldReader<bool, bool>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `EVENTS_TXDSENT` writer - "]
pub struct EVENTS_TXDSENT_W<'a> {
w: &'a mut W,
}
impl<'a> EVENTS_TXDSENT_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0"]
#[inline(always)]
pub fn events_txdsent(&self) -> EVENTS_TXDSENT_R {
EVENTS_TXDSENT_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0"]
#[inline(always)]
pub fn events_txdsent(&mut self) -> EVENTS_TXDSENT_W {
EVENTS_TXDSENT_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "TWI TXD byte sent\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [events_txdsent](index.html) module"]
pub struct EVENTS_TXDSENT_SPEC;
impl crate::RegisterSpec for EVENTS_TXDSENT_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [events_txdsent::R](R) reader structure"]
impl crate::Readable for EVENTS_TXDSENT_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [events_txdsent::W](W) writer structure"]
impl crate::Writable for EVENTS_TXDSENT_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets EVENTS_TXDSENT to value 0"]
impl crate::Resettable for EVENTS_TXDSENT_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
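// Usage sketch (illustrative; assumes `twi` is a peripheral instance that
// exposes this register as `events_txdsent` -- the name is an assumption,
// not defined in this file):
//   // Busy-wait until the TXD byte has been sent, then clear the event.
//   while twi.events_txdsent.read().events_txdsent().bit_is_clear() {}
//   twi.events_txdsent.reset();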
| 30.663717 | 412 | 0.616739 |
ef7795565978035b9d58fa0f97702f1b8653f062
| 432 |
/// Example of how to create a JSON object from a macro
use std::collections::HashMap;
#[derive(PartialEq, Debug)]
enum Json {
Null,
Boolean(bool),
Number(f64),
String(String),
Array(Vec<Json>),
Object(Box<HashMap<String, Json>>)
}
macro_rules! json {
(null) => {
Json::Null
}
}
fn main() {
println!("Hello, world!");
}
#[test]
fn json_null() {
assert_eq!(json!(null), Json::Null);
}
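// A sketch of how further arms could look (hypothetical extension, not part
// of the original example); literal tokens can be matched directly:
//   (true) => { Json::Boolean(true) };
//   (false) => { Json::Boolean(false) };
// Array and object arms would recurse, invoking `json!($element)` per item.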
| 14.896552 | 56 | 0.599537 |
ef098365480c9a08e7b6d9dead4eaff4038cf772
| 29,979 |
use std::cmp;
use std::fmt;
use std::io;
use std::io::Read as StdRead;
use std::io::Write as StdWrite;
use std::mem;
use std::net;
use std::net::SocketAddr;
use std::slice::Chunks;
use std::time::Duration;
#[cfg(all(feature = "ssl", not(target_os = "windows")))]
use crate::conn::SslOpts;
use super::consts;
use super::consts::ColumnType;
use super::consts::Command;
use super::error::DriverError::ConnectTimeout;
use super::error::DriverError::CouldNotConnect;
use super::error::DriverError::PacketOutOfSync;
use super::error::DriverError::PacketTooLarge;
use super::error::Error::DriverError;
use super::error::Result as MyResult;
use crate::Value::{self, Bytes, Date, Float, Int, Time, UInt, NULL};
use bufstream::BufStream;
use byteorder::LittleEndian as LE;
use byteorder::ReadBytesExt;
use byteorder::WriteBytesExt;
use flate2::{read::ZlibDecoder, write::ZlibEncoder, Compression};
#[cfg(windows)]
use named_pipe as np;
#[cfg(all(feature = "ssl", all(unix, not(target_os = "macos"))))]
use openssl::ssl::{self, SslContext, SslStream};
#[cfg(all(feature = "ssl", target_os = "macos"))]
use security_framework::certificate::SecCertificate;
#[cfg(all(feature = "ssl", target_os = "macos"))]
use security_framework::cipher_suite::CipherSuite;
#[cfg(all(feature = "ssl", target_os = "macos"))]
use security_framework::identity::SecIdentity;
#[cfg(all(feature = "ssl", target_os = "macos"))]
use security_framework::secure_transport::{
HandshakeError, SslConnectionType, SslContext, SslProtocolSide, SslStream,
};
#[cfg(unix)]
use std::os::unix;
mod tcp;
const MIN_COMPRESS_LENGTH: usize = 50;
/// Maps desired payload to a set of `(<header>, <packet payload>)`.
struct PacketIterator<'a> {
chunks: Chunks<'a, u8>,
last_was_max: bool,
seq_id: &'a mut u8,
}
impl<'a> PacketIterator<'a> {
fn new(payload: &'a [u8], seq_id: &'a mut u8) -> Self {
PacketIterator {
last_was_max: payload.len() == 0,
seq_id,
chunks: payload.chunks(consts::MAX_PAYLOAD_LEN),
}
}
}
impl<'a> Iterator for PacketIterator<'a> {
type Item = ([u8; 4], &'a [u8]);
fn next(&mut self) -> Option<<Self as Iterator>::Item> {
match self.chunks.next() {
Some(chunk) => {
let mut header = [0, 0, 0, *self.seq_id];
*self.seq_id = self.seq_id.wrapping_add(1);
self.last_was_max = chunk.len() == consts::MAX_PAYLOAD_LEN;
match (&mut header[..3]).write_le_uint_n(chunk.len() as u64, 3) {
Ok(_) => Some((header, chunk)),
Err(_) => unreachable!("3 bytes for chunk len should be available"),
}
}
None => {
if self.last_was_max {
let header = [0, 0, 0, *self.seq_id];
*self.seq_id = self.seq_id.wrapping_add(1);
self.last_was_max = false;
Some((header, &[][..]))
} else {
None
}
}
}
}
}
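// Framing sketch for the iterator above (illustrative byte counts):
//   payload.len() == 10  => [len=10, seq=n]
//   payload.len() == 0   => [len=0, seq=n]
//   payload.len() == MAX_PAYLOAD_LEN
//                        => [len=MAX_PAYLOAD_LEN, seq=n][len=0, seq=n+1]
// The trailing empty packet is how the reader detects the end of a payload
// that is an exact multiple of MAX_PAYLOAD_LEN.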
pub trait Read: ReadBytesExt + io::BufRead {
fn read_lenenc_int(&mut self) -> io::Result<u64> {
let head_byte = self.read_u8()?;
let length = match head_byte {
0xfc => 2,
0xfd => 3,
0xfe => 8,
x => return Ok(x as u64),
};
let out = self.read_uint::<LE>(length)?;
Ok(out)
}
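    // Length-encoded integer wire format (MySQL protocol) as decoded above:
    //   0xFC <2 bytes LE>, 0xFD <3 bytes LE>, 0xFE <8 bytes LE>;
    //   any other head byte is the value itself.
    // Worked example: the bytes [0xFC, 0xE8, 0x03] decode to 0x03E8 = 1000.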
fn read_lenenc_bytes(&mut self) -> io::Result<Vec<u8>> {
let len = self.read_lenenc_int()?;
let mut out = Vec::with_capacity(len as usize);
let count = if len > 0 {
self.take(len).read_to_end(&mut out)?
} else {
0
};
if count as u64 == len {
Ok(out)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Unexpected EOF while reading length encoded string",
))
}
}
fn read_to_null(&mut self) -> io::Result<Vec<u8>> {
let mut out = Vec::new();
let mut chars = self.bytes();
while let Some(c) = chars.next() {
let c = c?;
if c == 0u8 {
break;
}
out.push(c);
}
Ok(out)
}
fn read_bin_value(
&mut self,
col_type: consts::ColumnType,
unsigned: bool,
) -> io::Result<Value> {
match col_type {
ColumnType::MYSQL_TYPE_STRING
| ColumnType::MYSQL_TYPE_VAR_STRING
| ColumnType::MYSQL_TYPE_BLOB
| ColumnType::MYSQL_TYPE_TINY_BLOB
| ColumnType::MYSQL_TYPE_MEDIUM_BLOB
| ColumnType::MYSQL_TYPE_LONG_BLOB
| ColumnType::MYSQL_TYPE_SET
| ColumnType::MYSQL_TYPE_ENUM
| ColumnType::MYSQL_TYPE_DECIMAL
| ColumnType::MYSQL_TYPE_VARCHAR
| ColumnType::MYSQL_TYPE_BIT
| ColumnType::MYSQL_TYPE_NEWDECIMAL
| ColumnType::MYSQL_TYPE_GEOMETRY
| ColumnType::MYSQL_TYPE_JSON => Ok(Bytes(self.read_lenenc_bytes()?)),
ColumnType::MYSQL_TYPE_TINY => {
if unsigned {
Ok(Int(self.read_u8()? as i64))
} else {
Ok(Int(self.read_i8()? as i64))
}
}
ColumnType::MYSQL_TYPE_SHORT | ColumnType::MYSQL_TYPE_YEAR => {
if unsigned {
Ok(Int(self.read_u16::<LE>()? as i64))
} else {
Ok(Int(self.read_i16::<LE>()? as i64))
}
}
ColumnType::MYSQL_TYPE_LONG | ColumnType::MYSQL_TYPE_INT24 => {
if unsigned {
Ok(Int(self.read_u32::<LE>()? as i64))
} else {
Ok(Int(self.read_i32::<LE>()? as i64))
}
}
ColumnType::MYSQL_TYPE_LONGLONG => {
if unsigned {
Ok(UInt(self.read_u64::<LE>()?))
} else {
Ok(Int(self.read_i64::<LE>()?))
}
}
ColumnType::MYSQL_TYPE_FLOAT => Ok(Float(self.read_f32::<LE>()? as f64)),
ColumnType::MYSQL_TYPE_DOUBLE => Ok(Float(self.read_f64::<LE>()?)),
ColumnType::MYSQL_TYPE_TIMESTAMP
| ColumnType::MYSQL_TYPE_DATE
| ColumnType::MYSQL_TYPE_DATETIME => {
let len = self.read_u8()?;
let mut year = 0u16;
let mut month = 0u8;
let mut day = 0u8;
let mut hour = 0u8;
let mut minute = 0u8;
let mut second = 0u8;
let mut micro_second = 0u32;
if len >= 4u8 {
year = self.read_u16::<LE>()?;
month = self.read_u8()?;
day = self.read_u8()?;
}
if len >= 7u8 {
hour = self.read_u8()?;
minute = self.read_u8()?;
second = self.read_u8()?;
}
if len == 11u8 {
micro_second = self.read_u32::<LE>()?;
}
Ok(Date(year, month, day, hour, minute, second, micro_second))
}
ColumnType::MYSQL_TYPE_TIME => {
let len = self.read_u8()?;
let mut is_negative = false;
let mut days = 0u32;
let mut hours = 0u8;
let mut minutes = 0u8;
let mut seconds = 0u8;
let mut micro_seconds = 0u32;
if len >= 8u8 {
is_negative = self.read_u8()? == 1u8;
days = self.read_u32::<LE>()?;
hours = self.read_u8()?;
minutes = self.read_u8()?;
seconds = self.read_u8()?;
}
if len == 12u8 {
micro_seconds = self.read_u32::<LE>()?;
}
Ok(Time(
is_negative,
days,
hours,
minutes,
seconds,
micro_seconds,
))
}
_ => Ok(NULL),
}
}
/// Drops a mysql packet payload. Returns the new seq_id.
fn drop_packet(&mut self, mut seq_id: u8) -> MyResult<u8> {
use std::io::ErrorKind::Other;
loop {
let payload_len = self.read_uint::<LE>(3)? as usize;
let srv_seq_id = self.read_u8()?;
if srv_seq_id != seq_id {
return Err(DriverError(PacketOutOfSync));
}
seq_id = seq_id.wrapping_add(1);
if payload_len == 0 {
break;
} else {
if self.fill_buf()?.len() < payload_len {
return Err(io::Error::new(Other, "Unexpected EOF while reading packet").into());
}
self.consume(payload_len);
if payload_len != consts::MAX_PAYLOAD_LEN {
break;
}
}
}
Ok(seq_id)
}
/// Reads a mysql packet payload and returns it together with the new seq_id value.
fn read_packet(&mut self, mut seq_id: u8) -> MyResult<(Vec<u8>, u8)> {
let mut total_read = 0;
let mut output = Vec::new();
loop {
let payload_len = self.read_uint::<LE>(3)? as usize;
let srv_seq_id = self.read_u8()?;
if srv_seq_id != seq_id {
return Err(DriverError(PacketOutOfSync));
}
seq_id = seq_id.wrapping_add(1);
if payload_len == 0 {
break;
} else {
output.reserve(payload_len);
unsafe {
output.set_len(total_read + payload_len);
}
self.read_exact(&mut output[total_read..total_read + payload_len])?;
total_read += payload_len;
if payload_len != consts::MAX_PAYLOAD_LEN {
break;
}
}
}
Ok((output, seq_id))
}
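    // Reassembly sketch: packets are concatenated until one arrives whose
    // payload is shorter than MAX_PAYLOAD_LEN (an empty packet terminates a
    // payload that was an exact multiple of the maximum), mirroring the
    // splitting performed by PacketIterator.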
}
impl<T: ReadBytesExt + io::BufRead> Read for T {}
pub trait Write: WriteBytesExt {
fn write_le_uint_n(&mut self, x: u64, len: usize) -> io::Result<()> {
let mut buf = [0u8; 8];
let mut offset = 0;
while offset < len {
buf[offset] = (((0xFF << (offset * 8)) & x) >> (offset * 8)) as u8;
offset += 1;
}
StdWrite::write_all(self, &buf[..len])
}
fn write_lenenc_int(&mut self, x: u64) -> io::Result<()> {
if x < 251 {
self.write_u8(x as u8)?;
Ok(())
} else if x < 65_536 {
self.write_u8(0xFC)?;
self.write_le_uint_n(x, 2)
} else if x < 16_777_216 {
self.write_u8(0xFD)?;
self.write_le_uint_n(x, 3)
} else {
self.write_u8(0xFE)?;
self.write_le_uint_n(x, 8)
}
}
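    // Encoding mirror of `read_lenenc_int`: values below 251 fit in a single
    // byte; below 2^16 take 0xFC + 2 bytes; below 2^24 take 0xFD + 3 bytes;
    // anything larger takes 0xFE + 8 bytes (all little-endian).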
fn write_lenenc_bytes(&mut self, bytes: &[u8]) -> io::Result<()> {
self.write_lenenc_int(bytes.len() as u64)?;
self.write_all(bytes)
}
fn write_packet(
&mut self,
data: &[u8],
mut seq_id: u8,
max_allowed_packet: usize,
) -> MyResult<u8> {
if data.len() > max_allowed_packet {
return Err(DriverError(PacketTooLarge));
}
for (header, payload) in PacketIterator::new(data, &mut seq_id) {
self.write_all(&header[..])?;
self.write_all(payload)?;
}
self.flush().map_err(Into::into).map(|_| seq_id)
}
}
impl<T: WriteBytesExt> Write for T {}
/// Applies compression to a stream. See the [mysql docs].
///
/// [mysql docs]: https://dev.mysql.com/doc/internals/en/compressed-payload.html
#[derive(Debug)]
pub struct Compressed {
stream: Stream,
buf: Vec<u8>,
pos: usize,
comp_seq_id: u8,
}
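// Compressed packet framing (per the MySQL compressed protocol, as handled
// by `read_compressed_packet`/`write_compressed_packet` below): a 7-byte
// header precedes each payload,
//   <3 bytes LE: compressed payload length>
//   <1 byte    : compression sequence id>
//   <3 bytes LE: uncompressed payload length, 0 if the payload is sent raw>
// followed by the (possibly zlib-compressed) payload bytes.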
impl Compressed {
pub fn new(stream: Stream) -> Self {
Compressed {
stream,
buf: Vec::new(),
pos: 0,
comp_seq_id: 0,
}
}
pub fn get_comp_seq_id(&self) -> u8 {
self.comp_seq_id
}
fn available(&self) -> usize {
self.buf.len() - self.pos
}
fn with_buf_and_stream<F>(&mut self, mut fun: F) -> io::Result<()>
where
F: FnMut(&mut Vec<u8>, &mut dyn IoPack) -> io::Result<()>,
{
let mut buf = mem::replace(&mut self.buf, Vec::new());
let ret = fun(&mut buf, self.stream.as_mut());
self.buf = buf;
ret
}
fn read_compressed_packet(&mut self) -> io::Result<()> {
assert_eq!(self.buf.len(), 0, "buf should be empty");
let compressed_len = self.stream.as_mut().read_uint::<LE>(3)? as usize;
let comp_seq_id = self.stream.as_mut().read_u8()?;
let uncompressed_len = self.stream.as_mut().read_uint::<LE>(3)? as usize;
self.comp_seq_id = comp_seq_id.wrapping_add(1);
self.with_buf_and_stream(|buf, stream| {
if uncompressed_len == 0 {
buf.resize(compressed_len, 0);
stream.read_exact(buf)
} else {
let mut intermediate_buf = Vec::with_capacity(compressed_len);
intermediate_buf.resize(compressed_len, 0);
stream.read_exact(&mut intermediate_buf)?;
let mut decoder = ZlibDecoder::new(&*intermediate_buf);
buf.reserve(uncompressed_len);
decoder.read_to_end(buf).map(|_| ())
}
})
}
pub fn write_compressed_packet(
&mut self,
data: &[u8],
mut seq_id: u8,
max_allowed_packet: usize,
) -> MyResult<u8> {
if data.len() > max_allowed_packet {
return Err(DriverError(PacketTooLarge));
}
let compress = data.len() + 4 > MIN_COMPRESS_LENGTH;
let mut comp_seq_id = seq_id;
let mut intermediate_buf = Vec::new();
if compress {
let capacity = data.len() + 4 * (data.len() / consts::MAX_PAYLOAD_LEN) + 4;
intermediate_buf.reserve(capacity);
}
for (header, payload) in PacketIterator::new(data, &mut seq_id) {
if !compress {
let chunk_len = header.len() + payload.len();
self.stream.as_mut().write_uint::<LE>(chunk_len as u64, 3)?;
self.stream.as_mut().write_u8(comp_seq_id)?;
comp_seq_id = comp_seq_id.wrapping_add(1);
self.stream.as_mut().write_uint::<LE>(0, 3)?;
self.stream.as_mut().write_all(&header[..])?;
self.stream.as_mut().write_all(payload)?;
} else {
intermediate_buf.write_all(&header[..])?;
intermediate_buf.write_all(payload)?;
}
}
if compress {
let capacity = cmp::min(intermediate_buf.len(), consts::MAX_PAYLOAD_LEN);
let mut compressed_buf = Vec::with_capacity(capacity / 2);
for chunk in intermediate_buf.chunks(consts::MAX_PAYLOAD_LEN) {
let mut encoder = ZlibEncoder::new(compressed_buf, Compression::default());
encoder.write_all(chunk)?;
compressed_buf = encoder.finish()?;
self.stream
.as_mut()
.write_uint::<LE>(compressed_buf.len() as u64, 3)?;
self.stream.as_mut().write_u8(comp_seq_id)?;
self.stream
.as_mut()
.write_uint::<LE>(chunk.len() as u64, 3)?;
self.stream.as_mut().write_all(&*compressed_buf)?;
comp_seq_id = comp_seq_id.wrapping_add(1);
compressed_buf.truncate(0);
}
}
self.comp_seq_id = comp_seq_id;
self.stream.as_mut().flush()?;
// Synchronize seq_id with comp_seq_id if compression is used.
Ok(if compress { comp_seq_id } else { seq_id })
}
pub fn is_insecure(&self) -> bool {
self.stream.is_insecure()
}
pub fn is_socket(&self) -> bool {
self.stream.is_socket()
}
}
impl io::Read for Compressed {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let available = self.available();
if available == 0 {
self.buf.truncate(0);
self.pos = 0;
self.read_compressed_packet()?;
self.read(buf)
} else {
let count = cmp::min(buf.len(), self.buf.len() - self.pos);
if count > 0 {
let end = self.pos + count;
(&mut buf[..count]).copy_from_slice(&self.buf[self.pos..end]);
self.pos = end;
}
Ok(count)
}
}
}
impl io::BufRead for Compressed {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
self.read_compressed_packet()?;
Ok(self.buf.as_ref())
}
fn consume(&mut self, amt: usize) {
self.pos += amt;
assert!(self.pos <= self.buf.len());
}
}
impl Drop for Compressed {
fn drop(&mut self) {
if let Stream::TcpStream(None) = self.stream {
return;
}
// Write COM_QUIT using compression.
let _ =
self.write_compressed_packet(&[Command::COM_QUIT as u8], 0, consts::MAX_PAYLOAD_LEN);
self.stream = Stream::TcpStream(None);
}
}
#[derive(Debug)]
pub enum Stream {
#[cfg(unix)]
SocketStream(BufStream<unix::net::UnixStream>),
#[cfg(windows)]
SocketStream(BufStream<np::PipeClient>),
TcpStream(Option<TcpStream>),
}
pub trait IoPack: io::Read + io::Write + io::BufRead + 'static {}
impl<T: io::Read + io::Write + 'static> IoPack for BufStream<T> {}
impl AsMut<dyn IoPack> for Stream {
fn as_mut(&mut self) -> &mut dyn IoPack {
match *self {
#[cfg(unix)]
Stream::SocketStream(ref mut stream) => stream,
#[cfg(windows)]
Stream::SocketStream(ref mut stream) => stream,
Stream::TcpStream(Some(ref mut stream)) => stream.as_mut(),
_ => panic!("Incomplete stream"),
}
}
}
impl Stream {
#[cfg(unix)]
pub fn connect_socket(
socket: &str,
read_timeout: Option<Duration>,
write_timeout: Option<Duration>,
) -> MyResult<Stream> {
match unix::net::UnixStream::connect(socket) {
Ok(stream) => {
stream.set_read_timeout(read_timeout)?;
stream.set_write_timeout(write_timeout)?;
Ok(Stream::SocketStream(BufStream::new(stream)))
}
Err(e) => {
let addr = format!("{}", socket);
let desc = format!("{}", e);
Err(DriverError(CouldNotConnect(Some((addr, desc, e.kind())))))
}
}
}
#[cfg(windows)]
pub fn connect_socket(
socket: &str,
read_timeout: Option<Duration>,
write_timeout: Option<Duration>,
) -> MyResult<Stream> {
let full_name = format!(r"\\.\pipe\{}", socket);
match np::PipeClient::connect(full_name.clone()) {
Ok(mut stream) => {
stream.set_read_timeout(read_timeout);
stream.set_write_timeout(write_timeout);
Ok(Stream::SocketStream(BufStream::new(stream)))
}
Err(e) => {
let desc = format!("{}", e);
Err(DriverError(CouldNotConnect(Some((
full_name,
desc,
e.kind(),
)))))
}
}
}
#[cfg(all(not(unix), not(windows)))]
    pub fn connect_socket(
        _socket: &str,
        _read_timeout: Option<Duration>,
        _write_timeout: Option<Duration>,
    ) -> MyResult<Stream> {
        unimplemented!("Sockets are not implemented on the current platform");
    }
pub fn connect_tcp(
ip_or_hostname: &str,
port: u16,
read_timeout: Option<Duration>,
write_timeout: Option<Duration>,
tcp_keepalive_time: Option<u32>,
nodelay: bool,
tcp_connect_timeout: Option<Duration>,
bind_address: Option<SocketAddr>,
) -> MyResult<Stream> {
let mut builder = tcp::MyTcpBuilder::new((ip_or_hostname, port));
builder
.connect_timeout(tcp_connect_timeout)
.read_timeout(read_timeout)
.write_timeout(write_timeout)
.keepalive_time_ms(tcp_keepalive_time)
.nodelay(nodelay)
.bind_address(bind_address);
builder
.connect()
.map(|stream| Stream::TcpStream(Some(TcpStream::Insecure(BufStream::new(stream)))))
.map_err(|err| {
if err.kind() == io::ErrorKind::TimedOut {
DriverError(ConnectTimeout)
} else {
let addr = format!("{}:{}", ip_or_hostname, port);
let desc = format!("{}", err);
DriverError(CouldNotConnect(Some((addr, desc, err.kind()))))
}
})
}
pub fn is_insecure(&self) -> bool {
match self {
&Stream::TcpStream(Some(TcpStream::Insecure(_))) => true,
_ => false,
}
}
pub fn is_socket(&self) -> bool {
match self {
Stream::SocketStream(_) => true,
_ => false,
}
}
}
#[cfg(all(feature = "ssl", target_os = "macos"))]
impl Stream {
pub fn make_secure(
mut self,
verify_peer: bool,
ip_or_hostname: Option<&str>,
ssl_opts: &SslOpts,
) -> MyResult<Stream> {
use std::path::Path;
use std::path::PathBuf;
fn load_client_cert(path: &Path, pass: &str) -> MyResult<Option<SecIdentity>> {
use security_framework::import_export::Pkcs12ImportOptions;
let mut import = Pkcs12ImportOptions::new();
import.passphrase(pass);
let mut client_file = ::std::fs::File::open(path)?;
let mut client_data = Vec::new();
client_file.read_to_end(&mut client_data)?;
let mut identities = import.import(&*client_data)?;
Ok(identities.pop().and_then(|x| x.identity))
}
fn load_extra_certs(files: &[PathBuf]) -> MyResult<Vec<SecCertificate>> {
let mut extra_certs = Vec::new();
for path in files {
let mut cert_file = ::std::fs::File::open(path)?;
let mut cert_data = Vec::new();
cert_file.read_to_end(&mut cert_data)?;
extra_certs.push(SecCertificate::from_der(&*cert_data)?);
}
Ok(extra_certs)
}
if self.is_insecure() {
let mut ctx: SslContext =
SslContext::new(SslProtocolSide::CLIENT, SslConnectionType::STREAM)?;
match *ssl_opts {
Some(ref ssl_opts) => {
if verify_peer {
ctx.set_peer_domain_name(
ip_or_hostname.as_ref().unwrap_or(&("localhost".into())),
)?;
}
// Taken from gmail.com
ctx.set_enabled_ciphers(&[
CipherSuite::TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
CipherSuite::TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
CipherSuite::TLS_ECDHE_RSA_WITH_RC4_128_SHA,
CipherSuite::TLS_RSA_WITH_AES_128_GCM_SHA256,
CipherSuite::TLS_RSA_WITH_AES_128_CBC_SHA256,
CipherSuite::TLS_RSA_WITH_AES_128_CBC_SHA,
CipherSuite::TLS_RSA_WITH_RC4_128_SHA,
CipherSuite::TLS_RSA_WITH_RC4_128_MD5,
CipherSuite::TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
CipherSuite::TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
CipherSuite::TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
CipherSuite::TLS_RSA_WITH_AES_256_GCM_SHA384,
CipherSuite::TLS_RSA_WITH_AES_256_CBC_SHA256,
CipherSuite::TLS_RSA_WITH_AES_256_CBC_SHA,
CipherSuite::TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
CipherSuite::TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
CipherSuite::TLS_RSA_WITH_3DES_EDE_CBC_SHA,
])?;
if let Some((ref path, ref pass, ref certs)) = *ssl_opts {
if let Some(identity) = load_client_cert(path, pass)? {
let extra_certs = load_extra_certs(certs)?;
ctx.set_certificate(&identity, &*extra_certs)?;
}
}
match self {
Stream::TcpStream(ref mut opt_stream) if opt_stream.is_some() => {
let stream = opt_stream.take().unwrap();
match stream {
TcpStream::Insecure(mut stream) => {
stream.flush()?;
let s_stream = match ctx.handshake(stream.into_inner().unwrap())
{
Ok(s_stream) => s_stream,
Err(HandshakeError::Failure(err)) => return Err(err.into()),
Err(HandshakeError::Interrupted(_)) => unreachable!(),
};
Ok(Stream::TcpStream(Some(TcpStream::Secure(BufStream::new(
s_stream,
)))))
}
_ => unreachable!(),
}
}
_ => unreachable!(),
}
}
_ => unreachable!(),
}
} else {
Ok(self)
}
}
}
#[cfg(all(feature = "ssl", not(target_os = "macos"), unix))]
impl Stream {
pub fn make_secure(
mut self,
verify_peer: bool,
_: Option<&str>,
ssl_opts: &SslOpts,
) -> MyResult<Stream> {
if self.is_insecure() {
let mut ctx = SslContext::builder(ssl::SslMethod::tls())?;
let mode = if verify_peer {
ssl::SslVerifyMode::PEER
} else {
ssl::SslVerifyMode::NONE
};
ctx.set_verify(mode);
match *ssl_opts {
Some((ref ca_cert, None)) => ctx.set_ca_file(&ca_cert)?,
Some((ref ca_cert, Some((ref client_cert, ref client_key)))) => {
ctx.set_ca_file(&ca_cert)?;
ctx.set_certificate_file(&client_cert, ssl::SslFiletype::PEM)?;
ctx.set_private_key_file(&client_key, ssl::SslFiletype::PEM)?;
}
_ => unreachable!(),
}
match self {
Stream::TcpStream(ref mut opt_stream) if opt_stream.is_some() => {
let stream = opt_stream.take().unwrap();
match stream {
TcpStream::Insecure(stream) => {
let ctx = ctx.build();
let s_stream = match ssl::Ssl::new(&ctx)?
.connect(stream.into_inner().unwrap())
{
Ok(s_stream) => s_stream,
Err(handshake_err) => match handshake_err {
ssl::HandshakeError::SetupFailure(err) => {
return Err(err.into());
}
ssl::HandshakeError::Failure(mid_stream) => {
return Err(mid_stream.into_error().into());
}
ssl::HandshakeError::WouldBlock(_mid_stream) => unreachable!(),
},
};
Ok(Stream::TcpStream(Some(TcpStream::Secure(BufStream::new(
s_stream,
)))))
}
_ => unreachable!(),
}
}
_ => unreachable!(),
}
} else {
Ok(self)
}
}
}
impl Drop for Stream {
fn drop(&mut self) {
if let &mut Stream::TcpStream(None) = self {
return;
}
let _ = self
.as_mut()
.write_packet(&[Command::COM_QUIT as u8], 0, consts::MAX_PAYLOAD_LEN);
let _ = self.as_mut().flush();
}
}
pub enum TcpStream {
#[cfg(all(feature = "ssl", any(unix, target_os = "macos")))]
Secure(BufStream<SslStream<net::TcpStream>>),
Insecure(BufStream<net::TcpStream>),
}
impl AsMut<dyn IoPack> for TcpStream {
fn as_mut(&mut self) -> &mut dyn IoPack {
match *self {
#[cfg(all(feature = "ssl", any(unix, target_os = "macos")))]
TcpStream::Secure(ref mut stream) => stream,
TcpStream::Insecure(ref mut stream) => stream,
}
}
}
impl fmt::Debug for TcpStream {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
#[cfg(all(feature = "ssl", any(unix, target_os = "macos")))]
TcpStream::Secure(_) => write!(f, "Secure stream"),
TcpStream::Insecure(ref s) => write!(f, "Insecure stream {:?}", s),
}
}
}
| 34.899884 | 100 | 0.494313 |
2353306598ba66413d13fe19dc4e03010803a865
| 4,571 |
use zoon::{*, eprintln};
use crate::{connection::connection, app};
use shared::{UpMsg, ClientId, ProjectId, TimeEntryId, time_tracker};
use std::sync::Arc;
mod view;
const TIME_ENTRY_BREAKPOINT: u32 = 660;
// ------ ------
// Types
// ------ ------
#[derive(Default)]
struct Client {
id: ClientId,
name:String,
projects: Vec<Arc<Project>>,
}
#[derive(Default)]
struct Project {
id: ProjectId,
name: String,
time_entries: MutableVec<Arc<TimeEntry>>,
}
#[derive(Default)]
struct TimeEntry {
id: TimeEntryId,
name: Mutable<String>,
started: Mutable<Wrapper<DateTime<Local>>>,
stopped: Mutable<Option<Wrapper<DateTime<Local>>>>,
is_old: bool,
}
// ------ ------
// States
// ------ ------
#[static_ref]
fn clients() -> &'static MutableVec<Arc<Client>> {
MutableVec::new()
}
#[static_ref]
fn current_time() -> &'static Mutable<DateTime<Local>> {
current_time_updater();
Mutable::new(Local::now())
}
#[static_ref]
fn current_time_updater() -> &'static Mutable<Timer> {
Mutable::new(Timer::new(1_000, || current_time().set_neq(Local::now())))
}
// ------ ------
// Commands
// ------ ------
pub fn request_clients() {
Task::start(async {
let msg = UpMsg::GetTimeTrackerClients;
if let Err(error) = connection().send_up_msg(msg).await {
eprintln!("get TimeTracker clients request failed: {}", error);
}
});
}
pub fn convert_and_set_clients(new_clients: Vec<time_tracker::Client>) {
fn convert_clients(clients: Vec<time_tracker::Client>) -> Vec<Arc<Client>> {
clients.into_iter().map(|client| {
Arc::new(Client {
id: client.id,
name: client.name,
projects: convert_projects(client.projects),
})
}).collect()
}
fn convert_projects(time_blocks: Vec<time_tracker::Project>) -> Vec<Arc<Project>> {
time_blocks.into_iter().map(|project| {
Arc::new(Project {
id: project.id,
name: project.name,
time_entries: MutableVec::new_with_values(convert_time_entries(project.time_entries)),
})
}).collect()
}
fn convert_time_entries(time_entries: Vec<time_tracker::TimeEntry>) -> Vec<Arc<TimeEntry>> {
time_entries.into_iter().map(|time_entry| {
Arc::new(TimeEntry {
id: time_entry.id,
name: Mutable::new(time_entry.name),
started: Mutable::new(Wrapper::new(time_entry.started)),
stopped: Mutable::new(time_entry.stopped.map(Wrapper::new)),
is_old: true,
})
}).collect()
}
clients().lock_mut().replace_cloned(convert_clients(new_clients));
}
// -- project --
fn toggle_tracker(project: &Project) {
let active_time_entry = project
.time_entries
.lock_ref()
.iter()
.find(|time_entry| time_entry.stopped.get().is_none())
.cloned();
if let Some(active_time_entry) = active_time_entry {
return active_time_entry.stopped.set(Some(Local::now().into()));
}
add_time_entry(project);
}
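// Toggle semantics (sketch): if the project already has a running entry
// (one with no `stopped` time), stop it at `Local::now()`; otherwise start
// a fresh entry via `add_time_entry`.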
// -- time_entry --
fn add_time_entry(project: &Project) {
let mut time_entries = project.time_entries.lock_mut();
let name = time_entries
.first()
.map(|time_entry| time_entry.name.get_cloned())
.unwrap_or_default();
let time_entry = TimeEntry::default();
time_entry.name.set(name);
// @TODO send up_msg
time_entries.insert_cloned(0, Arc::new(time_entry));
}
fn delete_time_entry(project: &Project, time_entry_id: TimeEntryId) {
// @TODO send up_msg + confirm dialog
project.time_entries.lock_mut().retain(|time_entry| time_entry.id != time_entry_id);
}
fn rename_time_entry(_time_entry_id: TimeEntryId, _name: &str) {
// @TODO send up_msg
zoon::println!("rename_time_entry not implemented yet");
}
fn set_time_entry_started(time_entry: &TimeEntry, started: DateTime<Local>) {
// @TODO send up_msg
time_entry.started.set(Wrapper::new(started));
}
fn set_time_entry_stopped(time_entry: &TimeEntry, stopped: DateTime<Local>) {
// @TODO send up_msg
time_entry.stopped.set(Some(Wrapper::new(stopped)));
}
// ------ ------
// Signals
// ------ ------
fn show_wide_time_entry() -> impl Signal<Item = bool> {
app::viewport_width().signal().map(|width| width > TIME_ENTRY_BREAKPOINT).dedupe()
}
// ------ ------
// View
// ------ ------
pub fn view() -> RawElement {
view::page().into_raw_element()
}
| 26.888235 | 102 | 0.611026 |
1d3ac262b5011383f1f054f2a63bc8ff645f5f2a
| 2,406 |
//! Defines a BTDF that describes specular transmission
use enum_set::EnumSet;
use std::f32;
use bxdf::fresnel::{Dielectric, Fresnel};
use bxdf::{self, BxDF, BxDFType};
use film::Colorf;
use linalg::{self, Vector};
/// Specular transmission BTDF that implements a specularly transmissive material model
#[derive(Clone, Copy)]
pub struct SpecularTransmission<'a> {
/// Color of the transmissited light
transmission: Colorf,
/// Fresnel term for the tranmission model, only dielectrics make sense here
fresnel: &'a Dielectric,
}
impl<'a> SpecularTransmission<'a> {
/// Create a specularly transmissive BTDF with the color and Fresnel term
pub fn new(c: &Colorf, fresnel: &'a Dielectric) -> SpecularTransmission<'a> {
SpecularTransmission {
transmission: *c,
fresnel: fresnel,
}
}
}
impl<'a> BxDF for SpecularTransmission<'a> {
fn bxdf_type(&self) -> EnumSet<BxDFType> {
let mut e = EnumSet::new();
e.insert(BxDFType::Specular);
e.insert(BxDFType::Transmission);
e
}
    /// We'll never exactly hit the specular transmission direction with a
    /// randomly sampled pair of directions, so this just returns black. Use `sample` instead.
fn eval(&self, _: &Vector, _: &Vector) -> Colorf {
Colorf::broadcast(0.0)
}
/// Sampling the specular BTDF just returns the specular transmission direction
/// for the light leaving along `w_o`
fn sample(&self, w_o: &Vector, _: &(f32, f32)) -> (Colorf, Vector, f32) {
// Select the incident and transmitted indices of refraction based on whether
// we're entering or exiting the material
let entering = bxdf::cos_theta(w_o) > 0.0;
let (ei, et, n) = if entering {
(
self.fresnel.eta_i,
self.fresnel.eta_t,
Vector::new(0.0, 0.0, 1.0),
)
} else {
(
self.fresnel.eta_t,
self.fresnel.eta_i,
Vector::new(0.0, 0.0, -1.0),
)
};
if let Some(w_i) = linalg::refract(w_o, &n, ei / et) {
let f = Colorf::broadcast(1.0) - self.fresnel.fresnel(bxdf::cos_theta(&w_i));
let c = f * self.transmission / f32::abs(bxdf::cos_theta(&w_i));
(c, w_i, 1.0)
} else {
(Colorf::black(), Vector::broadcast(0.0), 0.0)
}
}
}
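// Radiance sketch for `sample` above: with Fresnel reflectance Fr(cos_theta_i),
// the transmitted contribution is
//   f = (1 - Fr(cos_theta_i)) * T / |cos_theta_i|,
// where T is `transmission`; dividing by |cos_theta_i| pre-cancels the cosine
// factor the integrator applies, as is conventional for delta BTDFs.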
| 34.371429 | 89 | 0.587282 |
3856ea420b0f30c73d4a5e1be4c72972445fedfd
| 11,709 |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::RelationDir::*;
use self::TypeVariableValue::*;
use self::UndoEntry::*;
use hir::def_id::{DefId};
use syntax::util::small_vector::SmallVector;
use syntax_pos::Span;
use ty::{self, Ty};
use std::cmp::min;
use std::marker::PhantomData;
use std::mem;
use std::u32;
use rustc_data_structures::snapshot_vec as sv;
use rustc_data_structures::unify as ut;
pub struct TypeVariableTable<'tcx> {
values: sv::SnapshotVec<Delegate<'tcx>>,
eq_relations: ut::UnificationTable<ty::TyVid>,
}
struct TypeVariableData<'tcx> {
value: TypeVariableValue<'tcx>,
diverging: bool
}
enum TypeVariableValue<'tcx> {
Known(Ty<'tcx>),
Bounded {
relations: Vec<Relation>,
default: Option<Default<'tcx>>
}
}
// We will use this to store the required information to recapitulate what happened when
// an error occurs.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Default<'tcx> {
pub ty: Ty<'tcx>,
/// The span where the default was incurred
pub origin_span: Span,
/// The definition that the default originates from
pub def_id: DefId
}
pub struct Snapshot {
snapshot: sv::Snapshot,
eq_snapshot: ut::Snapshot<ty::TyVid>,
}
enum UndoEntry<'tcx> {
// The type of the var was specified.
SpecifyVar(ty::TyVid, Vec<Relation>, Option<Default<'tcx>>),
Relate(ty::TyVid, ty::TyVid),
RelateRange(ty::TyVid, usize),
}
struct Delegate<'tcx>(PhantomData<&'tcx ()>);
type Relation = (RelationDir, ty::TyVid);
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub enum RelationDir {
SubtypeOf, SupertypeOf, EqTo, BiTo
}
impl RelationDir {
fn opposite(self) -> RelationDir {
match self {
SubtypeOf => SupertypeOf,
SupertypeOf => SubtypeOf,
EqTo => EqTo,
BiTo => BiTo,
}
}
}
impl<'tcx> TypeVariableTable<'tcx> {
pub fn new() -> TypeVariableTable<'tcx> {
TypeVariableTable {
values: sv::SnapshotVec::new(),
eq_relations: ut::UnificationTable::new(),
}
}
fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
relations(self.values.get_mut(a.index as usize))
}
pub fn default(&self, vid: ty::TyVid) -> Option<Default<'tcx>> {
match &self.values.get(vid.index as usize).value {
&Known(_) => None,
&Bounded { ref default, .. } => default.clone()
}
}
pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
self.values.get(vid.index as usize).diverging
}
/// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
///
/// Precondition: neither `a` nor `b` are known.
pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
let a = self.root_var(a);
let b = self.root_var(b);
if a != b {
if dir == EqTo {
// a and b must be equal which we mark in the unification table
let root = self.eq_relations.union(a, b);
// In addition to being equal, all relations from the variable which is no longer
// the root must be added to the root so they are not forgotten as the other
// variable should no longer be referenced (other than to get the root)
let other = if a == root { b } else { a };
let count = {
let (relations, root_relations) = if other.index < root.index {
let (pre, post) = self.values.split_at_mut(root.index as usize);
(relations(&mut pre[other.index as usize]), relations(&mut post[0]))
} else {
let (pre, post) = self.values.split_at_mut(other.index as usize);
(relations(&mut post[0]), relations(&mut pre[root.index as usize]))
};
root_relations.extend_from_slice(relations);
relations.len()
};
self.values.record(RelateRange(root, count));
} else {
self.relations(a).push((dir, b));
self.relations(b).push((dir.opposite(), a));
self.values.record(Relate(a, b));
}
}
}
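    // Bookkeeping sketch for `relate_vars`: `a <: b` pushes (SubtypeOf, b)
    // onto a's relation list and (SupertypeOf, a) onto b's, while `a == b`
    // unions the two keys and copies the non-root's relations onto the root
    // so that later instantiation sees all of them.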
/// Instantiates `vid` with the type `ty` and then pushes an entry onto `stack` for each of the
/// relations of `vid` to other variables. The relations will have the form `(ty, dir, vid1)`
/// where `vid1` is some other variable id.
///
/// Precondition: `vid` must be a root in the unification table
pub fn instantiate_and_push(
&mut self,
vid: ty::TyVid,
ty: Ty<'tcx>,
stack: &mut SmallVector<(Ty<'tcx>, RelationDir, ty::TyVid)>)
{
debug_assert!(self.root_var(vid) == vid);
let old_value = {
let value_ptr = &mut self.values.get_mut(vid.index as usize).value;
mem::replace(value_ptr, Known(ty))
};
let (relations, default) = match old_value {
Bounded { relations, default } => (relations, default),
Known(_) => bug!("Asked to instantiate variable that is \
already instantiated")
};
for &(dir, vid) in &relations {
stack.push((ty, dir, vid));
}
self.values.record(SpecifyVar(vid, relations, default));
}
pub fn new_var(&mut self,
diverging: bool,
default: Option<Default<'tcx>>) -> ty::TyVid {
self.eq_relations.new_key(());
let index = self.values.push(TypeVariableData {
value: Bounded { relations: vec![], default: default },
diverging: diverging
});
let v = ty::TyVid { index: index as u32 };
debug!("new_var() -> {:?}", v);
v
}
pub fn root_var(&mut self, vid: ty::TyVid) -> ty::TyVid {
self.eq_relations.find(vid)
}
pub fn probe(&mut self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
let vid = self.root_var(vid);
self.probe_root(vid)
}
/// Retrieves the type of `vid` given that it is currently a root in the unification table
pub fn probe_root(&mut self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
debug_assert!(self.root_var(vid) == vid);
match self.values.get(vid.index as usize).value {
Bounded { .. } => None,
Known(t) => Some(t)
}
}
pub fn replace_if_possible(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
match t.sty {
ty::TyInfer(ty::TyVar(v)) => {
match self.probe(v) {
None => t,
Some(u) => u
}
}
_ => t,
}
}
pub fn snapshot(&mut self) -> Snapshot {
Snapshot {
snapshot: self.values.start_snapshot(),
eq_snapshot: self.eq_relations.snapshot(),
}
}
pub fn rollback_to(&mut self, s: Snapshot) {
debug!("rollback_to{:?}", {
for action in self.values.actions_since_snapshot(&s.snapshot) {
match *action {
sv::UndoLog::NewElem(index) => {
debug!("inference variable _#{}t popped", index)
}
_ => { }
}
}
});
self.values.rollback_to(s.snapshot);
self.eq_relations.rollback_to(s.eq_snapshot);
}
pub fn commit(&mut self, s: Snapshot) {
self.values.commit(s.snapshot);
self.eq_relations.commit(s.eq_snapshot);
}
pub fn types_escaping_snapshot(&mut self, s: &Snapshot) -> Vec<Ty<'tcx>> {
/*!
* Find the set of type variables that existed *before* `s`
* but which have only been unified since `s` started, and
* return the types with which they were unified. So if we had
* a type variable `V0`, then we started the snapshot, then we
* created a type variable `V1`, unified `V0` with `T0`, and
* unified `V1` with `T1`, this function would return `{T0}`.
*/
let mut new_elem_threshold = u32::MAX;
let mut escaping_types = Vec::new();
let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot);
debug!("actions_since_snapshot.len() = {}", actions_since_snapshot.len());
for action in actions_since_snapshot {
match *action {
sv::UndoLog::NewElem(index) => {
// if any new variables were created during the
// snapshot, remember the lower index (which will
// always be the first one we see). Note that this
// action must precede those variables being
// specified.
new_elem_threshold = min(new_elem_threshold, index as u32);
debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold);
}
sv::UndoLog::Other(SpecifyVar(vid, ..)) => {
if vid.index < new_elem_threshold {
// quick check to see if this variable was
// created since the snapshot started or not.
let escaping_type = match self.values.get(vid.index as usize).value {
Bounded { .. } => bug!(),
Known(ty) => ty,
};
escaping_types.push(escaping_type);
}
debug!("SpecifyVar({:?}) new_elem_threshold={}", vid, new_elem_threshold);
}
_ => { }
}
}
escaping_types
}
pub fn unsolved_variables(&mut self) -> Vec<ty::TyVid> {
(0..self.values.len())
.filter_map(|i| {
let vid = ty::TyVid { index: i as u32 };
if self.probe(vid).is_some() {
None
} else {
Some(vid)
}
})
.collect()
}
}
impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> {
type Value = TypeVariableData<'tcx>;
type Undo = UndoEntry<'tcx>;
fn reverse(values: &mut Vec<TypeVariableData<'tcx>>, action: UndoEntry<'tcx>) {
match action {
SpecifyVar(vid, relations, default) => {
values[vid.index as usize].value = Bounded {
relations: relations,
default: default
};
}
Relate(a, b) => {
relations(&mut (*values)[a.index as usize]).pop();
relations(&mut (*values)[b.index as usize]).pop();
}
RelateRange(i, n) => {
let relations = relations(&mut (*values)[i.index as usize]);
for _ in 0..n {
relations.pop();
}
}
}
}
}
fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec<Relation> {
match v.value {
Known(_) => bug!("var_sub_var: variable is known"),
Bounded { ref mut relations, .. } => relations
}
}
| 34.539823 | 99 | 0.537706 |
08d9087dd143e2db651c198a0703c98e144f8de7
| 181 |
mod install;
mod install_errors;
mod version;
pub use install::install_latest_version;
pub use install_errors::InstallError;
pub use version::{binary_version, get_latest_version};
| 22.625 | 54 | 0.823204 |
4ae2d9da82d88a57a0be0d5ae92f010d021b7b2d
| 63,521 |
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::PADREGE {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
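// Usage sketch (illustrative; `padrege` is assumed to be an instance of this
// register): `read()` snapshots the current bits, `write(|w| ..)` starts from
// the reset value, `modify(|r, w| ..)` performs a read-modify-write, and
// `reset()` restores the reset value.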
#[doc = "Possible values of the field `PAD19FNCSEL`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD19FNCSELR {
#[doc = "Configure as the analog comparator reference 0 signal value."]
CMPRF0,
#[doc = "IOM/MSPI nCE group 19 value."]
NCE19,
#[doc = "CTIMER conenction 6 value."]
CT6,
#[doc = "Configure as GPIO19 value."]
GPIO19,
#[doc = "SCARD serial clock value."]
SCCLK,
#[doc = "Configure as the ANATEST1 I/O signal value."]
ANATEST1,
#[doc = "Configure as the UART1 RX input signal value."]
UART1RX,
#[doc = "Configure as the PDM I2S bit clock input signal value."]
I2SBCLK,
}
impl PAD19FNCSELR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
PAD19FNCSELR::CMPRF0 => 0,
PAD19FNCSELR::NCE19 => 1,
PAD19FNCSELR::CT6 => 2,
PAD19FNCSELR::GPIO19 => 3,
PAD19FNCSELR::SCCLK => 4,
PAD19FNCSELR::ANATEST1 => 5,
PAD19FNCSELR::UART1RX => 6,
PAD19FNCSELR::I2SBCLK => 7,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> PAD19FNCSELR {
match value {
0 => PAD19FNCSELR::CMPRF0,
1 => PAD19FNCSELR::NCE19,
2 => PAD19FNCSELR::CT6,
3 => PAD19FNCSELR::GPIO19,
4 => PAD19FNCSELR::SCCLK,
5 => PAD19FNCSELR::ANATEST1,
6 => PAD19FNCSELR::UART1RX,
7 => PAD19FNCSELR::I2SBCLK,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `CMPRF0`"]
#[inline]
pub fn is_cmprf0(&self) -> bool {
*self == PAD19FNCSELR::CMPRF0
}
#[doc = "Checks if the value of the field is `NCE19`"]
#[inline]
pub fn is_nce19(&self) -> bool {
*self == PAD19FNCSELR::NCE19
}
#[doc = "Checks if the value of the field is `CT6`"]
#[inline]
pub fn is_ct6(&self) -> bool {
*self == PAD19FNCSELR::CT6
}
#[doc = "Checks if the value of the field is `GPIO19`"]
#[inline]
pub fn is_gpio19(&self) -> bool {
*self == PAD19FNCSELR::GPIO19
}
#[doc = "Checks if the value of the field is `SCCLK`"]
#[inline]
pub fn is_scclk(&self) -> bool {
*self == PAD19FNCSELR::SCCLK
}
#[doc = "Checks if the value of the field is `ANATEST1`"]
#[inline]
pub fn is_anatest1(&self) -> bool {
*self == PAD19FNCSELR::ANATEST1
}
#[doc = "Checks if the value of the field is `UART1RX`"]
#[inline]
pub fn is_uart1rx(&self) -> bool {
*self == PAD19FNCSELR::UART1RX
}
#[doc = "Checks if the value of the field is `I2SBCLK`"]
#[inline]
pub fn is_i2sbclk(&self) -> bool {
*self == PAD19FNCSELR::I2SBCLK
}
}
#[doc = "Possible values of the field `PAD19STRNG`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD19STRNGR {
#[doc = "Low drive strength value."]
LOW,
#[doc = "High drive strength value."]
HIGH,
}
impl PAD19STRNGR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD19STRNGR::LOW => false,
PAD19STRNGR::HIGH => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD19STRNGR {
match value {
false => PAD19STRNGR::LOW,
true => PAD19STRNGR::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline]
pub fn is_low(&self) -> bool {
*self == PAD19STRNGR::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline]
pub fn is_high(&self) -> bool {
*self == PAD19STRNGR::HIGH
}
}
#[doc = "Possible values of the field `PAD19INPEN`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD19INPENR {
#[doc = "Pad input disabled value."]
DIS,
#[doc = "Pad input enabled value."]
EN,
}
impl PAD19INPENR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD19INPENR::DIS => false,
PAD19INPENR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD19INPENR {
match value {
false => PAD19INPENR::DIS,
true => PAD19INPENR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == PAD19INPENR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == PAD19INPENR::EN
}
}
#[doc = "Possible values of the field `PAD19PULL`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD19PULLR {
#[doc = "Pullup disabled value."]
DIS,
#[doc = "Pullup enabled value."]
EN,
}
impl PAD19PULLR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD19PULLR::DIS => false,
PAD19PULLR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD19PULLR {
match value {
false => PAD19PULLR::DIS,
true => PAD19PULLR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == PAD19PULLR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == PAD19PULLR::EN
}
}
#[doc = "Possible values of the field `PAD18FNCSEL`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD18FNCSELR {
#[doc = "Configure as the analog comparator input 1 signal value."]
CMPIN1,
#[doc = "IOM/MSPI nCE group 18 value."]
NCE18,
#[doc = "CTIMER connection 4 value."]
CT4,
#[doc = "Configure as GPIO18 value."]
GPIO18,
#[doc = "Configure as UART0 RTS output signal value."]
UA0RTS,
#[doc = "Configure as ANATEST2 I/O signal value."]
ANATEST2,
#[doc = "Configure as UART1 TX output signal value."]
UART1TX,
#[doc = "SCARD data input/output connectin value."]
SCCIO,
}
impl PAD18FNCSELR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
PAD18FNCSELR::CMPIN1 => 0,
PAD18FNCSELR::NCE18 => 1,
PAD18FNCSELR::CT4 => 2,
PAD18FNCSELR::GPIO18 => 3,
PAD18FNCSELR::UA0RTS => 4,
PAD18FNCSELR::ANATEST2 => 5,
PAD18FNCSELR::UART1TX => 6,
PAD18FNCSELR::SCCIO => 7,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> PAD18FNCSELR {
match value {
0 => PAD18FNCSELR::CMPIN1,
1 => PAD18FNCSELR::NCE18,
2 => PAD18FNCSELR::CT4,
3 => PAD18FNCSELR::GPIO18,
4 => PAD18FNCSELR::UA0RTS,
5 => PAD18FNCSELR::ANATEST2,
6 => PAD18FNCSELR::UART1TX,
7 => PAD18FNCSELR::SCCIO,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `CMPIN1`"]
#[inline]
pub fn is_cmpin1(&self) -> bool {
*self == PAD18FNCSELR::CMPIN1
}
#[doc = "Checks if the value of the field is `NCE18`"]
#[inline]
pub fn is_nce18(&self) -> bool {
*self == PAD18FNCSELR::NCE18
}
#[doc = "Checks if the value of the field is `CT4`"]
#[inline]
pub fn is_ct4(&self) -> bool {
*self == PAD18FNCSELR::CT4
}
#[doc = "Checks if the value of the field is `GPIO18`"]
#[inline]
pub fn is_gpio18(&self) -> bool {
*self == PAD18FNCSELR::GPIO18
}
#[doc = "Checks if the value of the field is `UA0RTS`"]
#[inline]
pub fn is_ua0rts(&self) -> bool {
*self == PAD18FNCSELR::UA0RTS
}
#[doc = "Checks if the value of the field is `ANATEST2`"]
#[inline]
pub fn is_anatest2(&self) -> bool {
*self == PAD18FNCSELR::ANATEST2
}
#[doc = "Checks if the value of the field is `UART1TX`"]
#[inline]
pub fn is_uart1tx(&self) -> bool {
*self == PAD18FNCSELR::UART1TX
}
#[doc = "Checks if the value of the field is `SCCIO`"]
#[inline]
pub fn is_sccio(&self) -> bool {
*self == PAD18FNCSELR::SCCIO
}
}
#[doc = "Possible values of the field `PAD18STRNG`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD18STRNGR {
#[doc = "Low drive strength value."]
LOW,
#[doc = "High drive strength value."]
HIGH,
}
impl PAD18STRNGR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD18STRNGR::LOW => false,
PAD18STRNGR::HIGH => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD18STRNGR {
match value {
false => PAD18STRNGR::LOW,
true => PAD18STRNGR::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline]
pub fn is_low(&self) -> bool {
*self == PAD18STRNGR::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline]
pub fn is_high(&self) -> bool {
*self == PAD18STRNGR::HIGH
}
}
#[doc = "Possible values of the field `PAD18INPEN`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD18INPENR {
#[doc = "Pad input disabled value."]
DIS,
#[doc = "Pad input enabled value."]
EN,
}
impl PAD18INPENR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD18INPENR::DIS => false,
PAD18INPENR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD18INPENR {
match value {
false => PAD18INPENR::DIS,
true => PAD18INPENR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == PAD18INPENR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == PAD18INPENR::EN
}
}
#[doc = "Possible values of the field `PAD18PULL`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD18PULLR {
#[doc = "Pullup disabled value."]
DIS,
#[doc = "Pullup enabled value."]
EN,
}
impl PAD18PULLR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD18PULLR::DIS => false,
PAD18PULLR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD18PULLR {
match value {
false => PAD18PULLR::DIS,
true => PAD18PULLR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == PAD18PULLR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == PAD18PULLR::EN
}
}
#[doc = "Possible values of the field `PAD17FNCSEL`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD17FNCSELR {
#[doc = "Configure as the analog comparator reference signal 1 input signal value."]
CMPRF1,
#[doc = "IOM/MSPI nCE group 17 value."]
NCE17,
#[doc = "Configure as the ADC Trigger 1 signal value."]
TRIG1,
#[doc = "Configure as GPIO17 value."]
GPIO17,
#[doc = "SCARD serial clock output value."]
SCCCLK,
#[doc = "Configure as UART0 RX input signal value."]
UART0RX,
#[doc = "Configure as UART1 CTS input signal value."]
UA1CTS,
#[doc = r" Reserved"]
_Reserved(u8),
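    // Encoding 5 has no documented function for pad 17; `_from` preserves it
    // (and any other undocumented value) through `_Reserved` instead of
    // treating it as unreachable.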
}
impl PAD17FNCSELR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
PAD17FNCSELR::CMPRF1 => 0,
PAD17FNCSELR::NCE17 => 1,
PAD17FNCSELR::TRIG1 => 2,
PAD17FNCSELR::GPIO17 => 3,
PAD17FNCSELR::SCCCLK => 4,
PAD17FNCSELR::UART0RX => 6,
PAD17FNCSELR::UA1CTS => 7,
PAD17FNCSELR::_Reserved(bits) => bits,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> PAD17FNCSELR {
match value {
0 => PAD17FNCSELR::CMPRF1,
1 => PAD17FNCSELR::NCE17,
2 => PAD17FNCSELR::TRIG1,
3 => PAD17FNCSELR::GPIO17,
4 => PAD17FNCSELR::SCCCLK,
6 => PAD17FNCSELR::UART0RX,
7 => PAD17FNCSELR::UA1CTS,
i => PAD17FNCSELR::_Reserved(i),
}
}
#[doc = "Checks if the value of the field is `CMPRF1`"]
#[inline]
pub fn is_cmprf1(&self) -> bool {
*self == PAD17FNCSELR::CMPRF1
}
#[doc = "Checks if the value of the field is `NCE17`"]
#[inline]
pub fn is_nce17(&self) -> bool {
*self == PAD17FNCSELR::NCE17
}
#[doc = "Checks if the value of the field is `TRIG1`"]
#[inline]
pub fn is_trig1(&self) -> bool {
*self == PAD17FNCSELR::TRIG1
}
#[doc = "Checks if the value of the field is `GPIO17`"]
#[inline]
pub fn is_gpio17(&self) -> bool {
*self == PAD17FNCSELR::GPIO17
}
#[doc = "Checks if the value of the field is `SCCCLK`"]
#[inline]
pub fn is_sccclk(&self) -> bool {
*self == PAD17FNCSELR::SCCCLK
}
#[doc = "Checks if the value of the field is `UART0RX`"]
#[inline]
pub fn is_uart0rx(&self) -> bool {
*self == PAD17FNCSELR::UART0RX
}
#[doc = "Checks if the value of the field is `UA1CTS`"]
#[inline]
pub fn is_ua1cts(&self) -> bool {
*self == PAD17FNCSELR::UA1CTS
}
}
#[doc = "Possible values of the field `PAD17STRNG`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD17STRNGR {
#[doc = "Low drive strength value."]
LOW,
#[doc = "High drive strength value."]
HIGH,
}
impl PAD17STRNGR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD17STRNGR::LOW => false,
PAD17STRNGR::HIGH => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD17STRNGR {
match value {
false => PAD17STRNGR::LOW,
true => PAD17STRNGR::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline]
pub fn is_low(&self) -> bool {
*self == PAD17STRNGR::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline]
pub fn is_high(&self) -> bool {
*self == PAD17STRNGR::HIGH
}
}
#[doc = "Possible values of the field `PAD17INPEN`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD17INPENR {
#[doc = "Pad input disabled value."]
DIS,
#[doc = "Pad input enabled value."]
EN,
}
impl PAD17INPENR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD17INPENR::DIS => false,
PAD17INPENR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD17INPENR {
match value {
false => PAD17INPENR::DIS,
true => PAD17INPENR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == PAD17INPENR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == PAD17INPENR::EN
}
}
#[doc = "Possible values of the field `PAD17PULL`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD17PULLR {
#[doc = "Pullup disabled value."]
DIS,
#[doc = "Pullup enabled value."]
EN,
}
impl PAD17PULLR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD17PULLR::DIS => false,
PAD17PULLR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD17PULLR {
match value {
false => PAD17PULLR::DIS,
true => PAD17PULLR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == PAD17PULLR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == PAD17PULLR::EN
}
}
#[doc = "Possible values of the field `PAD16FNCSEL`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD16FNCSELR {
#[doc = "Configure as the analog ADC single ended port 0 input signal value."]
ADCSE0,
#[doc = "IOM/MSPI nCE group 16 value."]
NCE16,
#[doc = "Configure as the ADC Trigger 0 signal value."]
TRIG0,
#[doc = "Configure as GPIO16 value."]
GPIO16,
#[doc = "SCARD reset output value."]
SCCRST,
#[doc = "Configure as comparator input 0 signal value."]
CMPIN0,
#[doc = "Configure as UART0 TX output signal value."]
UART0TX,
#[doc = "Configure as UART1 RTS output signal value."]
UA1RTS,
}
impl PAD16FNCSELR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
PAD16FNCSELR::ADCSE0 => 0,
PAD16FNCSELR::NCE16 => 1,
PAD16FNCSELR::TRIG0 => 2,
PAD16FNCSELR::GPIO16 => 3,
PAD16FNCSELR::SCCRST => 4,
PAD16FNCSELR::CMPIN0 => 5,
PAD16FNCSELR::UART0TX => 6,
PAD16FNCSELR::UA1RTS => 7,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> PAD16FNCSELR {
match value {
0 => PAD16FNCSELR::ADCSE0,
1 => PAD16FNCSELR::NCE16,
2 => PAD16FNCSELR::TRIG0,
3 => PAD16FNCSELR::GPIO16,
4 => PAD16FNCSELR::SCCRST,
5 => PAD16FNCSELR::CMPIN0,
6 => PAD16FNCSELR::UART0TX,
7 => PAD16FNCSELR::UA1RTS,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `ADCSE0`"]
#[inline]
pub fn is_adcse0(&self) -> bool {
*self == PAD16FNCSELR::ADCSE0
}
#[doc = "Checks if the value of the field is `NCE16`"]
#[inline]
pub fn is_nce16(&self) -> bool {
*self == PAD16FNCSELR::NCE16
}
#[doc = "Checks if the value of the field is `TRIG0`"]
#[inline]
pub fn is_trig0(&self) -> bool {
*self == PAD16FNCSELR::TRIG0
}
#[doc = "Checks if the value of the field is `GPIO16`"]
#[inline]
pub fn is_gpio16(&self) -> bool {
*self == PAD16FNCSELR::GPIO16
}
#[doc = "Checks if the value of the field is `SCCRST`"]
#[inline]
pub fn is_sccrst(&self) -> bool {
*self == PAD16FNCSELR::SCCRST
}
#[doc = "Checks if the value of the field is `CMPIN0`"]
#[inline]
pub fn is_cmpin0(&self) -> bool {
*self == PAD16FNCSELR::CMPIN0
}
#[doc = "Checks if the value of the field is `UART0TX`"]
#[inline]
pub fn is_uart0tx(&self) -> bool {
*self == PAD16FNCSELR::UART0TX
}
#[doc = "Checks if the value of the field is `UA1RTS`"]
#[inline]
pub fn is_ua1rts(&self) -> bool {
*self == PAD16FNCSELR::UA1RTS
}
}
#[doc = "Possible values of the field `PAD16STRNG`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD16STRNGR {
#[doc = "Low drive strength value."]
LOW,
#[doc = "High drive strength value."]
HIGH,
}
impl PAD16STRNGR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD16STRNGR::LOW => false,
PAD16STRNGR::HIGH => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD16STRNGR {
match value {
false => PAD16STRNGR::LOW,
true => PAD16STRNGR::HIGH,
}
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline]
pub fn is_low(&self) -> bool {
*self == PAD16STRNGR::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline]
pub fn is_high(&self) -> bool {
*self == PAD16STRNGR::HIGH
}
}
#[doc = "Possible values of the field `PAD16INPEN`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD16INPENR {
#[doc = "Pad input disabled value."]
DIS,
#[doc = "Pad input enabled value."]
EN,
}
impl PAD16INPENR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD16INPENR::DIS => false,
PAD16INPENR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD16INPENR {
match value {
false => PAD16INPENR::DIS,
true => PAD16INPENR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == PAD16INPENR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == PAD16INPENR::EN
}
}
#[doc = "Possible values of the field `PAD16PULL`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PAD16PULLR {
#[doc = "Pullup disabled value."]
DIS,
#[doc = "Pullup enabled value."]
EN,
}
impl PAD16PULLR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
PAD16PULLR::DIS => false,
PAD16PULLR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> PAD16PULLR {
match value {
false => PAD16PULLR::DIS,
true => PAD16PULLR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == PAD16PULLR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == PAD16PULLR::EN
}
}
#[doc = "Values that can be written to the field `PAD19FNCSEL`"]
pub enum PAD19FNCSELW {
#[doc = "Configure as the analog comparator reference 0 signal value."]
CMPRF0,
#[doc = "IOM/MSPI nCE group 19 value."]
NCE19,
#[doc = "CTIMER conenction 6 value."]
CT6,
#[doc = "Configure as GPIO19 value."]
GPIO19,
#[doc = "SCARD serial clock value."]
SCCLK,
#[doc = "Configure as the ANATEST1 I/O signal value."]
ANATEST1,
#[doc = "Configure as the UART1 RX input signal value."]
UART1RX,
#[doc = "Configure as the PDM I2S bit clock input signal value."]
I2SBCLK,
}
impl PAD19FNCSELW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
PAD19FNCSELW::CMPRF0 => 0,
PAD19FNCSELW::NCE19 => 1,
PAD19FNCSELW::CT6 => 2,
PAD19FNCSELW::GPIO19 => 3,
PAD19FNCSELW::SCCLK => 4,
PAD19FNCSELW::ANATEST1 => 5,
PAD19FNCSELW::UART1RX => 6,
PAD19FNCSELW::I2SBCLK => 7,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD19FNCSELW<'a> {
w: &'a mut W,
}
impl<'a> _PAD19FNCSELW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD19FNCSELW) -> &'a mut W {
{
self.bits(variant._bits())
}
}
#[doc = "Configure as the analog comparator reference 0 signal value."]
#[inline]
pub fn cmprf0(self) -> &'a mut W {
self.variant(PAD19FNCSELW::CMPRF0)
}
#[doc = "IOM/MSPI nCE group 19 value."]
#[inline]
pub fn nce19(self) -> &'a mut W {
self.variant(PAD19FNCSELW::NCE19)
}
#[doc = "CTIMER conenction 6 value."]
#[inline]
pub fn ct6(self) -> &'a mut W {
self.variant(PAD19FNCSELW::CT6)
}
#[doc = "Configure as GPIO19 value."]
#[inline]
pub fn gpio19(self) -> &'a mut W {
self.variant(PAD19FNCSELW::GPIO19)
}
#[doc = "SCARD serial clock value."]
#[inline]
pub fn scclk(self) -> &'a mut W {
self.variant(PAD19FNCSELW::SCCLK)
}
#[doc = "Configure as the ANATEST1 I/O signal value."]
#[inline]
pub fn anatest1(self) -> &'a mut W {
self.variant(PAD19FNCSELW::ANATEST1)
}
#[doc = "Configure as the UART1 RX input signal value."]
#[inline]
pub fn uart1rx(self) -> &'a mut W {
self.variant(PAD19FNCSELW::UART1RX)
}
#[doc = "Configure as the PDM I2S bit clock input signal value."]
#[inline]
pub fn i2sbclk(self) -> &'a mut W {
self.variant(PAD19FNCSELW::I2SBCLK)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 7;
const OFFSET: u8 = 27;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD19STRNG`"]
pub enum PAD19STRNGW {
#[doc = "Low drive strength value."]
LOW,
#[doc = "High drive strength value."]
HIGH,
}
impl PAD19STRNGW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD19STRNGW::LOW => false,
PAD19STRNGW::HIGH => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD19STRNGW<'a> {
w: &'a mut W,
}
impl<'a> _PAD19STRNGW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD19STRNGW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Low drive strength value."]
#[inline]
pub fn low(self) -> &'a mut W {
self.variant(PAD19STRNGW::LOW)
}
#[doc = "High drive strength value."]
#[inline]
pub fn high(self) -> &'a mut W {
self.variant(PAD19STRNGW::HIGH)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 26;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD19INPEN`"]
pub enum PAD19INPENW {
#[doc = "Pad input disabled value."]
DIS,
#[doc = "Pad input enabled value."]
EN,
}
impl PAD19INPENW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD19INPENW::DIS => false,
PAD19INPENW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD19INPENW<'a> {
w: &'a mut W,
}
impl<'a> _PAD19INPENW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD19INPENW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Pad input disabled value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(PAD19INPENW::DIS)
}
#[doc = "Pad input enabled value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(PAD19INPENW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 25;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD19PULL`"]
pub enum PAD19PULLW {
#[doc = "Pullup disabled value."]
DIS,
#[doc = "Pullup enabled value."]
EN,
}
impl PAD19PULLW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD19PULLW::DIS => false,
PAD19PULLW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD19PULLW<'a> {
w: &'a mut W,
}
impl<'a> _PAD19PULLW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD19PULLW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Pullup disabled value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(PAD19PULLW::DIS)
}
#[doc = "Pullup enabled value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(PAD19PULLW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 24;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD18FNCSEL`"]
pub enum PAD18FNCSELW {
#[doc = "Configure as the analog comparator input 1 signal value."]
CMPIN1,
#[doc = "IOM/MSPI nCE group 18 value."]
NCE18,
#[doc = "CTIMER connection 4 value."]
CT4,
#[doc = "Configure as GPIO18 value."]
GPIO18,
#[doc = "Configure as UART0 RTS output signal value."]
UA0RTS,
#[doc = "Configure as ANATEST2 I/O signal value."]
ANATEST2,
#[doc = "Configure as UART1 TX output signal value."]
UART1TX,
#[doc = "SCARD data input/output connectin value."]
SCCIO,
}
impl PAD18FNCSELW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
PAD18FNCSELW::CMPIN1 => 0,
PAD18FNCSELW::NCE18 => 1,
PAD18FNCSELW::CT4 => 2,
PAD18FNCSELW::GPIO18 => 3,
PAD18FNCSELW::UA0RTS => 4,
PAD18FNCSELW::ANATEST2 => 5,
PAD18FNCSELW::UART1TX => 6,
PAD18FNCSELW::SCCIO => 7,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD18FNCSELW<'a> {
w: &'a mut W,
}
impl<'a> _PAD18FNCSELW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD18FNCSELW) -> &'a mut W {
{
self.bits(variant._bits())
}
}
#[doc = "Configure as the analog comparator input 1 signal value."]
#[inline]
pub fn cmpin1(self) -> &'a mut W {
self.variant(PAD18FNCSELW::CMPIN1)
}
#[doc = "IOM/MSPI nCE group 18 value."]
#[inline]
pub fn nce18(self) -> &'a mut W {
self.variant(PAD18FNCSELW::NCE18)
}
#[doc = "CTIMER connection 4 value."]
#[inline]
pub fn ct4(self) -> &'a mut W {
self.variant(PAD18FNCSELW::CT4)
}
#[doc = "Configure as GPIO18 value."]
#[inline]
pub fn gpio18(self) -> &'a mut W {
self.variant(PAD18FNCSELW::GPIO18)
}
#[doc = "Configure as UART0 RTS output signal value."]
#[inline]
pub fn ua0rts(self) -> &'a mut W {
self.variant(PAD18FNCSELW::UA0RTS)
}
#[doc = "Configure as ANATEST2 I/O signal value."]
#[inline]
pub fn anatest2(self) -> &'a mut W {
self.variant(PAD18FNCSELW::ANATEST2)
}
#[doc = "Configure as UART1 TX output signal value."]
#[inline]
pub fn uart1tx(self) -> &'a mut W {
self.variant(PAD18FNCSELW::UART1TX)
}
#[doc = "SCARD data input/output connectin value."]
#[inline]
pub fn sccio(self) -> &'a mut W {
self.variant(PAD18FNCSELW::SCCIO)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 7;
const OFFSET: u8 = 19;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD18STRNG`"]
pub enum PAD18STRNGW {
#[doc = "Low drive strength value."]
LOW,
#[doc = "High drive strength value."]
HIGH,
}
impl PAD18STRNGW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD18STRNGW::LOW => false,
PAD18STRNGW::HIGH => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD18STRNGW<'a> {
w: &'a mut W,
}
impl<'a> _PAD18STRNGW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD18STRNGW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Low drive strength value."]
#[inline]
pub fn low(self) -> &'a mut W {
self.variant(PAD18STRNGW::LOW)
}
#[doc = "High drive strength value."]
#[inline]
pub fn high(self) -> &'a mut W {
self.variant(PAD18STRNGW::HIGH)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 18;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD18INPEN`"]
pub enum PAD18INPENW {
#[doc = "Pad input disabled value."]
DIS,
#[doc = "Pad input enabled value."]
EN,
}
impl PAD18INPENW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD18INPENW::DIS => false,
PAD18INPENW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD18INPENW<'a> {
w: &'a mut W,
}
impl<'a> _PAD18INPENW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD18INPENW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Pad input disabled value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(PAD18INPENW::DIS)
}
#[doc = "Pad input enabled value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(PAD18INPENW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 17;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD18PULL`"]
pub enum PAD18PULLW {
#[doc = "Pullup disabled value."]
DIS,
#[doc = "Pullup enabled value."]
EN,
}
impl PAD18PULLW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD18PULLW::DIS => false,
PAD18PULLW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD18PULLW<'a> {
w: &'a mut W,
}
impl<'a> _PAD18PULLW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD18PULLW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Pullup disabled value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(PAD18PULLW::DIS)
}
#[doc = "Pullup enabled value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(PAD18PULLW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD17FNCSEL`"]
pub enum PAD17FNCSELW {
#[doc = "Configure as the analog comparator reference signal 1 input signal value."]
CMPRF1,
#[doc = "IOM/MSPI nCE group 17 value."]
NCE17,
#[doc = "Configure as the ADC Trigger 1 signal value."]
TRIG1,
#[doc = "Configure as GPIO17 value."]
GPIO17,
#[doc = "SCARD serial clock output value."]
SCCCLK,
#[doc = "Configure as UART0 RX input signal value."]
UART0RX,
#[doc = "Configure as UART1 CTS input signal value."]
UA1CTS,
}
impl PAD17FNCSELW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
PAD17FNCSELW::CMPRF1 => 0,
PAD17FNCSELW::NCE17 => 1,
PAD17FNCSELW::TRIG1 => 2,
PAD17FNCSELW::GPIO17 => 3,
PAD17FNCSELW::SCCCLK => 4,
PAD17FNCSELW::UART0RX => 6,
PAD17FNCSELW::UA1CTS => 7,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD17FNCSELW<'a> {
w: &'a mut W,
}
impl<'a> _PAD17FNCSELW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD17FNCSELW) -> &'a mut W {
unsafe { self.bits(variant._bits()) }
}
#[doc = "Configure as the analog comparator reference signal 1 input signal value."]
#[inline]
pub fn cmprf1(self) -> &'a mut W {
self.variant(PAD17FNCSELW::CMPRF1)
}
#[doc = "IOM/MSPI nCE group 17 value."]
#[inline]
pub fn nce17(self) -> &'a mut W {
self.variant(PAD17FNCSELW::NCE17)
}
#[doc = "Configure as the ADC Trigger 1 signal value."]
#[inline]
pub fn trig1(self) -> &'a mut W {
self.variant(PAD17FNCSELW::TRIG1)
}
#[doc = "Configure as GPIO17 value."]
#[inline]
pub fn gpio17(self) -> &'a mut W {
self.variant(PAD17FNCSELW::GPIO17)
}
#[doc = "SCARD serial clock output value."]
#[inline]
pub fn sccclk(self) -> &'a mut W {
self.variant(PAD17FNCSELW::SCCCLK)
}
#[doc = "Configure as UART0 RX input signal value."]
#[inline]
pub fn uart0rx(self) -> &'a mut W {
self.variant(PAD17FNCSELW::UART0RX)
}
#[doc = "Configure as UART1 CTS input signal value."]
#[inline]
pub fn ua1cts(self) -> &'a mut W {
self.variant(PAD17FNCSELW::UA1CTS)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 7;
const OFFSET: u8 = 11;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
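// Unlike the other FNCSEL fields in this register, PAD17FNCSEL has an
// undocumented encoding (5), so raw writes go through `unsafe fn bits`. A
// sketch of the safe path via the enumerated setters (`gpio` handle assumed
// as above):
//
//     gpio.padrege.modify(|_, w| w.pad17fncsel().uart0rx());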
#[doc = "Values that can be written to the field `PAD17STRNG`"]
pub enum PAD17STRNGW {
#[doc = "Low drive strength value."]
LOW,
#[doc = "High drive strength value."]
HIGH,
}
impl PAD17STRNGW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD17STRNGW::LOW => false,
PAD17STRNGW::HIGH => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD17STRNGW<'a> {
w: &'a mut W,
}
impl<'a> _PAD17STRNGW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD17STRNGW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Low drive strength value."]
#[inline]
pub fn low(self) -> &'a mut W {
self.variant(PAD17STRNGW::LOW)
}
#[doc = "High drive strength value."]
#[inline]
pub fn high(self) -> &'a mut W {
self.variant(PAD17STRNGW::HIGH)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 10;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD17INPEN`"]
pub enum PAD17INPENW {
#[doc = "Pad input disabled value."]
DIS,
#[doc = "Pad input enabled value."]
EN,
}
impl PAD17INPENW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD17INPENW::DIS => false,
PAD17INPENW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD17INPENW<'a> {
w: &'a mut W,
}
impl<'a> _PAD17INPENW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD17INPENW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Pad input disabled value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(PAD17INPENW::DIS)
}
#[doc = "Pad input enabled value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(PAD17INPENW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD17PULL`"]
pub enum PAD17PULLW {
#[doc = "Pullup disabled value."]
DIS,
#[doc = "Pullup enabled value."]
EN,
}
impl PAD17PULLW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD17PULLW::DIS => false,
PAD17PULLW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD17PULLW<'a> {
w: &'a mut W,
}
impl<'a> _PAD17PULLW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD17PULLW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Pullup disabled value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(PAD17PULLW::DIS)
}
#[doc = "Pullup enabled value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(PAD17PULLW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD16FNCSEL`"]
pub enum PAD16FNCSELW {
#[doc = "Configure as the analog ADC single ended port 0 input signal value."]
ADCSE0,
#[doc = "IOM/MSPI nCE group 16 value."]
NCE16,
#[doc = "Configure as the ADC Trigger 0 signal value."]
TRIG0,
#[doc = "Configure as GPIO16 value."]
GPIO16,
#[doc = "SCARD reset output value."]
SCCRST,
#[doc = "Configure as comparator input 0 signal value."]
CMPIN0,
#[doc = "Configure as UART0 TX output signal value."]
UART0TX,
#[doc = "Configure as UART1 RTS output signal value."]
UA1RTS,
}
impl PAD16FNCSELW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
PAD16FNCSELW::ADCSE0 => 0,
PAD16FNCSELW::NCE16 => 1,
PAD16FNCSELW::TRIG0 => 2,
PAD16FNCSELW::GPIO16 => 3,
PAD16FNCSELW::SCCRST => 4,
PAD16FNCSELW::CMPIN0 => 5,
PAD16FNCSELW::UART0TX => 6,
PAD16FNCSELW::UA1RTS => 7,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD16FNCSELW<'a> {
w: &'a mut W,
}
impl<'a> _PAD16FNCSELW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD16FNCSELW) -> &'a mut W {
{
self.bits(variant._bits())
}
}
#[doc = "Configure as the analog ADC single ended port 0 input signal value."]
#[inline]
pub fn adcse0(self) -> &'a mut W {
self.variant(PAD16FNCSELW::ADCSE0)
}
#[doc = "IOM/MSPI nCE group 16 value."]
#[inline]
pub fn nce16(self) -> &'a mut W {
self.variant(PAD16FNCSELW::NCE16)
}
#[doc = "Configure as the ADC Trigger 0 signal value."]
#[inline]
pub fn trig0(self) -> &'a mut W {
self.variant(PAD16FNCSELW::TRIG0)
}
#[doc = "Configure as GPIO16 value."]
#[inline]
pub fn gpio16(self) -> &'a mut W {
self.variant(PAD16FNCSELW::GPIO16)
}
#[doc = "SCARD reset output value."]
#[inline]
pub fn sccrst(self) -> &'a mut W {
self.variant(PAD16FNCSELW::SCCRST)
}
#[doc = "Configure as comparator input 0 signal value."]
#[inline]
pub fn cmpin0(self) -> &'a mut W {
self.variant(PAD16FNCSELW::CMPIN0)
}
#[doc = "Configure as UART0 TX output signal value."]
#[inline]
pub fn uart0tx(self) -> &'a mut W {
self.variant(PAD16FNCSELW::UART0TX)
}
#[doc = "Configure as UART1 RTS output signal value."]
#[inline]
pub fn ua1rts(self) -> &'a mut W {
self.variant(PAD16FNCSELW::UA1RTS)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 7;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD16STRNG`"]
pub enum PAD16STRNGW {
#[doc = "Low drive strength value."]
LOW,
#[doc = "High drive strength value."]
HIGH,
}
impl PAD16STRNGW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD16STRNGW::LOW => false,
PAD16STRNGW::HIGH => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD16STRNGW<'a> {
w: &'a mut W,
}
impl<'a> _PAD16STRNGW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD16STRNGW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Low drive strength value."]
#[inline]
pub fn low(self) -> &'a mut W {
self.variant(PAD16STRNGW::LOW)
}
#[doc = "High drive strength value."]
#[inline]
pub fn high(self) -> &'a mut W {
self.variant(PAD16STRNGW::HIGH)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD16INPEN`"]
pub enum PAD16INPENW {
#[doc = "Pad input disabled value."]
DIS,
#[doc = "Pad input enabled value."]
EN,
}
impl PAD16INPENW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD16INPENW::DIS => false,
PAD16INPENW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD16INPENW<'a> {
w: &'a mut W,
}
impl<'a> _PAD16INPENW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD16INPENW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Pad input disabled value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(PAD16INPENW::DIS)
}
#[doc = "Pad input enabled value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(PAD16INPENW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `PAD16PULL`"]
pub enum PAD16PULLW {
#[doc = "Pullup disabled value."]
DIS,
#[doc = "Pullup enabled value."]
EN,
}
impl PAD16PULLW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
PAD16PULLW::DIS => false,
PAD16PULLW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _PAD16PULLW<'a> {
w: &'a mut W,
}
impl<'a> _PAD16PULLW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: PAD16PULLW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Pullup disabled value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(PAD16PULLW::DIS)
}
#[doc = "Pullup enabled value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(PAD16PULLW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 27:29 - Pad 19 function select"]
#[inline]
pub fn pad19fncsel(&self) -> PAD19FNCSELR {
PAD19FNCSELR::_from({
const MASK: u8 = 7;
const OFFSET: u8 = 27;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bit 26 - Pad 19 drive strength"]
#[inline]
pub fn pad19strng(&self) -> PAD19STRNGR {
PAD19STRNGR::_from({
const MASK: bool = true;
const OFFSET: u8 = 26;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 25 - Pad 19 input enable"]
#[inline]
pub fn pad19inpen(&self) -> PAD19INPENR {
PAD19INPENR::_from({
const MASK: bool = true;
const OFFSET: u8 = 25;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 24 - Pad 19 pullup enable"]
#[inline]
pub fn pad19pull(&self) -> PAD19PULLR {
PAD19PULLR::_from({
const MASK: bool = true;
const OFFSET: u8 = 24;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bits 19:21 - Pad 18 function select"]
#[inline]
pub fn pad18fncsel(&self) -> PAD18FNCSELR {
PAD18FNCSELR::_from({
const MASK: u8 = 7;
const OFFSET: u8 = 19;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bit 18 - Pad 18 drive strength"]
#[inline]
pub fn pad18strng(&self) -> PAD18STRNGR {
PAD18STRNGR::_from({
const MASK: bool = true;
const OFFSET: u8 = 18;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 17 - Pad 18 input enable"]
#[inline]
pub fn pad18inpen(&self) -> PAD18INPENR {
PAD18INPENR::_from({
const MASK: bool = true;
const OFFSET: u8 = 17;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 16 - Pad 18 pullup enable"]
#[inline]
pub fn pad18pull(&self) -> PAD18PULLR {
PAD18PULLR::_from({
const MASK: bool = true;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bits 11:13 - Pad 17 function select"]
#[inline]
pub fn pad17fncsel(&self) -> PAD17FNCSELR {
PAD17FNCSELR::_from({
const MASK: u8 = 7;
const OFFSET: u8 = 11;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bit 10 - Pad 17 drive strength"]
#[inline]
pub fn pad17strng(&self) -> PAD17STRNGR {
PAD17STRNGR::_from({
const MASK: bool = true;
const OFFSET: u8 = 10;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 9 - Pad 17 input enable"]
#[inline]
pub fn pad17inpen(&self) -> PAD17INPENR {
PAD17INPENR::_from({
const MASK: bool = true;
const OFFSET: u8 = 9;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 8 - Pad 17 pullup enable"]
#[inline]
pub fn pad17pull(&self) -> PAD17PULLR {
PAD17PULLR::_from({
const MASK: bool = true;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bits 3:5 - Pad 16 function select"]
#[inline]
pub fn pad16fncsel(&self) -> PAD16FNCSELR {
PAD16FNCSELR::_from({
const MASK: u8 = 7;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bit 2 - Pad 16 drive strength"]
#[inline]
pub fn pad16strng(&self) -> PAD16STRNGR {
PAD16STRNGR::_from({
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 1 - Pad 16 input enable"]
#[inline]
pub fn pad16inpen(&self) -> PAD16INPENR {
PAD16INPENR::_from({
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 0 - Pad 16 pullup enable"]
#[inline]
pub fn pad16pull(&self) -> PAD16PULLR {
PAD16PULLR::_from({
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
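        // 404232216 == 0x1818_1818: FNCSEL = 3 (GPIO) for pads 16-19, with
        // all drive-strength, input-enable, and pullup bits clear.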
W { bits: 404232216 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 27:29 - Pad 19 function select"]
#[inline]
pub fn pad19fncsel(&mut self) -> _PAD19FNCSELW {
_PAD19FNCSELW { w: self }
}
#[doc = "Bit 26 - Pad 19 drive strength"]
#[inline]
pub fn pad19strng(&mut self) -> _PAD19STRNGW {
_PAD19STRNGW { w: self }
}
#[doc = "Bit 25 - Pad 19 input enable"]
#[inline]
pub fn pad19inpen(&mut self) -> _PAD19INPENW {
_PAD19INPENW { w: self }
}
#[doc = "Bit 24 - Pad 19 pullup enable"]
#[inline]
pub fn pad19pull(&mut self) -> _PAD19PULLW {
_PAD19PULLW { w: self }
}
#[doc = "Bits 19:21 - Pad 18 function select"]
#[inline]
pub fn pad18fncsel(&mut self) -> _PAD18FNCSELW {
_PAD18FNCSELW { w: self }
}
#[doc = "Bit 18 - Pad 18 drive strength"]
#[inline]
pub fn pad18strng(&mut self) -> _PAD18STRNGW {
_PAD18STRNGW { w: self }
}
#[doc = "Bit 17 - Pad 18 input enable"]
#[inline]
pub fn pad18inpen(&mut self) -> _PAD18INPENW {
_PAD18INPENW { w: self }
}
#[doc = "Bit 16 - Pad 18 pullup enable"]
#[inline]
pub fn pad18pull(&mut self) -> _PAD18PULLW {
_PAD18PULLW { w: self }
}
#[doc = "Bits 11:13 - Pad 17 function select"]
#[inline]
pub fn pad17fncsel(&mut self) -> _PAD17FNCSELW {
_PAD17FNCSELW { w: self }
}
#[doc = "Bit 10 - Pad 17 drive strength"]
#[inline]
pub fn pad17strng(&mut self) -> _PAD17STRNGW {
_PAD17STRNGW { w: self }
}
#[doc = "Bit 9 - Pad 17 input enable"]
#[inline]
pub fn pad17inpen(&mut self) -> _PAD17INPENW {
_PAD17INPENW { w: self }
}
#[doc = "Bit 8 - Pad 17 pullup enable"]
#[inline]
pub fn pad17pull(&mut self) -> _PAD17PULLW {
_PAD17PULLW { w: self }
}
#[doc = "Bits 3:5 - Pad 16 function select"]
#[inline]
pub fn pad16fncsel(&mut self) -> _PAD16FNCSELW {
_PAD16FNCSELW { w: self }
}
#[doc = "Bit 2 - Pad 16 drive strength"]
#[inline]
pub fn pad16strng(&mut self) -> _PAD16STRNGW {
_PAD16STRNGW { w: self }
}
#[doc = "Bit 1 - Pad 16 input enable"]
#[inline]
pub fn pad16inpen(&mut self) -> _PAD16INPENW {
_PAD16INPENW { w: self }
}
#[doc = "Bit 0 - Pad 16 pullup enable"]
#[inline]
pub fn pad16pull(&mut self) -> _PAD16PULLW {
_PAD16PULLW { w: self }
}
}
use std::time::Duration;
use druid::kurbo::{Line, Rect};
use druid::piet::kurbo::Shape;
use druid::piet::Color;
use druid::piet::TextLayoutBuilder;
use druid::RenderContext;
use druid::{
kurbo::Ellipse, piet::Text, widget::ListIter, BoxConstraints, Data, Env, Event, EventCtx,
LayoutCtx, Lens, LifeCycle, LifeCycleCtx, PaintCtx, Point, Size, TimerToken, UpdateCtx, Widget,
};
use evalexpr::*;
use satellite_data::{
database::Database,
satellites::{MajorBody, Satellite},
};
#[derive(Clone, Debug)]
pub struct SatelliteteVec(pub Vec<SatelliteWrapper>);
impl Data for SatelliteteVec {
    fn same(&self, other: &Self) -> bool {
        self.0.len() == other.0.len() && self.0.iter().zip(&other.0).all(|(a, b)| a == b)
    }
}
impl ListIter<SatelliteWrapper> for SatelliteteVec {
    fn for_each(&self, mut cb: impl FnMut(&SatelliteWrapper, usize)) {
        // druid's `ListIter` passes each child together with its index.
        for (i, satellite) in self.0.iter().enumerate() {
            cb(satellite, i);
        }
    }
    fn for_each_mut(&mut self, mut cb: impl FnMut(&mut SatelliteWrapper, usize)) {
        for (i, satellite) in self.0.iter_mut().enumerate() {
            cb(satellite, i);
        }
    }
fn data_len(&self) -> usize {
self.0.len()
}
}
#[derive(Clone, Debug, PartialEq)]
pub struct SatelliteWrapper {
pub satellite: usize,
pub selected: bool,
}
impl Copy for SatelliteWrapper {}
impl Data for SatelliteWrapper {
fn same(&self, other: &Self) -> bool {
self.satellite == other.satellite && self.selected == other.selected
}
}
pub struct AstronomyCanvas {
pub selected_satellites: Vec<Satellite>,
pub selected_satellite: Option<SatelliteWrapper>,
pub move_bul: bool,
pub center: Point,
pub database: Database,
pub full_database: Database,
pub count_change_timer: Option<TimerToken>,
pub selected_update_timer: Option<TimerToken>,
}
#[derive(Clone, Data, Lens, Debug)]
pub struct AstronomyCanvasData {
pub all_displayed: usize,
pub selected: bool,
pub scale: f64,
pub center: Point,
pub toggle_distance: bool,
pub toggle_angle: bool,
pub toggle_major_semiaxes: bool,
pub mouse_point: Option<Point>,
pub selected_satellites: SatelliteteVec,
pub match_string: String,
pub selected_satellite: Option<SatelliteWrapper>,
pub graph_view: bool,
pub x_value: String,
pub y_value: String,
}
impl Widget<AstronomyCanvasData> for AstronomyCanvas {
fn paint(&mut self, ctx: &mut PaintCtx, data: &AstronomyCanvasData, env: &Env) {
if !data.graph_view {
self.render_circular_view(ctx, data, env);
} else {
self.render_graph_view(ctx, data, env);
}
}
fn layout(
&mut self,
ctx: &mut LayoutCtx,
bc: &BoxConstraints,
_data: &AstronomyCanvasData,
_env: &Env,
) -> Size {
let default_size = Size::new(
ctx.window().get_size().width,
ctx.window().get_size().height,
);
bc.constrain(default_size)
}
fn event(
&mut self,
ctx: &mut EventCtx,
event: &Event,
data: &mut AstronomyCanvasData,
_env: &Env,
) {
match event {
Event::Wheel(mouse_event) => {
data.mouse_point = None;
let (_, y) = mouse_event.wheel_delta.into();
if y.is_sign_positive() {
data.scale *= 2.0
} else {
data.scale /= 2.0
}
ctx.request_paint();
ctx.request_layout();
}
Event::MouseDown(mouse_event) => {
match mouse_event.button {
druid::MouseButton::Left => {
data.mouse_point = None;
let mouse_pos = mouse_event.pos;
data.center = mouse_pos;
ctx.set_active(true);
}
druid::MouseButton::Right => {
let mouse_pos = mouse_event.pos;
data.mouse_point = Some(mouse_pos);
self.selected_satellites.clear();
}
druid::MouseButton::Middle => {
data.selected = !data.selected
// data.mouse_point = None;
}
_ => {}
}
ctx.request_paint();
ctx.request_layout();
}
Event::MouseMove(mouse_event) => {
if ctx.is_active() {
self.move_bul = true;
let mouse_pos = mouse_event.pos;
data.center = mouse_pos;
}
}
Event::MouseUp(_mouse_event) => {
ctx.set_active(false);
let satellites: Vec<SatelliteWrapper> = self
.selected_satellites
.clone()
.into_iter()
.map(|satellite| SatelliteWrapper {
satellite: satellite.id,
selected: false,
})
.collect();
data.selected_satellites = SatelliteteVec(satellites);
self.move_bul = false;
}
Event::Timer(_event) => {
ctx.request_layout();
ctx.request_paint();
data.all_displayed = self.database.data.len();
}
_ => {}
};
data.selected_satellite = self.selected_satellite;
}
fn update(
&mut self,
ctx: &mut UpdateCtx,
old_data: &AstronomyCanvasData,
data: &AstronomyCanvasData,
_env: &Env,
) {
if old_data.selected_satellites.0.len() == data.selected_satellites.0.len()
&& !data.selected_satellites.0.is_empty()
{
let satellite: Vec<SatelliteWrapper> = data
.selected_satellites
.0
.clone()
.iter()
.enumerate()
.filter(|(id, satellite)| {
let old = old_data.selected_satellites.0.get(*id).unwrap();
satellite.satellite == old.satellite && satellite.selected != old.selected
})
.map(|(_id, satellite)| *satellite)
.collect();
if !satellite.is_empty() {
self.selected_satellite = Some(*satellite.first().unwrap());
ctx.request_timer(Duration::from_millis(1));
}
}
if data.toggle_distance != old_data.toggle_distance
|| data.toggle_angle != old_data.toggle_angle
|| data.toggle_major_semiaxes != old_data.toggle_major_semiaxes
{
ctx.request_paint();
ctx.request_layout();
}
if old_data.center != data.center {
if self.move_bul {
self.center = Point::new(
self.center.x + old_data.center.x - data.center.x,
self.center.y + old_data.center.y - data.center.y,
);
}
ctx.request_paint();
ctx.request_layout();
}
if data.match_string != old_data.match_string {
if data.match_string.is_empty() {
self.database = self.full_database.clone();
return;
}
let database = &self.full_database;
let database: Vec<Satellite> = database
.data
.iter()
.cloned()
.filter(|satellite: &Satellite| {
parse_logicall_expression(satellite, &data.match_string)
})
.collect();
let database = Database { data: database };
self.database = database;
// let all_displayed = self.database.data.len();
// self.all_displayed = all_displayed
ctx.request_paint();
ctx.request_layout();
ctx.request_timer(Duration::from_millis(1));
}
}
fn lifecycle(
&mut self,
_ctx: &mut LifeCycleCtx,
_event: &LifeCycle,
_data: &AstronomyCanvasData,
_env: &Env,
) {
}
}
impl AstronomyCanvas {
fn render_circular_view(&mut self, ctx: &mut PaintCtx, data: &AstronomyCanvasData, _env: &Env) {
let scale = data.scale * 5000.0;
for satellite in &self.database.data {
let a: f64 = satellite.orbital_params.major_semiaxis;
let e: f64 = satellite.orbital_params.eccentricity;
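            // Semi-minor axis from the standard ellipse relation b = a * sqrt(1 - e^2).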
let b = (a.powi(2) * (1.0 - e.powi(2))).sqrt();
let color = match satellite.major_body {
MajorBody::Earth => Color::rgb(0.0, 1.0, 0.4),
MajorBody::Mars => Color::rgb(1.0, 0.0, 0.0),
MajorBody::Jupiter => Color::rgb(1.0, 0.5, 0.3),
MajorBody::Saturn => Color::rgb(1.0, 0.9, 0.3),
MajorBody::Uranus => Color::rgb(0.3, 1.0, 0.8),
MajorBody::Neptune => Color::rgb(0.1, 0.2, 1.0),
MajorBody::Pluto => Color::rgb(0.9, 0.2, 1.0),
};
let rotation: f64 = satellite.orbital_params.inclination;
let rotation: f64 = rotation.to_radians();
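            // Shift the ellipse center away from the drawing origin by the
            // focal distance c = a * e, so the primary sits at a focus.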
let x = self.center.x + ((a * e) * rotation.cos() / scale);
let y = self.center.y + ((a * e) * rotation.sin() / scale);
let ellipse = Ellipse::new((x, y), (a / scale, b / scale), rotation);
match data.mouse_point {
Some(mouse_point) => {
let x = mouse_point.x;
let y = mouse_point.y;
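                    // Hit-test: build a 40x40 box around the right-click
                    // point and flag any orbit whose path crosses the box
                    // diagonal as selected.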
let rect = Rect::from_center_size((x, y), (40.0, 40.0));
let diagonal = Line::new((rect.x0, rect.y0), (rect.x1, rect.y1));
// let diagonal2 = Line::new((rect.x0, rect.y0), (rect.x1, rect.y1));
ctx.stroke(rect, &Color::AQUA, 1.0);
ctx.stroke(diagonal, &Color::AQUA, 1.0);
let ell = ellipse.to_path(0.01);
let ell = ell.segments();
for ell in ell {
if !ell.intersect_line(diagonal).is_empty()
&& !self.selected_satellites.contains(satellite)
{
self.selected_satellites.push(satellite.clone());
}
}
}
None => {
self.selected_satellites.clear();
}
}
if data.toggle_major_semiaxes {
let x1 = x + (a * rotation.cos() / scale);
let y1 = y + (a * rotation.sin() / scale);
let rotation: f64 = satellite.orbital_params.ascending_node;
let rotation: f64 = (rotation + 180.0).to_radians();
let x2 = x + (a * rotation.cos() / scale);
let y2 = y + (a * rotation.sin() / scale);
let major_semiaxes = Line::new((x2, y2), (x1, y1));
let rotation: f64 = satellite.orbital_params.ascending_node;
let rotation: f64 = (rotation + 90.0).to_radians();
let x1 = x + (b * rotation.cos() / scale);
let y1 = y + (b * rotation.sin() / scale);
let rotation: f64 = satellite.orbital_params.ascending_node;
let rotation: f64 = (rotation + 270.0).to_radians();
let x2 = x + (b * rotation.cos() / scale);
let y2 = y + (b * rotation.sin() / scale);
let minor_semiaxes = Line::new((x2, y2), (x1, y1));
ctx.stroke(major_semiaxes, &color.clone().with_alpha(0.5), 1.0);
ctx.stroke(minor_semiaxes, &color.clone().with_alpha(0.5), 1.0);
}
ctx.stroke(ellipse, &color, 1.0);
}
let planet = Ellipse::new(self.center, (5., 5.), 0.0);
ctx.fill(planet, &Color::rgb(1.0, 1.0, 0.0));
if data.toggle_distance {
let line = Line::new(self.center, (ctx.size().width, self.center.y));
ctx.stroke(line, &Color::WHITE, 2.0);
for i in 0..50 {
let distance = i as f64 * 80.0;
let line = Line::new(
(self.center.x + distance, self.center.y - 15.0),
(self.center.x + distance, self.center.y + 15.0),
);
let text = ctx.text();
let text = text
.new_text_layout((distance * scale).to_string())
.text_color(Color::WHITE);
let text = text.build().unwrap();
ctx.draw_text(&text, (self.center.x + distance, self.center.y + 20.0));
ctx.stroke(line, &Color::WHITE, 1.0);
}
}
if data.toggle_angle {
let ellipse = Ellipse::new(self.center, (150.0, 150.0), 0.0);
ctx.stroke(ellipse, &Color::WHITE, 2.0);
for i in 0..36 {
let rotation: f64 = (i * 10).into();
let rotation: f64 = rotation.to_radians();
let distance = 150.0;
let x1 = self.center.x + ((distance - 5.0) * rotation.cos());
let y1 = self.center.y + ((distance - 5.0) * rotation.sin());
let x2 = self.center.x + ((distance + 5.0) * rotation.cos());
let y2 = self.center.y + ((distance + 5.0) * rotation.sin());
let line = Line::new((x1, y1), (x2, y2));
ctx.stroke(line, &Color::WHITE, 1.0);
let x = self.center.x + ((distance + 30.0) * rotation.cos()) - 10.0;
let y = self.center.y + ((distance + 30.0) * rotation.sin());
let text = ctx.text();
let text = text
.new_text_layout((i * 10).to_string())
.text_color(Color::WHITE);
let text = text.build().unwrap();
ctx.draw_text(&text, (x, y));
}
}
}
fn render_graph_view(&mut self, ctx: &mut PaintCtx, data: &AstronomyCanvasData, _env: &Env) {
let x_values: Vec<&str> = data
.x_value
.split('|')
.filter(|&value| !value.is_empty() && value != " ")
.collect();
let mut x_scale = 1.0;
let mut x_value = String::new();
if !x_values.is_empty() {
x_value = x_values[0].to_string();
}
if x_values.len() > 1 {
let value = x_values[1];
let value = eval(value);
let value: f64 = match value {
Ok(value) => value.as_number().unwrap_or(1.0),
Err(_) => 1.0,
};
x_scale = value;
}
x_scale *= data.scale;
let y_values: Vec<&str> = data
.y_value
.split('|')
.filter(|&value| !value.is_empty() && value != " ")
.collect();
let mut y_value = String::new();
let mut y_scale = 1.0;
if !y_values.is_empty() {
y_value = y_values[0].to_string();
}
if y_values.len() > 1 {
let value = y_values[1];
let value = eval(value);
let value: f64 = match value {
Ok(value) => value.as_number().unwrap_or(1.0),
Err(_) => 1.0,
};
y_scale = value;
}
y_scale *= data.scale;
let x_line = Line::new(
(self.center.x - 1000.0, self.center.y),
(self.center.x + 1000.0, self.center.y),
);
let y_line = Line::new(
(self.center.x, self.center.y - 1000.0),
(self.center.x, self.center.y + 1000.0),
);
ctx.stroke(x_line, &Color::WHITE, 2.0);
ctx.stroke(y_line, &Color::WHITE, 2.0);
for i in -50..50 {
let distance = i as f64 * 80.0;
let x_line = Line::new(
(self.center.x + distance, self.center.y + 10.0),
(self.center.x + distance, self.center.y - 10.0),
);
let y_line = Line::new(
(self.center.x + 10.0, self.center.y + distance),
(self.center.x - 10.0, self.center.y + distance),
);
ctx.stroke(x_line, &Color::WHITE, 1.0);
ctx.stroke(y_line, &Color::WHITE, 1.0);
let text = ctx.text();
let text_x = text
.new_text_layout((distance * x_scale).to_string())
.text_color(Color::WHITE);
let text_x = text_x.build().unwrap();
let text_y = text
.new_text_layout((distance * y_scale).to_string())
.text_color(Color::WHITE);
let text_y = text_y.build().unwrap();
ctx.draw_text(&text_y, (self.center.x - 20.0, self.center.y + distance));
ctx.draw_text(&text_x, (self.center.x + distance, self.center.y + 20.0));
}
for satellite in &self.database.data {
let x = parse_math_expression(satellite, &x_value);
let y = parse_math_expression(satellite, &y_value);
let x = x / x_scale;
let y = y / y_scale;
let x = self.center.x + x;
let y = self.center.y - y;
let ellipse = Ellipse::new((x, y), (3.0, 3.0), 0.0);
match data.mouse_point {
Some(mouse_point) => {
let rect = Rect::from_center_size((mouse_point.x, mouse_point.y), (20.0, 20.0));
if x < rect.x1
&& x > rect.x0
&& y < rect.y1
&& y > rect.y0
&& !self.selected_satellites.contains(satellite)
{
self.selected_satellites.push(satellite.clone());
}
ctx.stroke(rect, &Color::AQUA, 1.0);
}
None => {
self.selected_satellites.clear();
}
}
let color = match satellite.major_body {
MajorBody::Earth => Color::rgb(0.0, 1.0, 0.4),
MajorBody::Mars => Color::rgb(1.0, 0.0, 0.0),
MajorBody::Jupiter => Color::rgb(1.0, 0.5, 0.3),
MajorBody::Saturn => Color::rgb(1.0, 0.9, 0.3),
MajorBody::Uranus => Color::rgb(0.3, 1.0, 0.8),
MajorBody::Neptune => Color::rgb(0.1, 0.2, 1.0),
MajorBody::Pluto => Color::rgb(0.9, 0.2, 1.0),
};
ctx.fill(ellipse, &color);
}
}
}
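/// Evaluates a user-written boolean filter (e.g. `e < 0.1 && mb == "Earth"`)
/// against a single satellite. Expressions that fail to parse fall back to a
/// constant `false`, so invalid input simply matches nothing.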
fn parse_logicall_expression(satellite: &Satellite, logical_expression: &str) -> bool {
let precompiled = build_operator_tree(logical_expression);
let precompiled = match precompiled {
Ok(precompiled) => precompiled,
Err(_e) => build_operator_tree("false").unwrap(),
};
let context = context_map! {
"a" => satellite.orbital_params.major_semiaxis,
"i" => satellite.orbital_params.inclination,
"e" => satellite.orbital_params.eccentricity,
"node" => satellite.orbital_params.ascending_node,
"gm" => satellite.physical_params.gm.to_value(),
"radius" => satellite.physical_params.radius.to_value(),
"density" => satellite.physical_params.density.to_value(),
"magnitude" => satellite.physical_params.magnitude.to_value(),
"albedo" => satellite.physical_params.albedo.to_value(),
"mb" => satellite.major_body.to_string(),
"name" => satellite.name.clone()
}
.unwrap();
let result = precompiled.eval_boolean_with_context(&context);
match result {
Ok(result) => result,
_ => false,
}
}
fn parse_math_expression(satellite: &Satellite, math_expression: &str) -> f64 {
let precompiled = build_operator_tree(math_expression);
let precompiled = match precompiled {
Ok(precompiled) => precompiled,
Err(_e) => build_operator_tree("false").unwrap(),
};
let context = context_map! {
"a" => satellite.orbital_params.major_semiaxis,
"i" => satellite.orbital_params.inclination,
"e" => satellite.orbital_params.eccentricity,
"node" => satellite.orbital_params.ascending_node,
"gm" => satellite.physical_params.gm.to_value(),
"radius" => satellite.physical_params.radius.to_value(),
"density" => satellite.physical_params.density.to_value(),
"magnitude" => satellite.physical_params.magnitude.to_value(),
"albedo" => satellite.physical_params.albedo.to_value(),
}
.unwrap();
let result = precompiled.eval_float_with_context(&context);
match result {
Ok(result) => result,
_ => 0.0,
}
}
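// A minimal sketch (not part of the original file) of how the evalexpr-based
// parsers above behave: an expression is precompiled once with
// `build_operator_tree` and then evaluated against a per-satellite context.
// The variable names match the bindings above; the numeric values are
// illustrative, not real orbital data.
#[cfg(test)]
mod expression_tests {
    use evalexpr::{build_operator_tree, context_map};

    #[test]
    fn boolean_filter_matches_context() {
        let tree = build_operator_tree("a > 100000.0 && e < 0.1").unwrap();
        let context = context_map! {
            "a" => 421_700.0,
            "e" => 0.004
        }
        .unwrap();
        assert!(tree.eval_boolean_with_context(&context).unwrap());
    }
}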
| 38.281469 | 101 | 0.489656 |
e67514e50403ef06fd426283eee48ebebe8648cb
| 30,550 |
/// WeightedVoteOption defines a unit of a vote, used for vote splitting.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WeightedVoteOption {
#[prost(enumeration = "VoteOption", tag = "1")]
pub option: i32,
#[prost(string, tag = "2")]
pub weight: ::prost::alloc::string::String,
}
/// TextProposal defines a standard text proposal whose changes need to be
/// manually updated in case of approval.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextProposal {
#[prost(string, tag = "1")]
pub title: ::prost::alloc::string::String,
#[prost(string, tag = "2")]
pub description: ::prost::alloc::string::String,
}
/// Deposit defines an amount deposited by an account address to an active
/// proposal.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Deposit {
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
#[prost(string, tag = "2")]
pub depositor: ::prost::alloc::string::String,
#[prost(message, repeated, tag = "3")]
pub amount: ::prost::alloc::vec::Vec<super::super::base::v1beta1::Coin>,
}
/// Proposal defines the core field members of a governance proposal.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Proposal {
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
#[prost(message, optional, tag = "2")]
pub content: ::core::option::Option<::prost_types::Any>,
#[prost(enumeration = "ProposalStatus", tag = "3")]
pub status: i32,
#[prost(message, optional, tag = "4")]
pub final_tally_result: ::core::option::Option<TallyResult>,
#[prost(message, optional, tag = "5")]
pub submit_time: ::core::option::Option<::prost_types::Timestamp>,
#[prost(message, optional, tag = "6")]
pub deposit_end_time: ::core::option::Option<::prost_types::Timestamp>,
#[prost(message, repeated, tag = "7")]
pub total_deposit: ::prost::alloc::vec::Vec<super::super::base::v1beta1::Coin>,
#[prost(message, optional, tag = "8")]
pub voting_start_time: ::core::option::Option<::prost_types::Timestamp>,
#[prost(message, optional, tag = "9")]
pub voting_end_time: ::core::option::Option<::prost_types::Timestamp>,
}
/// TallyResult defines a standard tally for a governance proposal.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TallyResult {
#[prost(string, tag = "1")]
pub yes: ::prost::alloc::string::String,
#[prost(string, tag = "2")]
pub abstain: ::prost::alloc::string::String,
#[prost(string, tag = "3")]
pub no: ::prost::alloc::string::String,
#[prost(string, tag = "4")]
pub no_with_veto: ::prost::alloc::string::String,
}
/// Vote defines a vote on a governance proposal.
/// A Vote consists of a proposal ID, the voter, and the vote option.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Vote {
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
#[prost(string, tag = "2")]
pub voter: ::prost::alloc::string::String,
/// Deprecated: Prefer to use `options` instead. This field is set in queries
/// if and only if `len(options) == 1` and that option has weight 1. In all
/// other cases, this field will default to VOTE_OPTION_UNSPECIFIED.
#[deprecated]
#[prost(enumeration = "VoteOption", tag = "3")]
pub option: i32,
#[prost(message, repeated, tag = "4")]
pub options: ::prost::alloc::vec::Vec<WeightedVoteOption>,
}
/// DepositParams defines the params for deposits on governance proposals.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DepositParams {
/// Minimum deposit for a proposal to enter voting period.
#[prost(message, repeated, tag = "1")]
pub min_deposit: ::prost::alloc::vec::Vec<super::super::base::v1beta1::Coin>,
/// Maximum period for Atom holders to deposit on a proposal. Initial value: 2
/// months.
#[prost(message, optional, tag = "2")]
pub max_deposit_period: ::core::option::Option<::prost_types::Duration>,
}
/// VotingParams defines the params for voting on governance proposals.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VotingParams {
/// Length of the voting period.
#[prost(message, optional, tag = "1")]
pub voting_period: ::core::option::Option<::prost_types::Duration>,
}
/// TallyParams defines the params for tallying votes on governance proposals.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TallyParams {
/// Minimum percentage of total stake needed to vote for a result to be
/// considered valid.
#[prost(bytes = "vec", tag = "1")]
pub quorum: ::prost::alloc::vec::Vec<u8>,
/// Minimum proportion of Yes votes for proposal to pass. Default value: 0.5.
#[prost(bytes = "vec", tag = "2")]
pub threshold: ::prost::alloc::vec::Vec<u8>,
/// Minimum value of Veto votes to Total votes ratio for proposal to be
/// vetoed. Default value: 1/3.
#[prost(bytes = "vec", tag = "3")]
pub veto_threshold: ::prost::alloc::vec::Vec<u8>,
}
/// VoteOption enumerates the valid vote options for a given governance proposal.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum VoteOption {
/// VOTE_OPTION_UNSPECIFIED defines a no-op vote option.
Unspecified = 0,
/// VOTE_OPTION_YES defines a yes vote option.
Yes = 1,
/// VOTE_OPTION_ABSTAIN defines an abstain vote option.
Abstain = 2,
/// VOTE_OPTION_NO defines a no vote option.
No = 3,
/// VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option.
NoWithVeto = 4,
}
/// ProposalStatus enumerates the valid statuses of a proposal.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ProposalStatus {
    /// PROPOSAL_STATUS_UNSPECIFIED defines the default proposal status.
Unspecified = 0,
/// PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit
/// period.
DepositPeriod = 1,
/// PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting
/// period.
VotingPeriod = 2,
/// PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has
/// passed.
Passed = 3,
/// PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has
/// been rejected.
Rejected = 4,
/// PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has
/// failed.
Failed = 5,
}
/// MsgSubmitProposal defines an sdk.Msg type that supports submitting arbitrary
/// proposal Content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MsgSubmitProposal {
#[prost(message, optional, tag = "1")]
pub content: ::core::option::Option<::prost_types::Any>,
#[prost(message, repeated, tag = "2")]
pub initial_deposit: ::prost::alloc::vec::Vec<super::super::base::v1beta1::Coin>,
#[prost(string, tag = "3")]
pub proposer: ::prost::alloc::string::String,
}
/// MsgSubmitProposalResponse defines the Msg/SubmitProposal response type.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MsgSubmitProposalResponse {
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
}
/// MsgVote defines a message to cast a vote.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MsgVote {
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
#[prost(string, tag = "2")]
pub voter: ::prost::alloc::string::String,
#[prost(enumeration = "VoteOption", tag = "3")]
pub option: i32,
}
/// MsgVoteResponse defines the Msg/Vote response type.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MsgVoteResponse {}
/// MsgVoteWeighted defines a message to cast a vote.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MsgVoteWeighted {
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
#[prost(string, tag = "2")]
pub voter: ::prost::alloc::string::String,
#[prost(message, repeated, tag = "3")]
pub options: ::prost::alloc::vec::Vec<WeightedVoteOption>,
}
/// MsgVoteWeightedResponse defines the Msg/VoteWeighted response type.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MsgVoteWeightedResponse {}
/// MsgDeposit defines a message to submit a deposit to an existing proposal.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MsgDeposit {
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
#[prost(string, tag = "2")]
pub depositor: ::prost::alloc::string::String,
#[prost(message, repeated, tag = "3")]
pub amount: ::prost::alloc::vec::Vec<super::super::base::v1beta1::Coin>,
}
/// MsgDepositResponse defines the Msg/Deposit response type.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MsgDepositResponse {}
#[doc = r" Generated client implementations."]
pub mod msg_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
#[doc = " Msg defines the bank Msg service."]
#[derive(Debug, Clone)]
pub struct MsgClient<T> {
inner: tonic::client::Grpc<T>,
}
impl MsgClient<tonic::transport::Channel> {
#[doc = r" Attempt to create a new client by connecting to a given endpoint."]
pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: std::convert::TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
Ok(Self::new(conn))
}
}
impl<T> MsgClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::ResponseBody: Body + Send + 'static,
T::Error: Into<StdError>,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
pub fn with_interceptor<F>(inner: T, interceptor: F) -> MsgClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
Into<StdError> + Send + Sync,
{
MsgClient::new(InterceptedService::new(inner, interceptor))
}
#[doc = r" Compress requests with `gzip`."]
#[doc = r""]
#[doc = r" This requires the server to support it otherwise it might respond with an"]
#[doc = r" error."]
pub fn send_gzip(mut self) -> Self {
self.inner = self.inner.send_gzip();
self
}
#[doc = r" Enable decompressing responses with `gzip`."]
pub fn accept_gzip(mut self) -> Self {
self.inner = self.inner.accept_gzip();
self
}
#[doc = " SubmitProposal defines a method to create new proposal given a content."]
pub async fn submit_proposal(
&mut self,
request: impl tonic::IntoRequest<super::MsgSubmitProposal>,
) -> Result<tonic::Response<super::MsgSubmitProposalResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path =
http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Msg/SubmitProposal");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Vote defines a method to add a vote on a specific proposal."]
pub async fn vote(
&mut self,
request: impl tonic::IntoRequest<super::MsgVote>,
) -> Result<tonic::Response<super::MsgVoteResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Msg/Vote");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " VoteWeighted defines a method to add a weighted vote on a specific proposal."]
pub async fn vote_weighted(
&mut self,
request: impl tonic::IntoRequest<super::MsgVoteWeighted>,
) -> Result<tonic::Response<super::MsgVoteWeightedResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Msg/VoteWeighted");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Deposit defines a method to add deposit on a specific proposal."]
pub async fn deposit(
&mut self,
request: impl tonic::IntoRequest<super::MsgDeposit>,
) -> Result<tonic::Response<super::MsgDepositResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Msg/Deposit");
self.inner.unary(request.into_request(), path, codec).await
}
}
}
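// Illustrative usage (not generated code): casting a vote through the client
// above. The endpoint, proposal id, and voter address are assumptions.
//
// let mut client = msg_client::MsgClient::connect("http://localhost:9090").await?;
// client
//     .vote(MsgVote {
//         proposal_id: 1,
//         voter: "cosmos1...".to_string(),
//         option: VoteOption::Yes as i32,
//     })
//     .await?;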
/// QueryProposalRequest is the request type for the Query/Proposal RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryProposalRequest {
/// proposal_id defines the unique id of the proposal.
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
}
/// QueryProposalResponse is the response type for the Query/Proposal RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryProposalResponse {
#[prost(message, optional, tag = "1")]
pub proposal: ::core::option::Option<Proposal>,
}
/// QueryProposalsRequest is the request type for the Query/Proposals RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryProposalsRequest {
/// proposal_status defines the status of the proposals.
#[prost(enumeration = "ProposalStatus", tag = "1")]
pub proposal_status: i32,
/// voter defines the voter address for the proposals.
#[prost(string, tag = "2")]
pub voter: ::prost::alloc::string::String,
/// depositor defines the deposit addresses from the proposals.
#[prost(string, tag = "3")]
pub depositor: ::prost::alloc::string::String,
/// pagination defines an optional pagination for the request.
#[prost(message, optional, tag = "4")]
pub pagination: ::core::option::Option<super::super::base::query::v1beta1::PageRequest>,
}
/// QueryProposalsResponse is the response type for the Query/Proposals RPC
/// method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryProposalsResponse {
#[prost(message, repeated, tag = "1")]
pub proposals: ::prost::alloc::vec::Vec<Proposal>,
/// pagination defines the pagination in the response.
#[prost(message, optional, tag = "2")]
pub pagination: ::core::option::Option<super::super::base::query::v1beta1::PageResponse>,
}
/// QueryVoteRequest is the request type for the Query/Vote RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryVoteRequest {
/// proposal_id defines the unique id of the proposal.
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
    /// voter defines the voter address for the proposals.
#[prost(string, tag = "2")]
pub voter: ::prost::alloc::string::String,
}
/// QueryVoteResponse is the response type for the Query/Vote RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryVoteResponse {
    /// vote defines the queried vote.
#[prost(message, optional, tag = "1")]
pub vote: ::core::option::Option<Vote>,
}
/// QueryVotesRequest is the request type for the Query/Votes RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryVotesRequest {
/// proposal_id defines the unique id of the proposal.
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
/// pagination defines an optional pagination for the request.
#[prost(message, optional, tag = "2")]
pub pagination: ::core::option::Option<super::super::base::query::v1beta1::PageRequest>,
}
/// QueryVotesResponse is the response type for the Query/Votes RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryVotesResponse {
    /// votes defines the queried votes.
#[prost(message, repeated, tag = "1")]
pub votes: ::prost::alloc::vec::Vec<Vote>,
/// pagination defines the pagination in the response.
#[prost(message, optional, tag = "2")]
pub pagination: ::core::option::Option<super::super::base::query::v1beta1::PageResponse>,
}
/// QueryParamsRequest is the request type for the Query/Params RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryParamsRequest {
/// params_type defines which parameters to query for, can be one of "voting",
/// "tallying" or "deposit".
#[prost(string, tag = "1")]
pub params_type: ::prost::alloc::string::String,
}
/// QueryParamsResponse is the response type for the Query/Params RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryParamsResponse {
/// voting_params defines the parameters related to voting.
#[prost(message, optional, tag = "1")]
pub voting_params: ::core::option::Option<VotingParams>,
/// deposit_params defines the parameters related to deposit.
#[prost(message, optional, tag = "2")]
pub deposit_params: ::core::option::Option<DepositParams>,
/// tally_params defines the parameters related to tally.
#[prost(message, optional, tag = "3")]
pub tally_params: ::core::option::Option<TallyParams>,
}
/// QueryDepositRequest is the request type for the Query/Deposit RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryDepositRequest {
/// proposal_id defines the unique id of the proposal.
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
/// depositor defines the deposit addresses from the proposals.
#[prost(string, tag = "2")]
pub depositor: ::prost::alloc::string::String,
}
/// QueryDepositResponse is the response type for the Query/Deposit RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryDepositResponse {
/// deposit defines the requested deposit.
#[prost(message, optional, tag = "1")]
pub deposit: ::core::option::Option<Deposit>,
}
/// QueryDepositsRequest is the request type for the Query/Deposits RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryDepositsRequest {
/// proposal_id defines the unique id of the proposal.
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
/// pagination defines an optional pagination for the request.
#[prost(message, optional, tag = "2")]
pub pagination: ::core::option::Option<super::super::base::query::v1beta1::PageRequest>,
}
/// QueryDepositsResponse is the response type for the Query/Deposits RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryDepositsResponse {
#[prost(message, repeated, tag = "1")]
pub deposits: ::prost::alloc::vec::Vec<Deposit>,
/// pagination defines the pagination in the response.
#[prost(message, optional, tag = "2")]
pub pagination: ::core::option::Option<super::super::base::query::v1beta1::PageResponse>,
}
/// QueryTallyResultRequest is the request type for the Query/Tally RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryTallyResultRequest {
/// proposal_id defines the unique id of the proposal.
#[prost(uint64, tag = "1")]
pub proposal_id: u64,
}
/// QueryTallyResultResponse is the response type for the Query/Tally RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryTallyResultResponse {
/// tally defines the requested tally.
#[prost(message, optional, tag = "1")]
pub tally: ::core::option::Option<TallyResult>,
}
#[doc = r" Generated client implementations."]
pub mod query_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
#[doc = " Query defines the gRPC querier service for gov module"]
#[derive(Debug, Clone)]
pub struct QueryClient<T> {
inner: tonic::client::Grpc<T>,
}
impl QueryClient<tonic::transport::Channel> {
#[doc = r" Attempt to create a new client by connecting to a given endpoint."]
pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: std::convert::TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
Ok(Self::new(conn))
}
}
impl<T> QueryClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::ResponseBody: Body + Send + 'static,
T::Error: Into<StdError>,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> QueryClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
Into<StdError> + Send + Sync,
{
QueryClient::new(InterceptedService::new(inner, interceptor))
}
#[doc = r" Compress requests with `gzip`."]
#[doc = r""]
#[doc = r" This requires the server to support it otherwise it might respond with an"]
#[doc = r" error."]
pub fn send_gzip(mut self) -> Self {
self.inner = self.inner.send_gzip();
self
}
#[doc = r" Enable decompressing responses with `gzip`."]
pub fn accept_gzip(mut self) -> Self {
self.inner = self.inner.accept_gzip();
self
}
#[doc = " Proposal queries proposal details based on ProposalID."]
pub async fn proposal(
&mut self,
request: impl tonic::IntoRequest<super::QueryProposalRequest>,
) -> Result<tonic::Response<super::QueryProposalResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Query/Proposal");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Proposals queries all proposals based on given status."]
pub async fn proposals(
&mut self,
request: impl tonic::IntoRequest<super::QueryProposalsRequest>,
) -> Result<tonic::Response<super::QueryProposalsResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Query/Proposals");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Vote queries voted information based on proposalID, voterAddr."]
pub async fn vote(
&mut self,
request: impl tonic::IntoRequest<super::QueryVoteRequest>,
) -> Result<tonic::Response<super::QueryVoteResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Query/Vote");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Votes queries votes of a given proposal."]
pub async fn votes(
&mut self,
request: impl tonic::IntoRequest<super::QueryVotesRequest>,
) -> Result<tonic::Response<super::QueryVotesResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Query/Votes");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Params queries all parameters of the gov module."]
pub async fn params(
&mut self,
request: impl tonic::IntoRequest<super::QueryParamsRequest>,
) -> Result<tonic::Response<super::QueryParamsResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Query/Params");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Deposit queries single deposit information based proposalID, depositAddr."]
pub async fn deposit(
&mut self,
request: impl tonic::IntoRequest<super::QueryDepositRequest>,
) -> Result<tonic::Response<super::QueryDepositResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Query/Deposit");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Deposits queries all deposits of a single proposal."]
pub async fn deposits(
&mut self,
request: impl tonic::IntoRequest<super::QueryDepositsRequest>,
) -> Result<tonic::Response<super::QueryDepositsResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Query/Deposits");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " TallyResult queries the tally of a proposal vote."]
pub async fn tally_result(
&mut self,
request: impl tonic::IntoRequest<super::QueryTallyResultRequest>,
) -> Result<tonic::Response<super::QueryTallyResultResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path =
http::uri::PathAndQuery::from_static("/cosmos.gov.v1beta1.Query/TallyResult");
self.inner.unary(request.into_request(), path, codec).await
}
}
}
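// Illustrative usage (not generated code): listing proposals currently in the
// voting period. The endpoint is an assumption; pagination is left unset.
//
// let mut client = query_client::QueryClient::connect("http://localhost:9090").await?;
// let resp = client
//     .proposals(QueryProposalsRequest {
//         proposal_status: ProposalStatus::VotingPeriod as i32,
//         voter: String::new(),
//         depositor: String::new(),
//         pagination: None,
//     })
//     .await?;
// println!("{} proposals", resp.into_inner().proposals.len());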
/// GenesisState defines the gov module's genesis state.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenesisState {
/// starting_proposal_id is the ID of the starting proposal.
#[prost(uint64, tag = "1")]
pub starting_proposal_id: u64,
/// deposits defines all the deposits present at genesis.
#[prost(message, repeated, tag = "2")]
pub deposits: ::prost::alloc::vec::Vec<Deposit>,
/// votes defines all the votes present at genesis.
#[prost(message, repeated, tag = "3")]
pub votes: ::prost::alloc::vec::Vec<Vote>,
/// proposals defines all the proposals present at genesis.
#[prost(message, repeated, tag = "4")]
pub proposals: ::prost::alloc::vec::Vec<Proposal>,
    /// deposit_params defines all the parameters related to deposit.
#[prost(message, optional, tag = "5")]
pub deposit_params: ::core::option::Option<DepositParams>,
    /// voting_params defines all the parameters related to voting.
#[prost(message, optional, tag = "6")]
pub voting_params: ::core::option::Option<VotingParams>,
    /// tally_params defines all the parameters related to tally.
#[prost(message, optional, tag = "7")]
pub tally_params: ::core::option::Option<TallyParams>,
}
| 44.729136 | 100 | 0.61928 |
87cc8f1c15e5594873da9f1cb0419f6e1f9a4bb1
| 5,296 |
#[cfg(target_pointer_width = "32")]
compile_error!("x32 is not yet supported");
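// These wrappers follow the x86-64 Linux syscall ABI: the syscall number goes
// in rax, arguments in rdi, rsi, rdx, r10, r8, and r9 (in that order), and the
// result comes back in rax. The kernel clobbers rcx and r11, which is why every
// variant below lists them as outputs. The `_readonly` variants additionally
// pass the `readonly` option, letting the compiler assume the syscall does not
// modify caller-visible memory.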
#[inline]
#[must_use]
pub(crate) unsafe fn syscall0_readonly(nr: u32) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags, readonly)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall1(nr: u32, a0: usize) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall1_readonly(nr: u32, a0: usize) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags, readonly)
);
r0
}
#[inline]
pub(crate) unsafe fn syscall1_noreturn(nr: u32, a0: usize) -> ! {
asm!(
"syscall",
in("rax") nr,
in("rdi") a0,
options(noreturn)
)
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall2(nr: u32, a0: usize, a1: usize) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall2_readonly(nr: u32, a0: usize, a1: usize) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags, readonly)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall3(nr: u32, a0: usize, a1: usize, a2: usize) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
in("rdx") a2,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall3_readonly(nr: u32, a0: usize, a1: usize, a2: usize) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
in("rdx") a2,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags, readonly)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall4(nr: u32, a0: usize, a1: usize, a2: usize, a3: usize) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
in("rdx") a2,
in("r10") a3,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall4_readonly(
nr: u32,
a0: usize,
a1: usize,
a2: usize,
a3: usize,
) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
in("rdx") a2,
in("r10") a3,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags, readonly)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall5(
nr: u32,
a0: usize,
a1: usize,
a2: usize,
a3: usize,
a4: usize,
) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
in("rdx") a2,
in("r10") a3,
in("r8") a4,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall5_readonly(
nr: u32,
a0: usize,
a1: usize,
a2: usize,
a3: usize,
a4: usize,
) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
in("rdx") a2,
in("r10") a3,
in("r8") a4,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags, readonly)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall6(
nr: u32,
a0: usize,
a1: usize,
a2: usize,
a3: usize,
a4: usize,
a5: usize,
) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
in("rdx") a2,
in("r10") a3,
in("r8") a4,
in("r9") a5,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags)
);
r0
}
#[inline]
#[must_use]
pub(crate) unsafe fn syscall6_readonly(
nr: u32,
a0: usize,
a1: usize,
a2: usize,
a3: usize,
a4: usize,
a5: usize,
) -> usize {
let r0;
asm!(
"syscall",
inlateout("rax") nr as usize => r0,
in("rdi") a0,
in("rsi") a1,
in("rdx") a2,
in("r10") a3,
in("r8") a4,
in("r9") a5,
out("rcx") _,
out("r11") _,
options(nostack, preserves_flags, readonly)
);
r0
}
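// A minimal sanity-check sketch (not part of the original file). It assumes an
// x86-64 Linux target, which the inline assembly above already requires; 39 is
// __NR_getpid there, and getpid(2) cannot fail, so the raw return value is the
// process id itself.
#[cfg(test)]
mod tests {
    #[test]
    fn getpid_via_raw_syscall() {
        let pid = unsafe { super::syscall0_readonly(39) };
        assert!(pid > 0);
    }
}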
| 19.399267 | 93 | 0.481873 |
0921c754af7f2bc3661bb437bf7f2885f36411ca
| 165 |
/// Futures API modules
/// # Examples
/// See examples/binance_futures.rs
pub mod account;
pub mod general;
pub mod market;
pub mod rest_model;
pub mod userstream;
| 18.333333 | 35 | 0.745455 |
f48c3d4a23dbc726f057d894ccc084be4b41d490
| 8,342 |
// Copyright 2019 The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{
key_val_store::{
key_val_store::{IterationResult, KeyValueStore},
KeyValStoreError,
},
lmdb_store::LMDBDatabase,
};
use lmdb_zero::traits::AsLmdbBytes;
use serde::{de::DeserializeOwned, Serialize};
use std::{marker::PhantomData, sync::Arc};
/// This is a simple wrapper struct that lifts the generic parameters so that KeyValStore can be implemented on
/// LMDBDatabase. LMDBDatabase doesn't have the generics at the struct level because the LMDBStore can contain many
/// instances of LMDBDatabase each with different K and V types.
pub struct LMDBWrapper<K, V> {
inner: Arc<LMDBDatabase>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
impl<K, V> LMDBWrapper<K, V> {
/// Wrap a LMDBDatabase instance so that it implements [KeyValueStore]
pub fn new(db: Arc<LMDBDatabase>) -> LMDBWrapper<K, V> {
LMDBWrapper {
inner: db,
_k: PhantomData,
_v: PhantomData,
}
}
/// Get access to the underlying LMDB database
pub fn inner(&self) -> Arc<LMDBDatabase> {
Arc::clone(&self.inner)
}
}
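// Illustrative sketch (names assumed): lifting a concrete LMDB handle into the
// generic key-value interface, mirroring the test below.
//
// let db = datastore.get_handle("peers").unwrap();
// let kv: LMDBWrapper<u64, String> = LMDBWrapper::new(Arc::new(db));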
impl<K, V> KeyValueStore<K, V> for LMDBWrapper<K, V>
where
K: AsLmdbBytes + DeserializeOwned,
V: Serialize + DeserializeOwned,
{
/// Inserts a key-value pair into the key-value database.
fn insert(&self, key: K, value: V) -> Result<(), KeyValStoreError> {
self.inner
.insert::<K, V>(&key, &value)
.map_err(|e| KeyValStoreError::DatabaseError(format!("{:?}", e)))
}
/// Get the value corresponding to the provided key from the key-value database.
fn get(&self, key: &K) -> Result<Option<V>, KeyValStoreError>
where for<'t> V: serde::de::DeserializeOwned {
self.inner
.get::<K, V>(key)
.map_err(|e| KeyValStoreError::DatabaseError(format!("{:?}", e)))
}
/// Returns the total number of entries recorded in the key-value database.
fn size(&self) -> Result<usize, KeyValStoreError> {
self.inner
.len()
.map_err(|e| KeyValStoreError::DatabaseError(format!("{:?}", e)))
}
/// Iterate over all the stored records and execute the function `f` for each pair in the key-value database.
fn for_each<F>(&self, f: F) -> Result<(), KeyValStoreError>
where F: FnMut(Result<(K, V), KeyValStoreError>) -> IterationResult {
self.inner
.for_each::<K, V, F>(f)
.map_err(|e| KeyValStoreError::DatabaseError(format!("{:?}", e)))
}
/// Checks whether a record exist in the key-value database that corresponds to the provided `key`.
fn exists(&self, key: &K) -> Result<bool, KeyValStoreError> {
self.inner
.contains_key::<K>(key)
.map_err(|e| KeyValStoreError::DatabaseError(format!("{:?}", e)))
}
/// Remove the record from the key-value database that corresponds with the provided `key`.
fn delete(&self, key: &K) -> Result<(), KeyValStoreError> {
self.inner
.remove::<K>(key)
.map_err(|e| KeyValStoreError::DatabaseError(format!("{:?}", e)))
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::lmdb_store::{LMDBBuilder, LMDBConfig, LMDBError, LMDBStore};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
fn get_path(name: &str) -> String {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("tests/data");
path.push(name);
path.to_str().unwrap().to_string()
}
fn init_datastore(name: &str) -> Result<LMDBStore, LMDBError> {
let path = get_path(name);
std::fs::create_dir(&path).unwrap_or_default();
LMDBBuilder::new()
.set_path(&path)
.set_env_config(LMDBConfig::default())
.set_max_number_of_databases(2)
.add_database(name, lmdb_zero::db::CREATE)
.build()
}
fn clean_up_datastore(name: &str) {
std::fs::remove_dir_all(get_path(name)).unwrap();
}
#[test]
fn test_lmdb_kvstore() {
let database_name = "test_lmdb_kvstore"; // Note: every test should have unique database
{
let datastore = init_datastore(database_name).unwrap();
let db = datastore.get_handle(database_name).unwrap();
let db = LMDBWrapper::new(Arc::new(db));
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct Foo {
value: String,
}
            let key1 = 1u64;
            let key2 = 2u64;
            let key3 = 3u64;
            let key4 = 4u64;
let val1 = Foo {
value: "one".to_string(),
};
let val2 = Foo {
value: "two".to_string(),
};
let val3 = Foo {
value: "three".to_string(),
};
db.insert(1, val1.clone()).unwrap();
db.insert(2, val2.clone()).unwrap();
db.insert(3, val3.clone()).unwrap();
assert_eq!(db.get(&1).unwrap().unwrap(), val1);
assert_eq!(db.get(&2).unwrap().unwrap(), val2);
assert_eq!(db.get(&3).unwrap().unwrap(), val3);
assert!(db.get(&4).unwrap().is_none());
assert_eq!(db.size().unwrap(), 3);
assert!(db.exists(&key1).unwrap());
assert!(db.exists(&key2).unwrap());
assert!(db.exists(&key3).unwrap());
assert!(!db.exists(&key4).unwrap());
db.delete(&key2).unwrap();
assert_eq!(db.get(&key1).unwrap().unwrap(), val1);
assert!(db.get(&key2).unwrap().is_none());
assert_eq!(db.get(&key3).unwrap().unwrap(), val3);
assert!(db.get(&key4).unwrap().is_none());
assert_eq!(db.size().unwrap(), 2);
assert!(db.exists(&key1).unwrap());
assert!(!db.exists(&key2).unwrap());
assert!(db.exists(&key3).unwrap());
assert!(!db.exists(&key4).unwrap());
// Only Key1 and Key3 should be in key-value database, but order is not known
let mut key1_found = false;
let mut key3_found = false;
let _res = db.for_each(|pair| {
let (key, val) = pair.unwrap();
if key == key1 {
key1_found = true;
assert_eq!(val, val1);
} else if key == key3 {
key3_found = true;
assert_eq!(val, val3);
}
IterationResult::Continue
});
assert!(key1_found);
assert!(key3_found);
}
        clean_up_datastore(database_name); // On Windows, file handles must be released before files can be deleted
}
}
| 40.299517 | 119 | 0.600456 |
163ac11178c3f9d5950fc5b5eb4c608913792dd7
| 2,956 |
use super::{command::Command, error::ControlChanError, line_parser, Reply};
use bytes::BytesMut;
use std::io::Write;
use tokio_util::codec::{Decoder, Encoder};
// FtpCodec implements tokio's `Decoder` and `Encoder` traits for the control channel,
// which we use to decode FTP commands and encode their responses.
pub struct FtpCodec {
// Stored index of the next index to examine for a '\n' character. This is used to optimize
// searching. For example, if `decode` was called with `abc`, it would hold `3`, because that
// is the next index to examine. The next time `decode` is called with `abcde\n`, we will only
// look at `de\n` before returning.
next_index: usize,
}
impl FtpCodec {
pub fn new() -> Self {
FtpCodec { next_index: 0 }
}
}
impl Decoder for FtpCodec {
type Item = Command;
type Error = ControlChanError;
// Here we decode the incoming bytes into a meaningful command. We'll split on newlines, and
// parse the resulting line using `Command::parse()`. This method will be called by tokio.
fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Command>, Self::Error> {
if let Some(newline_offset) = buf[self.next_index..].iter().position(|b| *b == b'\n') {
let newline_index = newline_offset + self.next_index;
let line = buf.split_to(newline_index + 1);
self.next_index = 0;
Ok(Some(line_parser::parse(line)?))
} else {
self.next_index = buf.len();
Ok(None)
}
}
}
impl Encoder<Reply> for FtpCodec {
type Error = ControlChanError;
// Here we encode the outgoing response
fn encode(&mut self, reply: Reply, buf: &mut BytesMut) -> Result<(), Self::Error> {
let mut buffer = vec![];
match reply {
Reply::None => {
return Ok(());
}
Reply::CodeAndMsg { code, msg } => {
if msg.is_empty() {
writeln!(buffer, "{}\r", code as u32)?;
} else {
writeln!(buffer, "{} {}\r", code as u32, msg)?;
}
}
Reply::MultiLine { code, mut lines } => {
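                // Multi-line replies are framed as "<code>-<first line>" ...
                // "<code> <last line>"; intermediate lines beginning with a
                // digit get indented below so a client cannot mistake them for
                // the terminating line.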
// Get the last line since it needs to be preceded by the response code.
let last_line = lines.pop().unwrap();
// Lines starting with a digit should be indented
for it in lines.iter_mut() {
if it.chars().next().unwrap().is_digit(10) {
it.insert(0, ' ');
}
}
if lines.is_empty() {
writeln!(buffer, "{} {}\r", code as u32, last_line)?;
} else {
write!(buffer, "{}-{}\r\n{} {}\r\n", code as u32, lines.join("\r\n"), code as u32, last_line)?;
}
}
}
buf.extend(&buffer);
Ok(())
}
}
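// A minimal sketch (not part of the original file): `decode` buffers partial
// input and only parses once a newline arrives. It assumes `line_parser`
// accepts the standard NOOP command.
#[cfg(test)]
mod tests {
    use super::FtpCodec;
    use bytes::BytesMut;
    use tokio_util::codec::Decoder;

    #[test]
    fn decode_waits_for_a_complete_line() {
        let mut codec = FtpCodec::new();
        let mut buf = BytesMut::new();
        buf.extend_from_slice(b"NOOP");
        assert!(codec.decode(&mut buf).unwrap().is_none());
        buf.extend_from_slice(b"\r\n");
        assert!(codec.decode(&mut buf).unwrap().is_some());
    }
}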
| 37.417722 | 115 | 0.539242 |
d9414b99a446be02cb6275482b2e9078fe1ee82a
| 8,718 |
use crate::gen::Generator;
use crate::model::sql::Column;
use crate::model::sql::Constraint;
use crate::model::sql::Sql;
use crate::model::Definition;
use crate::model::Model;
use std::fmt::Write;
#[derive(Debug)]
pub enum Error {
Fmt(::std::fmt::Error),
}
impl From<::std::fmt::Error> for Error {
fn from(e: ::std::fmt::Error) -> Self {
Error::Fmt(e)
}
}
#[derive(Debug)]
pub enum TableOptimizationHint {
WritePerformance,
}
#[derive(Debug)]
pub enum PrimaryKeyHint {
WrapOnOverflow,
}
#[allow(clippy::module_name_repetitions)]
#[derive(Debug, Default)]
pub struct SqlDefGenerator {
models: Vec<Model<Sql>>,
optimize_tables_for: Option<TableOptimizationHint>,
primary_key_hint: Option<PrimaryKeyHint>,
}
impl SqlDefGenerator {
pub fn reset(&mut self) {
self.models.clear();
}
}
impl Generator<Sql> for SqlDefGenerator {
type Error = Error;
fn add_model(&mut self, model: Model<Sql>) {
self.models.push(model);
}
fn models(&self) -> &[Model<Sql>] {
&self.models
}
fn models_mut(&mut self) -> &mut [Model<Sql>] {
&mut self.models
}
fn to_string(&self) -> Result<Vec<(String, String)>, <Self as Generator<Sql>>::Error> {
let mut files = Vec::with_capacity(self.models.len());
for model in &self.models {
let mut drop = String::new();
let mut create = String::new();
for Definition(name, sql) in &model.definitions {
writeln!(create)?;
match sql {
Sql::Table(columns, constraints) => {
// TODO
writeln!(drop, "DROP TABLE IF EXISTS {} CASCADE;", name)?;
self.append_create_table(&mut create, name, columns, constraints)?;
self.apply_primary_key_hints(&mut create, name, columns)?;
}
Sql::Enum(variants) => {
// TODO
writeln!(drop, "DROP TABLE IF EXISTS {} CASCADE;", name)?;
self.append_create_enum(&mut create, name, variants)?
}
Sql::Index(table, columns) => {
Self::append_index(&mut create, name, table, &columns[..])?;
}
Sql::AbandonChildrenFunction(table, children) => {
Self::append_abandon_children(&mut create, table, name, &children[..])?;
}
Sql::SilentlyPreventAnyDelete(table) => {
Self::append_silently_prevent_any_delete(&mut create, name, table)?;
}
}
}
drop.push_str(&create);
files.push((format!("{}.sql", model.name), drop));
}
Ok(files)
}
}
impl SqlDefGenerator {
pub fn optimize_tables_for_write_performance(mut self) -> Self {
self.optimize_tables_for = Some(TableOptimizationHint::WritePerformance);
self
}
pub const fn no_table_write_optimization(mut self) -> Self {
self.optimize_tables_for = None;
self
}
pub const fn wrap_primary_key_on_overflow(mut self) -> Self {
self.primary_key_hint = Some(PrimaryKeyHint::WrapOnOverflow);
self
}
pub const fn no_wrap_of_primary_key_on_overflow(mut self) -> Self {
self.primary_key_hint = None;
self
}
fn append_create_table(
&self,
target: &mut dyn Write,
name: &str,
columns: &[Column],
constraints: &[Constraint],
) -> Result<(), Error> {
writeln!(
target,
"CREATE{}TABLE {} (",
match self.optimize_tables_for {
Some(TableOptimizationHint::WritePerformance) => " UNLOGGED ",
None => " ",
},
name
)?;
for (index, column) in columns.iter().enumerate() {
Self::append_column_statement(target, column)?;
if index + 1 < columns.len() || !constraints.is_empty() {
write!(target, ",")?;
}
writeln!(target)?;
}
for (index, constraint) in constraints.iter().enumerate() {
Self::append_constraint(target, constraint)?;
if index + 1 < constraints.len() {
write!(target, ",")?;
}
writeln!(target)?;
}
writeln!(target, ");")?;
Ok(())
}
#[allow(clippy::single_match)] // to get a compiler error on a new variant in PrimaryKeyHint
fn apply_primary_key_hints(
&self,
target: &mut dyn Write,
table: &str,
columns: &[Column],
) -> Result<(), Error> {
let column_name = columns.iter().find_map(|column| {
if column.primary_key {
Some(column.name.clone())
} else {
None
}
});
if let Some(column) = column_name {
match self.primary_key_hint {
Some(PrimaryKeyHint::WrapOnOverflow) => {
writeln!(target, "ALTER SEQUENCE {}_{}_seq CYCLE;", table, column)?;
}
None => {}
}
}
Ok(())
}
pub fn append_column_statement(target: &mut dyn Write, column: &Column) -> Result<(), Error> {
write!(target, " {} {}", column.name, column.sql.to_string())?;
if column.primary_key {
write!(target, " PRIMARY KEY")?;
}
Ok(())
}
fn append_create_enum(
&self,
target: &mut dyn Write,
name: &str,
variants: &[String],
) -> Result<(), Error> {
writeln!(
target,
"CREATE{}TABLE {} (",
match self.optimize_tables_for {
Some(TableOptimizationHint::WritePerformance) => " UNLOGGED ",
None => " ",
},
name
)?;
writeln!(target, " id SERIAL PRIMARY KEY,")?;
writeln!(target, " name TEXT NOT NULL")?;
writeln!(target, ");")?;
writeln!(target, "INSERT INTO {} (id, name) VALUES", name)?;
for (index, variant) in variants.iter().enumerate() {
write!(target, " ({}, '{}')", index, variant)?;
if index + 1 < variants.len() {
write!(target, ", ")?;
} else {
write!(target, ";")?;
}
writeln!(target)?;
}
Ok(())
}
fn append_constraint(target: &mut dyn Write, constraint: &Constraint) -> Result<(), Error> {
match constraint {
Constraint::CombinedPrimaryKey(columns) => {
write!(target, " PRIMARY KEY({})", columns.join(", "))?;
}
Constraint::OneNotNull(columns) => {
write!(
target,
" CHECK (num_nonnulls({}) = 1)",
columns.join(", ")
)?;
}
}
Ok(())
}
fn append_index(
target: &mut dyn Write,
name: &str,
table: &str,
columns: &[String],
) -> Result<(), Error> {
writeln!(
target,
"CREATE INDEX {} ON {}({});",
name,
table,
columns.join(", ")
)?;
Ok(())
}
fn append_abandon_children(
target: &mut dyn Write,
table: &str,
name: &str,
children: &[(String, String, String)],
) -> Result<(), Error> {
writeln!(
target,
"CREATE OR REPLACE FUNCTION {}() RETURNS TRIGGER AS",
name
)?;
writeln!(target, "$$ BEGIN")?;
for (column, other_table, other_column) in children {
writeln!(
target,
" DELETE FROM {} WHERE {} = OLD.{};",
other_table, other_column, column
)?;
}
writeln!(target, " RETURN NULL;")?;
writeln!(target, "END; $$ LANGUAGE plpgsql;")?;
writeln!(
target,
"CREATE TRIGGER OnDelete{} AFTER DELETE ON {}",
name, table
)?;
writeln!(target, " FOR EACH ROW")?;
writeln!(target, " EXECUTE PROCEDURE {}();", name)?;
Ok(())
}
fn append_silently_prevent_any_delete(
target: &mut dyn Write,
name: &str,
table: &str,
) -> Result<(), Error> {
writeln!(target, "CREATE RULE {} AS ON DELETE TO {}", name, table)?;
writeln!(target, " DO INSTEAD NOTHING;")?;
Ok(())
}
}
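// Usage sketch (not part of the original file); `model` stands in for a
// `Model<Sql>` built elsewhere.
//
// let mut gen = SqlDefGenerator::default()
//     .optimize_tables_for_write_performance() // emits CREATE UNLOGGED TABLE
//     .wrap_primary_key_on_overflow(); // emits ALTER SEQUENCE ... CYCLE
// gen.add_model(model);
// let files = gen.to_string()?; // Vec of (file name, SQL script) pairs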
| 30.16609 | 98 | 0.489791 |
f72fe036478e66cbe33e30fd243346040807a67c
| 1,711 |
use crate::util::{Element, SCError, SCResult};
use super::ScoreDefinitionFragment;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ScoreDefinition {
fragments: Vec<ScoreDefinitionFragment>,
}
impl ScoreDefinition {
pub fn new(fragments: impl IntoIterator<Item=ScoreDefinitionFragment>) -> Self {
Self { fragments: fragments.into_iter().collect() }
}
#[inline]
pub fn fragments(&self) -> &Vec<ScoreDefinitionFragment> { &self.fragments }
}
impl TryFrom<&Element> for ScoreDefinition {
type Error = SCError;
fn try_from(elem: &Element) -> SCResult<Self> {
Ok(ScoreDefinition {
fragments: elem.childs_by_name("fragment").map(ScoreDefinitionFragment::try_from).collect::<SCResult<_>>()?,
})
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use crate::{util::Element, protocol::{ScoreDefinition, ScoreDefinitionFragment, ScoreAggregation}};
#[test]
fn test_parsing() {
assert_eq!(ScoreDefinition::try_from(&Element::from_str(r#"
<definition>
<fragment name="Siegpunkte">
<aggregation>SUM</aggregation>
<relevantForRanking>true</relevantForRanking>
</fragment>
<fragment name="∅ Punkte">
<aggregation>AVERAGE</aggregation>
<relevantForRanking>true</relevantForRanking>
</fragment>
</definition>
"#).unwrap()).unwrap(), ScoreDefinition::new([
ScoreDefinitionFragment::new("Siegpunkte", ScoreAggregation::Sum, true),
ScoreDefinitionFragment::new("∅ Punkte", ScoreAggregation::Average, true),
]));
}
}
| 31.685185 | 120 | 0.618352 |
67e62237907547932f4ddae3ee3c0907db60403d
| 13,617 |
// Copyright 2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
use core::marker::PhantomData;
use super::*;
use super::{elem_sqr_mul, elem_sqr_mul_acc, Mont};
use super::elem::{binary_op, binary_op_assign};
macro_rules! p384_limbs {
[$limb_b:expr, $limb_a:expr, $limb_9:expr, $limb_8:expr,
$limb_7:expr, $limb_6:expr, $limb_5:expr, $limb_4:expr,
$limb_3:expr, $limb_2:expr, $limb_1:expr, $limb_0:expr] => {
limbs![$limb_b, $limb_a, $limb_9, $limb_8,
$limb_7, $limb_6, $limb_5, $limb_4,
$limb_3, $limb_2, $limb_1, $limb_0]
};
}
pub static COMMON_OPS: CommonOps = CommonOps {
num_limbs: 384 / LIMB_BITS,
q: Mont {
p: p384_limbs![0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe,
0xffffffff, 0x00000000, 0x00000000, 0xffffffff],
rr: limbs![0, 0, 0, 1, 2, 0, 0xfffffffe, 0, 2, 0, 0xfffffffe, 1],
},
n: Elem {
limbs: p384_limbs![0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xc7634d81, 0xf4372ddf,
0x581a0db2, 0x48b0a77a, 0xecec196a, 0xccc52973],
m: PhantomData,
encoding: PhantomData, // Unencoded
},
a: Elem {
limbs: p384_limbs![0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffb,
0xfffffffc, 0x00000000, 0x00000003, 0xfffffffc],
m: PhantomData,
encoding: PhantomData, // Unreduced
},
b: Elem {
limbs: p384_limbs![0xcd08114b, 0x604fbff9, 0xb62b21f4, 0x1f022094,
0xe3374bee, 0x94938ae2, 0x77f2209b, 0x1920022e,
0xf729add8, 0x7a4c32ec, 0x08118871, 0x9d412dcc],
m: PhantomData,
encoding: PhantomData, // Unreduced
},
elem_add_impl: GFp_p384_elem_add,
elem_mul_mont: GFp_p384_elem_mul_mont,
elem_sqr_mont: GFp_p384_elem_sqr_mont,
point_add_jacobian_impl: GFp_nistz384_point_add,
};
pub static PRIVATE_KEY_OPS: PrivateKeyOps = PrivateKeyOps {
common: &COMMON_OPS,
elem_inv: p384_elem_inv,
point_mul_base_impl: p384_point_mul_base_impl,
point_mul_impl: GFp_nistz384_point_mul,
};
fn p384_elem_inv(a: &Elem<R>) -> Elem<R> {
// Calculate the modular inverse of field element |a| using Fermat's Little
// Theorem:
//
// a**-1 (mod q) == a**(q - 2) (mod q)
//
// The exponent (q - 2) is:
//
// 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\
// ffffffff0000000000000000fffffffd
#[inline]
fn sqr_mul(a: &Elem<R>, squarings: usize, b: &Elem<R>) -> Elem<R> {
elem_sqr_mul(&COMMON_OPS, a, squarings, b)
}
#[inline]
fn sqr_mul_acc(a: &mut Elem<R>, squarings: usize, b: &Elem<R>) {
elem_sqr_mul_acc(&COMMON_OPS, a, squarings, b)
}
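    // Build the exponent with a fixed addition chain. `sqr_mul(a, n, b)`
    // computes a**(2**n) * b, i.e. it shifts the exponent accumulated so far
    // left by n bits and appends the exponent bits contributed by b.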
let b_1 = &a;
let b_11 = sqr_mul(b_1, 0 + 1, b_1);
let f = sqr_mul(&b_11, 0 + 2, &b_11);
let ff = sqr_mul(&f, 0 + 4, &f);
let ffff = sqr_mul(&ff, 0 + 8, &ff);
let ffffff = sqr_mul(&ffff, 0 + 8, &ff);
let fffffff = sqr_mul(&ffffff, 0 + 4, &f);
let ffffffffffffff = sqr_mul(&fffffff, 0 + 28, &fffffff);
let ffffffffffffffffffffffffffff =
sqr_mul(&ffffffffffffff, 0 + 56, &ffffffffffffff);
// ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
let mut acc = sqr_mul(&ffffffffffffffffffffffffffff, 0 + 112,
&ffffffffffffffffffffffffffff);
    // fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
sqr_mul_acc(&mut acc, 0 + 28, &fffffff);
// fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff[11]
sqr_mul_acc(&mut acc, 0 + 2, &b_11);
// fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff[111]
sqr_mul_acc(&mut acc, 0 + 1, b_1);
// fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffff
sqr_mul_acc(&mut acc, 1 + 28, &fffffff);
// fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff
sqr_mul_acc(&mut acc, 0 + 4, &f);
// fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff
// 0000000000000000fffffff
sqr_mul_acc(&mut acc, 64 + 28, &fffffff);
// fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff
// 0000000000000000fffffffd
sqr_mul_acc(&mut acc, 0 + 2, &b_11);
sqr_mul(&acc, 1 + 1, b_1)
}
fn p384_point_mul_base_impl(a: &Scalar) -> Point {
// XXX: Not efficient. TODO: Precompute multiples of the generator.
static P384_GENERATOR: (Elem<R>, Elem<R>) = (
Elem {
limbs: p384_limbs![0x4d3aadc2, 0x299e1513, 0x812ff723, 0x614ede2b,
0x64548684, 0x59a30eff, 0x879c3afc, 0x541b4d6e,
0x20e378e2, 0xa0d6ce38, 0x3dd07566, 0x49c0b528],
m: PhantomData,
encoding: PhantomData,
},
Elem {
limbs: p384_limbs![0x2b78abc2, 0x5a15c5e9, 0xdd800226, 0x3969a840,
0xc6c35219, 0x68f4ffd9, 0x8bade756, 0x2e83b050,
0xa1bfa8bf, 0x7bb4a9ac, 0x23043dad, 0x4b03a4fe],
m: PhantomData,
encoding: PhantomData,
}
);
PRIVATE_KEY_OPS.point_mul(a, &P384_GENERATOR)
}
pub static PUBLIC_KEY_OPS: PublicKeyOps = PublicKeyOps { common: &COMMON_OPS };
pub static SCALAR_OPS: ScalarOps = ScalarOps {
common: &COMMON_OPS,
scalar_inv_to_mont_impl: p384_scalar_inv_to_mont,
scalar_mul_mont: GFp_p384_scalar_mul_mont,
};
pub static PUBLIC_SCALAR_OPS: PublicScalarOps = PublicScalarOps {
scalar_ops: &SCALAR_OPS,
public_key_ops: &PUBLIC_KEY_OPS,
private_key_ops: &PRIVATE_KEY_OPS,
q_minus_n: Elem {
limbs: p384_limbs![0, 0, 0, 0, 0, 0, 0x389cb27e, 0x0bc8d21f,
0x1313e696, 0x333ad68c, 0xa7e5f24c, 0xb74f5885],
m: PhantomData,
encoding: PhantomData, // Unencoded
},
};
fn p384_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
// Calculate the modular inverse of scalar |a| using Fermat's Little
// Theorem:
//
// a**-1 (mod n) == a**(n - 2) (mod n)
//
// The exponent (n - 2) is:
//
// 0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf\
// 581a0db248b0a77aecec196accc52971.
// XXX(perf): This hasn't been optimized at all. TODO: optimize.
fn mul(a: &Scalar<R>, b: &Scalar<R>) -> Scalar<R> {
binary_op(GFp_p384_scalar_mul_mont, a, b)
}
fn sqr(a: &Scalar<R>) -> Scalar<R> {
binary_op(GFp_p384_scalar_mul_mont, a, a)
}
fn sqr_mut(a: &mut Scalar<R>) {
unary_op_from_binary_op_assign(GFp_p384_scalar_mul_mont, a);
}
// Returns (`a` squared `squarings` times) * `b`.
fn sqr_mul(a: &Scalar<R>, squarings: usize, b: &Scalar<R>) -> Scalar<R> {
debug_assert!(squarings >= 1);
let mut tmp = sqr(a);
for _ in 1..squarings {
sqr_mut(&mut tmp);
}
mul(&tmp, b)
}
// Sets `acc` = (`acc` squared `squarings` times) * `b`.
fn sqr_mul_acc(acc: &mut Scalar<R>, squarings: usize, b: &Scalar<R>) {
debug_assert!(squarings >= 1);
for _ in 0..squarings {
sqr_mut(acc);
}
binary_op_assign(GFp_p384_scalar_mul_mont, acc, b)
}
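    // Montgomery multiplication computes a * b * R**-1 (mod n), so
    // multiplying an unencoded value by N_RR = R**2 (mod n) below yields
    // a * R (mod n), i.e. `a` in Montgomery form.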
fn to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
static N_RR: Scalar<Unencoded> = Scalar {
limbs: p384_limbs![0x0c84ee01, 0x2b39bf21, 0x3fb05b7a, 0x28266895,
0xd40d4917, 0x4aab1cc5, 0xbc3e483a, 0xfcb82947,
0xff3d81e5, 0xdf1aa419, 0x2d319b24, 0x19b409a9],
m: PhantomData,
encoding: PhantomData
};
binary_op(GFp_p384_scalar_mul_mont, a, &N_RR)
}
// Indexes into `d`.
const B_1: usize = 0;
const B_10: usize = 1;
const B_11: usize = 2;
const B_101: usize = 3;
const B_111: usize = 4;
const B_1111: usize = 5;
const DIGIT_COUNT: usize = 6;
let mut d = [Scalar::zero(); DIGIT_COUNT];
d[B_1] = to_mont(a);
d[B_10] = sqr (&d[B_1]);
d[B_11] = mul (&d[B_10], &d[B_1]);
d[B_101] = sqr_mul(&d[B_10], 0 + 1, &d[B_1]);
d[B_111] = mul (&d[B_101], &d[B_10]);
d[B_1111] = sqr_mul(&d[B_111], 0 + 1, &d[B_1]);
let ff = sqr_mul(&d[B_1111], 0 + 4, &d[B_1111]);
let ffff = sqr_mul(&ff, 0 + 8, &ff);
let ffffffff = sqr_mul(&ffff, 0 + 16, &ffff);
let ffffffffffffffff = sqr_mul(&ffffffff, 0 + 32, &ffffffff);
let ffffffffffffffffffffffff =
sqr_mul(&ffffffffffffffff, 0 + 32, &ffffffff);
// ffffffffffffffffffffffffffffffffffffffffffffffff
let mut acc =
sqr_mul(&ffffffffffffffffffffffff, 0 + 96, &ffffffffffffffffffffffff);
// The rest of the exponent, in binary, is:
//
// 1100011101100011010011011000000111110100001101110010110111011111
// 0101100000011010000011011011001001001000101100001010011101111010
// 1110110011101100000110010110101011001100110001010010100101110001
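    // Each entry below is (squarings, index of a precomputed digit): square
    // the accumulator `squarings` times, then multiply by the digit. An
    // entry written (3 + 2, B_11) thus appends three 0 bits followed by 11.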
static REMAINING_WINDOWS: [(u8, u8); 48] = [
( 2, B_11 as u8),
(3 + 3, B_111 as u8),
(1 + 2, B_11 as u8),
(3 + 2, B_11 as u8),
(1 + 1, B_1 as u8),
(2 + 2, B_11 as u8),
(1 + 2, B_11 as u8),
(6 + 4, B_1111 as u8),
( 3, B_101 as u8),
(4 + 2, B_11 as u8),
(1 + 3, B_111 as u8),
(2 + 3, B_101 as u8),
( 1, B_1 as u8),
(1 + 3, B_111 as u8),
(1 + 4, B_1111 as u8),
( 3, B_101 as u8),
(1 + 2, B_11 as u8),
(6 + 2, B_11 as u8),
(1 + 1, B_1 as u8),
(5 + 2, B_11 as u8),
(1 + 2, B_11 as u8),
(1 + 2, B_11 as u8),
(2 + 1, B_1 as u8),
(2 + 1, B_1 as u8),
(2 + 1, B_1 as u8),
(3 + 1, B_1 as u8),
(1 + 2, B_11 as u8),
(4 + 1, B_1 as u8),
(1 + 1, B_1 as u8),
(2 + 3, B_111 as u8),
(1 + 4, B_1111 as u8),
(1 + 1, B_1 as u8),
(1 + 3, B_111 as u8),
(1 + 2, B_11 as u8),
(2 + 3, B_111 as u8),
(1 + 2, B_11 as u8),
(5 + 2, B_11 as u8),
(2 + 1, B_1 as u8),
(1 + 2, B_11 as u8),
(1 + 3, B_101 as u8),
(1 + 2, B_11 as u8),
(2 + 2, B_11 as u8),
(2 + 2, B_11 as u8),
(3 + 3, B_101 as u8),
(2 + 3, B_101 as u8),
(2 + 1, B_1 as u8),
(1 + 3, B_111 as u8),
(3 + 1, B_1 as u8),
];
for &(squarings, digit) in &REMAINING_WINDOWS[..] {
sqr_mul_acc(&mut acc, squarings as usize, &d[digit as usize]);
}
acc
}
#[allow(non_snake_case)]
unsafe extern fn GFp_p384_elem_sqr_mont(
r: *mut Limb/*[COMMON_OPS.num_limbs]*/,
a: *const Limb/*[COMMON_OPS.num_limbs]*/) {
// XXX: Inefficient. TODO: Make a dedicated squaring routine.
GFp_p384_elem_mul_mont(r, a, a);
}
extern {
fn GFp_p384_elem_add(r: *mut Limb/*[COMMON_OPS.num_limbs]*/,
a: *const Limb/*[COMMON_OPS.num_limbs]*/,
b: *const Limb/*[COMMON_OPS.num_limbs]*/);
fn GFp_p384_elem_mul_mont(r: *mut Limb/*[COMMON_OPS.num_limbs]*/,
a: *const Limb/*[COMMON_OPS.num_limbs]*/,
b: *const Limb/*[COMMON_OPS.num_limbs]*/);
fn GFp_nistz384_point_add(r: *mut Limb/*[3][COMMON_OPS.num_limbs]*/,
a: *const Limb/*[3][COMMON_OPS.num_limbs]*/,
b: *const Limb/*[3][COMMON_OPS.num_limbs]*/);
fn GFp_nistz384_point_mul(r: *mut Limb/*[3][COMMON_OPS.num_limbs]*/,
p_scalar: *const Limb/*[COMMON_OPS.num_limbs]*/,
p_x: *const Limb/*[COMMON_OPS.num_limbs]*/,
p_y: *const Limb/*[COMMON_OPS.num_limbs]*/);
fn GFp_p384_scalar_mul_mont(r: *mut Limb/*[COMMON_OPS.num_limbs]*/,
a: *const Limb/*[COMMON_OPS.num_limbs]*/,
b: *const Limb/*[COMMON_OPS.num_limbs]*/);
}
#[cfg(feature = "internal_benches")]
mod internal_benches {
use super::*;
use super::super::internal_benches::*;
bench_curve!(&[
Scalar { limbs: LIMBS_1 },
Scalar { limbs: LIMBS_ALTERNATING_10, },
Scalar { // n - 1
limbs: p384_limbs![0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0xc7634d81, 0xf4372ddf,
0x581a0db2, 0x48b0a77a, 0xecec196a,
0xccc52973 - 1],
},
]);
}
| 35.095361 | 79 | 0.583902 |
7af4f2560e8b284372d40977e8c98c857b81e51b
| 14,664 |
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_datavalues::prelude::*;
use common_exception::Result;
use serde_json::json;
use super::scalar_function_test::test_scalar_functions;
use super::scalar_function_test::ScalarFunctionTest;
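// Each case below applies the comparison function named in the
// `test_scalar_functions` call row-wise to the two input columns and checks
// the resulting boolean column against `expect`.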
#[test]
fn test_eq_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "eq-passed",
columns: vec![
Series::from_data(vec![4i64, 3, 2, 4]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![false, false, false, true]),
error: "",
},
ScalarFunctionTest {
name: "variant-int-eq-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!(4i64)),
VariantValue::from(json!(3i64)),
VariantValue::from(json!(2i64)),
VariantValue::from(json!(4i64)),
]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![false, false, false, true]),
error: "",
},
ScalarFunctionTest {
name: "variant-string-eq-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("dd")),
VariantValue::from(json!("cc")),
VariantValue::from(json!("bb")),
VariantValue::from(json!("dd")),
]),
Series::from_data(vec!["aa", "bb", "cc", "dd"]),
],
expect: Series::from_data(vec![false, false, false, true]),
error: "",
},
];
test_scalar_functions("=", &tests)
}
#[test]
fn test_gt_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "gt-passed",
columns: vec![
Series::from_data(vec![4i64, 3, 2, 4]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![true, true, false, false]),
error: "",
},
ScalarFunctionTest {
name: "variant-int-gt-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!(4i64)),
VariantValue::from(json!(3i64)),
VariantValue::from(json!(2i64)),
VariantValue::from(json!(4i64)),
]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![true, true, false, false]),
error: "",
},
ScalarFunctionTest {
name: "variant-string-gt-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("dd")),
VariantValue::from(json!("cc")),
VariantValue::from(json!("bb")),
VariantValue::from(json!("dd")),
]),
Series::from_data(vec!["aa", "bb", "cc", "dd"]),
],
expect: Series::from_data(vec![true, true, false, false]),
error: "",
},
];
test_scalar_functions(">", &tests)
}
#[test]
fn test_gt_eq_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "gt-eq-passed",
columns: vec![
Series::from_data(vec![4i64, 3, 2, 4]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![true, true, false, true]),
error: "",
},
ScalarFunctionTest {
name: "variant-int-gt-eq-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!(4i64)),
VariantValue::from(json!(3i64)),
VariantValue::from(json!(2i64)),
VariantValue::from(json!(4i64)),
]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![true, true, false, true]),
error: "",
},
ScalarFunctionTest {
name: "variant-string-gt-eq-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("dd")),
VariantValue::from(json!("cc")),
VariantValue::from(json!("bb")),
VariantValue::from(json!("dd")),
]),
Series::from_data(vec!["aa", "bb", "cc", "dd"]),
],
expect: Series::from_data(vec![true, true, false, true]),
error: "",
},
];
test_scalar_functions(">=", &tests)
}
#[test]
fn test_lt_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "lt-passed",
columns: vec![
Series::from_data(vec![4i64, 3, 2, 4]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![false, false, true, false]),
error: "",
},
ScalarFunctionTest {
name: "variant-int-lt-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!(4i64)),
VariantValue::from(json!(3i64)),
VariantValue::from(json!(2i64)),
VariantValue::from(json!(4i64)),
]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![false, false, true, false]),
error: "",
},
ScalarFunctionTest {
name: "variant-string-lt-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("dd")),
VariantValue::from(json!("cc")),
VariantValue::from(json!("bb")),
VariantValue::from(json!("dd")),
]),
Series::from_data(vec!["aa", "bb", "cc", "dd"]),
],
expect: Series::from_data(vec![false, false, true, false]),
error: "",
},
];
test_scalar_functions("<", &tests)
}
#[test]
fn test_lt_eq_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "lt-eq-passed",
columns: vec![
Series::from_data(vec![4i64, 3, 2, 4]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![false, false, true, true]),
error: "",
},
ScalarFunctionTest {
name: "variant-int-lt-eq-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!(4i64)),
VariantValue::from(json!(3i64)),
VariantValue::from(json!(2i64)),
VariantValue::from(json!(4i64)),
]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![false, false, true, true]),
error: "",
},
ScalarFunctionTest {
name: "variant-string-lt-eq-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("dd")),
VariantValue::from(json!("cc")),
VariantValue::from(json!("bb")),
VariantValue::from(json!("dd")),
]),
Series::from_data(vec!["aa", "bb", "cc", "dd"]),
],
expect: Series::from_data(vec![false, false, true, true]),
error: "",
},
];
test_scalar_functions("<=", &tests)
}
#[test]
fn test_not_eq_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "not-eq-passed",
columns: vec![
Series::from_data(vec![4i64, 3, 2, 4]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![true, true, true, false]),
error: "",
},
ScalarFunctionTest {
name: "variant-int-not-eq-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!(4i64)),
VariantValue::from(json!(3i64)),
VariantValue::from(json!(2i64)),
VariantValue::from(json!(4i64)),
]),
Series::from_data(vec![1i64, 2, 3, 4]),
],
expect: Series::from_data(vec![true, true, true, false]),
error: "",
},
ScalarFunctionTest {
name: "variant-string-not-eq-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("dd")),
VariantValue::from(json!("cc")),
VariantValue::from(json!("bb")),
VariantValue::from(json!("dd")),
]),
Series::from_data(vec!["aa", "bb", "cc", "dd"]),
],
expect: Series::from_data(vec![true, true, true, false]),
error: "",
},
];
test_scalar_functions("<>", &tests)
}
#[test]
fn test_like_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "like-passed",
columns: vec![
Series::from_data(vec!["abc", "abd", "abe", "abf"]),
Series::from_data(vec!["a%", "_b_", "abe", "a"]),
],
expect: Series::from_data(vec![true, true, true, false]),
error: "",
},
ScalarFunctionTest {
name: "variant-like-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("abc")),
VariantValue::from(json!("abd")),
VariantValue::from(json!("abe")),
VariantValue::from(json!("abf")),
]),
Series::from_data(vec!["a%", "_b_", "abe", "a"]),
],
expect: Series::from_data(vec![true, true, true, false]),
error: "",
},
];
test_scalar_functions("like", &tests)
}
#[test]
fn test_not_like_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "not-like-passed",
columns: vec![
Series::from_data(vec!["abc", "abd", "abe", "abf"]),
Series::from_data(vec!["a%", "_b_", "abe", "a"]),
],
expect: Series::from_data(vec![false, false, false, true]),
error: "",
},
ScalarFunctionTest {
name: "variant-not-like-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("abc")),
VariantValue::from(json!("abd")),
VariantValue::from(json!("abe")),
VariantValue::from(json!("abf")),
]),
Series::from_data(vec!["a%", "_b_", "abe", "a"]),
],
expect: Series::from_data(vec![false, false, false, true]),
error: "",
},
];
test_scalar_functions("not like", &tests)
}
#[test]
fn test_regexp_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "regexp-passed",
columns: vec![
Series::from_data(vec!["abc", "abd", "abe", "abf", "abc", ""]),
Series::from_data(vec!["^a", "^b", "abe", "a", "", ""]),
],
expect: Series::from_data(vec![true, false, true, true, false, true]),
error: "",
},
ScalarFunctionTest {
name: "variant-regexp-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("abc")),
VariantValue::from(json!("abd")),
VariantValue::from(json!("abe")),
VariantValue::from(json!("abf")),
VariantValue::from(json!("abc")),
VariantValue::from(json!("")),
]),
Series::from_data(vec!["^a", "^b", "abe", "a", "", ""]),
],
expect: Series::from_data(vec![true, false, true, true, false, true]),
error: "",
},
];
test_scalar_functions("regexp", &tests)
}
#[test]
fn test_not_regexp_comparison_function() -> Result<()> {
let tests = vec![
ScalarFunctionTest {
name: "not-regexp-passed",
columns: vec![
Series::from_data(vec!["abc", "abd", "abe", "abf", "abc", ""]),
Series::from_data(vec!["^a", "^b", "abe", "a", "", ""]),
],
expect: Series::from_data(vec![false, true, false, false, true, false]),
error: "",
},
ScalarFunctionTest {
name: "variant-not-regexp-passed",
columns: vec![
Series::from_data(vec![
VariantValue::from(json!("abc")),
VariantValue::from(json!("abd")),
VariantValue::from(json!("abe")),
VariantValue::from(json!("abf")),
VariantValue::from(json!("abc")),
VariantValue::from(json!("")),
]),
Series::from_data(vec!["^a", "^b", "abe", "a", "", ""]),
],
expect: Series::from_data(vec![false, true, false, false, true, false]),
error: "",
},
];
test_scalar_functions("not regexp", &tests)
}
| 34.997613 | 84 | 0.458265 |
f536ad800c0dd65aa7ed6d440b097380772952df
| 76 |
#[macro_use]
extern crate ekiden_tools;
define_edl! {
"identity.edl"
}
| 10.857143 | 26 | 0.697368 |
1d5e7b73be557c17403377e9bbda5a35d47cdf01
| 20,247 |
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![no_std]
#![allow(unused_attributes)]
#![deny(warnings)]
#![unstable(feature = "alloc_system",
reason = "this library is unlikely to be stabilized in its current \
form or name",
issue = "32838")]
#![feature(global_allocator)]
#![feature(allocator_api)]
#![feature(alloc)]
#![feature(core_intrinsics)]
#![feature(staged_api)]
#![feature(rustc_attrs)]
#![cfg_attr(any(unix, target_os = "cloudabi", target_os = "redox"), feature(libc))]
#![rustc_alloc_kind = "lib"]
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values.
#[cfg(all(any(target_arch = "x86",
target_arch = "arm",
target_arch = "mips",
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "asmjs",
target_arch = "wasm32")))]
#[allow(dead_code)]
const MIN_ALIGN: usize = 8;
#[cfg(all(any(target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "mips64",
target_arch = "s390x",
target_arch = "sparc64")))]
#[allow(dead_code)]
const MIN_ALIGN: usize = 16;
extern crate alloc;
use self::alloc::heap::{Alloc, AllocErr, Layout, Excess, CannotReallocInPlace};
#[unstable(feature = "allocator_api", issue = "32838")]
pub struct System;
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl Alloc for System {
#[inline]
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
(&*self).alloc(layout)
}
#[inline]
unsafe fn alloc_zeroed(&mut self, layout: Layout)
-> Result<*mut u8, AllocErr>
{
(&*self).alloc_zeroed(layout)
}
#[inline]
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
(&*self).dealloc(ptr, layout)
}
#[inline]
unsafe fn realloc(&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout) -> Result<*mut u8, AllocErr> {
(&*self).realloc(ptr, old_layout, new_layout)
}
fn oom(&mut self, err: AllocErr) -> ! {
(&*self).oom(err)
}
#[inline]
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
(&self).usable_size(layout)
}
#[inline]
unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
(&*self).alloc_excess(layout)
}
#[inline]
unsafe fn realloc_excess(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<Excess, AllocErr> {
(&*self).realloc_excess(ptr, layout, new_layout)
}
#[inline]
unsafe fn grow_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
(&*self).grow_in_place(ptr, layout, new_layout)
}
#[inline]
unsafe fn shrink_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
(&*self).shrink_in_place(ptr, layout, new_layout)
}
}
#[cfg(any(unix, target_os = "cloudabi", target_os = "redox"))]
mod platform {
extern crate libc;
use core::cmp;
use core::ptr;
use MIN_ALIGN;
use System;
use alloc::heap::{Alloc, AllocErr, Layout};
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl<'a> Alloc for &'a System {
#[inline]
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
let ptr = if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
libc::malloc(layout.size()) as *mut u8
} else {
aligned_malloc(&layout)
};
if !ptr.is_null() {
Ok(ptr)
} else {
Err(AllocErr::Exhausted { request: layout })
}
}
#[inline]
unsafe fn alloc_zeroed(&mut self, layout: Layout)
-> Result<*mut u8, AllocErr>
{
if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
let ptr = libc::calloc(layout.size(), 1) as *mut u8;
if !ptr.is_null() {
Ok(ptr)
} else {
Err(AllocErr::Exhausted { request: layout })
}
} else {
let ret = self.alloc(layout.clone());
if let Ok(ptr) = ret {
ptr::write_bytes(ptr, 0, layout.size());
}
ret
}
}
#[inline]
unsafe fn dealloc(&mut self, ptr: *mut u8, _layout: Layout) {
libc::free(ptr as *mut libc::c_void)
}
#[inline]
unsafe fn realloc(&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout) -> Result<*mut u8, AllocErr> {
if old_layout.align() != new_layout.align() {
return Err(AllocErr::Unsupported {
details: "cannot change alignment on `realloc`",
})
}
            if new_layout.align() <= MIN_ALIGN && new_layout.align() <= new_layout.size() {
let ptr = libc::realloc(ptr as *mut libc::c_void, new_layout.size());
if !ptr.is_null() {
Ok(ptr as *mut u8)
} else {
Err(AllocErr::Exhausted { request: new_layout })
}
} else {
let res = self.alloc(new_layout.clone());
if let Ok(new_ptr) = res {
let size = cmp::min(old_layout.size(), new_layout.size());
ptr::copy_nonoverlapping(ptr, new_ptr, size);
self.dealloc(ptr, old_layout);
}
res
}
}
fn oom(&mut self, err: AllocErr) -> ! {
use core::fmt::{self, Write};
// Print a message to stderr before aborting to assist with
// debugging. It is critical that this code does not allocate any
// memory since we are in an OOM situation. Any errors are ignored
// while printing since there's nothing we can do about them and we
// are about to exit anyways.
drop(writeln!(Stderr, "fatal runtime error: {}", err));
unsafe {
::core::intrinsics::abort();
}
struct Stderr;
impl Write for Stderr {
#[cfg(target_os = "cloudabi")]
fn write_str(&mut self, _: &str) -> fmt::Result {
// CloudABI does not have any reserved file descriptor
// numbers. We should not attempt to write to file
// descriptor #2, as it may be associated with any kind of
// resource.
Ok(())
}
#[cfg(not(target_os = "cloudabi"))]
fn write_str(&mut self, s: &str) -> fmt::Result {
unsafe {
libc::write(libc::STDERR_FILENO,
s.as_ptr() as *const libc::c_void,
s.len());
}
Ok(())
}
}
}
}
#[cfg(any(target_os = "android", target_os = "redox", target_os = "solaris"))]
#[inline]
unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
// On android we currently target API level 9 which unfortunately
// doesn't have the `posix_memalign` API used below. Instead we use
// `memalign`, but this unfortunately has the property on some systems
// where the memory returned cannot be deallocated by `free`!
//
// Upon closer inspection, however, this appears to work just fine with
// Android, so for this platform we should be fine to call `memalign`
// (which is present in API level 9). Some helpful references could
// possibly be chromium using memalign [1], attempts at documenting that
// memalign + free is ok [2] [3], or the current source of chromium
// which still uses memalign on android [4].
//
// [1]: https://codereview.chromium.org/10796020/
// [2]: https://code.google.com/p/android/issues/detail?id=35391
// [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
// [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
// /memory/aligned_memory.cc
libc::memalign(layout.align(), layout.size()) as *mut u8
}
#[cfg(not(any(target_os = "android", target_os = "redox", target_os = "solaris")))]
#[inline]
unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
let mut out = ptr::null_mut();
let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
if ret != 0 {
ptr::null_mut()
} else {
out as *mut u8
}
}
}
#[cfg(windows)]
#[allow(bad_style)]
mod platform {
use core::cmp;
use core::ptr;
use MIN_ALIGN;
use System;
use alloc::heap::{Alloc, AllocErr, Layout, CannotReallocInPlace};
type LPVOID = *mut u8;
type HANDLE = LPVOID;
type SIZE_T = usize;
type DWORD = u32;
type BOOL = i32;
type LPDWORD = *mut DWORD;
type LPOVERLAPPED = *mut u8;
const STD_ERROR_HANDLE: DWORD = -12i32 as DWORD;
extern "system" {
fn GetProcessHeap() -> HANDLE;
fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
fn GetLastError() -> DWORD;
fn WriteFile(hFile: HANDLE,
lpBuffer: LPVOID,
nNumberOfBytesToWrite: DWORD,
lpNumberOfBytesWritten: LPDWORD,
lpOverlapped: LPOVERLAPPED)
-> BOOL;
fn GetStdHandle(which: DWORD) -> HANDLE;
}
#[repr(C)]
struct Header(*mut u8);
const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
const HEAP_REALLOC_IN_PLACE_ONLY: DWORD = 0x00000010;
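    // Over-aligned allocations reserve `size + align` bytes, round the raw
    // pointer up to the requested alignment, and stash the original pointer
    // in the `Header` word immediately before the aligned pointer so that
    // deallocation and reallocation can recover it.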
unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
&mut *(ptr as *mut Header).offset(-1)
}
unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
let aligned = ptr.offset((align - (ptr as usize & (align - 1))) as isize);
*get_header(aligned) = Header(ptr);
aligned
}
#[inline]
unsafe fn allocate_with_flags(layout: Layout, flags: DWORD)
-> Result<*mut u8, AllocErr>
{
let ptr = if layout.align() <= MIN_ALIGN {
HeapAlloc(GetProcessHeap(), flags, layout.size())
} else {
let size = layout.size() + layout.align();
let ptr = HeapAlloc(GetProcessHeap(), flags, size);
if ptr.is_null() {
ptr
} else {
align_ptr(ptr, layout.align())
}
};
if ptr.is_null() {
Err(AllocErr::Exhausted { request: layout })
} else {
Ok(ptr as *mut u8)
}
}
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl<'a> Alloc for &'a System {
#[inline]
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
allocate_with_flags(layout, 0)
}
#[inline]
unsafe fn alloc_zeroed(&mut self, layout: Layout)
-> Result<*mut u8, AllocErr>
{
allocate_with_flags(layout, HEAP_ZERO_MEMORY)
}
#[inline]
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if layout.align() <= MIN_ALIGN {
let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
debug_assert!(err != 0, "Failed to free heap memory: {}",
GetLastError());
} else {
let header = get_header(ptr);
let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
debug_assert!(err != 0, "Failed to free heap memory: {}",
GetLastError());
}
}
#[inline]
unsafe fn realloc(&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout) -> Result<*mut u8, AllocErr> {
if old_layout.align() != new_layout.align() {
return Err(AllocErr::Unsupported {
details: "cannot change alignment on `realloc`",
})
}
if new_layout.align() <= MIN_ALIGN {
let ptr = HeapReAlloc(GetProcessHeap(),
0,
ptr as LPVOID,
new_layout.size());
if !ptr.is_null() {
Ok(ptr as *mut u8)
} else {
Err(AllocErr::Exhausted { request: new_layout })
}
} else {
let res = self.alloc(new_layout.clone());
if let Ok(new_ptr) = res {
let size = cmp::min(old_layout.size(), new_layout.size());
ptr::copy_nonoverlapping(ptr, new_ptr, size);
self.dealloc(ptr, old_layout);
}
res
}
}
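        // Both in-place paths below pass HEAP_REALLOC_IN_PLACE_ONLY, so
        // HeapReAlloc either resizes the block without moving it or fails;
        // growing and shrinking therefore share one implementation.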
#[inline]
unsafe fn grow_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
self.shrink_in_place(ptr, layout, new_layout)
}
#[inline]
unsafe fn shrink_in_place(&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
if old_layout.align() != new_layout.align() {
return Err(CannotReallocInPlace)
}
let new = if new_layout.align() <= MIN_ALIGN {
HeapReAlloc(GetProcessHeap(),
HEAP_REALLOC_IN_PLACE_ONLY,
ptr as LPVOID,
new_layout.size())
} else {
let header = get_header(ptr);
HeapReAlloc(GetProcessHeap(),
HEAP_REALLOC_IN_PLACE_ONLY,
header.0 as LPVOID,
new_layout.size() + new_layout.align())
};
if new.is_null() {
Err(CannotReallocInPlace)
} else {
Ok(())
}
}
fn oom(&mut self, err: AllocErr) -> ! {
use core::fmt::{self, Write};
// Same as with unix we ignore all errors here
drop(writeln!(Stderr, "fatal runtime error: {}", err));
unsafe {
::core::intrinsics::abort();
}
struct Stderr;
impl Write for Stderr {
fn write_str(&mut self, s: &str) -> fmt::Result {
unsafe {
// WriteFile silently fails if it is passed an invalid
// handle, so there is no need to check the result of
// GetStdHandle.
WriteFile(GetStdHandle(STD_ERROR_HANDLE),
s.as_ptr() as LPVOID,
s.len() as DWORD,
ptr::null_mut(),
ptr::null_mut());
}
Ok(())
}
}
}
}
}
// This is an implementation of a global allocator on the wasm32 platform when
// emscripten is not in use. In that situation there's no actual runtime for us
// to lean on for allocation, so instead we provide our own!
//
// The wasm32 instruction set has two instructions for getting the current
// amount of memory and growing the amount of memory. These instructions are the
// foundation on which we're able to build an allocator, so we do so! Note that
// the instructions are also pretty "global" and this is the "global" allocator
// after all!
//
// The current allocator here is the `dlmalloc` crate which we've got included
// in the rust-lang/rust repository as a submodule. The crate is a port of
// dlmalloc.c from C to Rust and is basically just so we can have "pure Rust"
// for now which is currently technically required (can't link with C yet).
//
// The crate itself provides a global allocator which on wasm has no
// synchronization as there are no threads!
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
mod platform {
extern crate dlmalloc;
use alloc::heap::{Alloc, AllocErr, Layout, Excess, CannotReallocInPlace};
use System;
use self::dlmalloc::GlobalDlmalloc;
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl<'a> Alloc for &'a System {
#[inline]
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
GlobalDlmalloc.alloc(layout)
}
#[inline]
unsafe fn alloc_zeroed(&mut self, layout: Layout)
-> Result<*mut u8, AllocErr>
{
GlobalDlmalloc.alloc_zeroed(layout)
}
#[inline]
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
GlobalDlmalloc.dealloc(ptr, layout)
}
#[inline]
unsafe fn realloc(&mut self,
ptr: *mut u8,
old_layout: Layout,
new_layout: Layout) -> Result<*mut u8, AllocErr> {
GlobalDlmalloc.realloc(ptr, old_layout, new_layout)
}
#[inline]
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
GlobalDlmalloc.usable_size(layout)
}
#[inline]
unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
GlobalDlmalloc.alloc_excess(layout)
}
#[inline]
unsafe fn realloc_excess(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<Excess, AllocErr> {
GlobalDlmalloc.realloc_excess(ptr, layout, new_layout)
}
#[inline]
unsafe fn grow_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
GlobalDlmalloc.grow_in_place(ptr, layout, new_layout)
}
#[inline]
unsafe fn shrink_in_place(&mut self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout) -> Result<(), CannotReallocInPlace> {
GlobalDlmalloc.shrink_in_place(ptr, layout, new_layout)
}
}
}
| 36.155357 | 96 | 0.509755 |
7a16ff536a13b7f20072df1e4c5d787ac8765094
| 8,739 |
use super::HttpStore;
use bytes::{Bytes, BytesMut};
use interledger_errors::ApiError;
use interledger_packet::Prepare;
use interledger_service::Username;
use interledger_service::{IncomingRequest, IncomingService};
use secrecy::{ExposeSecret, SecretString};
use std::convert::TryFrom;
use std::net::SocketAddr;
use tracing::error;
use warp::{Filter, Rejection};
/// Max packet size, in bytes, accepted in an ILP-over-HTTP request body.
pub const MAX_PACKET_SIZE: u64 = 40000;
/// The offset after which the bearer token starts in an ILP over HTTP request,
/// e.g. in `token = "Bearer MyAuthToken"`, `MyAuthToken` can be taken via `token[BEARER_TOKEN_START..]`
pub const BEARER_TOKEN_START: usize = 7;
/// A warp filter that parses incoming ILP-Over-HTTP requests, validates the authorization,
/// and passes the request to an IncomingService handler.
#[derive(Clone)]
pub struct HttpServer<I, S> {
/// The next [incoming service](../interledger_service/trait.IncomingService.html)
incoming: I,
/// A store which implements [`HttpStore`](trait.HttpStore.html)
store: S,
}
#[inline]
/// Returns the account which matches the provided username/password combination
/// from the store, or returns an error if the account was not found or if the
/// credentials were incorrect
async fn get_account<S>(
store: S,
path_username: &Username,
password: &SecretString,
) -> Result<S::Account, ApiError>
where
S: HttpStore,
{
if password.expose_secret().len() < BEARER_TOKEN_START {
return Err(ApiError::unauthorized().detail("provided token was not a bearer token"));
}
Ok(store
.get_account_from_http_auth(
path_username,
&password.expose_secret()[BEARER_TOKEN_START..],
)
.await?)
}
#[inline]
/// Implements ILP over HTTP. If account authentication is valid
/// and the provided packet can be parsed as a
/// [Prepare](../../interledger_packet/struct.Prepare.html) packet,
/// then it is forwarded to the next incoming service which will return
/// an Ok result if the response is a [Fulfill](../../interledger_packet/struct.Fulfill.html).
///
/// # Errors
/// 1. Unauthorized account if invalid credentials are provided
/// 1. The provided `body` could not be parsed as a Prepare packet
/// 1. A Reject packet was returned by the next incoming service
async fn ilp_over_http<S, I>(
path_username: Username,
password: SecretString,
body: Bytes,
store: S,
mut incoming: I,
) -> Result<impl warp::Reply, warp::Rejection>
where
S: HttpStore,
I: IncomingService<S::Account> + Clone,
{
let account = get_account(store, &path_username, &password).await?;
let buffer = bytes::BytesMut::from(body.as_ref());
if let Ok(prepare) = Prepare::try_from(buffer) {
let result = incoming
.handle_request(IncomingRequest {
from: account,
prepare,
})
.await;
let bytes: BytesMut = match result {
Ok(fulfill) => fulfill.into(),
Err(reject) => reject.into(),
};
Ok(warp::http::Response::builder()
.header("Content-Type", "application/octet-stream")
.status(200)
.body(bytes.freeze()) // TODO: bring this back
.unwrap())
} else {
error!("Body was not a valid Prepare packet");
Err(Rejection::from(ApiError::invalid_ilp_packet()))
}
}
impl<I, S> HttpServer<I, S>
where
I: IncomingService<S::Account> + Clone + Send + Sync,
S: HttpStore + Clone,
{
pub fn new(incoming: I, store: S) -> Self {
HttpServer { incoming, store }
}
/// Returns a Warp filter which exposes per-account endpoints for [ILP over HTTP](https://interledger.org/rfcs/0035-ilp-over-http/).
/// The endpoint is /accounts/:username/ilp.
pub fn as_filter(
&self,
) -> impl warp::Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
let store = self.store.clone();
let incoming = self.incoming.clone();
let with_store = warp::any().map(move || store.clone());
let with_incoming = warp::any().map(move || incoming.clone());
warp::post()
.and(warp::path("accounts"))
.and(warp::path::param::<Username>())
.and(warp::path("ilp"))
.and(warp::path::end())
.and(warp::header::<SecretString>("authorization"))
.and(warp::body::content_length_limit(MAX_PACKET_SIZE))
.and(warp::body::bytes())
.and(with_store)
.and(with_incoming)
.and_then(ilp_over_http)
}
// Do we really need to bind self to static?
pub async fn bind(&'static self, addr: SocketAddr) {
let filter = self.as_filter();
warp::serve(filter).run(addr).await
}
}
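// A minimal usage sketch (hypothetical `incoming` service and `store`),
// avoiding the `'static` bound that `bind` requires:
//   let server = HttpServer::new(incoming, store);
//   warp::serve(server.as_filter()).run(([127, 0, 0, 1], 3000)).await;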
#[cfg(test)]
mod tests {
use super::*;
use crate::HttpAccount;
use async_trait::async_trait;
use bytes::BytesMut;
use http::Response;
use interledger_errors::{default_rejection_handler, HttpStoreError};
use interledger_packet::{Address, ErrorCode, PrepareBuilder, RejectBuilder};
use interledger_service::{incoming_service_fn, Account};
use once_cell::sync::Lazy;
use secrecy::SecretString;
use std::convert::TryInto;
use std::str::FromStr;
use std::time::SystemTime;
use url::Url;
use uuid::Uuid;
static USERNAME: Lazy<Username> = Lazy::new(|| Username::from_str("alice").unwrap());
static ILP_ADDRESS: Lazy<Address> = Lazy::new(|| Address::from_str("example.alice").unwrap());
pub static PREPARE_BYTES: Lazy<BytesMut> = Lazy::new(|| {
PrepareBuilder {
amount: 0,
destination: ILP_ADDRESS.clone(),
expires_at: SystemTime::now(),
execution_condition: &[0; 32],
data: &[],
}
.build()
.try_into()
.unwrap()
});
const AUTH_PASSWORD: &str = "password";
async fn api_call<F>(
api: &F,
endpoint: &str, // /ilp or /accounts/:username/ilp
auth: &str, // simple bearer or overloaded username+password
) -> Response<Bytes>
where
F: warp::Filter + 'static,
F::Extract: warp::Reply,
{
warp::test::request()
.method("POST")
.path(endpoint)
.header("Authorization", format!("Bearer {}", auth))
.header("Content-length", 1000)
.body(PREPARE_BYTES.clone())
.reply(api)
.await
}
#[tokio::test]
async fn new_api_test() {
let store = TestStore;
let incoming = incoming_service_fn(|_request| {
Err(RejectBuilder {
code: ErrorCode::F02_UNREACHABLE,
message: b"No other incoming handler!",
data: &[],
triggered_by: None,
}
.build())
});
let api = HttpServer::new(incoming, store)
.as_filter()
.recover(default_rejection_handler);
// Fails with overloaded token
let resp = api_call(
&api,
"/accounts/alice/ilp",
&format!("{}:{}", USERNAME.to_string(), AUTH_PASSWORD),
)
.await;
assert_eq!(resp.status().as_u16(), 401);
// Works with just the password
let resp = api_call(&api, "/accounts/alice/ilp", AUTH_PASSWORD).await;
assert_eq!(resp.status().as_u16(), 200);
}
#[derive(Debug, Clone)]
struct TestAccount;
impl Account for TestAccount {
fn id(&self) -> Uuid {
Uuid::new_v4()
}
fn username(&self) -> &Username {
&USERNAME
}
fn ilp_address(&self) -> &Address {
&ILP_ADDRESS
}
fn asset_scale(&self) -> u8 {
9
}
fn asset_code(&self) -> &str {
"XYZ"
}
}
impl HttpAccount for TestAccount {
fn get_http_auth_token(&self) -> Option<SecretString> {
unimplemented!()
}
fn get_http_url(&self) -> Option<&Url> {
unimplemented!()
}
}
#[derive(Debug, Clone)]
struct TestStore;
#[async_trait]
impl HttpStore for TestStore {
type Account = TestAccount;
async fn get_account_from_http_auth(
&self,
username: &Username,
token: &str,
) -> Result<Self::Account, HttpStoreError> {
if username == &*USERNAME && token == AUTH_PASSWORD {
Ok(TestAccount)
} else {
Err(HttpStoreError::Unauthorized(username.to_string()))
}
}
}
}
| 31.778182 | 136 | 0.593661 |